/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.concurrent.locks.Lock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.DeadServer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;

/**
 * Process server shutdown.
 * Server-to-handle must be already in the deadservers lists.  See
 * {@link ServerManager#expireServer(ServerName)}
 */
@InterfaceAudience.Private
public class ServerShutdownHandler extends EventHandler {
  private static final Log LOG = LogFactory.getLog(ServerShutdownHandler.class);
  protected final ServerName serverName;
  protected final MasterServices services;
  protected final DeadServer deadServers;
  protected final boolean shouldSplitHlog;
  protected final int regionAssignmentWaitTimeout;

  public ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName,
      final boolean shouldSplitHlog) {
    this(server, services, deadServers, serverName, EventType.M_SERVER_SHUTDOWN,
        shouldSplitHlog);
  }

  ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName, EventType type,
      final boolean shouldSplitHlog) {
    super(server, type);
    this.serverName = serverName;
    this.server = server;
    this.services = services;
    this.deadServers = deadServers;
    if (!this.deadServers.isDeadServer(this.serverName)) {
      LOG.warn(this.serverName + " is NOT in deadservers; it should be!");
    }
    this.shouldSplitHlog = shouldSplitHlog;
    this.regionAssignmentWaitTimeout = server.getConfiguration().getInt(
        HConstants.LOG_REPLAY_WAIT_REGION_TIMEOUT, 15000);
  }

  @Override
  public String getInformativeName() {
    if (serverName != null) {
      return this.getClass().getSimpleName() + " for " + serverName;
    } else {
      return super.getInformativeName();
    }
  }

  /**
   * @return True if the server we are processing was carrying <code>hbase:meta</code>
   *   (always false here; subclasses override this).
   */
  boolean isCarryingMeta() {
    return false;
  }

  @Override
  public String toString() {
    String name = "UnknownServerName";
    if (server != null && server.getServerName() != null) {
      name = server.getServerName().toString();
    }
    return getClass().getSimpleName() + "-" + name + "-" + getSeqid();
  }

  @Override
  public void process() throws IOException {
    boolean hasLogReplayWork = false;
    final ServerName serverName = this.serverName;
    try {

      // If this server was carrying hbase:meta, or if the assignment manager
      // has not yet finished its failover cleanup, we cannot safely process
      // the shutdown here. Hand the server back to the ServerManager so the
      // shutdown is re-processed once those preconditions hold.
      AssignmentManager am = services.getAssignmentManager();
      if (isCarryingMeta()
          || !am.isFailoverCleanupDone()) {
        this.services.getServerManager().processDeadServer(serverName, this.shouldSplitHlog);
        return;
      }

      // Wait for hbase:meta to be available, then fetch the list of user
      // regions the dead server was carrying. Transient failures reading
      // hbase:meta are retried; we only give up if the master itself is
      // stopping.
      NavigableMap<HRegionInfo, Result> hris = null;
      while (!this.server.isStopped()) {
        try {
          this.server.getCatalogTracker().waitForMeta();
          // Skip getting user regions if the server is stopped.
          if (!this.server.isStopped()) {
            hris = MetaReader.getServerUserRegions(this.server.getCatalogTracker(),
                this.serverName);
          }
          break;
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        } catch (IOException ioe) {
          LOG.info("Received exception accessing hbase:meta during server shutdown of " +
              serverName + ", retrying hbase:meta read", ioe);
        }
      }
      if (this.server.isStopped()) {
        throw new IOException("Server is stopped");
      }

      // Determine the WAL recovery mode (log splitting vs. distributed log
      // replay) configured for this cluster before handling the dead server's logs.
      this.services.getMasterFileSystem().setLogRecoveryMode();
      boolean distributedLogReplay =
        (this.services.getMasterFileSystem().getLogRecoveryMode() == RecoveryMode.LOG_REPLAY);

      try {
        if (this.shouldSplitHlog) {
          LOG.info("Splitting logs for " + serverName + " before assignment.");
          if (distributedLogReplay) {
            LOG.info("Mark regions in recovery before assignment.");
            Set<ServerName> serverNames = new HashSet<ServerName>();
            serverNames.add(serverName);
            this.services.getMasterFileSystem().prepareLogReplay(serverNames);
          } else {
            this.services.getMasterFileSystem().splitLog(serverName);
          }
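          // Note in the assignment manager's region states that the WALs of
          // this server have been handled (split, or marked for replay).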
          am.getRegionStates().logSplit(serverName);
        } else {
          LOG.info("Skipping log splitting for " + serverName);
        }
      } catch (IOException ioe) {
        resubmit(serverName, ioe);
      }

      // Clean out regions of this server from the regions-in-transition map.
      // Done conservatively after log splitting; some states (e.g. CLOSING)
      // depend on the logs having been split.
      List<HRegionInfo> regionsInTransition = am.processServerShutdown(serverName);
      LOG.info("Reassigning " + ((hris == null)? 0: hris.size()) +
        " region(s) that " + (serverName == null? "null": serverName) +
        " was carrying (and " + regionsInTransition.size() +
        " region(s) that were opening on this server)");

      List<HRegionInfo> toAssignRegions = new ArrayList<HRegionInfo>();
      toAssignRegions.addAll(regionsInTransition);

      // Iterate over the regions the dead server was carrying and decide which
      // of them still need to be assigned.
      if (hris != null) {
        RegionStates regionStates = am.getRegionStates();
        for (Map.Entry<HRegionInfo, Result> e: hris.entrySet()) {
          HRegionInfo hri = e.getKey();
          if (regionsInTransition.contains(hri)) {
            continue;
          }
          String encodedName = hri.getEncodedName();
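          // Take the per-region lock so we do not race with a concurrent
          // assignment or other transition of this region.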
          Lock lock = am.acquireRegionLock(encodedName);
          try {
            RegionState rit = regionStates.getRegionTransitionState(hri);
            if (processDeadRegion(hri, e.getValue(), am, server.getCatalogTracker())) {
              ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
              if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
                // The region has already been opened on another server, most likely
                // because it was already in transition when this server died; skip it.
                LOG.info("Skip assigning region " + hri.getRegionNameAsString()
                  + " because it has been opened in " + addressFromAM.getServerName());
                continue;
              }
              if (rit != null) {
                if (rit.getServerName() != null && !rit.isOnServer(serverName)) {
                  // The region is in transition on another, live server; leave it alone.
                  LOG.info("Skip assigning region in transition on other server " + rit);
                  continue;
                }
                try {
                  // Delete any stale unassigned znode before re-assigning the region.
                  LOG.info("Reassigning region with rs = " + rit + " and deleting zk node if exists");
                  ZKAssign.deleteNodeFailSilent(services.getZooKeeper(), hri);
                  regionStates.updateRegionState(hri, State.OFFLINE);
                } catch (KeeperException ke) {
                  this.server.abort("Unexpected ZK exception deleting unassigned node " + hri, ke);
                  return;
                }
              } else if (regionStates.isRegionInState(
                  hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
                regionStates.regionOffline(hri);
              }
              toAssignRegions.add(hri);
            } else if (rit != null) {
              if (rit.isPendingCloseOrClosing()
                  && am.getZKTable().isDisablingOrDisabledTable(hri.getTable())) {
                // The table is disabled or being disabled and the region was pending
                // close or closing on the dead server: clear the region-in-transition
                // state, delete the closing znode and offline the region instead of
                // re-assigning it.
                regionStates.updateRegionState(hri, State.OFFLINE);
                am.deleteClosingOrClosedNode(hri, rit.getServerName());
                am.offlineDisabledRegion(hri);
              } else {
                LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition "
                  + rit + " not to be assigned by SSH of server " + serverName);
              }
            }
          } finally {
            lock.unlock();
          }
        }
      }
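
      // Bulk (round-robin) assign all of the regions collected above.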
      try {
        am.assign(toAssignRegions);
      } catch (InterruptedException ie) {
        LOG.error("Caught " + ie + " during round-robin assignment");
        throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
      }

      if (this.shouldSplitHlog && distributedLogReplay) {
        // Wait for the region assignments to complete before submitting log
        // replay; if replay starts before the regions are open it can hit the
        // dead server and run into RPC timeouts.
        for (HRegionInfo hri : toAssignRegions) {
          try {
            if (!am.waitOnRegionToClearRegionsInTransition(hri, regionAssignmentWaitTimeout)) {
              // The region did not finish its assignment within the timeout;
              // log a warning and carry on, replay is still submitted below.
              LOG.warn("Region " + hri.getEncodedName()
                + " didn't complete assignment in time");
            }
          } catch (InterruptedException ie) {
            throw new InterruptedIOException("Caught " + ie
              + " during waitOnRegionToClearRegionsInTransition");
          }
        }
        // Hand the log replay work off to a LogReplayHandler, which finishes
        // the shutdown processing for this server.
        this.services.getExecutorService().submit(
          new LogReplayHandler(this.server, this.services, this.deadServers, this.serverName));
        hasLogReplayWork = true;
      }
    } finally {
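      // Whether the work completed here or was handed off to log replay,
      // mark this dead server's processing as finished.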
      this.deadServers.finish(serverName);
    }

    if (!hasLogReplayWork) {
      LOG.info("Finished processing of shutdown of " + serverName);
    }
  }

  private void resubmit(final ServerName serverName, IOException ex) throws IOException {
    // Re-submit this handler to the executor and re-register the server as
    // dead so that log splitting is retried.
    this.services.getExecutorService().submit((ServerShutdownHandler) this);
    this.deadServers.add(serverName);
    throw new IOException("failed log splitting for " + serverName + ", will retry", ex);
  }

  /**
   * Process a dead region from a dead RS. Checks if the region is disabled or
   * disabling or if the region has a partially completed split.
   * @param hri the region that was on the dead server
   * @param result the hbase:meta row for the region
   * @param assignmentManager the master's assignment manager
   * @param catalogTracker tracker used to access hbase:meta
   * @return true if the specified region should be assigned, false if not
   * @throws IOException
   */
  public static boolean processDeadRegion(HRegionInfo hri, Result result,
      AssignmentManager assignmentManager, CatalogTracker catalogTracker)
      throws IOException {
    boolean tablePresent = assignmentManager.getZKTable().isTablePresent(hri.getTable());
    if (!tablePresent) {
      LOG.info("The table " + hri.getTable()
        + " was deleted. Hence not proceeding.");
      return false;
    }

    boolean disabled = assignmentManager.getZKTable().isDisabledTable(hri.getTable());
    if (disabled) {
      LOG.info("The table " + hri.getTable()
        + " was disabled. Hence not proceeding.");
      return false;
    }
    if (hri.isOffline() && hri.isSplit()) {
      // An offline split parent: its daughters were inserted into hbase:meta
      // together with the split, so they already show up as regions of the
      // dead server and are handled by the normal reassignment; nothing to do
      // for the parent itself.
      return false;
    }
    boolean disabling = assignmentManager.getZKTable().isDisablingTable(hri.getTable());
    if (disabling) {
      LOG.info("The table " + hri.getTable()
        + " is being disabled. Hence not assigning region " + hri.getEncodedName());
      return false;
    }
    return true;
  }
}