/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.locks.Lock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.DeadServer;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.util.ConfigUtil;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;
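/**
 * Handles shutdown of a region server: splits (or marks for replay) the dead
 * server's WALs and reassigns the regions it was carrying. The server being
 * handled is expected to already be in the {@link DeadServer} list.
 */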
@InterfaceAudience.Private
public class ServerShutdownHandler extends EventHandler {
  private static final Log LOG = LogFactory.getLog(ServerShutdownHandler.class);
  protected final ServerName serverName;
  protected final MasterServices services;
  protected final DeadServer deadServers;
  protected final boolean shouldSplitWal;
  protected final int regionAssignmentWaitTimeout;

  public ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName,
      final boolean shouldSplitWal) {
    this(server, services, deadServers, serverName, EventType.M_SERVER_SHUTDOWN,
      shouldSplitWal);
  }

  ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName, EventType type,
      final boolean shouldSplitWal) {
    super(server, type);
    this.serverName = serverName;
    this.server = server;
    this.services = services;
    this.deadServers = deadServers;
    if (!this.deadServers.isDeadServer(this.serverName)) {
      LOG.warn(this.serverName + " is NOT in deadservers; it should be!");
    }
    this.shouldSplitWal = shouldSplitWal;
    this.regionAssignmentWaitTimeout = server.getConfiguration().getInt(
      HConstants.LOG_REPLAY_WAIT_REGION_TIMEOUT, 15000);
  }

  @Override
  public String getInformativeName() {
    if (serverName != null) {
      return this.getClass().getSimpleName() + " for " + serverName;
    } else {
      return super.getInformativeName();
    }
  }

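  /**
   * @return True if the server being processed was carrying <code>hbase:meta</code>.
   *         This base implementation always returns false.
   */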
  boolean isCarryingMeta() {
    return false;
  }

  @Override
  public String toString() {
    return getClass().getSimpleName() + "-" + serverName + "-" + getSeqid();
  }

  @Override
  public void process() throws IOException {
    boolean hasLogReplayWork = false;
    final ServerName serverName = this.serverName;
    try {
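      // If this server was carrying hbase:meta, or the assignment manager has not yet
      // finished its failover cleanup, we are not ready to process it here; requeue the
      // dead server with the ServerManager and bail out.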
      AssignmentManager am = services.getAssignmentManager();
      ServerManager serverManager = services.getServerManager();
      if (isCarryingMeta() // hbase:meta
          || !am.isFailoverCleanupDone()) {
        serverManager.processDeadServer(serverName, this.shouldSplitWal);
        return;
      }

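      // Wait until hbase:meta is available, then look up the set of regions the dead
      // server was carrying. Retry the meta read on IOException until the master stops.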
      Set<HRegionInfo> hris = null;
      while (!this.server.isStopped()) {
        try {
          server.getMetaTableLocator().waitMetaRegionLocation(server.getZooKeeper());
          if (BaseLoadBalancer.tablesOnMaster(server.getConfiguration())) {
            while (!this.server.isStopped() && serverManager.countOfRegionServers() < 2) {
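              // The master itself can carry tables in this configuration; spin until at
              // least one more region server has checked in before reassigning regions.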
              Thread.sleep(100);
            }
          }

          if (!this.server.isStopped()) {
            if (ConfigUtil.useZKForAssignment(server.getConfiguration())) {
              hris = MetaTableAccessor.getServerUserRegions(this.server.getConnection(),
                this.serverName).keySet();
            } else {
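              // ZK-less assignment: the master's in-memory region states already hold
              // the regions that were on the dead server.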
              hris = am.getRegionStates().getServerRegions(serverName);
            }
          }
          break;
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        } catch (IOException ioe) {
          LOG.info("Received exception accessing hbase:meta during server shutdown of " +
            serverName + ", retrying hbase:meta read", ioe);
        }
      }
      if (this.server.isStopped()) {
        throw new IOException("Server is stopped");
      }

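      // Decide between distributed log splitting and distributed log replay based on
      // the recovery mode currently configured on the master file system.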
      this.services.getMasterFileSystem().setLogRecoveryMode();
      boolean distributedLogReplay =
        (this.services.getMasterFileSystem().getLogRecoveryMode() == RecoveryMode.LOG_REPLAY);

      try {
        if (this.shouldSplitWal) {
          if (distributedLogReplay) {
            LOG.info("Mark regions in recovery for crashed server " + serverName +
              " before assignment; regions=" + hris);
            MasterFileSystem mfs = this.services.getMasterFileSystem();
            mfs.prepareLogReplay(serverName, hris);
          } else {
            LOG.info("Splitting logs for " + serverName +
              " before assignment; region count=" + (hris == null ? 0 : hris.size()));
            this.services.getMasterFileSystem().splitLog(serverName);
          }
          am.getRegionStates().logSplit(serverName);
        } else {
          LOG.info("Skipping log splitting for " + serverName);
        }
      } catch (IOException ioe) {
        resubmit(serverName, ioe);
      }

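      // Clean up anything the assignment manager had in transition for this server, then
      // build the list of regions to reassign: regions that were in transition plus the
      // regions the dead server was carrying.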
      List<HRegionInfo> regionsInTransition = am.processServerShutdown(serverName);
      LOG.info("Reassigning " + ((hris == null) ? 0 : hris.size()) +
        " region(s) that " + (serverName == null ? "null" : serverName) +
        " was carrying (and " + regionsInTransition.size() +
        " region(s) that were opening on this server)");

      List<HRegionInfo> toAssignRegions = new ArrayList<HRegionInfo>();
      toAssignRegions.addAll(regionsInTransition);

      if (hris != null && !hris.isEmpty()) {
        RegionStates regionStates = am.getRegionStates();
        for (HRegionInfo hri : hris) {
          if (regionsInTransition.contains(hri)) {
            continue;
          }
          String encodedName = hri.getEncodedName();
          Lock lock = am.acquireRegionLock(encodedName);
          try {
            RegionState rit = regionStates.getRegionTransitionState(hri);
            if (processDeadRegion(hri, am)) {
              ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
              if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
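                // The assignment manager already has this region on a different server,
                // so it does not need to be reassigned here.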
                LOG.info("Skip assigning region " + hri.getRegionNameAsString()
                  + " because it has been opened in " + addressFromAM.getServerName());
                continue;
              }
              if (rit != null) {
                if (rit.getServerName() != null && !rit.isOnServer(serverName)) {
                  // The region is in transition on some other server; skip it here.
                  LOG.info("Skip assigning region in transition on other server " + rit);
                  continue;
                }
                try {
                  // The region was in transition on the dead server: delete any leftover
                  // ZK assignment node and mark the region OFFLINE so it gets reassigned.
                  LOG.info("Reassigning region with rs = " + rit + " and deleting zk node if exists");
                  ZKAssign.deleteNodeFailSilent(services.getZooKeeper(), hri);
                  regionStates.updateRegionState(hri, State.OFFLINE);
                } catch (KeeperException ke) {
                  this.server.abort("Unexpected ZK exception deleting unassigned node " + hri, ke);
                  return;
                }
              } else if (regionStates.isRegionInState(
                  hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
                regionStates.updateRegionState(hri, State.OFFLINE);
              }
              toAssignRegions.add(hri);
            } else if (rit != null) {
              if ((rit.isPendingCloseOrClosing() || rit.isOffline())
                  && am.getTableStateManager().isTableState(hri.getTable(),
                    ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
                // The table is disabled or being disabled: take the region offline and let
                // the disable flow finish instead of reassigning the region.
                regionStates.updateRegionState(hri, State.OFFLINE);
                am.deleteClosingOrClosedNode(hri, rit.getServerName());
                am.offlineDisabledRegion(hri);
              } else {
                LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition "
                  + rit + " not to be assigned by SSH of server " + serverName);
              }
            }
          } finally {
            lock.unlock();
          }
        }
      }

      try {
        am.assign(toAssignRegions);
      } catch (InterruptedException ie) {
        LOG.error("Caught " + ie + " during round-robin assignment");
        throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
      } catch (IOException ioe) {
        LOG.info("Caught " + ioe + " during region assignment, will retry");
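        // Requeue the dead server for another pass; only ask for WAL splitting again
        // when distributed log replay is in use, since otherwise logs were already split.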
        serverManager.processDeadServer(serverName,
          this.shouldSplitWal && distributedLogReplay);
        return;
      }

      if (this.shouldSplitWal && distributedLogReplay) {
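        // Wait for the reassigned regions to clear the regions-in-transition list
        // before kicking off log replay for the dead server.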
        for (HRegionInfo hri : toAssignRegions) {
          try {
            if (!am.waitOnRegionToClearRegionsInTransition(hri, regionAssignmentWaitTimeout)) {
              LOG.warn("Region " + hri.getEncodedName()
                + " didn't complete assignment in time");
            }
          } catch (InterruptedException ie) {
            throw new InterruptedIOException("Caught " + ie
              + " during waitOnRegionToClearRegionsInTransition");
          }
        }

        this.services.getExecutorService().submit(
          new LogReplayHandler(this.server, this.services, this.deadServers, this.serverName));
        hasLogReplayWork = true;
      }
    } finally {
      this.deadServers.finish(serverName);
    }

    if (!hasLogReplayWork) {
      LOG.info("Finished processing of shutdown of " + serverName);
    }
  }

  private void resubmit(final ServerName serverName, IOException ex) throws IOException {
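    // Resubmit this handler and re-add the server to the dead servers list so that the
    // failed log splitting is retried.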
    this.services.getExecutorService().submit((ServerShutdownHandler) this);
    this.deadServers.add(serverName);
    throw new IOException("failed log splitting for " + serverName + ", will retry", ex);
  }

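  /**
   * Checks whether a region from a dead server should be reassigned.
   * @param hri region that was on the dead server
   * @param assignmentManager the master's assignment manager
   * @return true if the region should be assigned, false if its table was deleted,
   *         is disabled or disabling, or the region is an already-split parent
   * @throws IOException
   */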
  public static boolean processDeadRegion(HRegionInfo hri,
      AssignmentManager assignmentManager)
  throws IOException {
    boolean tablePresent = assignmentManager.getTableStateManager().isTablePresent(hri.getTable());
    if (!tablePresent) {
      LOG.info("The table " + hri.getTable()
        + " was deleted. Hence not proceeding.");
      return false;
    }

    boolean disabled = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
      ZooKeeperProtos.Table.State.DISABLED);
    if (disabled) {
      LOG.info("The table " + hri.getTable()
        + " was disabled. Hence not proceeding.");
      return false;
    }
    if (hri.isOffline() && hri.isSplit()) {
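      // The region is an offline parent of a completed split; its daughters will be
      // (re)assigned instead, so there is nothing to do for the parent itself.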
      return false;
    }
    boolean disabling = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
      ZooKeeperProtos.Table.State.DISABLING);
    if (disabling) {
      LOG.info("The table " + hri.getTable()
        + " is being disabled. Hence not assigning region " + hri.getEncodedName());
      return false;
    }
    return true;
  }
}