/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.locks.Lock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.DeadServer;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.util.ConfigUtil;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;

/**
 * Process server shutdown.
 * The server to handle must already be in the dead servers list.  See
 * {@link ServerManager#expireServer(ServerName)}
 */
@InterfaceAudience.Private
public class ServerShutdownHandler extends EventHandler {
  private static final Log LOG = LogFactory.getLog(ServerShutdownHandler.class);
  protected final ServerName serverName;
  protected final MasterServices services;
  protected final DeadServer deadServers;
  protected final boolean shouldSplitWal; // whether to split WAL or not
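  // Time in ms to wait for a region to come out of transition before distributed
  // log replay proceeds (see waitOnRegionToClearRegionsInTransition below).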
  protected final int regionAssignmentWaitTimeout;

  public ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName,
      final boolean shouldSplitWal) {
    this(server, services, deadServers, serverName, EventType.M_SERVER_SHUTDOWN,
        shouldSplitWal);
  }

  ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName, EventType type,
      final boolean shouldSplitWal) {
    super(server, type);
    this.serverName = serverName;
    this.server = server;
    this.services = services;
    this.deadServers = deadServers;
    if (!this.deadServers.isDeadServer(this.serverName)) {
      LOG.warn(this.serverName + " is NOT in deadservers; it should be!");
    }
    this.shouldSplitWal = shouldSplitWal;
    this.regionAssignmentWaitTimeout = server.getConfiguration().getInt(
      HConstants.LOG_REPLAY_WAIT_REGION_TIMEOUT, 15000);
  }

  @Override
  public String getInformativeName() {
    if (serverName != null) {
      return this.getClass().getSimpleName() + " for " + serverName;
    } else {
      return super.getInformativeName();
    }
  }

  /**
   * @return True if the server we are processing was carrying <code>hbase:meta</code>
   */
  boolean isCarryingMeta() {
    return false;
  }

  @Override
  public String toString() {
    return getClass().getSimpleName() + "-" + serverName + "-" + getSeqid();
  }

  @Override
  public void process() throws IOException {
    boolean hasLogReplayWork = false;
    final ServerName serverName = this.serverName;
    try {

      // We don't want a worker thread in the MetaServerShutdownHandler
      // executor pool to block while waiting for hbase:meta to become available.
      // Otherwise, it could run into the following issue:
      // 1. The current MetaServerShutdownHandler instance for RS1 waits for hbase:meta
      //    to come online.
      // 2. The newly assigned hbase:meta region server RS2 is shut down right after
      //    it opens the hbase:meta region. So the MetaServerShutdownHandler
      //    instance for RS1 is still blocked.
      // 3. A new instance of MetaServerShutdownHandler for RS2 is queued.
      // 4. The newly assigned hbase:meta region server RS3 is shut down right after
      //    it opens the hbase:meta region. So the MetaServerShutdownHandler
      //    instances for RS1 and RS2 are still blocked.
      // 5. A new instance of MetaServerShutdownHandler for RS3 is queued.
      // 6. Repeat until we run out of MetaServerShutdownHandler worker threads.
      // The solution here is to resubmit a ServerShutdownHandler request to process
      // the user regions on that server so that the MetaServerShutdownHandler
      // executor pool is always available.
      //
      // If the AssignmentManager hasn't finished rebuilding user regions,
      // we are not ready to assign the dead server's regions either, so we re-queue
      // the dead server for further processing too.
      AssignmentManager am = services.getAssignmentManager();
      ServerManager serverManager = services.getServerManager();
      if (isCarryingMeta() /* hbase:meta */ || !am.isFailoverCleanupDone()) {
        serverManager.processDeadServer(serverName, this.shouldSplitWal);
        return;
      }

      // Wait on meta to come online; we need it to progress.
      // TODO: Best way to hold strictly here?  We should build this retry logic
      // into the MetaTableAccessor operations themselves.
      // TODO: Is reading hbase:meta necessary when the Master has the state of the
      // cluster in its head?  It should be possible to do without reading hbase:meta
      // in all but one case. On split, the RS updates the hbase:meta
      // table and THEN informs the master of the split via zk nodes in the
      // 'unassigned' dir.  Currently the RS puts ephemeral nodes into zk, so if
      // the regionserver dies, these nodes do not stick around and this server
      // shutdown processing does the fixup (see the fixupDaughters method below).
      // If we wanted to skip the hbase:meta scan, we'd have to change at least the
      // final SPLIT message to be permanent in zk so in here we'd know a SPLIT
      // completed (zk is updated after edits to hbase:meta have gone in).  See
      // {@link SplitTransaction}.  We'd also have to figure out another way of
      // doing the hbase:meta daughters fixup below.
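      // Regions the dead server was carrying; collected below once hbase:meta is available.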
      Set<HRegionInfo> hris = null;
      while (!this.server.isStopped()) {
        try {
          server.getMetaTableLocator().waitMetaRegionLocation(server.getZooKeeper());
          if (BaseLoadBalancer.tablesOnMaster(server.getConfiguration())) {
            while (!this.server.isStopped() && serverManager.countOfRegionServers() < 2) {
              // Wait till at least one other regionserver is up besides the active master
              // so that we don't assign all regions to the active master.
              // This is best effort, because a newly joined regionserver
              // could crash right after that.
              Thread.sleep(100);
            }
          }
          // Skip getting user regions if the server is stopped.
          if (!this.server.isStopped()) {
            if (ConfigUtil.useZKForAssignment(server.getConfiguration())) {
              hris = MetaTableAccessor.getServerUserRegions(this.server.getConnection(),
                this.serverName).keySet();
            } else {
              // Not using ZK for assignment, regionStates has everything we want
              hris = am.getRegionStates().getServerRegions(serverName);
            }
          }
          break;
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        } catch (IOException ioe) {
          LOG.info("Received exception accessing hbase:meta during server shutdown of " +
            serverName + ", retrying hbase:meta read", ioe);
        }
      }
      if (this.server.isStopped()) {
        throw new IOException("Server is stopped");
      }

      // Setting the recovery mode from configuration is delayed until all outstanding
      // split log tasks have drained.
      this.services.getMasterFileSystem().setLogRecoveryMode();
      boolean distributedLogReplay =
        (this.services.getMasterFileSystem().getLogRecoveryMode() == RecoveryMode.LOG_REPLAY);
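      // Under LOG_REPLAY (distributed log replay) the dead server's regions are marked in
      // recovery and assigned first, and WAL edits are then replayed into the online regions;
      // otherwise WALs are split into recovered.edits files before assignment.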

      try {
        if (this.shouldSplitWal) {
          if (distributedLogReplay) {
            LOG.info("Mark regions in recovery for crashed server " + serverName +
              " before assignment; regions=" + hris);
            MasterFileSystem mfs = this.services.getMasterFileSystem();
            mfs.prepareLogReplay(serverName, hris);
          } else {
            LOG.info("Splitting logs for " + serverName +
              " before assignment; region count=" + (hris == null ? 0 : hris.size()));
            this.services.getMasterFileSystem().splitLog(serverName);
          }
          am.getRegionStates().logSplit(serverName);
        } else {
          LOG.info("Skipping log splitting for " + serverName);
        }
      } catch (IOException ioe) {
        resubmit(serverName, ioe);
      }

      // Clean out anything in regions in transition.  Being conservative and
      // doing this after log splitting.  Could do some states before -- OPENING?
      // OFFLINE? -- and then others after, like CLOSING, that depend on log
      // splitting.
      List<HRegionInfo> regionsInTransition = am.processServerShutdown(serverName);
      LOG.info("Reassigning " + ((hris == null)? 0: hris.size()) +
        " region(s) that " + (serverName == null? "null": serverName) +
        " was carrying (and " + regionsInTransition.size() +
        " region(s) that were opening on this server)");

      List<HRegionInfo> toAssignRegions = new ArrayList<HRegionInfo>();
      toAssignRegions.addAll(regionsInTransition);
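      // Regions the dead server was carrying are vetted in the loop below and, if they
      // still need a new home, appended to this list for bulk assignment afterwards.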

      // Iterate regions that were on this server and assign them
      if (hris != null && !hris.isEmpty()) {
        RegionStates regionStates = am.getRegionStates();
        for (HRegionInfo hri: hris) {
          if (regionsInTransition.contains(hri)) {
            continue;
          }
          String encodedName = hri.getEncodedName();
          Lock lock = am.acquireRegionLock(encodedName);
          try {
            RegionState rit = regionStates.getRegionTransitionState(hri);
            if (processDeadRegion(hri, am)) {
              ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
              if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
                // If this region is in transition on the dead server, it must be
                // opening or pending_open, which should have been covered by AM#processServerShutdown
                LOG.info("Skip assigning region " + hri.getRegionNameAsString()
                  + " because it has been opened in " + addressFromAM.getServerName());
                continue;
              }
              if (rit != null) {
                if (rit.getServerName() != null && !rit.isOnServer(serverName)) {
                  // Skip regions that are in transition on another server
                  LOG.info("Skip assigning region in transition on another server " + rit);
                  continue;
                }
                try {
                  // Clean the zk node
                  LOG.info("Reassigning region with rs = " + rit + " and deleting zk node if it exists");
                  ZKAssign.deleteNodeFailSilent(services.getZooKeeper(), hri);
                  regionStates.updateRegionState(hri, State.OFFLINE);
                } catch (KeeperException ke) {
                  this.server.abort("Unexpected ZK exception deleting unassigned node " + hri, ke);
                  return;
                }
              } else if (regionStates.isRegionInState(
                  hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
                regionStates.updateRegionState(hri, State.OFFLINE);
              }
              toAssignRegions.add(hri);
            } else if (rit != null) {
              if ((rit.isPendingCloseOrClosing() || rit.isOffline())
                  && am.getTableStateManager().isTableState(hri.getTable(),
                  ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
                // If the table was partially disabled and the RS went down, we should clear the RIT
                // and remove the node for the region.
                // The rit we use may be stale if the table was in DISABLING state,
                // but even though we did assign, we will not be clearing the znode in CLOSING state.
                // Doing this does no harm. See HBASE-5927
                regionStates.updateRegionState(hri, State.OFFLINE);
                am.deleteClosingOrClosedNode(hri, rit.getServerName());
                am.offlineDisabledRegion(hri);
              } else {
                LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition "
                  + rit + " not to be assigned by SSH of server " + serverName);
              }
            }
          } finally {
            lock.unlock();
          }
        }
      }

      try {
        am.assign(toAssignRegions);
      } catch (InterruptedException ie) {
        LOG.error("Caught " + ie + " during round-robin assignment");
        throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
      } catch (IOException ioe) {
        LOG.info("Caught " + ioe + " during region assignment, will retry");
        // Only do wal splitting if shouldSplitWal and in DLR mode
        serverManager.processDeadServer(serverName,
          this.shouldSplitWal && distributedLogReplay);
        return;
      }

      if (this.shouldSplitWal && distributedLogReplay) {
        // Wait for region assignment to complete
        for (HRegionInfo hri : toAssignRegions) {
          try {
            if (!am.waitOnRegionToClearRegionsInTransition(hri, regionAssignmentWaitTimeout)) {
              // We wait here to avoid log replay hitting the current dead server and incurring
              // an RPC timeout when replay happens before region assignment completes.
              LOG.warn("Region " + hri.getEncodedName()
                  + " didn't complete assignment in time");
            }
          } catch (InterruptedException ie) {
            throw new InterruptedIOException("Caught " + ie
                + " during waitOnRegionToClearRegionsInTransition");
          }
        }
        // Submit logReplay work
        this.services.getExecutorService().submit(
          new LogReplayHandler(this.server, this.services, this.deadServers, this.serverName));
        hasLogReplayWork = true;
      }
    } finally {
      this.deadServers.finish(serverName);
    }

    if (!hasLogReplayWork) {
      LOG.info("Finished processing of shutdown of " + serverName);
    }
  }

  private void resubmit(final ServerName serverName, IOException ex) throws IOException {
    // Typecast to SSH so that we make sure it is the SSH instance that
    // gets submitted, as opposed to MSSH or some other derived instance of SSH
    this.services.getExecutorService().submit((ServerShutdownHandler) this);
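    // Re-add the server so it stays marked as dead and in processing; the finally block
    // in process() will call finish() for this failed attempt.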
    this.deadServers.add(serverName);
    throw new IOException("failed log splitting for " + serverName + ", will retry", ex);
  }

  /**
   * Process a dead region from a dead RS. Checks if the region is disabled or
   * disabling, or if the region has a partially completed split.
   * @param hri the region that was on the dead regionserver
   * @param assignmentManager the master's assignment manager
   * @return Returns true if the specified region should be assigned, false if not.
   * @throws IOException
   */
  public static boolean processDeadRegion(HRegionInfo hri,
      AssignmentManager assignmentManager)
  throws IOException {
    boolean tablePresent = assignmentManager.getTableStateManager().isTablePresent(hri.getTable());
    if (!tablePresent) {
      LOG.info("The table " + hri.getTable()
          + " was deleted.  Hence not proceeding.");
      return false;
    }
    // If the table is disabled, there is nothing to assign.
    boolean disabled = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
      ZooKeeperProtos.Table.State.DISABLED);
    if (disabled) {
      LOG.info("The table " + hri.getTable()
          + " was disabled.  Hence not proceeding.");
      return false;
    }
    if (hri.isOffline() && hri.isSplit()) {
      // HBASE-7721: Split parent and daughters are inserted into hbase:meta as an atomic operation.
      // If the meta scanner saw the parent split, then it should see the daughters as assigned
      // to the dead server. We don't have to do anything.
      return false;
    }
    boolean disabling = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
      ZooKeeperProtos.Table.State.DISABLING);
    if (disabling) {
      LOG.info("The table " + hri.getTable()
          + " is being disabled.  Hence not assigning region " + hri.getEncodedName());
      return false;
    }
    return true;
  }
}