/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.DeadServer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;

/**
 * Process server shutdown.
 * The server to handle must already be in the deadservers list.  See
 * {@link ServerManager#expireServer(ServerName)}
 */
@InterfaceAudience.Private
public class ServerShutdownHandler extends EventHandler {
  private static final Log LOG = LogFactory.getLog(ServerShutdownHandler.class);
  protected final ServerName serverName;
  protected final MasterServices services;
  protected final DeadServer deadServers;
  protected final boolean shouldSplitHlog; // whether to split HLog or not
  protected final boolean distributedLogReplay;
  protected final int regionAssignmentWaitTimeout;

  public ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName,
      final boolean shouldSplitHlog) {
    this(server, services, deadServers, serverName, EventType.M_SERVER_SHUTDOWN,
        shouldSplitHlog);
  }

  ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName, EventType type,
      final boolean shouldSplitHlog) {
    super(server, type);
    this.serverName = serverName;
    this.server = server;
    this.services = services;
    this.deadServers = deadServers;
    if (!this.deadServers.isDeadServer(this.serverName)) {
      LOG.warn(this.serverName + " is NOT in deadservers; it should be!");
    }
    this.shouldSplitHlog = shouldSplitHlog;
    this.distributedLogReplay = server.getConfiguration().getBoolean(
        HConstants.DISTRIBUTED_LOG_REPLAY_KEY,
        HConstants.DEFAULT_DISTRIBUTED_LOG_REPLAY_CONFIG);
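    // How long (in ms) to wait for a region to come out of transition before log
    // replay proceeds against it; only used when distributed log replay is enabled
    // (see the wait loop in process() below).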
    this.regionAssignmentWaitTimeout = server.getConfiguration().getInt(
        HConstants.LOG_REPLAY_WAIT_REGION_TIMEOUT, 15000);
  }

  @Override
  public String getInformativeName() {
    if (serverName != null) {
      return this.getClass().getSimpleName() + " for " + serverName;
    } else {
      return super.getInformativeName();
    }
  }

  /**
   * @return True if the server we are processing was carrying <code>.META.</code>
   */
  boolean isCarryingMeta() {
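    // The base handler assumes the dead server carried only user regions; handlers
    // for servers that were carrying .META. (MetaServerShutdownHandler) override this.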
    return false;
  }

  @Override
  public String toString() {
    String name = "UnknownServerName";
    if (server != null && server.getServerName() != null) {
      name = server.getServerName().toString();
    }
    return getClass().getSimpleName() + "-" + name + "-" + getSeqid();
  }

  @Override
  public void process() throws IOException {
    boolean hasLogReplayWork = false;
    final ServerName serverName = this.serverName;
    try {

      // We don't want a worker thread in the MetaServerShutdownHandler
      // executor pool to block while waiting for .META. to become available.
      // Otherwise, it could run into the following issue:
      // 1. The current MetaServerShutdownHandler instance for RS1 waits for .META.
      //    to come online.
      // 2. The newly assigned .META. region server RS2 was shut down right after
      //    it opened the .META. region. So the MetaServerShutdownHandler
      //    instance for RS1 will still be blocked.
      // 3. A new instance of MetaServerShutdownHandler for RS2 is queued.
      // 4. The newly assigned .META. region server RS3 was shut down right after
      //    it opened the .META. region. So the MetaServerShutdownHandler
      //    instances for RS1 and RS2 will still be blocked.
      // 5. A new instance of MetaServerShutdownHandler for RS3 is queued.
      // 6. Repeat until we run out of MetaServerShutdownHandler worker threads.
      // The solution here is to resubmit a ServerShutdownHandler request to process
      // the user regions on that server so that the MetaServerShutdownHandler
      // executor pool is always available.
      //
      // If the AssignmentManager hasn't finished rebuilding user regions,
      // we are not ready to assign dead regions either, so we re-queue
      // the dead server for further processing too.
      if (isCarryingMeta() // .META.
          || !services.getAssignmentManager().isFailoverCleanupDone()) {
        this.services.getServerManager().processDeadServer(serverName, this.shouldSplitHlog);
        return;
      }

      // Wait on meta to come online; we need it to progress.
      // TODO: Best way to hold strictly here?  We should build this retry logic
      // into the MetaReader operations themselves.
      // TODO: Is the reading of .META. necessary when the Master has the state of
      // the cluster in its head?  It should be possible to do without reading .META.
      // in all but one case. On split, the RS updates the .META.
      // table and THEN informs the master of the split via zk nodes in the
      // 'unassigned' dir.  Currently the RS puts ephemeral nodes into zk, so if
      // the regionserver dies, these nodes do not stick around and this server
      // shutdown processing does fixup (see the fixupDaughters method below).
      // If we wanted to skip the .META. scan, we'd have to change at least the
      // final SPLIT message to be permanent in zk so in here we'd know a SPLIT
      // completed (zk is updated after edits to .META. have gone in).  See
      // {@link SplitTransaction}.  We'd also have to figure out another way of
      // doing the .META. daughters fixup below.
      NavigableMap<HRegionInfo, Result> hris = null;
      while (!this.server.isStopped()) {
        try {
          this.server.getCatalogTracker().waitForMeta();
          hris = MetaReader.getServerUserRegions(this.server.getCatalogTracker(),
            this.serverName);
          break;
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new IOException("Interrupted", e);
        } catch (IOException ioe) {
          LOG.info("Received exception accessing META during server shutdown of " +
              serverName + ", retrying META read", ioe);
        }
      }
      if (this.server.isStopped()) {
        throw new IOException("Server is stopped");
      }

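      // Two recovery modes follow: with distributed log replay the dead server's
      // regions are first marked as recovering and their WAL edits are replayed after
      // the regions are reassigned; otherwise the HLogs are split into per-region
      // recovered edits up front, before any assignment happens.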
      try {
        if (this.shouldSplitHlog) {
          LOG.info("Splitting logs for " + serverName + " before assignment.");
          if (this.distributedLogReplay) {
            LOG.info("Mark regions in recovery before assignment.");
            Set<ServerName> serverNames = new HashSet<ServerName>();
            serverNames.add(serverName);
            this.services.getMasterFileSystem().prepareLogReplay(serverNames);
          } else {
            this.services.getMasterFileSystem().splitLog(serverName);
          }
        } else {
          LOG.info("Skipping log splitting for " + serverName);
        }
      } catch (IOException ioe) {
        resubmit(serverName, ioe);
      }

      // Clean out anything in regions in transition.  Being conservative and
      // doing it after log splitting.  Could do some states before -- OPENING?
      // OFFLINE? -- and then others after, like CLOSING, that depend on log
      // splitting.
      AssignmentManager am = services.getAssignmentManager();
      List<HRegionInfo> regionsInTransition = am.processServerShutdown(serverName);
      LOG.info("Reassigning " + ((hris == null)? 0: hris.size()) +
        " region(s) that " + (serverName == null? "null": serverName) +
        " was carrying (and " + regionsInTransition.size() +
        " region(s) that were opening on this server)");
      List<HRegionInfo> toAssignRegions = new ArrayList<HRegionInfo>();
      toAssignRegions.addAll(regionsInTransition);

      // Iterate over the regions that were on this server and assign them
      if (hris != null) {
        RegionStates regionStates = am.getRegionStates();
        for (Map.Entry<HRegionInfo, Result> e: hris.entrySet()) {
          HRegionInfo hri = e.getKey();
          if (regionsInTransition.contains(hri)) {
            continue;
          }
          RegionState rit = regionStates.getRegionTransitionState(hri);
          if (processDeadRegion(hri, e.getValue(), am, server.getCatalogTracker())) {
            ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
            if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
              // If this region was in transition on the dead server, it must be
              // opening or pending_open, which should have been covered by
              // AM#processServerShutdown
              LOG.info("Skip assigning region " + hri.getRegionNameAsString()
                + " because it has been opened in " + addressFromAM.getServerName());
              continue;
            }
            if (rit != null) {
              if (!rit.isOnServer(serverName)
                  || rit.isClosed() || rit.isOpened()) {
                // Skip regions that are in transition on another server,
                // or in state closed/opened
                LOG.info("Skip assigning region " + rit);
                continue;
              }
              try {
                // clean up the zk node
                LOG.info("Reassigning region with rs = " + rit + " and deleting zk node if exists");
                ZKAssign.deleteNodeFailSilent(services.getZooKeeper(), hri);
              } catch (KeeperException ke) {
                this.server.abort("Unexpected ZK exception deleting unassigned node " + hri, ke);
                return;
              }
            }
            toAssignRegions.add(hri);
          } else if (rit != null) {
            if ((rit.isClosing() || rit.isPendingClose())
                && am.getZKTable().isDisablingOrDisabledTable(hri.getTableName())) {
              // If the table was partially disabled and the RS went down, we should clear the RIT
              // and remove the node for the region.
              // The rit that we use may be stale in case the table was in DISABLING state;
              // even though we did assign, we will not have cleared the znode while it was
              // in CLOSING state. Doing this does no harm. See HBASE-5927
              am.deleteClosingOrClosedNode(hri);
              am.regionOffline(hri);
            } else {
              LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition "
                + rit + " not to be assigned by SSH of server " + serverName);
            }
          }
        }
      }

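      // Assign everything collected above in one bulk call; the AssignmentManager
      // spreads the regions round-robin across the remaining live region servers.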
      try {
        am.assign(toAssignRegions);
      } catch (InterruptedException ie) {
        LOG.error("Caught " + ie + " during round-robin assignment");
        throw new IOException(ie);
      }

      if (this.shouldSplitHlog && this.distributedLogReplay) {
        // wait for region assignment to complete
        for (HRegionInfo hri : toAssignRegions) {
          try {
            if (!am.waitOnRegionToClearRegionsInTransition(hri, regionAssignmentWaitTimeout)) {
              // The wait here is to avoid log replay hitting the current dead server and
              // incurring an RPC timeout when replay happens before region assignment completes.
              LOG.warn("Region " + hri.getEncodedName()
                  + " didn't complete assignment in time");
            }
          } catch (InterruptedException ie) {
            throw new InterruptedIOException("Caught " + ie
                + " during waitOnRegionToClearRegionsInTransition");
          }
        }
        // submit logReplay work
        this.services.getExecutorService().submit(
          new LogReplayHandler(this.server, this.services, this.deadServers, this.serverName));
        hasLogReplayWork = true;
      }
    } finally {
      this.deadServers.finish(serverName);
    }

    if (!hasLogReplayWork) {
      LOG.info("Finished processing of shutdown of " + serverName);
    }
  }

  private void resubmit(final ServerName serverName, IOException ex) throws IOException {
    // Cast to SSH so that we make sure it is the SSH instance that gets
    // resubmitted, as opposed to MSSH or some other instance derived from SSH.
    this.services.getExecutorService().submit((ServerShutdownHandler) this);
    this.deadServers.add(serverName);
    throw new IOException("failed log splitting for " + serverName + ", will retry", ex);
  }

  /**
   * Process a dead region from a dead RS. Checks if the region is disabled or
   * disabling, or if the region has a partially completed split.
   * @param hri the region that was on the dead server
   * @param result the .META. row for this region
   * @param assignmentManager the master's assignment manager
   * @param catalogTracker tracker for the catalog (.META.) table
   * @return Returns true if the specified region should be assigned, false if not.
   * @throws IOException
   */
  public static boolean processDeadRegion(HRegionInfo hri, Result result,
      AssignmentManager assignmentManager, CatalogTracker catalogTracker)
  throws IOException {
    boolean tablePresent = assignmentManager.getZKTable().isTablePresent(
        hri.getTableName());
    if (!tablePresent) {
      LOG.info("The table " + hri.getTableName()
          + " was deleted.  Hence not proceeding.");
      return false;
    }
    // If the table has been disabled, the region should not be assigned.
    boolean disabled = assignmentManager.getZKTable().isDisabledTable(
        hri.getTableName());
    if (disabled) {
      LOG.info("The table " + hri.getTableName()
          + " was disabled.  Hence not proceeding.");
      return false;
    }
    if (hri.isOffline() && hri.isSplit()) {
      // HBASE-7721: Split parent and daughters are inserted into META as an atomic operation.
      // If the meta scanner saw the parent split, then it should see the daughters as assigned
      // to the dead server. We don't have to do anything.
      return false;
    }
    boolean disabling = assignmentManager.getZKTable().isDisablingTable(
        hri.getTableName());
    if (disabling) {
      LOG.info("The table " + hri.getTableName()
          + " is being disabled.  Hence not assigning region " + hri.getEncodedName());
      return false;
    }
    return true;
  }
}