/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.DeadServer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;

/**
 * Process server shutdown.
 * The server to handle must already be in the dead servers list.  See
 * {@link ServerManager#expireServer(ServerName)}.
 */
@InterfaceAudience.Private
public class ServerShutdownHandler extends EventHandler {
  private static final Log LOG = LogFactory.getLog(ServerShutdownHandler.class);
  protected final ServerName serverName;
  protected final MasterServices services;
  protected final DeadServer deadServers;
  protected final boolean shouldSplitHlog; // whether to split HLog or not

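  /**
   * Constructs a handler that processes the shutdown of {@code serverName} as a
   * plain {@link EventType#M_SERVER_SHUTDOWN} event.
   */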
  public ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName,
      final boolean shouldSplitHlog) {
    this(server, services, deadServers, serverName, EventType.M_SERVER_SHUTDOWN,
        shouldSplitHlog);
  }

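  /**
   * Constructor used by subclasses (e.g. the handler for a server that was
   * carrying .META.) to queue the shutdown under a specific {@link EventType}.
   * Logs a warning if the server is not already in the dead servers list.
   */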
  ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName, EventType type,
      final boolean shouldSplitHlog) {
    super(server, type);
    this.serverName = serverName;
    this.server = server;
    this.services = services;
    this.deadServers = deadServers;
    if (!this.deadServers.isDeadServer(this.serverName)) {
      LOG.warn(this.serverName + " is NOT in deadservers; it should be!");
    }
    this.shouldSplitHlog = shouldSplitHlog;
  }

  @Override
  public String getInformativeName() {
    if (serverName != null) {
      return this.getClass().getSimpleName() + " for " + serverName;
    } else {
      return super.getInformativeName();
    }
  }

  /**
   * @return True if the server we are processing was carrying <code>.META.</code>
   */
  boolean isCarryingMeta() {
    return false;
  }

  @Override
  public String toString() {
    String name = "UnknownServerName";
    if (server != null && server.getServerName() != null) {
      name = server.getServerName().toString();
    }
    return getClass().getSimpleName() + "-" + name + "-" + getSeqid();
  }

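  /**
   * Splits the dead server's logs (if requested), waits for .META. to be
   * available, and then reassigns the regions the server was carrying. If the
   * server was carrying .META. or the AssignmentManager has not finished its
   * failover cleanup, the dead server is handed back to the ServerManager for
   * later processing so this handler does not block its executor pool.
   */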
  @Override
  public void process() throws IOException {
    final ServerName serverName = this.serverName;
    try {
      try {
        if (this.shouldSplitHlog) {
          LOG.info("Splitting logs for " + serverName);
          this.services.getMasterFileSystem().splitLog(serverName);
        } else {
          LOG.info("Skipping log splitting for " + serverName);
        }
      } catch (IOException ioe) {
        // Typecast to SSH so that we make sure that it is the SSH instance that
        // gets submitted, as opposed to MSSH or some other derived instance of SSH
        this.services.getExecutorService().submit((ServerShutdownHandler) this);
        this.deadServers.add(serverName);
        throw new IOException("failed log splitting for " +
          serverName + ", will retry", ioe);
      }
      // We don't want a worker thread in the MetaServerShutdownHandler
      // executor pool to block by waiting on the availability of .META.
      // Otherwise, it could run into the following issue:
      // 1. The current MetaServerShutdownHandler instance for RS1 waits for .META.
      //    to come online.
      // 2. The newly assigned .META. region server RS2 is shut down right after
      //    it opens the .META. region. So the MetaServerShutdownHandler
      //    instance for RS1 will still be blocked.
      // 3. The new instance of MetaServerShutdownHandler for RS2 is queued.
      // 4. The newly assigned .META. region server RS3 is shut down right after
      //    it opens the .META. region. So the MetaServerShutdownHandler
      //    instances for RS1 and RS2 will still be blocked.
      // 5. The new instance of MetaServerShutdownHandler for RS3 is queued.
      // 6. Repeat until we run out of MetaServerShutdownHandler worker threads.
      // The solution here is to resubmit a ServerShutdownHandler request to process
      // user regions on that server so that the MetaServerShutdownHandler
      // executor pool is always available.
      //
      // If the AssignmentManager hasn't finished rebuilding user regions,
      // we are not ready to assign dead regions either. So we re-queue
      // the dead server for further processing too.
      if (isCarryingMeta() // .META.
          || !services.getAssignmentManager().isFailoverCleanupDone()) {
        this.services.getServerManager().processDeadServer(serverName);
        return;
      }

      // Wait on meta to come online; we need it to progress.
      // TODO: Best way to hold strictly here?  We should build this retry logic
      // into the MetaReader operations themselves.
      // TODO: Is the reading of .META. necessary when the Master has the state of
      // the cluster in its head?  It should be possible to do without reading .META.
      // in all but one case. On split, the RS updates the .META.
      // table and THEN informs the master of the split via zk nodes in the
      // 'unassigned' dir.  Currently the RS puts ephemeral nodes into zk, so if
      // the regionserver dies, these nodes do not stick around and this server
      // shutdown processing does fixup (see the fixupDaughters method below).
      // If we wanted to skip the .META. scan, we'd have to change at least the
      // final SPLIT message to be permanent in zk so in here we'd know a SPLIT
      // completed (zk is updated after edits to .META. have gone in).  See
      // {@link SplitTransaction}.  We'd also have to figure out another way of
      // doing the .META. daughters fixup below.
      NavigableMap<HRegionInfo, Result> hris = null;
      while (!this.server.isStopped()) {
        try {
          this.server.getCatalogTracker().waitForMeta();
          hris = MetaReader.getServerUserRegions(this.server.getCatalogTracker(),
            this.serverName);
          break;
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new IOException("Interrupted", e);
        } catch (IOException ioe) {
          LOG.info("Received exception accessing META during server shutdown of " +
              serverName + ", retrying META read", ioe);
        }
      }
      if (this.server.isStopped()) {
        throw new IOException("Server is stopped");
      }

      // Clean out anything in regions in transition.  Being conservative and
      // doing this after log splitting.  Could do some states before -- OPENING?
      // OFFLINE? -- and then others after, like CLOSING, that depend on log
      // splitting.
      AssignmentManager am = services.getAssignmentManager();
      List<HRegionInfo> regionsInTransition = am.processServerShutdown(serverName);
      LOG.info("Reassigning " + ((hris == null)? 0: hris.size()) +
        " region(s) that " + (serverName == null? "null": serverName) +
        " was carrying (and " + regionsInTransition.size() +
        " region(s) that were opening on this server)");

      List<HRegionInfo> toAssignRegions = new ArrayList<HRegionInfo>();
      toAssignRegions.addAll(regionsInTransition);

      // Iterate regions that were on this server and assign them
      if (hris != null) {
        RegionStates regionStates = am.getRegionStates();
        for (Map.Entry<HRegionInfo, Result> e: hris.entrySet()) {
          HRegionInfo hri = e.getKey();
          if (regionsInTransition.contains(hri)) {
            continue;
          }
          RegionState rit = regionStates.getRegionTransitionState(hri);
          if (processDeadRegion(hri, e.getValue(), am, server.getCatalogTracker())) {
            ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
            if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
              // If this region is in transition on the dead server, it must be
              // opening or pending_open, which should have been covered by
              // AM#processServerShutdown
              LOG.info("Skip assigning region " + hri.getRegionNameAsString()
                + " because it has been opened in " + addressFromAM.getServerName());
              continue;
            }
            if (rit != null) {
              if (!rit.isOnServer(serverName)
                  || rit.isClosed() || rit.isOpened() || rit.isSplit()) {
                // Skip regions that are in transition on another server,
                // or in state closed/opened/split
                LOG.info("Skip assigning region " + rit);
                continue;
              }
              try {
                // Clean up the zk node
                LOG.info("Reassigning region with rs = " + rit + " and deleting zk node if exists");
                ZKAssign.deleteNodeFailSilent(services.getZooKeeper(), hri);
              } catch (KeeperException ke) {
                this.server.abort("Unexpected ZK exception deleting unassigned node " + hri, ke);
                return;
              }
            }
            toAssignRegions.add(hri);
          } else if (rit != null) {
            if (rit.isSplitting() || rit.isSplit()) {
              // This will happen when the RS went down and the callback for the SPLITTING or SPLIT
              // node-deleted event has not yet been processed. If the region was actually split,
              // but the RS went down before completing the split process, then we will not try to
              // assign the parent region again. In that case we should make the region offline
              // and also delete the region from RIT.
              am.regionOffline(hri);
            } else if ((rit.isClosing() || rit.isPendingClose())
                && am.getZKTable().isDisablingOrDisabledTable(hri.getTableNameAsString())) {
              // If the table was partially disabled and the RS went down, we should clear the RIT
              // and remove the node for the region.
              // The rit that we use may be stale in case the table was in DISABLING state,
              // but even though we did assign, we will not be clearing the znode in CLOSING state.
              // Doing this does no harm. See HBASE-5927
              am.deleteClosingOrClosedNode(hri);
              am.regionOffline(hri);
            } else {
              LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition "
                + rit + " not to be assigned by SSH of server " + serverName);
            }
          }
        }
      }
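      // Assign everything we collected above: the regions that were in transition
      // on the dead server plus the user regions it was carrying.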
      try {
        am.assign(toAssignRegions);
      } catch (InterruptedException ie) {
        LOG.error("Caught " + ie + " during round-robin assignment");
        throw new IOException(ie);
      }
    } finally {
      this.deadServers.finish(serverName);
    }
    LOG.info("Finished processing of shutdown of " + serverName);
  }

  /**
   * Process a dead region from a dead RS. Checks if the region is disabled or
   * disabling, or if the region has a partially completed split.
   * @param hri the region that was on the dead regionserver
   * @param result the .META. row for the region
   * @param assignmentManager the master's assignment manager
   * @param catalogTracker the catalog tracker
   * @return Returns true if the specified region should be assigned, false if not.
   * @throws IOException
   */
  public static boolean processDeadRegion(HRegionInfo hri, Result result,
      AssignmentManager assignmentManager, CatalogTracker catalogTracker)
  throws IOException {
    boolean tablePresent = assignmentManager.getZKTable().isTablePresent(
        hri.getTableNameAsString());
    if (!tablePresent) {
      LOG.info("The table " + hri.getTableNameAsString()
          + " was deleted.  Hence not proceeding.");
      return false;
    }
    // If the table is disabled, the region should stay offline; do not assign it.
    boolean disabled = assignmentManager.getZKTable().isDisabledTable(
        hri.getTableNameAsString());
    if (disabled) {
      LOG.info("The table " + hri.getTableNameAsString()
          + " was disabled.  Hence not proceeding.");
      return false;
    }
    if (hri.isOffline() && hri.isSplit()) {
      // HBASE-7721: Split parent and daughters are inserted into META as an atomic operation.
      // If the meta scanner saw the parent split, then it should see the daughters as assigned
      // to the dead server. We don't have to do anything.
      return false;
    }
    boolean disabling = assignmentManager.getZKTable().isDisablingTable(
        hri.getTableNameAsString());
    if (disabling) {
      LOG.info("The table " + hri.getTableNameAsString()
          + " is being disabled.  Hence not assigning region " + hri.getEncodedName());
      return false;
    }
    return true;
  }
}