/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AdminProtocol;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.exceptions.ClockOutOfSyncException;
import org.apache.hadoop.hbase.exceptions.PleaseHoldException;
import org.apache.hadoop.hbase.exceptions.YouAreDeadException;
import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler;
import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

import com.google.protobuf.ServiceException;

/**
 * The ServerManager class manages info about region servers.
 * <p>
 * Maintains lists of online and dead servers.  Processes the startups,
 * shutdowns, and deaths of region servers.
 * <p>
 * Servers are distinguished in two different ways.  A given server has a
 * location, specified by hostname and port, of which there can only be one
 * online at any given time.  A server instance is specified by the location
 * (hostname and port) as well as the startcode (timestamp from when the server
 * was started).  This is used to differentiate a restarted instance of a given
 * server from the original instance.
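 * <p>
 * For illustration only: the string form of a server instance name is
 * {@code hostname,port,startcode}, so a hypothetical instance could look like
 * <pre>{@code
 * ServerName sn = new ServerName("rs1.example.org", 60020, 1380447382902L);
 * sn.toString(); // "rs1.example.org,60020,1380447382902"
 * }</pre>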
 * <p>
 * If a server is known not to be running any more, it is called dead. The dead
 * server needs to be handled by a ServerShutdownHandler.  If the handler is not
 * enabled yet, the server cannot be handled right away, so it is queued up.
 * After the handler is enabled, the server will be submitted to the handler for
 * processing.  However, the handler may be only partially enabled.  If so,
 * the server cannot be fully processed, and is queued up for further processing.
 * A server is fully processed only after the handler is fully enabled
 * and has completed the handling.
 */
@InterfaceAudience.Private
public class ServerManager {
  public static final String WAIT_ON_REGIONSERVERS_MAXTOSTART =
      "hbase.master.wait.on.regionservers.maxtostart";

  public static final String WAIT_ON_REGIONSERVERS_MINTOSTART =
      "hbase.master.wait.on.regionservers.mintostart";

  public static final String WAIT_ON_REGIONSERVERS_TIMEOUT =
      "hbase.master.wait.on.regionservers.timeout";

  public static final String WAIT_ON_REGIONSERVERS_INTERVAL =
      "hbase.master.wait.on.regionservers.interval";

  private static final Log LOG = LogFactory.getLog(ServerManager.class);

  // Set if we are to shutdown the cluster.
  private volatile boolean clusterShutdown = false;

  private final SortedMap<byte[], Long> flushedSequenceIdByRegion =
    new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);

  /** Map of registered servers to their current load */
  private final Map<ServerName, ServerLoad> onlineServers =
    new ConcurrentHashMap<ServerName, ServerLoad>();

  // TODO: This is strange to have two maps but HSI above is used on both sides
  /**
   * Map from full server-instance name to the RPC connection for this server.
   */
  private final Map<ServerName, AdminProtocol> serverConnections =
    new HashMap<ServerName, AdminProtocol>();

  /**
   * List of region servers ({@link ServerName}) that should not get any more new
   * regions.
   */
  private final ArrayList<ServerName> drainingServers =
    new ArrayList<ServerName>();

  private final Server master;
  private final MasterServices services;
  private final HConnection connection;

  private final DeadServer deadservers = new DeadServer();

  private final long maxSkew;
  private final long warningSkew;

  /**
   * Set of region servers which are dead but not processed immediately. If a
   * server dies before the master enables the ServerShutdownHandler, the server
   * will be added to this set and will be processed through calling
   * {@link ServerManager#processQueuedDeadServers()} by the master.
   * <p>
   * A dead server is a server instance known to be dead, not listed in the /hbase/rs
   * znode any more. It may not have been submitted to the ServerShutdownHandler yet
   * because the handler is not enabled.
   * <p>
   * A dead server, which has been submitted to the ServerShutdownHandler while the
   * handler is not enabled, is queued up.
   * <p>
   * So this is a set of region servers known to be dead but not submitted to
   * the ServerShutdownHandler for processing yet.
   */
  private Set<ServerName> queuedDeadServers = new HashSet<ServerName>();

  /**
   * Set of region servers which are dead and submitted to the ServerShutdownHandler
   * for processing but not fully processed immediately.
   * <p>
   * If a server dies before the assignment manager finishes the failover cleanup,
   * the server will be added to this set and will be processed through calling
   * {@link ServerManager#processQueuedDeadServers()} by the assignment manager.
   * <p>
   * For all the region servers in this set, HLog split is already completed.
   * <p>
   * ServerShutdownHandler processes a dead server submitted to the handler after
   * the handler is enabled. It may not be able to complete the processing because meta
   * is not yet online or the master is currently in startup mode.  In this case, the
   * dead server will be parked in this set temporarily.
   */
  private Set<ServerName> requeuedDeadServers = new HashSet<ServerName>();

  /**
   * Constructor.
   * @param master the master server
   * @param services the master's services
   * @throws ZooKeeperConnectionException
   */
  public ServerManager(final Server master, final MasterServices services)
      throws IOException {
    this(master, services, true);
  }

  ServerManager(final Server master, final MasterServices services,
      final boolean connect) throws IOException {
    this.master = master;
    this.services = services;
    Configuration c = master.getConfiguration();
    maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
    warningSkew = c.getLong("hbase.master.warningclockskew", 10000);
    this.connection = connect ? HConnectionManager.getConnection(c) : null;
  }

  /**
   * Let the server manager know a new regionserver has come online.
   * @param ia The remote address
   * @param port The remote port
   * @param serverStartcode the startcode reported by the server
   * @param serverCurrentTime The current time of the region server in ms
   * @return The ServerName we know this server as.
   * @throws IOException
   */
  ServerName regionServerStartup(final InetAddress ia, final int port,
    final long serverStartcode, long serverCurrentTime)
  throws IOException {
    // Test for the case where we get a region startup message from a regionserver
    // that has been quickly restarted but whose znode expiration handler has
    // not yet run, or from a server whose failure we are currently processing.
    // Test if its host+port combo is present in serverAddressToServerInfo.  If it
    // is, reject the server and trigger its expiration. The next time it comes
    // in, it should have been removed from serverAddressToServerInfo and queued
    // for processing by ProcessServerShutdown.
    ServerName sn = new ServerName(ia.getHostName(), port, serverStartcode);
    checkClockSkew(sn, serverCurrentTime);
    checkIsDead(sn, "STARTUP");
    checkAlreadySameHostPort(sn);
    recordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD);
    return sn;
  }

  /**
   * Updates the last flushed sequence ids for the regions on server {@code sn}.
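   * <p>
   * Sequence ids only move forward here: for example, if the stored id for a
   * region is 120 and a report claims 100, the stored value stays 120; only a
   * larger reported id replaces it.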
   * @param sn the reporting server
   * @param hsl the server load carrying per-region flushed sequence ids
   */
  private void updateLastFlushedSequenceIds(ServerName sn, ServerLoad hsl) {
    Map<byte[], RegionLoad> regionsLoad = hsl.getRegionsLoad();
    for (Entry<byte[], RegionLoad> entry : regionsLoad.entrySet()) {
      Long existingValue = flushedSequenceIdByRegion.get(entry.getKey());
      long l = entry.getValue().getCompleteSequenceId();
      if (existingValue != null) {
        if (l != -1 && l < existingValue) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("RegionServer " + sn +
                " indicates a last flushed sequence id (" + l +
                ") that is less than the previous last flushed sequence id (" +
                existingValue + ") for region " +
                Bytes.toString(entry.getKey()) + ". Ignoring.");
          }
          // Don't let smaller sequence ids override greater sequence ids.
          continue;
        }
      }
      flushedSequenceIdByRegion.put(entry.getKey(), l);
    }
  }

  void regionServerReport(ServerName sn, ServerLoad sl)
  throws YouAreDeadException, PleaseHoldException {
    checkIsDead(sn, "REPORT");
    if (!this.onlineServers.containsKey(sn)) {
      // Already have this host+port combo and it's just a different start code?
      checkAlreadySameHostPort(sn);
      // Just let the server in. Presume master joining a running cluster.
      // recordNewServer is what happens at the end of reportServerStartup.
      // The only thing we are skipping is passing back to the regionserver
      // the ServerName to use. Here we presume a master has already done
      // that so we'll press on with whatever it gave us for ServerName.
      recordNewServer(sn, sl);
    } else {
      this.onlineServers.put(sn, sl);
    }
    updateLastFlushedSequenceIds(sn, sl);
  }

  /**
   * Checks whether we already have a server with the same host and port.
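   * <p>
   * For example, if {@code rs1.example.org,60020,100} is online and a new
   * instance {@code rs1.example.org,60020,200} checks in, the stale instance
   * (startcode 100) is expired, and, once the master has completed
   * initialization, the newcomer is asked to hold via {@link PleaseHoldException}.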
   * @param serverName the server to check
   * @throws PleaseHoldException
   */
  void checkAlreadySameHostPort(final ServerName serverName)
  throws PleaseHoldException {
    ServerName existingServer =
      ServerName.findServerWithSameHostnamePort(getOnlineServersList(), serverName);
    if (existingServer != null) {
      String message = "Server serverName=" + serverName +
        " rejected; we already have " + existingServer.toString() +
        " registered with same hostname and port";
      LOG.info(message);
      if (existingServer.getStartcode() < serverName.getStartcode()) {
        LOG.info("Triggering server recovery; existingServer " +
          existingServer + " looks stale, new server:" + serverName);
        expireServer(existingServer);
      }
      if (services.isServerShutdownHandlerEnabled()) {
        // master has completed the initialization
        throw new PleaseHoldException(message);
      }
    }
  }

  /**
   * Checks the clock skew between the server and the master. If the skew exceeds
   * the configured maximum, an exception is thrown; if it exceeds the configured
   * warning threshold, a warning is logged but the server starts normally.
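   * <p>
   * Both thresholds come from the master configuration. A sketch, given a
   * Hadoop {@code Configuration conf}; the values shown are the in-code
   * defaults, not recommendations:
   * <pre>{@code
   * conf.setLong("hbase.master.maxclockskew", 30000);     // reject above 30s
   * conf.setLong("hbase.master.warningclockskew", 10000); // warn above 10s
   * }</pre>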
   * @param serverName the incoming server's name
   * @param serverCurrentTime the current time, in ms, reported by the server
   * @throws ClockOutOfSyncException if the skew exceeds the configured max value
   */
  private void checkClockSkew(final ServerName serverName, final long serverCurrentTime)
  throws ClockOutOfSyncException {
    long skew = System.currentTimeMillis() - serverCurrentTime;
    if (skew > maxSkew) {
      String message = "Server " + serverName + " has been " +
        "rejected; Reported time is too far out of sync with master.  " +
        "Time difference of " + skew + "ms > max allowed of " + maxSkew + "ms";
      LOG.warn(message);
      throw new ClockOutOfSyncException(message);
    } else if (skew > warningSkew) {
      String message = "Reported time for server " + serverName + " is out of sync with master " +
        "by " + skew + "ms. (Warning threshold is " + warningSkew + "ms; " +
        "error threshold is " + maxSkew + "ms)";
      LOG.warn(message);
    }
  }

  /**
   * If this server is on the dead list, reject it with a YouAreDeadException.
   * If it was dead but came back with a new start code, remove the old entry
   * from the dead list.
   * @param serverName the server to check
   * @param what START or REPORT
   * @throws org.apache.hadoop.hbase.exceptions.YouAreDeadException
   */
  private void checkIsDead(final ServerName serverName, final String what)
      throws YouAreDeadException {
    if (this.deadservers.isDeadServer(serverName)) {
      // Host name, port and start code all match an existing entry in the
      // dead servers list. So, this server must be dead.
      String message = "Server " + what + " rejected; currently processing " +
          serverName + " as dead server";
      LOG.debug(message);
      throw new YouAreDeadException(message);
    }
    // Remove any dead server with the same hostname and port as the newly
    // checking-in regionserver after master initialization. See HBASE-5916.
    if ((this.services == null || ((HMaster) this.services).isInitialized())
        && this.deadservers.cleanPreviousInstance(serverName)) {
      // This server has now become alive after we marked it as dead.
      // We removed its previous entry from the dead list to reflect it.
      LOG.debug(what + ":" + " Server " + serverName + " came back up," +
          " removed it from the dead servers list");
    }
  }

  /**
   * Adds the server to the onlineServers list.
   * @param serverName The remote server's name.
   * @param sl the server's load
   */
  void recordNewServer(final ServerName serverName, final ServerLoad sl) {
    LOG.info("Registering server=" + serverName);
    this.onlineServers.put(serverName, sl);
    this.serverConnections.remove(serverName);
  }

  public long getLastFlushedSequenceId(byte[] regionName) {
    // Read once; a containsKey-then-get pair would race with concurrent updates.
    Long seqId = flushedSequenceIdByRegion.get(regionName);
    return seqId == null ? -1 : seqId;
  }

  /**
   * @param serverName the server to look up
   * @return ServerLoad if serverName is known else null
   */
  public ServerLoad getLoad(final ServerName serverName) {
    return this.onlineServers.get(serverName);
  }

  /**
   * Compute the average load across all region servers.
   * Currently, this uses a very naive computation: it just counts the number of
   * regions each server carries, ignoring stats about the number of requests.
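   * <p>
   * For example, three online servers carrying 10, 20, and 30 regions average
   * to (10 + 20 + 30) / 3 = 20.0.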
   * @return the average load
   */
  public double getAverageLoad() {
    int totalLoad = 0;
    int numServers = 0;
    for (ServerLoad sl: this.onlineServers.values()) {
      numServers++;
      totalLoad += sl.getNumberOfRegions();
    }
    // Guard against division by zero (NaN) when no servers have checked in yet.
    return numServers == 0 ? 0.0 : (double)totalLoad / (double)numServers;
  }

  /** @return the count of active regionservers */
  int countOfRegionServers() {
    // Presumes onlineServers is a concurrent map
    return this.onlineServers.size();
  }

  /**
   * @return Read-only map of servers to serverinfo
   */
  public Map<ServerName, ServerLoad> getOnlineServers() {
    // Presumption is that iterating the returned Map is OK.
    synchronized (this.onlineServers) {
      return Collections.unmodifiableMap(this.onlineServers);
    }
  }

  public DeadServer getDeadServers() {
    return this.deadservers;
  }

  /**
   * Checks if any dead servers are currently being processed.
   * @return true if any RS are being processed as dead, false if not
   */
  public boolean areDeadServersInProgress() {
    return this.deadservers.areDeadServersInProgress();
  }

  void letRegionServersShutdown() {
    long previousLogTime = 0;
    while (!onlineServers.isEmpty()) {
      if (System.currentTimeMillis() > (previousLogTime + 1000)) {
        StringBuilder sb = new StringBuilder();
        for (ServerName key : this.onlineServers.keySet()) {
          if (sb.length() > 0) {
            sb.append(", ");
          }
          sb.append(key);
        }
        LOG.info("Waiting on regionserver(s) to go down " + sb.toString());
        previousLogTime = System.currentTimeMillis();
      }

      synchronized (onlineServers) {
        try {
          onlineServers.wait(100);
        } catch (InterruptedException ignored) {
          // continue
        }
      }
    }
  }

  /*
   * Expire the passed server.  Add it to list of dead servers and queue a
   * shutdown processing.
   */
  public synchronized void expireServer(final ServerName serverName) {
    if (!services.isServerShutdownHandlerEnabled()) {
      LOG.info("Master hasn't enabled ServerShutdownHandler yet; "
          + "delaying expiration of server " + serverName);
      this.queuedDeadServers.add(serverName);
      return;
    }
    if (!this.onlineServers.containsKey(serverName)) {
      LOG.warn("Received expiration of " + serverName +
        " but server is not currently online");
    }
    if (this.deadservers.isDeadServer(serverName)) {
      // TODO: Can this happen?  It shouldn't be online in this case?
      LOG.warn("Received expiration of " + serverName +
          " but server shutdown is already in progress");
      return;
    }
    // Remove the server from the known servers lists and update load info BUT
    // add to deadservers first; do this so it'll show in dead servers list if
    // not in online servers list.
    this.deadservers.add(serverName);
    this.onlineServers.remove(serverName);
    synchronized (onlineServers) {
      onlineServers.notifyAll();
    }
    this.serverConnections.remove(serverName);
    // If cluster is going down, yes, servers are going to be expiring; don't
    // process as a dead server
    if (this.clusterShutdown) {
      LOG.info("Cluster shutdown set; " + serverName +
        " expired; onlineServers=" + this.onlineServers.size());
      if (this.onlineServers.isEmpty()) {
        master.stop("Cluster shutdown set; onlineServer=0");
      }
      return;
    }

    boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName);
    if (carryingMeta) {
      this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
        this.services, this.deadservers, serverName));
    } else {
      this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
        this.services, this.deadservers, serverName, true));
    }
    LOG.debug("Added=" + serverName +
      " to dead servers, submitted shutdown handler to be executed meta=" + carryingMeta);
  }

  public synchronized void processDeadServer(final ServerName serverName) {
    // When assignment manager is cleaning up the zookeeper nodes and rebuilding the
    // in-memory region states, region servers could be down. Meta table can and
    // should be re-assigned, log splitting can be done too. However, it is better to
    // wait till the cleanup is done before re-assigning user regions.
    //
    // We should not wait in the server shutdown handler thread since it can clog
    // the handler threads and meta table could not be re-assigned in case
    // the corresponding server is down. So we queue them up here instead.
    if (!services.getAssignmentManager().isFailoverCleanupDone()) {
      requeuedDeadServers.add(serverName);
      return;
    }

    this.deadservers.add(serverName);
    this.services.getExecutorService().submit(new ServerShutdownHandler(
      this.master, this.services, this.deadservers, serverName, false));
  }

  /**
   * Process the servers which died during the master's initialization. It will be
   * called after HMaster#assignMeta and AssignmentManager#joinCluster.
   */
  synchronized void processQueuedDeadServers() {
    if (!services.isServerShutdownHandlerEnabled()) {
      LOG.info("Master hasn't enabled ServerShutdownHandler");
    }
    Iterator<ServerName> serverIterator = queuedDeadServers.iterator();
    while (serverIterator.hasNext()) {
      expireServer(serverIterator.next());
      serverIterator.remove();
    }

    if (!services.getAssignmentManager().isFailoverCleanupDone()) {
      LOG.info("AssignmentManager hasn't finished failover cleanup");
    }
    serverIterator = requeuedDeadServers.iterator();
    while (serverIterator.hasNext()) {
      processDeadServer(serverIterator.next());
      serverIterator.remove();
    }
  }

  /*
   * Remove the server from the drain list.
   */
  public boolean removeServerFromDrainList(final ServerName sn) {
    // Warn if the server (sn) is not online.  ServerName is of the form:
    // <hostname> , <port> , <startcode>

    if (!this.isServerOnline(sn)) {
      LOG.warn("Server " + sn + " is not currently online. " +
               "Removing from draining list anyway, as requested.");
    }
    // Remove the server from the draining servers lists.
    return this.drainingServers.remove(sn);
  }

  /*
   * Add the server to the drain list.
   */
  public boolean addServerToDrainList(final ServerName sn) {
    // Warn if the server (sn) is not online.  ServerName is of the form:
    // <hostname> , <port> , <startcode>

    if (!this.isServerOnline(sn)) {
      LOG.warn("Server " + sn + " is not currently online. " +
               "Ignoring request to add it to the draining list.");
      return false;
    }
    // Add the server to the draining servers lists, if it's not already in it.
    if (this.drainingServers.contains(sn)) {
      LOG.warn("Server " + sn + " is already in the draining server list. " +
               "Ignoring request to add it again.");
      return false;
    }
    return this.drainingServers.add(sn);
  }

  // RPC methods to region servers

  /**
   * Sends an OPEN RPC to the specified server to open the specified region.
   * <p>
   * Open should not fail but can if the server just crashed.
   * @param server server to open a region
   * @param region region to open
   * @param versionOfOfflineNode the znode version that needs to be present in the
   *   offline node when the RS tries to change the state from OFFLINE to other states.
   * @return the region opening state reported by the server
   */
  public RegionOpeningState sendRegionOpen(final ServerName server,
      HRegionInfo region, int versionOfOfflineNode)
  throws IOException {
    AdminProtocol admin = getServerConnection(server);
    if (admin == null) {
      LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
        " failed because no RPC connection found to this server");
      return RegionOpeningState.FAILED_OPENING;
    }
    OpenRegionRequest request =
      RequestConverter.buildOpenRegionRequest(region, versionOfOfflineNode);
    try {
      OpenRegionResponse response = admin.openRegion(null, request);
      return ResponseConverter.getRegionOpeningState(response);
    } catch (ServiceException se) {
      throw ProtobufUtil.getRemoteException(se);
    }
  }

  /**
   * Sends an OPEN RPC to the specified server to open the specified regions.
   * <p>
   * Open should not fail but can if the server just crashed.
   * @param server server to open the regions
   * @param regionOpenInfos info of a list of regions to open
   * @return a list of region opening states, or null if no RPC connection was found
   */
  public List<RegionOpeningState> sendRegionOpen(ServerName server,
      List<Pair<HRegionInfo, Integer>> regionOpenInfos)
  throws IOException {
    AdminProtocol admin = getServerConnection(server);
    if (admin == null) {
      LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
        " failed because no RPC connection found to this server");
      return null;
    }

    OpenRegionRequest request =
      RequestConverter.buildOpenRegionRequest(regionOpenInfos);
    try {
      OpenRegionResponse response = admin.openRegion(null, request);
      return ResponseConverter.getRegionOpeningStateList(response);
    } catch (ServiceException se) {
      throw ProtobufUtil.getRemoteException(se);
    }
  }

  /**
   * Sends a CLOSE RPC to the specified server to close the specified region.
   * <p>
   * A region server could reject the close request because it either does not
   * have the specified region or the region is being split.
   * @param server server to close a region
   * @param region region to close
   * @param versionOfClosingNode
   *   the version of znode to compare when RS transitions the znode from
   *   CLOSING state.
   * @param dest if the region is moved to another server, the destination server; null otherwise.
   * @return true if server acknowledged close, false if not
   * @throws IOException
   */
  public boolean sendRegionClose(ServerName server, HRegionInfo region,
    int versionOfClosingNode, ServerName dest, boolean transitionInZK) throws IOException {
    if (server == null) throw new NullPointerException("Passed server is null");
    AdminProtocol admin = getServerConnection(server);
    if (admin == null) {
      throw new IOException("Attempting to send CLOSE RPC to server " +
        server.toString() + " for region " +
        region.getRegionNameAsString() +
        " failed because no RPC connection found to this server");
    }
    return ProtobufUtil.closeRegion(admin, region.getRegionName(),
      versionOfClosingNode, dest, transitionInZK);
  }

  public boolean sendRegionClose(ServerName server,
      HRegionInfo region, int versionOfClosingNode) throws IOException {
    return sendRegionClose(server, region, versionOfClosingNode, null, true);
  }

  /**
   * Sends a MERGE REGIONS RPC to the specified server to merge the specified
   * regions.
   * <p>
   * A region server could reject the merge request if it does not have either
   * of the specified regions.
   * @param server server to merge regions
   * @param region_a region to merge
   * @param region_b region to merge
   * @param forcible true to force the merge; otherwise we will only merge
   *          two adjacent regions
   * @throws IOException
   */
  public void sendRegionsMerge(ServerName server, HRegionInfo region_a,
      HRegionInfo region_b, boolean forcible) throws IOException {
    if (server == null)
      throw new NullPointerException("Passed server is null");
    if (region_a == null || region_b == null)
      throw new NullPointerException("Passed region is null");
    AdminProtocol admin = getServerConnection(server);
    if (admin == null) {
      throw new IOException("Attempting to send MERGE REGIONS RPC to server "
          + server.toString() + " for region "
          + region_a.getRegionNameAsString() + ","
          + region_b.getRegionNameAsString()
          + " failed because no RPC connection found to this server");
    }
    ProtobufUtil.mergeRegions(admin, region_a, region_b, forcible);
  }

  /**
   * @param sn the server to connect to
   * @return the existing RPC connection to {@code sn}, or a newly created one
   * @throws IOException
   * @throws RetriesExhaustedException wrapping a ConnectException if we failed
   *   putting up the proxy.
   */
  private AdminProtocol getServerConnection(final ServerName sn)
  throws IOException {
    AdminProtocol admin = this.serverConnections.get(sn);
    if (admin == null) {
      LOG.debug("New connection to " + sn.toString());
      admin = this.connection.getAdmin(sn);
      this.serverConnections.put(sn, admin);
    }
    return admin;
  }

  /**
   * Wait for the region servers to report in.
   * We will wait until one of these conditions is met:
   * <ul>
   * <li>the master is stopped</li>
   * <li>the 'hbase.master.wait.on.regionservers.maxtostart' number of
   *     region servers is reached</li>
   * <li>the 'hbase.master.wait.on.regionservers.mintostart' is reached AND
   *     no new region servers have checked in for
   *     'hbase.master.wait.on.regionservers.interval' time AND
   *     the 'hbase.master.wait.on.regionservers.timeout' has elapsed</li>
   * </ul>
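   * <p>
   * A configuration sketch, given a Hadoop {@code Configuration conf}; the
   * values shown are the in-code defaults, not recommendations:
   * <pre>{@code
   * conf.setLong("hbase.master.wait.on.regionservers.interval", 1500);
   * conf.setLong("hbase.master.wait.on.regionservers.timeout", 4500);
   * conf.setInt("hbase.master.wait.on.regionservers.mintostart", 1);
   * conf.setInt("hbase.master.wait.on.regionservers.maxtostart", Integer.MAX_VALUE);
   * }</pre>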
   * @throws InterruptedException
   */
  public void waitForRegionServers(MonitoredTask status)
  throws InterruptedException {
    final long interval = this.master.getConfiguration().
      getLong(WAIT_ON_REGIONSERVERS_INTERVAL, 1500);
    final long timeout = this.master.getConfiguration().
      getLong(WAIT_ON_REGIONSERVERS_TIMEOUT, 4500);
    int minToStart = this.master.getConfiguration().
      getInt(WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
    if (minToStart < 1) {
      LOG.warn(String.format(
        "The value of '%s' (%d) can not be less than 1, ignoring.",
        WAIT_ON_REGIONSERVERS_MINTOSTART, minToStart));
      minToStart = 1;
    }
    int maxToStart = this.master.getConfiguration().
      getInt(WAIT_ON_REGIONSERVERS_MAXTOSTART, Integer.MAX_VALUE);
    if (maxToStart < minToStart) {
      LOG.warn(String.format(
        "The value of '%s' (%d) is set less than '%s' (%d), ignoring.",
        WAIT_ON_REGIONSERVERS_MAXTOSTART, maxToStart,
        WAIT_ON_REGIONSERVERS_MINTOSTART, minToStart));
      maxToStart = Integer.MAX_VALUE;
    }

    long now = System.currentTimeMillis();
    final long startTime = now;
    long slept = 0;
    long lastLogTime = 0;
    long lastCountChange = startTime;
    int count = countOfRegionServers();
    int oldCount = 0;
    while (!this.master.isStopped() &&
        count < maxToStart &&
        (lastCountChange + interval > now || timeout > slept || count < minToStart)) {
      // Log some info at every interval time or if there is a change
      if (oldCount != count || lastLogTime + interval < now) {
        lastLogTime = now;
        String msg =
          "Waiting for region servers count to settle; currently" +
            " checked in " + count + ", slept for " + slept + " ms," +
            " expecting minimum of " + minToStart + ", maximum of " + maxToStart +
            ", timeout of " + timeout + " ms, interval of " + interval + " ms.";
        LOG.info(msg);
        status.setStatus(msg);
      }

      // We sleep for some time
      final long sleepTime = 50;
      Thread.sleep(sleepTime);
      now = System.currentTimeMillis();
      slept = now - startTime;

      oldCount = count;
      count = countOfRegionServers();
      if (count != oldCount) {
        lastCountChange = now;
      }
    }

    LOG.info("Finished waiting for region servers count to settle;" +
      " checked in " + count + ", slept for " + slept + " ms," +
      " expecting minimum of " + minToStart + ", maximum of " + maxToStart + "," +
      " master is " + (this.master.isStopped() ? "stopped." : "running.")
    );
  }

  /**
   * @return A copy of the internal list of online servers.
   */
  public List<ServerName> getOnlineServersList() {
    // TODO: optimize the load balancer call so we don't need to make a new list
    // TODO: FIX. THIS IS A POPULAR CALL.
    return new ArrayList<ServerName>(this.onlineServers.keySet());
  }

  /**
   * @return A copy of the internal list of draining servers.
   */
  public List<ServerName> getDrainingServersList() {
    return new ArrayList<ServerName>(this.drainingServers);
  }

  /**
   * @return A copy of the internal set of deadNotExpired servers.
   */
  Set<ServerName> getDeadNotExpiredServers() {
    return new HashSet<ServerName>(this.queuedDeadServers);
  }

  public boolean isServerOnline(ServerName serverName) {
    return serverName != null && onlineServers.containsKey(serverName);
  }

  /**
   * Check if a server is known to be dead.  A server can be online,
   * known to be dead, or unknown to this manager (i.e., not online and
   * not known to be dead either; it is simply not tracked by the
   * master any more, for example, a very old previous instance).
   */
  public synchronized boolean isServerDead(ServerName serverName) {
    return serverName == null || deadservers.isDeadServer(serverName)
      || queuedDeadServers.contains(serverName)
      || requeuedDeadServers.contains(serverName);
  }

  public void shutdownCluster() {
    this.clusterShutdown = true;
    this.master.stop("Cluster shutdown requested");
  }

  public boolean isClusterShutdown() {
    return this.clusterShutdown;
  }

  /**
   * Stop the ServerManager.  Currently closes the connection to the cluster.
   */
  public void stop() {
    if (connection != null) {
      try {
        connection.close();
      } catch (IOException e) {
        LOG.error("Attempt to close the cluster connection failed", e);
      }
    }
  }

  /**
   * Creates a list of possible destinations for a region. It contains the online servers, but not
   * the draining or dying servers.
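   * <p>
   * A usage sketch; {@code serverManager} and {@code failedServer} are
   * illustrative names, not part of this class:
   * <pre>{@code
   * // candidate servers for the regions of a failed server
   * List<ServerName> candidates =
   *     serverManager.createDestinationServersList(failedServer);
   * }</pre>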
   * @param serverToExclude can be null if there is no server to exclude
   */
  public List<ServerName> createDestinationServersList(final ServerName serverToExclude) {
    final List<ServerName> destServers = getOnlineServersList();

    if (serverToExclude != null) {
      destServers.remove(serverToExclude);
    }

    // Loop through the draining server list and remove them from the server list
    final List<ServerName> drainingServersCopy = getDrainingServersList();
    if (!drainingServersCopy.isEmpty()) {
      for (final ServerName server: drainingServersCopy) {
        destServers.remove(server);
      }
    }

    // Remove the deadNotExpired servers from the server list.
    removeDeadNotExpiredServers(destServers);

    return destServers;
  }

  /**
   * Calls {@link #createDestinationServersList} without a server to exclude.
   */
  public List<ServerName> createDestinationServersList() {
    return createDestinationServersList(null);
  }

  /**
   * Loop through the deadNotExpired server list and remove them from the
   * servers.
   * This function should be used carefully outside of this class. You should use a high-level
   * method such as {@link #createDestinationServersList()} instead of managing your own list.
   */
  void removeDeadNotExpiredServers(List<ServerName> servers) {
    Set<ServerName> deadNotExpiredServersCopy = this.getDeadNotExpiredServers();
    if (!deadNotExpiredServersCopy.isEmpty()) {
      for (ServerName server : deadNotExpiredServersCopy) {
        LOG.debug("Removing dead but not expired server: " + server
          + " from eligible server pool.");
        servers.remove(server);
      }
    }
  }

  /**
   * Clears out any dead server whose host name and port match those of an online server.
   */
  void clearDeadServersWithSameHostNameAndPortOfOnlineServer() {
    for (ServerName serverName : getOnlineServersList()) {
      deadservers.cleanAllPreviousInstances(serverName);
    }
  }
}