1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.master;
20  
21  import java.io.IOException;
22  import java.io.InterruptedIOException;
23  import java.lang.reflect.Constructor;
24  import java.lang.reflect.InvocationTargetException;
25  import java.net.InetAddress;
26  import java.net.InetSocketAddress;
27  import java.net.UnknownHostException;
28  import java.util.ArrayList;
29  import java.util.Arrays;
30  import java.util.Collection;
31  import java.util.Collections;
32  import java.util.Comparator;
33  import java.util.HashSet;
34  import java.util.Iterator;
35  import java.util.List;
36  import java.util.Map;
37  import java.util.Set;
38  import java.util.concurrent.TimeUnit;
39  import java.util.concurrent.atomic.AtomicReference;
40  import java.util.regex.Pattern;
41  
42  import javax.servlet.ServletException;
43  import javax.servlet.http.HttpServlet;
44  import javax.servlet.http.HttpServletRequest;
45  import javax.servlet.http.HttpServletResponse;
46  
47  import org.apache.commons.logging.Log;
48  import org.apache.commons.logging.LogFactory;
49  import org.apache.hadoop.conf.Configuration;
50  import org.apache.hadoop.fs.Path;
51  import org.apache.hadoop.hbase.ClusterStatus;
52  import org.apache.hadoop.hbase.CoordinatedStateException;
53  import org.apache.hadoop.hbase.CoordinatedStateManager;
54  import org.apache.hadoop.hbase.DoNotRetryIOException;
55  import org.apache.hadoop.hbase.HBaseIOException;
56  import org.apache.hadoop.hbase.HBaseInterfaceAudience;
57  import org.apache.hadoop.hbase.HColumnDescriptor;
58  import org.apache.hadoop.hbase.HConstants;
59  import org.apache.hadoop.hbase.HRegionInfo;
60  import org.apache.hadoop.hbase.HTableDescriptor;
61  import org.apache.hadoop.hbase.MasterNotRunningException;
62  import org.apache.hadoop.hbase.MetaMigrationConvertingToPB;
63  import org.apache.hadoop.hbase.MetaTableAccessor;
64  import org.apache.hadoop.hbase.NamespaceDescriptor;
65  import org.apache.hadoop.hbase.NamespaceNotFoundException;
66  import org.apache.hadoop.hbase.PleaseHoldException;
67  import org.apache.hadoop.hbase.Server;
68  import org.apache.hadoop.hbase.ServerLoad;
69  import org.apache.hadoop.hbase.ServerName;
70  import org.apache.hadoop.hbase.TableDescriptors;
71  import org.apache.hadoop.hbase.TableName;
72  import org.apache.hadoop.hbase.TableNotDisabledException;
73  import org.apache.hadoop.hbase.TableNotFoundException;
74  import org.apache.hadoop.hbase.UnknownRegionException;
75  import org.apache.hadoop.hbase.classification.InterfaceAudience;
76  import org.apache.hadoop.hbase.client.MetaScanner;
77  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
78  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
79  import org.apache.hadoop.hbase.client.Result;
80  import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
81  import org.apache.hadoop.hbase.exceptions.DeserializationException;
82  import org.apache.hadoop.hbase.executor.ExecutorType;
83  import org.apache.hadoop.hbase.ipc.RequestContext;
84  import org.apache.hadoop.hbase.ipc.RpcServer;
85  import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
86  import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
87  import org.apache.hadoop.hbase.master.RegionState.State;
88  import org.apache.hadoop.hbase.master.balancer.BalancerChore;
89  import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
90  import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
91  import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
92  import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
93  import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
94  import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
95  import org.apache.hadoop.hbase.master.handler.DeleteTableHandler;
96  import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
97  import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
98  import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
99  import org.apache.hadoop.hbase.master.handler.ModifyTableHandler;
100 import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
101 import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
102 import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
103 import org.apache.hadoop.hbase.master.handler.TruncateTableHandler;
104 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
105 import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
106 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
107 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
108 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
109 import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
110 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
111 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
112 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
113 import org.apache.hadoop.hbase.regionserver.HRegionServer;
114 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
115 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
116 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
117 import org.apache.hadoop.hbase.replication.regionserver.Replication;
118 import org.apache.hadoop.hbase.security.UserProvider;
119 import org.apache.hadoop.hbase.util.Addressing;
120 import org.apache.hadoop.hbase.util.Bytes;
121 import org.apache.hadoop.hbase.util.CompressionTest;
122 import org.apache.hadoop.hbase.util.ConfigUtil;
123 import org.apache.hadoop.hbase.util.EncryptionTest;
124 import org.apache.hadoop.hbase.util.FSUtils;
125 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
126 import org.apache.hadoop.hbase.util.HasThread;
127 import org.apache.hadoop.hbase.util.Pair;
128 import org.apache.hadoop.hbase.util.Threads;
129 import org.apache.hadoop.hbase.util.VersionInfo;
130 import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
131 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
132 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
133 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
134 import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
135 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
136 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
137 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
138 import org.apache.zookeeper.KeeperException;
139 import org.mortbay.jetty.Connector;
140 import org.mortbay.jetty.nio.SelectChannelConnector;
141 import org.mortbay.jetty.servlet.Context;
142 
143 import com.google.common.annotations.VisibleForTesting;
144 import com.google.common.collect.Maps;
145 import com.google.protobuf.Descriptors;
146 import com.google.protobuf.Service;
147 
148 /**
149  * HMaster is the "master server" for HBase. An HBase cluster has one active
150  * master.  If many masters are started, all compete.  Whichever wins goes on to
151  * run the cluster.  All others park themselves in their constructor until
152  * master or cluster shutdown or until the active master loses its lease in
153  * zookeeper.  Thereafter, all running masters jostle to take over the master role.
154  *
155  * <p>The Master can be asked to shut down the cluster. See {@link #shutdown()}.  In
156  * this case it will tell all regionservers to go down and then wait on them
157  * all reporting in that they are down.  This master will then shut itself down.
158  *
159  * <p>You can also shut down just this master.  Call {@link #stopMaster()}.
160  *
161  * @see org.apache.zookeeper.Watcher
162  */
163 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
164 @SuppressWarnings("deprecation")
165 public class HMaster extends HRegionServer implements MasterServices, Server {
166   private static final Log LOG = LogFactory.getLog(HMaster.class.getName());
167 
168   /**
169    * Protection against a zombie master. Started once the Master accepts the active role and
170    * begins taking over responsibilities. Allows a finite time window before giving up ownership.
171    */
172   private static class InitializationMonitor extends HasThread {
173     /** The amount of time in milliseconds to sleep before checking initialization status. */
174     public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout";
175     public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES);
176 
177     /**
178      * If the timeout expires before initialization has completed, call {@link System#exit(int)}
179      * when true; do nothing otherwise.
180      */
181     public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout";
182     public static final boolean HALT_DEFAULT = false;
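        // Both settings can be overridden in hbase-site.xml; the values below are
        // purely illustrative, not recommendations:
        //   <property>
        //     <name>hbase.master.initializationmonitor.timeout</name>
        //     <value>600000</value>
        //   </property>
        //   <property>
        //     <name>hbase.master.initializationmonitor.haltontimeout</name>
        //     <value>true</value>
        //   </property>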
183 
184     private final HMaster master;
185     private final long timeout;
186     private final boolean haltOnTimeout;
187 
188     /** Creates a Thread that monitors the {@link #isInitialized()} state. */
189     InitializationMonitor(HMaster master) {
190       super("MasterInitializationMonitor");
191       this.master = master;
192       this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT);
193       this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT);
194       this.setDaemon(true);
195     }
196 
197     @Override
198     public void run() {
199       try {
200         while (!master.isStopped() && master.isActiveMaster()) {
201           Thread.sleep(timeout);
202           if (master.isInitialized()) {
203             LOG.debug("Initialization completed within allotted tolerance. Monitor exiting.");
204           } else {
205             LOG.error("Master failed to complete initialization after " + timeout + "ms. Please"
206                 + " consider submitting a bug report including a thread dump of this process.");
207             if (haltOnTimeout) {
208               LOG.error("Zombie Master exiting. Thread dump to stdout");
209               Threads.printThreadInfo(System.out, "Zombie HMaster");
210               System.exit(-1);
211             }
212           }
213         }
214       } catch (InterruptedException ie) {
215         LOG.trace("InitMonitor thread interrupted. Exiting.");
216       }
217     }
218   }
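      // InitializationMonitor is instantiated and started as the "zombieDetector"
      // thread near the top of finishActiveMasterInitialization(MonitoredTask) below.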
219 
220   // MASTER is the name of the webapp and the attribute name used for stuffing this
221   // instance into the web context.
222   public static final String MASTER = "master";
223 
224   // Manager and zk listener for master election
225   private final ActiveMasterManager activeMasterManager;
226   // Region server tracker
227   RegionServerTracker regionServerTracker;
228   // Draining region server tracker
229   private DrainingServerTracker drainingServerTracker;
230   // Tracker for load balancer state
231   LoadBalancerTracker loadBalancerTracker;
232 
233   /** Manager for table namespaces */
234   private TableNamespaceManager tableNamespaceManager;
235 
236   // Metrics for the HMaster
237   final MetricsMaster metricsMaster;
238   // file system manager for the master FS operations
239   private MasterFileSystem fileSystemManager;
240 
241   // server manager to deal with region server info
242   volatile ServerManager serverManager;
243 
244   // manager of assignment nodes in zookeeper
245   AssignmentManager assignmentManager;
246 
247   // buffer for "fatal error" notices from region servers
248   // in the cluster. This is only used for assisting
249   // operations/debugging.
250   MemoryBoundedLogMessageBuffer rsFatals;
251 
252   // flag set after we become the active master (used for testing)
253   private volatile boolean isActiveMaster = false;
254 
255   // flag set after we complete initialization once active,
256   // it is not private since it's used in unit tests
257   volatile boolean initialized = false;
258 
259   // flag set after master services are started,
260   // initialization may have not completed yet.
261   volatile boolean serviceStarted = false;
262 
263   // flag set after we complete assignMeta.
264   private volatile boolean serverShutdownHandlerEnabled = false;
265 
266   LoadBalancer balancer;
267   private BalancerChore balancerChore;
268   private ClusterStatusChore clusterStatusChore;
269   private ClusterStatusPublisher clusterStatusPublisherChore = null;
270 
271   CatalogJanitor catalogJanitorChore;
272   private LogCleaner logCleaner;
273   private HFileCleaner hfileCleaner;
274 
275   MasterCoprocessorHost cpHost;
276 
277   private final boolean preLoadTableDescriptors;
278 
279   // Time stamp for when this hmaster became active
280   private long masterActiveTime;
281 
282   //should we check the compression codec type at master side, default true, HBASE-6370
283   private final boolean masterCheckCompression;
284 
285   //should we check encryption settings at master side, default true
286   private final boolean masterCheckEncryption;
287 
288   Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();
289 
290   // monitor for snapshot of hbase tables
291   SnapshotManager snapshotManager;
292   // monitor for distributed procedures
293   MasterProcedureManagerHost mpmHost;
294 
295   /** flag used in test cases in order to simulate RS failures during master initialization */
296   private volatile boolean initializationBeforeMetaAssignment = false;
297 
298   /** jetty server for master to redirect requests to regionserver infoServer */
299   private org.mortbay.jetty.Server masterJettyServer;
300 
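      // RedirectServlet answers requests that arrive on the master's original info
      // port with a redirect to the regionserver-style info server this
      // HRegionServer subclass hosts. For example (host and ports illustrative), a
      // GET for http://master-host:60010/master-status is redirected to
      // http://master-host:60030/master-status.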
301   public static class RedirectServlet extends HttpServlet {
302     private static final long serialVersionUID = 2894774810058302472L;
303     private static int regionServerInfoPort;
304 
305     @Override
306     public void doGet(HttpServletRequest request,
307         HttpServletResponse response) throws ServletException, IOException {
308       String redirectUrl = request.getScheme() + "://"
309         + request.getServerName() + ":" + regionServerInfoPort
310         + request.getRequestURI();
311       response.sendRedirect(redirectUrl);
312     }
313   }
314 
315   /**
316    * Initializes the HMaster. The steps are as follows:
317    * <p>
318    * <ol>
319    * <li>Initialize the local HRegionServer
320    * <li>Start the ActiveMasterManager.
321    * </ol>
322    * <p>
323    * Remaining steps of initialization occur in
324    * {@link #finishActiveMasterInitialization(MonitoredTask)} after
325    * the master becomes the active one.
326    *
327    * @throws InterruptedException
328    * @throws KeeperException
329    * @throws IOException
330    */
331   public HMaster(final Configuration conf, CoordinatedStateManager csm)
332       throws IOException, KeeperException, InterruptedException {
333     super(conf, csm);
334     this.rsFatals = new MemoryBoundedLogMessageBuffer(
335       conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));
336 
337     LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
338         ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));
339 
340     Replication.decorateMasterConfiguration(this.conf);
341 
342     // Hack! Maps DFSClient => Master for logs.  HDFS made this
343     // config param for task trackers, but we can piggyback off of it.
344     if (this.conf.get("mapreduce.task.attempt.id") == null) {
345       this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
346     }
347 
348     // should we check the compression codec type at master side, default true, HBASE-6370
349     this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);
350 
351     // should we check encryption settings at master side, default true
352     this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);
353 
354     this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));
355 
356     // preload table descriptor at startup
357     this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);
358 
359     // Do we publish the status?
360 
361     boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
362         HConstants.STATUS_PUBLISHED_DEFAULT);
363     Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
364         conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
365             ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
366             ClusterStatusPublisher.Publisher.class);
367 
368     if (shouldPublish) {
369       if (publisherClass == null) {
370         LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
371             ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
372             " is not set - not publishing status");
373       } else {
374         clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
375         Threads.setDaemonThreadRunning(clusterStatusPublisherChore.getThread());
376       }
377     }
378     activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
379     int infoPort = putUpJettyServer();
380     startActiveMasterManager(infoPort);
381   }
382 
383   // Returns the actual infoPort; -1 means the info server is disabled.
384   private int putUpJettyServer() throws IOException {
385     if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
386       return -1;
387     }
388     int infoPort = conf.getInt("hbase.master.info.port.orig",
389       HConstants.DEFAULT_MASTER_INFOPORT);
390     // -1 is for disabling info server, so no redirecting
391     if (infoPort < 0 || infoServer == null) {
392       return -1;
393     }
394     String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
395     if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
396       String msg =
397           "Failed to start redirecting jetty server. Address " + addr
398               + " does not belong to this host. Correct configuration parameter: "
399               + "hbase.master.info.bindAddress";
400       LOG.error(msg);
401       throw new IOException(msg);
402     }
403 
404     RedirectServlet.regionServerInfoPort = infoServer.getPort();
405     if (RedirectServlet.regionServerInfoPort == infoPort) {
406       return infoPort;
407     }
408     masterJettyServer = new org.mortbay.jetty.Server();
409     Connector connector = new SelectChannelConnector();
410     connector.setHost(addr);
411     connector.setPort(infoPort);
412     masterJettyServer.addConnector(connector);
413     masterJettyServer.setStopAtShutdown(true);
414     Context context = new Context(masterJettyServer, "/", Context.NO_SESSIONS);
415     context.addServlet(RedirectServlet.class, "/*");
416     try {
417       masterJettyServer.start();
418     } catch (Exception e) {
419       throw new IOException("Failed to start redirecting jetty server", e);
420     }
421     return connector.getLocalPort();
422   }
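      // Configuration consulted by putUpJettyServer() above, a sketch assuming stock
      // defaults (values illustrative):
      //   hbase.master.infoserver.redirect = true     enables/disables the redirect
      //   hbase.master.info.port.orig      = <port>   defaults to HConstants.DEFAULT_MASTER_INFOPORT
      //   hbase.master.info.bindAddress    = 0.0.0.0  must resolve to a local address
      // If the redirect port equals the info server's own port, no extra jetty
      // instance is started and that port is returned as-is.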
423 
424   /**
425    * For compatibility, if login with regionserver credentials fails, try the master ones
426    */
427   @Override
428   protected void login(UserProvider user, String host) throws IOException {
429     try {
430       super.login(user, host);
431     } catch (IOException ie) {
432       user.login("hbase.master.keytab.file",
433         "hbase.master.kerberos.principal", host);
434     }
435   }
436 
437   /**
438    * If configured to put regions on the active master,
439    * wait till this master becomes the active one.
440    * Otherwise, loop till the server is stopped or aborted.
441    */
442   @Override
443   protected void waitForMasterActive(){
444     boolean tablesOnMaster = BaseLoadBalancer.tablesOnMaster(conf);
445     while (!(tablesOnMaster && isActiveMaster)
446         && !isStopped() && !isAborted()) {
447       sleeper.sleep();
448     }
449   }
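      // Whether regions are put on the active master is governed by the
      // BaseLoadBalancer "tables on master" setting consulted above (the
      // hbase.balancer.tablesOnMaster key in this line of development).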
450 
451   @VisibleForTesting
452   public MasterRpcServices getMasterRpcServices() {
453     return (MasterRpcServices)rpcServices;
454   }
455 
456   public boolean balanceSwitch(final boolean b) throws IOException {
457     return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
458   }
459 
460   @Override
461   protected String getProcessName() {
462     return MASTER;
463   }
464 
465   @Override
466   protected boolean canCreateBaseZNode() {
467     return true;
468   }
469 
470   @Override
471   protected boolean canUpdateTableDescriptor() {
472     return true;
473   }
474 
475   @Override
476   protected RSRpcServices createRpcServices() throws IOException {
477     return new MasterRpcServices(this);
478   }
479 
480   @Override
481   protected void configureInfoServer() {
482     infoServer.addServlet("master-status", "/master-status", MasterStatusServlet.class);
483     infoServer.setAttribute(MASTER, this);
484     if (BaseLoadBalancer.tablesOnMaster(conf)) {
485       super.configureInfoServer();
486     }
487   }
488 
489   @Override
490   protected Class<? extends HttpServlet> getDumpServlet() {
491     return MasterDumpServlet.class;
492   }
493 
494   /**
495    * Emit the HMaster metrics, such as region-in-transition metrics.
496    * Surrounded in a try block just to be sure a metrics failure doesn't abort the HMaster.
497    */
498   @Override
499   protected void doMetrics() {
500     try {
501       if (assignmentManager != null) {
502         assignmentManager.updateRegionsInTransitionMetrics();
503       }
504     } catch (Throwable e) {
505       LOG.error("Couldn't update metrics: " + e.getMessage());
506     }
507   }
508 
509   MetricsMaster getMasterMetrics() {
510     return metricsMaster;
511   }
512 
513   /**
514    * Initialize all ZK based system trackers.
515    * @throws IOException
516    * @throws InterruptedException
517    * @throws KeeperException
518    * @throws CoordinatedStateException
519    */
520   void initializeZKBasedSystemTrackers() throws IOException,
521       InterruptedException, KeeperException, CoordinatedStateException {
522     this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
523     this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
524     this.loadBalancerTracker.start();
525     this.assignmentManager = new AssignmentManager(this, serverManager,
526       this.balancer, this.service, this.metricsMaster,
527       this.tableLockManager);
528     zooKeeper.registerListenerFirst(assignmentManager);
529 
530     this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
531         this.serverManager);
532     this.regionServerTracker.start();
533 
534     this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
535       this.serverManager);
536     this.drainingServerTracker.start();
537 
538     // Set the cluster as up.  If new RSs, they'll be waiting on this before
539     // going ahead with their startup.
540     boolean wasUp = this.clusterStatusTracker.isClusterUp();
541     if (!wasUp) this.clusterStatusTracker.setClusterUp();
542 
543     LOG.info("Server active/primary master=" + this.serverName +
544         ", sessionid=0x" +
545         Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
546         ", setting cluster-up flag (Was=" + wasUp + ")");
547 
548     // create/initialize the snapshot manager and other procedure managers
549     this.snapshotManager = new SnapshotManager();
550     this.mpmHost = new MasterProcedureManagerHost();
551     this.mpmHost.register(this.snapshotManager);
552     this.mpmHost.register(new MasterFlushTableProcedureManager());
553     this.mpmHost.loadProcedures(conf);
554     this.mpmHost.initialize(this, this.metricsMaster);
555   }
556 
557   /**
558    * Finish initialization of HMaster after becoming the primary master.
559    *
560    * <ol>
561    * <li>Initialize master components - file system manager, server manager,
562    *     assignment manager, region server tracker, etc</li>
563    * <li>Start necessary service threads - balancer, catalog janitor,
564    *     executor services, etc</li>
565    * <li>Set cluster as UP in ZooKeeper</li>
566    * <li>Wait for RegionServers to check-in</li>
567    * <li>Split logs and perform data recovery, if necessary</li>
568    * <li>Ensure assignment of meta/namespace regions</li>
569    * <li>Handle either fresh cluster start or master failover</li>
570    * </ol>
571    *
572    * @throws IOException
573    * @throws InterruptedException
574    * @throws KeeperException
575    * @throws CoordinatedStateException
576    */
577   private void finishActiveMasterInitialization(MonitoredTask status)
578       throws IOException, InterruptedException, KeeperException, CoordinatedStateException {
579 
580     isActiveMaster = true;
581     Thread zombieDetector = new Thread(new InitializationMonitor(this));
582     zombieDetector.start();
583 
584     /*
585      * We are active master now... go initialize components we need to run.
586      * Note, there may be dross in zk from previous runs; it'll get addressed
587      * below after we determine if cluster startup or failover.
588      */
589 
590     status.setStatus("Initializing Master file system");
591 
592     this.masterActiveTime = System.currentTimeMillis();
593     // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
594     this.fileSystemManager = new MasterFileSystem(this, this);
595 
596     // enable table descriptors cache
597     this.tableDescriptors.setCacheOn();
598 
599     // warm-up HTDs cache on master initialization
600     if (preLoadTableDescriptors) {
601       status.setStatus("Pre-loading table descriptors");
602       this.tableDescriptors.getAll();
603     }
604 
605     // publish cluster ID
606     status.setStatus("Publishing Cluster ID in ZooKeeper");
607     ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
608     this.serverManager = createServerManager(this, this);
609 
610     setupClusterConnection();
611 
612     // Invalidate all write locks held previously
613     this.tableLockManager.reapWriteLocks();
614 
615     status.setStatus("Initializing ZK system trackers");
616     initializeZKBasedSystemTrackers();
617 
618     // initialize master side coprocessors before we start handling requests
619     status.setStatus("Initializing master coprocessors");
620     this.cpHost = new MasterCoprocessorHost(this, this.conf);
621 
622     // start up all service threads.
623     status.setStatus("Initializing master service threads");
624     startServiceThreads();
625 
626     // Wake up this server to check in
627     sleeper.skipSleepCycle();
628 
629     // Wait for region servers to report in
630     this.serverManager.waitForRegionServers(status);
631     // Check zk for region servers that are up but didn't register
632     for (ServerName sn: this.regionServerTracker.getOnlineServers()) {
633       // The isServerOnline check is opportunistic; correctness is handled inside
634       if (!this.serverManager.isServerOnline(sn)
635           && serverManager.checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) {
636         LOG.info("Registered server found up in zk but which has not yet reported in: " + sn);
637       }
638     }
639 
640     // get a list of previously failed RSs which need log splitting work
641     // we recover hbase:meta region servers inside master initialization and
642     // handle other failed servers in SSH in order to start up the master node ASAP
643     Set<ServerName> previouslyFailedServers = this.fileSystemManager
644         .getFailedServersFromLogFolders();
645 
646     // remove stale recovering regions from previous run
647     this.fileSystemManager.removeStaleRecoveringRegionsFromZK(previouslyFailedServers);
648 
649     // log splitting for hbase:meta server
650     ServerName oldMetaServerLocation = metaTableLocator.getMetaRegionLocation(this.getZooKeeper());
651     if (oldMetaServerLocation != null && previouslyFailedServers.contains(oldMetaServerLocation)) {
652       splitMetaLogBeforeAssignment(oldMetaServerLocation);
653       // Note: we can't remove oldMetaServerLocation from the previouslyFailedServers list because it
654       // may also host user regions
655     }
656     Set<ServerName> previouslyFailedMetaRSs = getPreviouslyFailedMetaServersFromZK();
657     // need to use the union of previouslyFailedMetaRSs recorded in ZK and previouslyFailedServers
658     // instead of previouslyFailedMetaRSs alone to address the following two situations:
659     // 1) the chained failure situation (recovery failed multiple times in a row).
660     // 2) the master gets killed right before it could delete the recovering hbase:meta from ZK
661     // while the same server still has non-meta wals to be replayed so that
662     // removeStaleRecoveringRegionsFromZK can't delete the stale hbase:meta region
663     // Passing more servers into splitMetaLog is all right. If a server doesn't have a hbase:meta
664     // wal, it is a no-op for that server.
665     previouslyFailedMetaRSs.addAll(previouslyFailedServers);
666 
667     this.initializationBeforeMetaAssignment = true;
668 
669     // Wait for regionserver to finish initialization.
670     if (BaseLoadBalancer.tablesOnMaster(conf)) {
671       waitForServerOnline();
672     }
673 
674     //initialize load balancer
675     this.balancer.setClusterStatus(getClusterStatus());
676     this.balancer.setMasterServices(this);
677     this.balancer.initialize();
678 
679     // Check if master is shutting down because of some issue
680     // in initializing the regionserver or the balancer.
681     if (isStopped()) return;
682 
683     // Make sure meta assigned before proceeding.
684     status.setStatus("Assigning Meta Region");
685     assignMeta(status, previouslyFailedMetaRSs);
686     // check if master is shutting down because the above assignMeta could return even if
687     // hbase:meta isn't assigned when master is shutting down
688     if (isStopped()) return;
689 
690     status.setStatus("Submitting log splitting work for previously failed region servers");
691     // Master has recovered hbase:meta region server and we put
692     // other failed region servers in a queue to be handled later by SSH
693     for (ServerName tmpServer : previouslyFailedServers) {
694       this.serverManager.processDeadServer(tmpServer, true);
695     }
696 
697     // Update meta with new PB serialization if required, i.e. migrate all HRI to PB serialization
698     // in meta. This must happen before we assign all user regions or else the assignment will
699     // fail.
700     if (this.conf.getBoolean("hbase.MetaMigrationConvertingToPB", true)) {
701       MetaMigrationConvertingToPB.updateMetaIfNecessary(this);
702     }
703 
704     // Fix up assignment manager status
705     status.setStatus("Starting assignment manager");
706     this.assignmentManager.joinCluster();
707 
708     //set cluster status again after user regions are assigned
709     this.balancer.setClusterStatus(getClusterStatus());
710 
711     // Start balancer and meta catalog janitor after meta and regions have
712     // been assigned.
713     status.setStatus("Starting balancer and catalog janitor");
714     this.clusterStatusChore = new ClusterStatusChore(this, balancer);
715     Threads.setDaemonThreadRunning(clusterStatusChore.getThread());
716     this.balancerChore = new BalancerChore(this);
717     Threads.setDaemonThreadRunning(balancerChore.getThread());
718     this.catalogJanitorChore = new CatalogJanitor(this, this);
719     Threads.setDaemonThreadRunning(catalogJanitorChore.getThread());
720 
721     status.setStatus("Starting namespace manager");
722     initNamespace();
723 
724     if (this.cpHost != null) {
725       try {
726         this.cpHost.preMasterInitialization();
727       } catch (IOException e) {
728         LOG.error("Coprocessor preMasterInitialization() hook failed", e);
729       }
730     }
731 
732     status.markComplete("Initialization successful");
733     LOG.info("Master has completed initialization");
734     configurationManager.registerObserver(this.balancer);
735     initialized = true;
736     // clear the dead servers with the same host name and port as an online server, because we
737     // do not remove a dead server with the same hostname and port as an rs that is trying to
738     // check in before master initialization. See HBASE-5916.
739     this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();
740 
741     if (this.cpHost != null) {
742       // don't let cp initialization errors kill the master
743       try {
744         this.cpHost.postStartMaster();
745       } catch (IOException ioe) {
746         LOG.error("Coprocessor postStartMaster() hook failed", ioe);
747       }
748     }
749 
750     zombieDetector.interrupt();
751   }
752 
753   /**
754    * Create a {@link ServerManager} instance.
755    * @param master the server the manager operates for
756    * @param services the master services the manager can call into
757    * @return An instance of {@link ServerManager}
758    * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
759    * @throws IOException
760    */
761   ServerManager createServerManager(final Server master,
762       final MasterServices services)
763   throws IOException {
764     // We put this out here in a method so we can do a Mockito.spy and stub it out
765     // w/ a mocked up ServerManager.
766     return new ServerManager(master, services);
767   }
768 
769   /**
770    * Check <code>hbase:meta</code> is assigned. If not, assign it.
771    * @param status MonitoredTask
772    * @param previouslyFailedMetaRSs
773    * @throws InterruptedException
774    * @throws IOException
775    * @throws KeeperException
776    */
777   void assignMeta(MonitoredTask status, Set<ServerName> previouslyFailedMetaRSs)
778       throws InterruptedException, IOException, KeeperException {
779     // Work on meta region
780     int assigned = 0;
781     long timeout = this.conf.getLong("hbase.catalog.verification.timeout", 1000);
782     status.setStatus("Assigning hbase:meta region");
783     // Get current meta state from zk.
784     RegionStates regionStates = assignmentManager.getRegionStates();
785     RegionState metaState = MetaTableLocator.getMetaRegionState(getZooKeeper());
786     ServerName currentMetaServer = metaState.getServerName();
787     if (!ConfigUtil.useZKForAssignment(conf)) {
788       regionStates.createRegionState(HRegionInfo.FIRST_META_REGIONINFO, metaState.getState(),
789         currentMetaServer, null);
790     } else {
791       regionStates.createRegionState(HRegionInfo.FIRST_META_REGIONINFO);
792     }
793     boolean rit = this.assignmentManager.
794       processRegionInTransitionAndBlockUntilAssigned(HRegionInfo.FIRST_META_REGIONINFO);
795     boolean metaRegionLocation = metaTableLocator.verifyMetaRegionLocation(
796       this.getConnection(), this.getZooKeeper(), timeout);
797     if (!metaRegionLocation || !metaState.isOpened()) {
798       // Meta location is not verified. It should be in transition, or offline.
799       // We will wait for it to be assigned in enableServerShutdownHandler below.
800       assigned++;
801       if (!ConfigUtil.useZKForAssignment(conf)) {
802         assignMetaZkLess(regionStates, metaState, timeout, previouslyFailedMetaRSs);
803       } else if (!rit) {
804         // Assign meta since not already in transition
805         if (currentMetaServer != null) {
806           // If the meta server is not known to be dead or online,
807           // just split the meta log, and don't expire it since this
808           // could be a full cluster restart. Otherwise, we will think
809           // this is a failover and lose previous region locations.
810           // If it is really a failover case, AM will find out in rebuilding
811           // user regions. Otherwise, we are good since all logs are split
812           // or known to be replayed before user regions are assigned.
813           if (serverManager.isServerOnline(currentMetaServer)) {
814             LOG.info("Forcing expire of " + currentMetaServer);
815             serverManager.expireServer(currentMetaServer);
816           }
817           splitMetaLogBeforeAssignment(currentMetaServer);
818           previouslyFailedMetaRSs.add(currentMetaServer);
819         }
820         assignmentManager.assignMeta();
821       }
822     } else {
823       // Region already assigned. We didn't assign it. Add to in-memory state.
824       regionStates.updateRegionState(
825         HRegionInfo.FIRST_META_REGIONINFO, State.OPEN, currentMetaServer);
826       this.assignmentManager.regionOnline(
827         HRegionInfo.FIRST_META_REGIONINFO, currentMetaServer);
828     }
829 
830     enableMeta(TableName.META_TABLE_NAME);
831 
832     if ((RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode())
833         && (!previouslyFailedMetaRSs.isEmpty())) {
834       // log replay mode requires that a new hbase:meta RS be assigned first
835       status.setStatus("replaying log for Meta Region");
836       this.fileSystemManager.splitMetaLog(previouslyFailedMetaRSs);
837     }
838 
839     // Make sure a hbase:meta location is set. We need to enable SSH here since
840     // if the meta region server has died at this time, we need it to be re-assigned
841     // by SSH so that system tables can be assigned.
842     // No need to wait on meta when assigned == 0, i.e. when meta was just verified.
843     enableServerShutdownHandler(assigned != 0);
844 
845     LOG.info("hbase:meta assigned=" + assigned + ", rit=" + rit +
846       ", location=" + metaTableLocator.getMetaRegionLocation(this.getZooKeeper()));
847     status.setStatus("META assigned.");
848   }
849 
850   private void assignMetaZkLess(RegionStates regionStates, RegionState regionState, long timeout,
851       Set<ServerName> previouslyFailedRs) throws IOException, KeeperException {
852     ServerName currentServer = regionState.getServerName();
853     if (serverManager.isServerOnline(currentServer)) {
854       LOG.info("Meta was in transition on " + currentServer);
855       assignmentManager.processRegionInTransitionZkLess();
856     } else {
857       if (currentServer != null) {
858         splitMetaLogBeforeAssignment(currentServer);
859         regionStates.logSplit(HRegionInfo.FIRST_META_REGIONINFO);
860         previouslyFailedRs.add(currentServer);
861       }
862       LOG.info("Re-assigning hbase:meta, it was on " + currentServer);
863       regionStates.updateRegionState(HRegionInfo.FIRST_META_REGIONINFO, State.OFFLINE);
864       assignmentManager.assignMeta();
865     }
866   }
867 
868   void initNamespace() throws IOException {
869     //create namespace manager
870     tableNamespaceManager = new TableNamespaceManager(this);
871     tableNamespaceManager.start();
872   }
873 
874   boolean isCatalogJanitorEnabled() {
875     return catalogJanitorChore != null ?
876       catalogJanitorChore.getEnabled() : false;
877   }
878 
879   private void splitMetaLogBeforeAssignment(ServerName currentMetaServer) throws IOException {
880     if (RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode()) {
881       // In log replay mode, we mark hbase:meta region as recovering in ZK
882       Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
883       regions.add(HRegionInfo.FIRST_META_REGIONINFO);
884       this.fileSystemManager.prepareLogReplay(currentMetaServer, regions);
885     } else {
886       // In recovered.edits mode: create recovered edits file for hbase:meta server
887       this.fileSystemManager.splitMetaLog(currentMetaServer);
888     }
889   }
890 
891   private void enableServerShutdownHandler(
892       final boolean waitForMeta) throws IOException, InterruptedException {
893     // If ServerShutdownHandler is disabled, we enable it and expire those dead
894     // but not expired servers. This is required so that if meta is being assigned to
895     // a server which dies after assignMeta starts assignment,
896     // SSH can re-assign it. Otherwise, we will be
897     // stuck here waiting forever if waitForMeta is specified.
898     if (!serverShutdownHandlerEnabled) {
899       serverShutdownHandlerEnabled = true;
900       this.serverManager.processQueuedDeadServers();
901     }
902 
903     if (waitForMeta) {
904       metaTableLocator.waitMetaRegionLocation(this.getZooKeeper());
905       // Above check waits for general meta availability but this does not
906       // guarantee that the transition has completed
907       this.assignmentManager.waitForAssignment(HRegionInfo.FIRST_META_REGIONINFO);
908     }
909   }
910 
911   private void enableMeta(TableName metaTableName) {
912     if (!this.assignmentManager.getTableStateManager().isTableState(metaTableName,
913         ZooKeeperProtos.Table.State.ENABLED)) {
914       this.assignmentManager.setEnabledTable(metaTableName);
915     }
916   }
917 
918   /**
919    * Returns the set of region server names recorded under the hbase:meta recovering-region ZK node
920    * @return Set of meta server names which were recorded in ZK
921    * @throws KeeperException
922    */
923   private Set<ServerName> getPreviouslyFailedMetaServersFromZK() throws KeeperException {
924     Set<ServerName> result = new HashSet<ServerName>();
925     String metaRecoveringZNode = ZKUtil.joinZNode(zooKeeper.recoveringRegionsZNode,
926       HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
927     List<String> regionFailedServers = ZKUtil.listChildrenNoWatch(zooKeeper, metaRecoveringZNode);
928     if (regionFailedServers == null) return result;
929 
930     for(String failedServer : regionFailedServers) {
931       ServerName server = ServerName.parseServerName(failedServer);
932       result.add(server);
933     }
934     return result;
935   }
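      // For getPreviouslyFailedMetaServersFromZK() above: the children read there
      // live under a znode of the form (path illustrative, assuming the default
      // /hbase base znode):
      //   /hbase/recovering-regions/1588230740/host1.example.com,60020,1400000000000
      // where 1588230740 is the encoded name of hbase:meta's single region and each
      // child names a server whose WALs were being replayed for it.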
936 
937   @Override
938   public TableDescriptors getTableDescriptors() {
939     return this.tableDescriptors;
940   }
941 
942   @Override
943   public ServerManager getServerManager() {
944     return this.serverManager;
945   }
946 
947   @Override
948   public MasterFileSystem getMasterFileSystem() {
949     return this.fileSystemManager;
950   }
951 
952   /*
953    * Start up all services. If any of these threads gets an unhandled exception
954    * then they just die with a logged message.  This should be fine because
955    * in general, we do not expect the master to get such unhandled exceptions
956    * as OOMEs; it should be lightly loaded. See what HRegionServer does if you
957    * need to install an unexpected exception handler.
958    */
959   private void startServiceThreads() throws IOException {
960    // Start the executor service pools
961    this.service.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
962       conf.getInt("hbase.master.executor.openregion.threads", 5));
963    this.service.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
964       conf.getInt("hbase.master.executor.closeregion.threads", 5));
965    this.service.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
966       conf.getInt("hbase.master.executor.serverops.threads", 5));
967    this.service.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
968       conf.getInt("hbase.master.executor.serverops.threads", 5));
969    this.service.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
970       conf.getInt("hbase.master.executor.logreplayops.threads", 10));
971 
972    // We depend on there being only one instance of this executor running
973    // at a time.  To allow concurrency, we would need fencing of enable/disable of
974    // tables.
975    // Any time you change this maxThreads to > 1, please see the comment at
976    // AccessController#postCreateTableHandler
977    this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
978 
979    // Start log cleaner thread
980    int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
981    this.logCleaner =
982       new LogCleaner(cleanerInterval,
983          this, conf, getMasterFileSystem().getFileSystem(),
984          getMasterFileSystem().getOldLogDir());
985    Threads.setDaemonThreadRunning(logCleaner.getThread(),
986      getServerName().toShortString() + ".oldLogCleaner");
987 
988    //start the hfile archive cleaner thread
989     Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
990     this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
991         .getFileSystem(), archiveDir);
992     Threads.setDaemonThreadRunning(hfileCleaner.getThread(),
993       getServerName().toShortString() + ".archivedHFileCleaner");
994 
995     serviceStarted = true;
996     if (LOG.isTraceEnabled()) {
997       LOG.trace("Started service threads");
998     }
999   }
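      // The pool sizes and the cleaner interval used above are all tunable in
      // hbase-site.xml, e.g. (values illustrative):
      //   hbase.master.executor.openregion.threads = 5
      //   hbase.master.cleaner.interval            = 60000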
1000 
1001   @Override
1002   protected void stopServiceThreads() {
1003     if (masterJettyServer != null) {
1004       LOG.info("Stopping master jetty server");
1005       try {
1006         masterJettyServer.stop();
1007       } catch (Exception e) {
1008         LOG.error("Failed to stop master jetty server", e);
1009       }
1010     }
1011     super.stopServiceThreads();
1012     stopChores();
1013     // Wait for all the remaining region servers to report in IFF we were
1014     // running a cluster shutdown AND we were NOT aborting.
1015     if (!isAborted() && this.serverManager != null &&
1016         this.serverManager.isClusterShutdown()) {
1017       this.serverManager.letRegionServersShutdown();
1018     }
1019     if (LOG.isDebugEnabled()) {
1020       LOG.debug("Stopping service threads");
1021     }
1022     // Clean up and close up shop
1023     if (this.logCleaner != null) this.logCleaner.interrupt();
1024     if (this.hfileCleaner != null) this.hfileCleaner.interrupt();
1025     if (this.activeMasterManager != null) this.activeMasterManager.stop();
1026     if (this.serverManager != null) this.serverManager.stop();
1027     if (this.assignmentManager != null) this.assignmentManager.stop();
1028     if (this.fileSystemManager != null) this.fileSystemManager.stop();
1029     if (this.mpmHost != null) this.mpmHost.stop("server shutting down.");
1030   }
1031 
1032   private void stopChores() {
1033     if (this.balancerChore != null) {
1034       this.balancerChore.interrupt();
1035     }
1036     if (this.clusterStatusChore != null) {
1037       this.clusterStatusChore.interrupt();
1038     }
1039     if (this.catalogJanitorChore != null) {
1040       this.catalogJanitorChore.interrupt();
1041     }
1042     if (this.clusterStatusPublisherChore != null) {
1043       clusterStatusPublisherChore.interrupt();
1044     }
1045   }
1046 
1047   /**
1048    * @return the remote side's InetAddress
1049    * @throws UnknownHostException
1050    */
1051   InetAddress getRemoteInetAddress(final int port,
1052       final long serverStartCode) throws UnknownHostException {
1053     // Do it out here in its own little method so we can fake an address when
1054     // mocking up in tests.
1055     InetAddress ia = RpcServer.getRemoteIp();
1056 
1057     // The call could be from the local regionserver,
1058     // in which case, there is no remote address.
1059     if (ia == null && serverStartCode == startcode) {
1060       InetSocketAddress isa = rpcServices.getSocketAddress();
1061       if (isa != null && isa.getPort() == port) {
1062         ia = isa.getAddress();
1063       }
1064     }
1065     return ia;
1066   }
1067 
1068   /**
1069    * @return Maximum time in milliseconds we should run the balancer for
1070    */
1071   private int getBalancerCutoffTime() {
1072     int balancerCutoffTime =
1073       getConfiguration().getInt("hbase.balancer.max.balancing", -1);
1074     if (balancerCutoffTime == -1) {
1075       // No time period set so create one
1076       int balancerPeriod =
1077         getConfiguration().getInt("hbase.balancer.period", 300000);
1078       balancerCutoffTime = balancerPeriod;
1079       // If nonsense period, set it to balancerPeriod
1080       if (balancerCutoffTime <= 0) balancerCutoffTime = balancerPeriod;
1081     }
1082     return balancerCutoffTime;
1083   }
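       // Worked example for getBalancerCutoffTime() above: with
       // hbase.balancer.max.balancing unset (-1) and hbase.balancer.period left at
       // its 300000 ms default, the cutoff resolves to 300000 ms, i.e. a single
       // balancer run may keep executing region plans for up to five minutes.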
1084 
1085   public boolean balance() throws IOException {
1086     // if master not initialized, don't run balancer.
1087     if (!this.initialized) {
1088       LOG.debug("Master has not been initialized, don't run balancer.");
1089       return false;
1090     }
1091     // Do this call outside of synchronized block.
1092     int maximumBalanceTime = getBalancerCutoffTime();
1093     synchronized (this.balancer) {
1094       // If the balancer switch is off, don't run the balancer.
1095       if (!this.loadBalancerTracker.isBalancerOn()) return false;
1096       // Only allow one balance run at a time.
1097       if (this.assignmentManager.getRegionStates().isRegionsInTransition()) {
1098         Map<String, RegionState> regionsInTransition =
1099           this.assignmentManager.getRegionStates().getRegionsInTransition();
1100         LOG.debug("Not running balancer because " + regionsInTransition.size() +
1101           " region(s) in transition: " + org.apache.commons.lang.StringUtils.
1102             abbreviate(regionsInTransition.toString(), 256));
1103         return false;
1104       }
1105       if (this.serverManager.areDeadServersInProgress()) {
1106         LOG.debug("Not running balancer because processing dead regionserver(s): " +
1107           this.serverManager.getDeadServers());
1108         return false;
1109       }
1110 
1111       if (this.cpHost != null) {
1112         try {
1113           if (this.cpHost.preBalance()) {
1114             LOG.debug("Coprocessor bypassing balancer request");
1115             return false;
1116           }
1117         } catch (IOException ioe) {
1118           LOG.error("Error invoking master coprocessor preBalance()", ioe);
1119           return false;
1120         }
1121       }
1122 
1123       Map<TableName, Map<ServerName, List<HRegionInfo>>> assignmentsByTable =
1124         this.assignmentManager.getRegionStates().getAssignmentsByTable();
1125 
1126       List<RegionPlan> plans = new ArrayList<RegionPlan>();
1127       //Give the balancer the current cluster state.
1128       this.balancer.setClusterStatus(getClusterStatus());
1129       for (Map<ServerName, List<HRegionInfo>> assignments : assignmentsByTable.values()) {
1130         List<RegionPlan> partialPlans = this.balancer.balanceCluster(assignments);
1131         if (partialPlans != null) plans.addAll(partialPlans);
1132       }
1133       long cutoffTime = System.currentTimeMillis() + maximumBalanceTime;
1134       int rpCount = 0;  // number of RegionPlans balanced so far
1135       long totalRegPlanExecTime = 0;
1136       if (plans != null && !plans.isEmpty()) {
1137         for (RegionPlan plan: plans) {
1138           LOG.info("balance " + plan);
1139           long balStartTime = System.currentTimeMillis();
1140           //TODO: bulk assign
1141           this.assignmentManager.balance(plan);
1142           totalRegPlanExecTime += System.currentTimeMillis()-balStartTime;
1143           rpCount++;
1144           if (rpCount < plans.size() &&
1145               // if performing next balance exceeds cutoff time, exit the loop
1146               (System.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) {
1147             //TODO: After balance, there should not be a cutoff time (keeping it as a safety net for now)
1148             LOG.debug("No more balancing till next balance run; maximumBalanceTime=" +
1149               maximumBalanceTime);
1150             break;
1151           }
1152         }
1153       }
1154       if (this.cpHost != null) {
1155         try {
1156           this.cpHost.postBalance(rpCount < plans.size() ? plans.subList(0, rpCount) : plans);
1157         } catch (IOException ioe) {
1158           // balancing already succeeded so don't change the result
1159           LOG.error("Error invoking master coprocessor postBalance()", ioe);
1160         }
1161       }
1162     }
1163     // If LoadBalancer did not generate any plans, it means the cluster is already balanced.
1164     // Return true indicating a success.
1165     return true;
1166   }
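       // balance() above is normally driven on a period by the BalancerChore started
       // during master initialization; operators can also trigger a run on demand,
       // e.g. through the HBase shell's `balancer` command.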
1167 
1168   /**
1169    * @return Client info for use as prefix on an audit log string; who did an action
1170    */
1171   String getClientIdAuditPrefix() {
1172     return "Client=" + RequestContext.getRequestUserName() + "/" +
1173       RequestContext.get().getRemoteAddress();
1174   }
1175 
1176   /**
1177    * Switch for the background CatalogJanitor thread.
1178    * Used for testing.  The thread will continue to run; it will just be a no-op
1179    * if disabled.
1180    * @param b If false, the catalog janitor won't do anything.
1181    */
1182   public void setCatalogJanitorEnabled(final boolean b) {
1183     this.catalogJanitorChore.setEnabled(b);
1184   }
1185 
1186   @Override
1187   public void dispatchMergingRegions(final HRegionInfo region_a,
1188       final HRegionInfo region_b, final boolean forcible) throws IOException {
1189     checkInitialized();
1190     this.service.submit(new DispatchMergingRegionHandler(this,
1191         this.catalogJanitorChore, region_a, region_b, forcible));
1192   }
1193 
1194   void move(final byte[] encodedRegionName,
1195       final byte[] destServerName) throws HBaseIOException {
1196     RegionState regionState = assignmentManager.getRegionStates().
1197       getRegionState(Bytes.toString(encodedRegionName));
1198     if (regionState == null) {
1199       throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
1200     }
1201 
1202     HRegionInfo hri = regionState.getRegion();
1203     ServerName dest;
1204     if (destServerName == null || destServerName.length == 0) {
1205       LOG.info("Passed destination servername is null/empty so " +
1206         "choosing a server at random");
1207       final List<ServerName> destServers = this.serverManager.createDestinationServersList(
1208         regionState.getServerName());
1209       dest = balancer.randomAssignment(hri, destServers);
1210       if (dest == null) {
1211         LOG.debug("Unable to determine a plan to assign " + hri);
1212         return;
1213       }
1214     } else {
1215       dest = ServerName.valueOf(Bytes.toString(destServerName));
1216       if (dest.equals(serverName) && balancer instanceof BaseLoadBalancer
1217           && !((BaseLoadBalancer)balancer).shouldBeOnMaster(hri)) {
1218         // To avoid unnecessary region moving later by the balancer, don't put user
1219         // regions on the master. Regions on the master could be put on another region
1220         // server intentionally by a test, however.
1221         LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
1222           + " to avoid unnecessary region moving later by load balancer,"
1223           + " because it should not be on master");
1224         return;
1225       }
1226     }
1227 
1228     if (dest.equals(regionState.getServerName())) {
1229       LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
1230         + " because region already assigned to the same server " + dest + ".");
1231       return;
1232     }
1233 
1234     // Now we can do the move
1235     RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest);
1236 
1237     try {
1238       checkInitialized();
1239       if (this.cpHost != null) {
1240         if (this.cpHost.preMove(hri, rp.getSource(), rp.getDestination())) {
1241           return;
1242         }
1243       }
1244       LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
1245       this.assignmentManager.balance(rp);
1246       if (this.cpHost != null) {
1247         this.cpHost.postMove(hri, rp.getSource(), rp.getDestination());
1248       }
1249     } catch (IOException ioe) {
1250       if (ioe instanceof HBaseIOException) {
1251         throw (HBaseIOException)ioe;
1252       }
1253       throw new HBaseIOException(ioe);
1254     }
1255   }
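
  // Illustrative sketch (not part of the original source): how a client of
  // this era would typically drive the move() path above via HBaseAdmin.
  // The region server name below is hypothetical.
  //
  //   HBaseAdmin admin = new HBaseAdmin(conf);
  //   byte[] encoded = hri.getEncodedNameAsBytes();
  //   // Move to an explicit destination server...
  //   admin.move(encoded, Bytes.toBytes("rs1.example.com,16020,1423000000000"));
  //   // ...or pass null/empty to let the balancer pick a random destination.
  //   admin.move(encoded, null);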
1256 
1257   @Override
1258   public void createTable(HTableDescriptor hTableDescriptor,
1259       byte [][] splitKeys) throws IOException {
1260     if (isStopped()) {
1261       throw new MasterNotRunningException();
1262     }
1263 
1264     String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
1265     ensureNamespaceExists(namespace);
1266 
1267     HRegionInfo[] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
1268     checkInitialized();
1269     sanityCheckTableDescriptor(hTableDescriptor);
1270     if (cpHost != null) {
1271       cpHost.preCreateTable(hTableDescriptor, newRegions);
1272     }
1273     LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
1274     this.service.submit(new CreateTableHandler(this,
1275       this.fileSystemManager, hTableDescriptor, conf,
1276       newRegions, this).prepare());
1277     if (cpHost != null) {
1278       cpHost.postCreateTable(hTableDescriptor, newRegions);
1279     }
1280 
1281   }
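
  // Illustrative sketch (not part of the original source): a client-side
  // call that exercises createTable() above, assuming the HBaseAdmin API of
  // this era; the table and family names are hypothetical.
  //
  //   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
  //   htd.addFamily(new HColumnDescriptor("cf"));
  //   byte[][] splitKeys = { Bytes.toBytes("g"), Bytes.toBytes("q") };
  //   new HBaseAdmin(conf).createTable(htd, splitKeys);  // => three regions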
1282 
1283   /**
1284    * Checks whether the table descriptor conforms to sane limits, and that
1285    * configured values (compression, encryption, etc.) can actually be loaded.
1286    * @throws IOException if the descriptor fails any of the checks.
1287    */
1288   private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
1289     final String CONF_KEY = "hbase.table.sanity.checks";
1290     if (!conf.getBoolean(CONF_KEY, true)) {
1291       return;
1292     }
1293     String tableVal = htd.getConfigurationValue(CONF_KEY);
1294     if (tableVal != null && !Boolean.valueOf(tableVal)) {
1295       return;
1296     }
1297 
1298     // check max file size
1299     long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
1300     long maxFileSize = htd.getMaxFileSize();
1301     if (maxFileSize < 0) {
1302       maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
1303     }
1304     if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
1305       throw new DoNotRetryIOException("MAX_FILESIZE for table descriptor or "
1306         + "\"hbase.hregion.max.filesize\" (" + maxFileSize
1307         + ") is too small, which might cause over-splitting into an unmanageable "
1308         + "number of regions. Set " + CONF_KEY + " to false at conf or table descriptor "
1309         + "if you want to bypass sanity checks");
1310     }
1311 
1312     // check flush size
1313     long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
1314     long flushSize = htd.getMemStoreFlushSize();
1315     if (flushSize < 0) {
1316       flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
1317     }
1318     if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
1319       throw new DoNotRetryIOException("MEMSTORE_FLUSHSIZE for table descriptor or "
1320           + "\"hbase.hregion.memstore.flush.size\" ("+flushSize+") is too small, which might cause"
1321           + " very frequent flushing. Set " + CONF_KEY + " to false at conf or table descriptor "
1322           + "if you want to bypass sanity checks");
1323     }
1324 
1325     // check that coprocessors and other specified plugin classes can be loaded
1326     try {
1327       checkClassLoading(conf, htd);
1328     } catch (Exception ex) {
1329       throw new DoNotRetryIOException(ex);
1330     }
1331 
1332     // check compression can be loaded
1333     try {
1334       checkCompression(htd);
1335     } catch (IOException e) {
1336       throw new DoNotRetryIOException(e.getMessage(), e);
1337     }
1338 
1339     // check encryption can be loaded
1340     try {
1341       checkEncryption(conf, htd);
1342     } catch (IOException e) {
1343       throw new DoNotRetryIOException(e.getMessage(), e);
1344     }
1345 
1346     // check that we have at least 1 CF
1347     if (htd.getColumnFamilies().length == 0) {
1348       throw new DoNotRetryIOException("Table should have at least one column family. "
1349           + "Set " + CONF_KEY + " to false at conf or table descriptor if you want to bypass sanity checks");
1350     }
1351 
1352     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1353       if (hcd.getTimeToLive() <= 0) {
1354         throw new DoNotRetryIOException("TTL for column family " + hcd.getNameAsString()
1355           + "  must be positive. Set " + CONF_KEY + " to false at conf or table descriptor "
1356           + "if you want to bypass sanity checks");
1357       }
1358 
1359       // check blockSize
1360       if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
1361         throw new DoNotRetryIOException("Block size for column family " + hcd.getNameAsString()
1362           + "  must be between 1K and 16MB Set "+CONF_KEY+" to false at conf or table descriptor "
1363           + "if you want to bypass sanity checks");
1364       }
1365 
1366       // check versions
1367       if (hcd.getMinVersions() < 0) {
1368         throw new DoNotRetryIOException("Min versions for column family " + hcd.getNameAsString()
1369           + "  must be positive. Set " + CONF_KEY + " to false at conf or table descriptor "
1370           + "if you want to bypass sanity checks");
1371       }
1372       // max versions already being checked
1373 
1374       // check replication scope
1375       if (hcd.getScope() < 0) {
1376         throw new DoNotRetryIOException("Replication scope for column family "
1377           + hcd.getNameAsString() + " must be non-negative. Set " + CONF_KEY + " to false at conf "
1378           + "or table descriptor if you want to bypass sanity checks");
1379       }
1380 
1381       // TODO: should we check coprocessors and encryption ?
1382     }
1383   }
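
  // Illustrative sketch: as the CONF_KEY lookups at the top of the method
  // show, the sanity checks can be bypassed either globally or per table.
  //
  //   // Globally, in the master's Configuration (hbase-site.xml):
  //   conf.setBoolean("hbase.table.sanity.checks", false);
  //
  //   // Or per table, on the descriptor itself:
  //   htd.setConfiguration("hbase.table.sanity.checks", "false");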
1384 
1385   private void startActiveMasterManager(int infoPort) throws KeeperException {
1386     String backupZNode = ZKUtil.joinZNode(
1387       zooKeeper.backupMasterAddressesZNode, serverName.toString());
1388     /*
1389     * Add a ZNode for ourselves in the backup master directory since we
1390     * may not become the active master. If so, we want the actual active
1391     * master to know we are backup masters, so that it won't assign
1392     * regions to us if so configured.
1393     *
1394     * If we become the active master later, ActiveMasterManager will delete
1395     * this node explicitly.  If we crash before then, ZooKeeper will delete
1396     * this node for us since it is ephemeral.
1397     */
1398     LOG.info("Adding backup master ZNode " + backupZNode);
1399     if (!MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode,
1400         serverName, infoPort)) {
1401       LOG.warn("Failed create of " + backupZNode + " by " + serverName);
1402     }
1403 
1404     activeMasterManager.setInfoPort(infoPort);
1405     // Start a thread to try to become the active master, so we won't block here
1406     Threads.setDaemonThreadRunning(new Thread(new Runnable() {
1407       @Override
1408       public void run() {
1409         int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT,
1410           HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
1411         // If we're a backup master, stall until the primary writes its address
1412         if (conf.getBoolean(HConstants.MASTER_TYPE_BACKUP,
1413             HConstants.DEFAULT_MASTER_TYPE_BACKUP)) {
1414           LOG.debug("HMaster started in backup mode. "
1415             + "Stalling until master znode is written.");
1416           // This will only be a minute or so while the cluster starts up,
1417           // so don't worry about setting watches on the parent znode
1418           while (!activeMasterManager.hasActiveMaster()) {
1419             LOG.debug("Waiting for master address ZNode to be written "
1420               + "(Also watching cluster state node)");
1421             Threads.sleep(timeout);
1422           }
1423         }
1424         MonitoredTask status = TaskMonitor.get().createStatus("Master startup");
1425         status.setDescription("Master startup");
1426         try {
1427           if (activeMasterManager.blockUntilBecomingActiveMaster(timeout, status)) {
1428             finishActiveMasterInitialization(status);
1429           }
1430         } catch (Throwable t) {
1431           status.setStatus("Failed to become active: " + t.getMessage());
1432           LOG.fatal("Failed to become active master", t);
1433           // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility
1434           if (t instanceof NoClassDefFoundError &&
1435               t.getMessage().contains("org/apache/hadoop/hdfs/protocol/FSConstants$SafeModeAction")) {
1436             // improved error message for this special case
1437             abort("HBase is having a problem with its Hadoop jars.  You may need to "
1438               + "recompile HBase against Hadoop version "
1439               +  org.apache.hadoop.util.VersionInfo.getVersion()
1440               + " or change your hadoop jars to start properly", t);
1441           } else {
1442             abort("Unhandled exception. Starting shutdown.", t);
1443           }
1444         } finally {
1445           status.cleanup();
1446         }
1447       }
1448     }, getServerName().toShortString() + ".activeMasterManager"));
1449   }
1450 
1451   private void checkCompression(final HTableDescriptor htd)
1452   throws IOException {
1453     if (!this.masterCheckCompression) return;
1454     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1455       checkCompression(hcd);
1456     }
1457   }
1458 
1459   private void checkCompression(final HColumnDescriptor hcd)
1460   throws IOException {
1461     if (!this.masterCheckCompression) return;
1462     CompressionTest.testCompression(hcd.getCompression());
1463     CompressionTest.testCompression(hcd.getCompactionCompression());
1464   }
1465 
1466   private void checkEncryption(final Configuration conf, final HTableDescriptor htd)
1467   throws IOException {
1468     if (!this.masterCheckEncryption) return;
1469     for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
1470       checkEncryption(conf, hcd);
1471     }
1472   }
1473 
1474   private void checkEncryption(final Configuration conf, final HColumnDescriptor hcd)
1475   throws IOException {
1476     if (!this.masterCheckEncryption) return;
1477     EncryptionTest.testEncryption(conf, hcd.getEncryptionType(), hcd.getEncryptionKey());
1478   }
1479 
1480   private void checkClassLoading(final Configuration conf, final HTableDescriptor htd)
1481   throws IOException {
1482     RegionSplitPolicy.getSplitPolicyClass(htd, conf);
1483     RegionCoprocessorHost.testTableCoprocessorAttrs(conf, htd);
1484   }
1485 
1486   private HRegionInfo[] getHRegionInfos(HTableDescriptor hTableDescriptor,
1487     byte[][] splitKeys) {
1488     long regionId = System.currentTimeMillis();
1489     HRegionInfo[] hRegionInfos = null;
1490     if (splitKeys == null || splitKeys.length == 0) {
1491       hRegionInfos = new HRegionInfo[]{new HRegionInfo(hTableDescriptor.getTableName(), null, null,
1492                 false, regionId)};
1493     } else {
1494       int numRegions = splitKeys.length + 1;
1495       hRegionInfos = new HRegionInfo[numRegions];
1496       byte[] startKey = null;
1497       byte[] endKey = null;
1498       for (int i = 0; i < numRegions; i++) {
1499         endKey = (i == splitKeys.length) ? null : splitKeys[i];
1500         hRegionInfos[i] =
1501              new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey,
1502                  false, regionId);
1503         startKey = endKey;
1504       }
1505     }
1506     return hRegionInfos;
1507   }
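
  // For example, splitKeys = { "b", "d" } yields three HRegionInfos that all
  // share one regionId (the creation timestamp), covering the key ranges
  //   [ (open), "b" ),  [ "b", "d" ),  [ "d", (open) )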
1508 
1509   private static boolean isCatalogTable(final TableName tableName) {
1510     return tableName.equals(TableName.META_TABLE_NAME);
1511   }
1512 
1513   @Override
1514   public void deleteTable(final TableName tableName) throws IOException {
1515     checkInitialized();
1516     if (cpHost != null) {
1517       cpHost.preDeleteTable(tableName);
1518     }
1519     LOG.info(getClientIdAuditPrefix() + " delete " + tableName);
1520     this.service.submit(new DeleteTableHandler(tableName, this, this).prepare());
1521     if (cpHost != null) {
1522       cpHost.postDeleteTable(tableName);
1523     }
1524   }
1525 
1526   @Override
1527   public void truncateTable(TableName tableName, boolean preserveSplits) throws IOException {
1528     checkInitialized();
1529     if (cpHost != null) {
1530       cpHost.preTruncateTable(tableName);
1531     }
1532     LOG.info(getClientIdAuditPrefix() + " truncate " + tableName);
1533     TruncateTableHandler handler = new TruncateTableHandler(tableName, this, this, preserveSplits);
1534     handler.prepare();
1535     handler.process();
1536     if (cpHost != null) {
1537       cpHost.postTruncateTable(tableName);
1538     }
1539   }
1540 
1541   @Override
1542   public void addColumn(final TableName tableName, final HColumnDescriptor columnDescriptor)
1543       throws IOException {
1544     checkInitialized();
1545     checkCompression(columnDescriptor);
1546     checkEncryption(conf, columnDescriptor);
1547     if (cpHost != null) {
1548       if (cpHost.preAddColumn(tableName, columnDescriptor)) {
1549         return;
1550       }
1551     }
1552     //TODO: we should process this (and some others) in an executor
1553     new TableAddFamilyHandler(tableName, columnDescriptor, this, this).prepare().process();
1554     if (cpHost != null) {
1555       cpHost.postAddColumn(tableName, columnDescriptor);
1556     }
1557   }
1558 
1559   @Override
1560   public void modifyColumn(TableName tableName, HColumnDescriptor descriptor)
1561       throws IOException {
1562     checkInitialized();
1563     checkCompression(descriptor);
1564     checkEncryption(conf, descriptor);
1565     if (cpHost != null) {
1566       if (cpHost.preModifyColumn(tableName, descriptor)) {
1567         return;
1568       }
1569     }
1570     LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
1571     new TableModifyFamilyHandler(tableName, descriptor, this, this)
1572       .prepare().process();
1573     if (cpHost != null) {
1574       cpHost.postModifyColumn(tableName, descriptor);
1575     }
1576   }
1577 
1578   @Override
1579   public void deleteColumn(final TableName tableName, final byte[] columnName)
1580       throws IOException {
1581     checkInitialized();
1582     if (cpHost != null) {
1583       if (cpHost.preDeleteColumn(tableName, columnName)) {
1584         return;
1585       }
1586     }
1587     LOG.info(getClientIdAuditPrefix() + " delete " + Bytes.toString(columnName));
1588     new TableDeleteFamilyHandler(tableName, columnName, this, this).prepare().process();
1589     if (cpHost != null) {
1590       cpHost.postDeleteColumn(tableName, columnName);
1591     }
1592   }
1593 
1594   @Override
1595   public void enableTable(final TableName tableName) throws IOException {
1596     checkInitialized();
1597     if (cpHost != null) {
1598       cpHost.preEnableTable(tableName);
1599     }
1600     LOG.info(getClientIdAuditPrefix() + " enable " + tableName);
1601     this.service.submit(new EnableTableHandler(this, tableName,
1602       assignmentManager, tableLockManager, false).prepare());
1603     if (cpHost != null) {
1604       cpHost.postEnableTable(tableName);
1605     }
1606   }
1607 
1608   @Override
1609   public void disableTable(final TableName tableName) throws IOException {
1610     checkInitialized();
1611     if (cpHost != null) {
1612       cpHost.preDisableTable(tableName);
1613     }
1614     LOG.info(getClientIdAuditPrefix() + " disable " + tableName);
1615     this.service.submit(new DisableTableHandler(this, tableName,
1616       assignmentManager, tableLockManager, false).prepare());
1617     if (cpHost != null) {
1618       cpHost.postDisableTable(tableName);
1619     }
1620   }
1621 
1622   /**
1623    * Return the region and current deployment for the region containing
1624    * the given row. If the region cannot be found, returns null. If it
1625    * is found, but not currently deployed, the second element of the pair
1626    * may be null.
1627    */
1628   @VisibleForTesting // Used by TestMaster.
1629   Pair<HRegionInfo, ServerName> getTableRegionForRow(
1630       final TableName tableName, final byte [] rowKey)
1631   throws IOException {
1632     final AtomicReference<Pair<HRegionInfo, ServerName>> result =
1633       new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
1634 
1635     MetaScannerVisitor visitor =
1636       new MetaScannerVisitorBase() {
1637         @Override
1638         public boolean processRow(Result data) throws IOException {
1639           if (data == null || data.size() <= 0) {
1640             return true;
1641           }
1642           Pair<HRegionInfo, ServerName> pair = HRegionInfo.getHRegionInfoAndServerName(data);
1643           if (pair == null) {
1644             return false;
1645           }
1646           if (!pair.getFirst().getTable().equals(tableName)) {
1647             return false;
1648           }
1649           result.set(pair);
1650           return true;
1651         }
1652     };
1653 
1654     MetaScanner.metaScan(clusterConnection, visitor, tableName, rowKey, 1);
1655     return result.get();
1656   }
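
  // Illustrative usage (test-only, per the @VisibleForTesting note above);
  // the table and row key are hypothetical:
  //
  //   Pair<HRegionInfo, ServerName> p =
  //     master.getTableRegionForRow(TableName.valueOf("t1"), Bytes.toBytes("row-42"));
  //   // p == null             -> no region found for that row
  //   // p.getSecond() == null -> region found but not currently deployed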
1657 
1658   @Override
1659   public void modifyTable(final TableName tableName, final HTableDescriptor descriptor)
1660       throws IOException {
1661     checkInitialized();
1662     sanityCheckTableDescriptor(descriptor);
1663     if (cpHost != null) {
1664       cpHost.preModifyTable(tableName, descriptor);
1665     }
1666     LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
1667     new ModifyTableHandler(tableName, descriptor, this, this).prepare().process();
1668     if (cpHost != null) {
1669       cpHost.postModifyTable(tableName, descriptor);
1670     }
1671   }
1672 
1673   @Override
1674   public void checkTableModifiable(final TableName tableName)
1675       throws IOException, TableNotFoundException, TableNotDisabledException {
1676     if (isCatalogTable(tableName)) {
1677       throw new IOException("Can't modify catalog tables");
1678     }
1679     if (!MetaTableAccessor.tableExists(getConnection(), tableName)) {
1680       throw new TableNotFoundException(tableName);
1681     }
1682     if (!getAssignmentManager().getTableStateManager().
1683         isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) {
1684       throw new TableNotDisabledException(tableName);
1685     }
1686   }
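
  // Illustrative sketch: checkTableModifiable() enforces the usual
  // disable-before-modify contract that a client observes, e.g.:
  //
  //   admin.disableTable(tableName);         // else TableNotDisabledException
  //   admin.modifyTable(tableName, newDescriptor);
  //   admin.enableTable(tableName);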
1687 
1688   /**
1689    * @return cluster status
1690    */
1691   public ClusterStatus getClusterStatus() throws InterruptedIOException {
1692     // Build Set of backup masters from ZK nodes
1693     List<String> backupMasterStrings;
1694     try {
1695       backupMasterStrings = ZKUtil.listChildrenNoWatch(this.zooKeeper,
1696         this.zooKeeper.backupMasterAddressesZNode);
1697     } catch (KeeperException e) {
1698       LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e);
1699       backupMasterStrings = null;
1700     }
1701 
1702     List<ServerName> backupMasters = null;
1703     if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) {
1704       backupMasters = new ArrayList<ServerName>(backupMasterStrings.size());
1705       for (String s: backupMasterStrings) {
1706         try {
1707           byte [] bytes;
1708           try {
1709             bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
1710                 this.zooKeeper.backupMasterAddressesZNode, s));
1711           } catch (InterruptedException e) {
1712             throw new InterruptedIOException();
1713           }
1714           if (bytes != null) {
1715             ServerName sn;
1716             try {
1717               sn = ServerName.parseFrom(bytes);
1718             } catch (DeserializationException e) {
1719               LOG.warn("Failed to parse, skipping registration of backup server", e);
1720               continue;
1721             }
1722             backupMasters.add(sn);
1723           }
1724         } catch (KeeperException e) {
1725           LOG.warn(this.zooKeeper.prefix("Unable to get information about " +
1726                    "backup servers"), e);
1727         }
1728       }
1729       Collections.sort(backupMasters, new Comparator<ServerName>() {
1730         @Override
1731         public int compare(ServerName s1, ServerName s2) {
1732           return s1.getServerName().compareTo(s2.getServerName());
1733         }});
1734     }
1735 
1736     String clusterId = fileSystemManager != null ?
1737       fileSystemManager.getClusterId().toString() : null;
1738     Map<String, RegionState> regionsInTransition = assignmentManager != null ?
1739       assignmentManager.getRegionStates().getRegionsInTransition() : null;
1740     String[] coprocessors = cpHost != null ? getMasterCoprocessors() : null;
1741     boolean balancerOn = loadBalancerTracker != null ?
1742       loadBalancerTracker.isBalancerOn() : false;
1743     Map<ServerName, ServerLoad> onlineServers = null;
1744     Set<ServerName> deadServers = null;
1745     if (serverManager != null) {
1746       deadServers = serverManager.getDeadServers().copyServerNames();
1747       onlineServers = serverManager.getOnlineServers();
1748     }
1749     return new ClusterStatus(VersionInfo.getVersion(), clusterId,
1750       onlineServers, deadServers, serverName, backupMasters,
1751       regionsInTransition, coprocessors, balancerOn);
1752   }
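
  // Illustrative sketch: a client-side read of the status assembled above,
  // assuming the Admin API of this era:
  //
  //   ClusterStatus status = admin.getClusterStatus();
  //   int liveServers = status.getServersSize();
  //   int deadServers = status.getDeadServers();
  //   double avgLoad = status.getAverageLoad();      // regions per server
  //   Collection<ServerName> backups = status.getBackupMasters();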
1753 
1754   /**
1755    * The set of loaded coprocessors is stored in a static set. Since it's
1756    * statically allocated, it does not require that HMaster's cpHost be
1757    * initialized prior to accessing it.
1758    * @return a String representation of the set of names of the loaded
1759    * coprocessors.
1760    */
1761   public static String getLoadedCoprocessors() {
1762     return CoprocessorHost.getLoadedCoprocessors().toString();
1763   }
1764 
1765   /**
1766    * @return timestamp in millis when HMaster was started.
1767    */
1768   public long getMasterStartTime() {
1769     return startcode;
1770   }
1771 
1772   /**
1773    * @return timestamp in millis when HMaster became the active master.
1774    */
1775   public long getMasterActiveTime() {
1776     return masterActiveTime;
1777   }
1778 
1779   public int getRegionServerInfoPort(final ServerName sn) {
1780     RegionServerInfo info = this.regionServerTracker.getRegionServerInfo(sn);
1781     if (info == null || info.getInfoPort() == 0) {
1782       return conf.getInt(HConstants.REGIONSERVER_INFO_PORT,
1783         HConstants.DEFAULT_REGIONSERVER_INFOPORT);
1784     }
1785     return info.getInfoPort();
1786   }
1787 
1788   /**
1789    * @return array of coprocessor SimpleNames.
1790    */
1791   public String[] getMasterCoprocessors() {
1792     Set<String> masterCoprocessors = getMasterCoprocessorHost().getCoprocessors();
1793     return masterCoprocessors.toArray(new String[masterCoprocessors.size()]);
1794   }
1795 
1796   @Override
1797   public void abort(final String msg, final Throwable t) {
1798     if (isAborted() || isStopped()) {
1799       return;
1800     }
1801     if (cpHost != null) {
1802       // HBASE-4014: dump a list of loaded coprocessors.
1803       LOG.fatal("Master server abort: loaded coprocessors are: " +
1804           getLoadedCoprocessors());
1805     }
1806     if (t != null) LOG.fatal(msg, t);
1807     stop(msg);
1808   }
1809 
1810   @Override
1811   public ZooKeeperWatcher getZooKeeper() {
1812     return zooKeeper;
1813   }
1814 
1815   @Override
1816   public MasterCoprocessorHost getMasterCoprocessorHost() {
1817     return cpHost;
1818   }
1819 
1820   @Override
1821   public ServerName getServerName() {
1822     return this.serverName;
1823   }
1824 
1825   @Override
1826   public AssignmentManager getAssignmentManager() {
1827     return this.assignmentManager;
1828   }
1829 
1830   public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() {
1831     return rsFatals;
1832   }
1833 
1834   public void shutdown() {
1835     if (cpHost != null) {
1836       try {
1837         cpHost.preShutdown();
1838       } catch (IOException ioe) {
1839         LOG.error("Error calling master coprocessor preShutdown()", ioe);
1840       }
1841     }
1842 
1843     if (this.serverManager != null) {
1844       this.serverManager.shutdownCluster();
1845     }
1846     if (this.clusterStatusTracker != null){
1847       try {
1848         this.clusterStatusTracker.setClusterDown();
1849       } catch (KeeperException e) {
1850         LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e);
1851       }
1852     }
1853   }
1854 
1855   public void stopMaster() {
1856     if (cpHost != null) {
1857       try {
1858         cpHost.preStopMaster();
1859       } catch (IOException ioe) {
1860         LOG.error("Error calling master coprocessor preStopMaster()", ioe);
1861       }
1862     }
1863     stop("Stopped by " + Thread.currentThread().getName());
1864   }
1865 
1866   void checkServiceStarted() throws ServerNotRunningYetException {
1867     if (!serviceStarted) {
1868       throw new ServerNotRunningYetException("Server is not running yet");
1869     }
1870   }
1871 
1872   void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException {
1873     checkServiceStarted();
1874     if (!this.initialized) {
1875       throw new PleaseHoldException("Master is initializing");
1876     }
1877   }
1878 
1879   void checkNamespaceManagerReady() throws IOException {
1880     checkInitialized();
1881     if (tableNamespaceManager == null ||
1882         !tableNamespaceManager.isTableAvailableAndInitialized()) {
1883       throw new IOException("Table Namespace Manager not ready yet, try again later");
1884     }
1885   }

1886   /**
1887    * Report whether this master is currently the active master or not.
1888    * If not active master, we are parked on ZK waiting to become active.
1889    *
1890    * This method is used for testing.
1891    *
1892    * @return true if active master, false if not.
1893    */
1894   public boolean isActiveMaster() {
1895     return isActiveMaster;
1896   }
1897 
1898   /**
1899    * Report whether this master has completed with its initialization and is
1900    * ready.  If ready, the master is also the active master.  A standby master
1901    * is never ready.
1902    *
1903    * This method is used for testing.
1904    *
1905    * @return true if master is ready to go, false if not.
1906    */
1907   @Override
1908   public boolean isInitialized() {
1909     return initialized;
1910   }
1911 
1912   /**
1913    * ServerShutdownHandlerEnabled is set to false until assignMeta completes,
1914    * to prevent ServerShutdownHandler from processing server shutdowns too early.
1915    * @return true if assignMeta has completed.
1916    */
1917   @Override
1918   public boolean isServerShutdownHandlerEnabled() {
1919     return this.serverShutdownHandlerEnabled;
1920   }
1921 
1922   /**
1923    * Report whether this master has started initialization and is about to do meta region assignment.
1924    * @return true if the master is initializing and about to assign hbase:meta regions
1925    */
1926   public boolean isInitializationStartsMetaRegionAssignment() {
1927     return this.initializationBeforeMetaAssignment;
1928   }
1929 
1930   public void assignRegion(HRegionInfo hri) {
1931     assignmentManager.assign(hri, true);
1932   }
1933 
1934   /**
1935    * Compute the average load across all region servers.
1936    * Currently, this uses a very naive computation - just uses the number of
1937    * regions being served, ignoring stats about number of requests.
1938    * @return the average load
1939    */
1940   public double getAverageLoad() {
1941     if (this.assignmentManager == null) {
1942       return 0;
1943     }
1944 
1945     RegionStates regionStates = this.assignmentManager.getRegionStates();
1946     if (regionStates == null) {
1947       return 0;
1948     }
1949     return regionStates.getAverageLoad();
1950   }
1951 
1952   @Override
1953   public boolean registerService(Service instance) {
1954     /*
1955      * No stacking of instances is allowed for a single service name
1956      */
1957     Descriptors.ServiceDescriptor serviceDesc = instance.getDescriptorForType();
1958     if (coprocessorServiceHandlers.containsKey(serviceDesc.getFullName())) {
1959       LOG.error("Coprocessor service "+serviceDesc.getFullName()+
1960           " already registered, rejecting request from "+instance
1961       );
1962       return false;
1963     }
1964 
1965     coprocessorServiceHandlers.put(serviceDesc.getFullName(), instance);
1966     if (LOG.isDebugEnabled()) {
1967       LOG.debug("Registered master coprocessor service: service="+serviceDesc.getFullName());
1968     }
1969     return true;
1970   }
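
  // Illustrative sketch (hypothetical service name): a master coprocessor
  // that implements CoprocessorService ends up here, with its generated
  // protobuf Service registered by MasterCoprocessorHost at load time.
  //
  //   public class MyMasterEndpoint extends MyProtos.MyService
  //       implements Coprocessor, CoprocessorService {
  //     @Override
  //     public Service getService() { return this; }  // registered above
  //   }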
1971 
1972   /**
1973    * Utility for constructing an instance of the passed HMaster class.
1974    * @param masterClass the HMaster subclass to instantiate
1975    * @param conf the configuration to use
1976    * @param cp the coordinated state manager to pass through
1977    * @return HMaster instance.
1977    */
1978   public static HMaster constructMaster(Class<? extends HMaster> masterClass,
1979       final Configuration conf, final CoordinatedStateManager cp)  {
1980     try {
1981       Constructor<? extends HMaster> c =
1982         masterClass.getConstructor(Configuration.class, CoordinatedStateManager.class);
1983       return c.newInstance(conf, cp);
1984     } catch (InvocationTargetException ite) {
1985       Throwable target = ite.getTargetException() != null?
1986         ite.getTargetException(): ite;
1987       if (target.getCause() != null) target = target.getCause();
1988       throw new RuntimeException("Failed construction of Master: " +
1989         masterClass.toString(), target);
1990     } catch (Exception e) {
1991       throw new RuntimeException("Failed construction of Master: " +
1992         masterClass.toString() + ((e.getCause() != null)?
1993           e.getCause().getMessage(): ""), e);
1994     }
1995   }
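
  // Illustrative usage, roughly what HMasterCommandLine does at startup
  // (see main() below); the factory call reflects the API of this era:
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   CoordinatedStateManager csm =
  //     CoordinatedStateManagerFactory.getCoordinatedStateManager(conf);
  //   HMaster master = HMaster.constructMaster(HMaster.class, conf, csm);
  //   master.start();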
1996 
1997   /**
1998    * @see org.apache.hadoop.hbase.master.HMasterCommandLine
1999    */
2000   public static void main(String [] args) {
2001     VersionInfo.logVersion();
2002     new HMasterCommandLine(HMaster.class).doMain(args);
2003   }
2004 
2005   public HFileCleaner getHFileCleaner() {
2006     return this.hfileCleaner;
2007   }
2008 
2009   /**
2010    * Exposed for TESTING!
2011    * @return the underlying snapshot manager
2012    */
2013   public SnapshotManager getSnapshotManagerForTesting() {
2014     return this.snapshotManager;
2015   }
2016 
2017   @Override
2018   public void createNamespace(NamespaceDescriptor descriptor) throws IOException {
2019     TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
2020     checkNamespaceManagerReady();
2021     if (cpHost != null) {
2022       if (cpHost.preCreateNamespace(descriptor)) {
2023         return;
2024       }
2025     }
2026     LOG.info(getClientIdAuditPrefix() + " creating " + descriptor);
2027     tableNamespaceManager.create(descriptor);
2028     if (cpHost != null) {
2029       cpHost.postCreateNamespace(descriptor);
2030     }
2031   }
2032 
2033   @Override
2034   public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
2035     TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
2036     checkNamespaceManagerReady();
2037     if (cpHost != null) {
2038       if (cpHost.preModifyNamespace(descriptor)) {
2039         return;
2040       }
2041     }
2042     LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
2043     tableNamespaceManager.update(descriptor);
2044     if (cpHost != null) {
2045       cpHost.postModifyNamespace(descriptor);
2046     }
2047   }
2048 
2049   @Override
2050   public void deleteNamespace(String name) throws IOException {
2051     checkNamespaceManagerReady();
2052     if (cpHost != null) {
2053       if (cpHost.preDeleteNamespace(name)) {
2054         return;
2055       }
2056     }
2057     LOG.info(getClientIdAuditPrefix() + " delete " + name);
2058     tableNamespaceManager.remove(name);
2059     if (cpHost != null) {
2060       cpHost.postDeleteNamespace(name);
2061     }
2062   }
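
  // Illustrative sketch of the namespace lifecycle these methods back,
  // driven from the client side (namespace name hypothetical):
  //
  //   admin.createNamespace(NamespaceDescriptor.create("ns1").build());
  //   admin.modifyNamespace(NamespaceDescriptor.create("ns1")
  //     .addConfiguration("key", "value").build());
  //   admin.deleteNamespace("ns1");   // fails if the namespace has tables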
2063 
2064   /**
2065    * Ensures that the specified namespace exists; otherwise throws a NamespaceNotFoundException.
2066    *
2067    * @param name the namespace to check
2068    * @throws IOException if the namespace manager is not ready yet.
2069    * @throws NamespaceNotFoundException if the namespace does not exist
2070    */
2071   private void ensureNamespaceExists(final String name)
2072       throws IOException, NamespaceNotFoundException {
2073     checkNamespaceManagerReady();
2074     NamespaceDescriptor nsd = tableNamespaceManager.get(name);
2075     if (nsd == null) {
2076       throw new NamespaceNotFoundException(name);
2077     }
2078   }
2079 
2080   @Override
2081   public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException {
2082     checkNamespaceManagerReady();
2083 
2084     if (cpHost != null) {
2085       cpHost.preGetNamespaceDescriptor(name);
2086     }
2087 
2088     NamespaceDescriptor nsd = tableNamespaceManager.get(name);
2089     if (nsd == null) {
2090       throw new NamespaceNotFoundException(name);
2091     }
2092 
2093     if (cpHost != null) {
2094       cpHost.postGetNamespaceDescriptor(nsd);
2095     }
2096 
2097     return nsd;
2098   }
2099 
2100   @Override
2101   public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException {
2102     checkNamespaceManagerReady();
2103 
2104     final List<NamespaceDescriptor> descriptors = new ArrayList<NamespaceDescriptor>();
2105     boolean bypass = false;
2106     if (cpHost != null) {
2107       bypass = cpHost.preListNamespaceDescriptors(descriptors);
2108     }
2109 
2110     if (!bypass) {
2111       descriptors.addAll(tableNamespaceManager.list());
2112 
2113       if (cpHost != null) {
2114         cpHost.postListNamespaceDescriptors(descriptors);
2115       }
2116     }
2117     return descriptors;
2118   }
2119 
2120   @Override
2121   public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
2122     ensureNamespaceExists(name);
2123     return listTableDescriptors(name, null, null, true);
2124   }
2125 
2126   @Override
2127   public List<TableName> listTableNamesByNamespace(String name) throws IOException {
2128     ensureNamespaceExists(name);
2129     return listTableNames(name, null, true);
2130   }
2131 
2132   /**
2133    * Returns the list of table descriptors that match the specified request
2134    *
2135    * @param namespace the namespace to query, or null if querying for all
2136    * @param regex The regular expression to match against, or null if querying for all
2137    * @param tableNameList the list of table names, or null if querying for all
2138    * @param includeSysTables False to match only against userspace tables
2139    * @return the list of table descriptors
2140    */
2141   public List<HTableDescriptor> listTableDescriptors(final String namespace, final String regex,
2142       final List<TableName> tableNameList, final boolean includeSysTables)
2143       throws IOException {
2144     final List<HTableDescriptor> descriptors = new ArrayList<HTableDescriptor>();
2145 
2146     boolean bypass = false;
2147     if (cpHost != null) {
2148       bypass = cpHost.preGetTableDescriptors(tableNameList, descriptors);
2149       // method required for AccessController.
2150       bypass |= cpHost.preGetTableDescriptors(tableNameList, descriptors, regex);
2151     }
2152 
2153     if (!bypass) {
2154       if (tableNameList == null || tableNameList.size() == 0) {
2155         // request for all TableDescriptors
2156         Collection<HTableDescriptor> htds;
2157         if (namespace != null && namespace.length() > 0) {
2158           htds = tableDescriptors.getByNamespace(namespace).values();
2159         } else {
2160           htds = tableDescriptors.getAll().values();
2161         }
2162 
2163         for (HTableDescriptor desc: htds) {
2164           if (includeSysTables || !desc.getTableName().isSystemTable()) {
2165             descriptors.add(desc);
2166           }
2167         }
2168       } else {
2169         for (TableName s: tableNameList) {
2170           HTableDescriptor desc = tableDescriptors.get(s);
2171           if (desc != null) {
2172             descriptors.add(desc);
2173           }
2174         }
2175       }
2176 
2177       // Retains only those matched by regular expression.
2178       if (regex != null) {
2179         filterTablesByRegex(descriptors, Pattern.compile(regex));
2180       }
2181 
2182       if (cpHost != null) {
2183         cpHost.postGetTableDescriptors(descriptors);
2184         // method required for AccessController.
2185         cpHost.postGetTableDescriptors(tableNameList, descriptors, regex);
2186       }
2187     }
2188     return descriptors;
2189   }
2190 
2191   /**
2192    * Returns the list of table names that match the specified request
2193    * @param regex The regular expression to match against, or null if querying for all
2194    * @param namespace the namespace to query, or null if querying for all
2195    * @param includeSysTables False to match only against userspace tables
2196    * @return the list of table names
2197    */
2198   public List<TableName> listTableNames(final String namespace, final String regex,
2199       final boolean includeSysTables) throws IOException {
2200     final List<HTableDescriptor> descriptors = new ArrayList<HTableDescriptor>();
2201 
2202     boolean bypass = false;
2203     if (cpHost != null) {
2204       bypass = cpHost.preGetTableNames(descriptors, regex);
2205     }
2206 
2207     if (!bypass) {
2208       // get all descriptors
2209       Collection<HTableDescriptor> htds;
2210       if (namespace != null && namespace.length() > 0) {
2211         htds = tableDescriptors.getByNamespace(namespace).values();
2212       } else {
2213         htds = tableDescriptors.getAll().values();
2214       }
2215 
2216       for (HTableDescriptor htd: htds) {
2217         if (includeSysTables || !htd.getTableName().isSystemTable()) {
2218           descriptors.add(htd);
2219         }
2220       }
2221 
2222       // Retains only those matched by regular expression.
2223       if (regex != null) {
2224         filterTablesByRegex(descriptors, Pattern.compile(regex));
2225       }
2226 
2227       if (cpHost != null) {
2228         cpHost.postGetTableNames(descriptors, regex);
2229       }
2230     }
2231 
2232     List<TableName> result = new ArrayList<TableName>(descriptors.size());
2233     for (HTableDescriptor htd: descriptors) {
2234       result.add(htd.getTableName());
2235     }
2236     return result;
2237   }
2238 
2239 
2240   /**
2241    * Removes the table descriptors that don't match the pattern.
2242    * @param descriptors list of table descriptors to filter
2243    * @param pattern the regex to use
2244    */
2245   private static void filterTablesByRegex(final Collection<HTableDescriptor> descriptors,
2246       final Pattern pattern) {
2247     final String defaultNS = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR;
2248     Iterator<HTableDescriptor> itr = descriptors.iterator();
2249     while (itr.hasNext()) {
2250       HTableDescriptor htd = itr.next();
2251       String tableName = htd.getTableName().getNameAsString();
2252       boolean matched = pattern.matcher(tableName).matches();
2253       if (!matched && htd.getTableName().getNamespaceAsString().equals(defaultNS)) {
2254         matched = pattern.matcher(defaultNS + TableName.NAMESPACE_DELIM + tableName).matches();
2255       }
2256       if (!matched) {
2257         itr.remove();
2258       }
2259     }
2260   }
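
  // For example, under the fallback above a table "t1" in the default
  // namespace is retained by either pattern "t1" or "default:t1", whereas a
  // table "ns1:t1" must be matched as "ns1:t1" (the namespace-prepending
  // fallback applies only to default-namespace tables).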
2261 }