
1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.client;
20  
21  import java.io.Closeable;
22  import java.io.IOException;
23  import java.io.InterruptedIOException;
24  import java.net.SocketTimeoutException;
25  import java.util.ArrayList;
26  import java.util.Arrays;
27  import java.util.LinkedList;
28  import java.util.List;
29  import java.util.concurrent.atomic.AtomicInteger;
30  import java.util.concurrent.atomic.AtomicReference;
31  import java.util.regex.Pattern;
32  
33  import org.apache.commons.logging.Log;
34  import org.apache.commons.logging.LogFactory;
35  import org.apache.hadoop.classification.InterfaceAudience;
36  import org.apache.hadoop.classification.InterfaceStability;
37  import org.apache.hadoop.conf.Configuration;
38  import org.apache.hadoop.hbase.Abortable;
39  import org.apache.hadoop.hbase.ClusterStatus;
40  import org.apache.hadoop.hbase.TableName;
41  import org.apache.hadoop.hbase.HBaseConfiguration;
42  import org.apache.hadoop.hbase.HBaseIOException;
43  import org.apache.hadoop.hbase.HColumnDescriptor;
44  import org.apache.hadoop.hbase.HConstants;
45  import org.apache.hadoop.hbase.HRegionInfo;
46  import org.apache.hadoop.hbase.HRegionLocation;
47  import org.apache.hadoop.hbase.HTableDescriptor;
48  import org.apache.hadoop.hbase.NamespaceDescriptor;
49  import org.apache.hadoop.hbase.MasterNotRunningException;
50  import org.apache.hadoop.hbase.NotServingRegionException;
51  import org.apache.hadoop.hbase.RegionException;
52  import org.apache.hadoop.hbase.ServerName;
53  import org.apache.hadoop.hbase.TableExistsException;
54  import org.apache.hadoop.hbase.TableNotEnabledException;
55  import org.apache.hadoop.hbase.TableNotFoundException;
56  import org.apache.hadoop.hbase.UnknownRegionException;
57  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
58  import org.apache.hadoop.hbase.catalog.CatalogTracker;
59  import org.apache.hadoop.hbase.catalog.MetaReader;
60  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
61  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
62  import org.apache.hadoop.hbase.exceptions.DeserializationException;
63  import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
64  import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
65  import org.apache.hadoop.hbase.exceptions.MergeRegionException;
66  import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
67  import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
68  import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
69  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
70  import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel;
71  import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
72  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
73  import org.apache.hadoop.hbase.protobuf.RequestConverter;
74  import org.apache.hadoop.hbase.protobuf.ResponseConverter;
75  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
76  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
77  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
78  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
79  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
80  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
81  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
82  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
83  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
84  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
85  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
86  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
87  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
88  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
89  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
90  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
91  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
92  import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;
93  import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest;
94  import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
95  import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest;
96  import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest;
97  import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest;
98  import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
99  import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
100 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest;
101 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest;
102 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest;
103 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse;
104 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
105 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
106 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest;
107 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
108 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
109 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest;
110 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest;
111 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse;
112 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest;
113 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest;
114 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest;
115 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest;
116 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse;
117 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
118 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
119 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;
120 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse;
121 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;
122 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
123 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
124 import org.apache.hadoop.hbase.util.Addressing;
125 import org.apache.hadoop.hbase.util.Bytes;
126 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
127 import org.apache.hadoop.hbase.util.Pair;
128 import org.apache.hadoop.ipc.RemoteException;
129 import org.apache.hadoop.util.StringUtils;
130 import org.apache.zookeeper.KeeperException;
131 
132 import com.google.protobuf.ByteString;
133 import com.google.protobuf.ServiceException;
134 
135 /**
136  * Provides an interface to manage HBase database table metadata and general
137  * administrative functions.  Use HBaseAdmin to create, drop, list, enable and
138  * disable tables. Use it also to add and drop table column families.
139  *
140  * <p>See {@link HTable} to add, update, and delete data from an individual table.
141  * <p>Currently HBaseAdmin instances are not expected to be long-lived.  For
142  * example, an HBaseAdmin instance will not ride over a Master restart.
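 * <p>A minimal usage sketch (the table and column family names below are
 * illustrative; exception handling is omitted):
 * <pre>{@code
 * Configuration conf = HBaseConfiguration.create();
 * HBaseAdmin admin = new HBaseAdmin(conf);
 * try {
 *   HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo_table"));
 *   desc.addFamily(new HColumnDescriptor("cf"));
 *   admin.createTable(desc);            // synchronous; waits for the regions to come online
 *   // ... read and write through HTable ...
 *   admin.disableTable("demo_table");   // a table must be disabled before it can be deleted
 *   admin.deleteTable("demo_table");
 * } finally {
 *   admin.close();
 * }
 * }</pre>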
143  */
144 @InterfaceAudience.Public
145 @InterfaceStability.Evolving
146 public class HBaseAdmin implements Abortable, Closeable {
147   private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);
148 
149   // We use the implementation class rather than the interface because we
150   //  need the package protected functions to get the connection to master
151   private HConnection connection;
152 
153   private volatile Configuration conf;
154   private final long pause;
155   private final int numRetries;
156   // Some operations can take a long time such as disable of big table.
157   // numRetries is for 'normal' stuff... Multiply by this factor when
158   // want to wait a long time.
159   private final int retryLongerMultiplier;
160   private boolean aborted;
161   private boolean cleanupConnectionOnClose = false; // close the connection in close()
162 
163   private RpcRetryingCallerFactory rpcCallerFactory;
164 
165   /**
166    * Constructor.
167    * See {@link #HBaseAdmin(HConnection connection)}
168    *
169    * @param c Configuration object. Copied internally.
170    */
171   public HBaseAdmin(Configuration c)
172   throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
173     // Will not leak connections, as the new implementation of the constructor
174     // does not throw exceptions anymore.
175     this(HConnectionManager.getConnection(new Configuration(c)));
176     this.cleanupConnectionOnClose = true;
177   }
178 
179  /**
180   * Constructor for externally managed HConnections.
181   * The connection to master will be created when required by admin functions.
182   *
183   * @param connection The HConnection instance to use
184   * @throws MasterNotRunningException, ZooKeeperConnectionException are not
185   *  thrown anymore but are kept in the signature for backward API compatibility
186   */
187   public HBaseAdmin(HConnection connection)
188       throws MasterNotRunningException, ZooKeeperConnectionException {
189     this.conf = connection.getConfiguration();
190     this.connection = connection;
191 
192     this.pause = this.conf.getLong("hbase.client.pause", 1000);
193     this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
194         HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
195     this.retryLongerMultiplier = this.conf.getInt(
196         "hbase.client.retries.longer.multiplier", 10);
197     this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf);
198   }
199 
200   /**
201    * @return A new CatalogTracker instance; call {@link #cleanupCatalogTracker(CatalogTracker)}
202    * to cleanup the returned catalog tracker.
203    * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
204    * @throws IOException
205    * @see #cleanupCatalogTracker(CatalogTracker)
206    */
207   private synchronized CatalogTracker getCatalogTracker()
208   throws ZooKeeperConnectionException, IOException {
209     CatalogTracker ct = null;
210     try {
211       ct = new CatalogTracker(this.conf);
212       ct.start();
213     } catch (InterruptedException e) {
214       // Let it out as an IOE for now until we redo all so tolerate IEs
215       Thread.currentThread().interrupt();
216       throw new IOException("Interrupted", e);
217     }
218     return ct;
219   }
220 
221   private void cleanupCatalogTracker(final CatalogTracker ct) {
222     ct.stop();
223   }
224 
225   @Override
226   public void abort(String why, Throwable e) {
227     // Currently does nothing but throw the passed message and exception
228     this.aborted = true;
229     throw new RuntimeException(why, e);
230   }
231 
232   @Override
233   public boolean isAborted(){
234     return this.aborted;
235   }
236 
237   /** @return HConnection used by this object. */
238   public HConnection getConnection() {
239     return connection;
240   }
241 
242   /** @return true if the master server is running. Throws an exception
243    *  otherwise.
244    * @throws ZooKeeperConnectionException
245    * @throws MasterNotRunningException
246    */
247   public boolean isMasterRunning()
248   throws MasterNotRunningException, ZooKeeperConnectionException {
249     return connection.isMasterRunning();
250   }
251 
252   /**
253    * @param tableName Table to check.
254    * @return True if table exists already.
255    * @throws IOException
256    */
257   public boolean tableExists(final TableName tableName)
258   throws IOException {
259     boolean b = false;
260     CatalogTracker ct = getCatalogTracker();
261     try {
262       b = MetaReader.tableExists(ct, tableName);
263     } finally {
264       cleanupCatalogTracker(ct);
265     }
266     return b;
267   }
268 
269   public boolean tableExists(final byte[] tableName)
270   throws IOException {
271     return tableExists(TableName.valueOf(tableName));
272   }
273 
274   public boolean tableExists(final String tableName)
275   throws IOException {
276     return tableExists(TableName.valueOf(tableName));
277   }
278 
279   /**
280    * List all the userspace tables.  In other words, scan the META table.
281    *
282    * If we wanted this to be really fast, we could implement a special
283    * catalog table that just contains table names and their descriptors.
284    * Right now, it only exists as part of the META table's region info.
285    *
286    * @return an array of HTableDescriptors
287    * @throws IOException if a remote or network exception occurs
288    */
289   public HTableDescriptor[] listTables() throws IOException {
290     return this.connection.listTables();
291   }
292 
293   /**
294    * List all the userspace tables matching the given pattern.
295    *
296    * @param pattern The compiled regular expression to match against
297    * @return an array of HTableDescriptors
298    * @throws IOException if a remote or network exception occurs
299    * @see #listTables()
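   * <p>For example (the pattern below is illustrative):
   * <pre>{@code
   * HTableDescriptor[] testTables = admin.listTables(Pattern.compile("test_.*"));
   * }</pre>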
300    */
301   public HTableDescriptor[] listTables(Pattern pattern) throws IOException {
302     List<HTableDescriptor> matched = new LinkedList<HTableDescriptor>();
303     HTableDescriptor[] tables = listTables();
304     for (HTableDescriptor table : tables) {
305       if (pattern.matcher(table.getTableName().getNameAsString()).matches()) {
306         matched.add(table);
307       }
308     }
309     return matched.toArray(new HTableDescriptor[matched.size()]);
310   }
311 
312   /**
313    * List all the userspace tables matching the given regular expression.
314    *
315    * @param regex The regular expression to match against
316    * @return an array of HTableDescriptors
317    * @throws IOException if a remote or network exception occurs
318    * @see #listTables(java.util.regex.Pattern)
319    */
320   public HTableDescriptor[] listTables(String regex) throws IOException {
321     return listTables(Pattern.compile(regex));
322   }
323 
324   /**
325    * List all of the names of userspace tables.
326    * @return String[] table names
327    * @throws IOException if a remote or network exception occurs
328    */
329   @Deprecated
330   public String[] getTableNames() throws IOException {
331     return this.connection.getTableNames();
332   }
333 
334   /**
335    * List all of the names of userspace tables matching the given pattern.
336    * @param pattern The compiled regular expression to match against
337    * @return String[] table names
338    * @throws IOException if a remote or network exception occurs
339    */
340   @Deprecated
341   public String[] getTableNames(Pattern pattern) throws IOException {
342     List<String> matched = new ArrayList<String>();
343     for (String name: this.connection.getTableNames()) {
344       if (pattern.matcher(name).matches()) {
345         matched.add(name);
346       }
347     }
348     return matched.toArray(new String[matched.size()]);
349   }
350 
351   /**
352    * List all of the names of userspace tables matching the given regular expression.
353    * @param regex The regular expression to match against
354    * @return String[] table names
355    * @throws IOException if a remote or network exception occurs
356    */
357   @Deprecated
358   public String[] getTableNames(String regex) throws IOException {
359     return getTableNames(Pattern.compile(regex));
360   }
361 
362   /**
363    * List all of the names of userspace tables.
364    * @return TableName[] table names
365    * @throws IOException if a remote or network exception occurs
366    */
367   public TableName[] listTableNames() throws IOException {
368     return this.connection.listTableNames();
369   }
370 
371   /**
372    * Method for getting the table descriptor.
373    * @param tableName the name of the table as a TableName instance
374    * @return the tableDescriptor
375    * @throws TableNotFoundException
376    * @throws IOException if a remote or network exception occurs
377    */
378   public HTableDescriptor getTableDescriptor(final TableName tableName)
379   throws TableNotFoundException, IOException {
380     return this.connection.getHTableDescriptor(tableName);
381   }
382 
383   public HTableDescriptor getTableDescriptor(final byte[] tableName)
384   throws TableNotFoundException, IOException {
385     return getTableDescriptor(TableName.valueOf(tableName));
386   }
387 
388   private long getPauseTime(int tries) {
389     int triesCount = tries;
390     if (triesCount >= HConstants.RETRY_BACKOFF.length) {
391       triesCount = HConstants.RETRY_BACKOFF.length - 1;
392     }
393     return this.pause * HConstants.RETRY_BACKOFF[triesCount];
394   }
395 
396   /**
397    * Creates a new table.
398    * Synchronous operation.
399    *
400    * @param desc table descriptor for table
401    *
402    * @throws IllegalArgumentException if the table name is reserved
403    * @throws MasterNotRunningException if master is not running
404    * @throws TableExistsException if the table already exists (when called concurrently
405    * from multiple threads, the table may have been created between the test for
406    * existence and the attempt at creation).
407    * @throws IOException if a remote or network exception occurs
408    */
409   public void createTable(HTableDescriptor desc)
410   throws IOException {
411     createTable(desc, null);
412   }
413 
414   /**
415    * Creates a new table with the specified number of regions.  The start key
416    * specified will become the end key of the first region of the table, and
417    * the end key specified will become the start key of the last region of the
418    * table (the first region has a null start key and the last region has a
419    * null end key).
420    *
421    * BigInteger math will be used to divide the key range specified into
422    * enough segments to make the required number of total regions.
423    *
424    * Synchronous operation.
425    *
426    * @param desc table descriptor for table
427    * @param startKey beginning of key range
428    * @param endKey end of key range
429    * @param numRegions the total number of regions to create
430    *
431    * @throws IllegalArgumentException if the table name is reserved
432    * @throws MasterNotRunningException if master is not running
433    * @throws org.apache.hadoop.hbase.TableExistsException if the table already exists
434    * (when called concurrently from multiple threads, the table may have been created
435    * between the test for existence and the attempt at creation).
436    * @throws IOException
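   * <p>A brief sketch; the keys and region count are illustrative, and {@code desc}
   * is assumed to be an existing table descriptor:
   * <pre>{@code
   * byte[] startKey = Bytes.toBytes("aaaaaaaa");
   * byte[] endKey   = Bytes.toBytes("zzzzzzzz");
   * // 10 regions: one ending at startKey, one starting at endKey, and eight in between
   * admin.createTable(desc, startKey, endKey, 10);
   * }</pre>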
437    */
438   public void createTable(HTableDescriptor desc, byte [] startKey,
439       byte [] endKey, int numRegions)
440   throws IOException {
441     if(numRegions < 3) {
442       throw new IllegalArgumentException("Must create at least three regions");
443     } else if(Bytes.compareTo(startKey, endKey) >= 0) {
444       throw new IllegalArgumentException("Start key must be smaller than end key");
445     }
446     if (numRegions == 3) {
447       createTable(desc, new byte[][]{startKey, endKey});
448       return;
449     }
450     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
451     if(splitKeys == null || splitKeys.length != numRegions - 1) {
452       throw new IllegalArgumentException("Unable to split key range into enough regions");
453     }
454     createTable(desc, splitKeys);
455   }
456 
457   /**
458    * Creates a new table with an initial set of empty regions defined by the
459    * specified split keys.  The total number of regions created will be the
460    * number of split keys plus one. Synchronous operation.
461    * Note: avoid passing an empty split key.
462    *
463    * @param desc table descriptor for table
464    * @param splitKeys array of split keys for the initial regions of the table
465    *
466    * @throws IllegalArgumentException if the table name is reserved, if the split keys
467    * are repeated, or if a split key is an empty byte array.
468    * @throws MasterNotRunningException if master is not running
469    * @throws org.apache.hadoop.hbase.TableExistsException if the table already exists
470    * (when called concurrently from multiple threads, the table may have been created
471    * between the test for existence and the attempt at creation).
472    * @throws IOException
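   * <p>A brief sketch (the split keys are illustrative); three split keys yield four regions:
   * <pre>{@code
   * byte[][] splitKeys = new byte[][] {
   *     Bytes.toBytes("g"), Bytes.toBytes("n"), Bytes.toBytes("t")
   * };
   * admin.createTable(desc, splitKeys);
   * }</pre>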
473    */
474   public void createTable(final HTableDescriptor desc, byte [][] splitKeys)
475   throws IOException {
476     try {
477       createTableAsync(desc, splitKeys);
478     } catch (SocketTimeoutException ste) {
479       LOG.warn("Creating " + desc.getTableName() + " took too long", ste);
480     }
481     int numRegs = splitKeys == null ? 1 : splitKeys.length + 1;
482     int prevRegCount = 0;
483     boolean doneWithMetaScan = false;
484     for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier;
485       ++tries) {
486       if (!doneWithMetaScan) {
487         // Wait for new table to come on-line
488         final AtomicInteger actualRegCount = new AtomicInteger(0);
489         MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
490           @Override
491           public boolean processRow(Result rowResult) throws IOException {
492             HRegionInfo info = HRegionInfo.getHRegionInfo(rowResult);
493             if (info == null) {
494               LOG.warn("No serialized HRegionInfo in " + rowResult);
495               return true;
496             }
497             if (!info.getTableName().equals(desc.getTableName())) {
498               return false;
499             }
500             ServerName serverName = HRegionInfo.getServerName(rowResult);
501             // Make sure that regions are assigned to server
502             if (!(info.isOffline() || info.isSplit()) && serverName != null
503                 && serverName.getHostAndPort() != null) {
504               actualRegCount.incrementAndGet();
505             }
506             return true;
507           }
508         };
509         MetaScanner.metaScan(conf, connection, visitor, desc.getTableName());
510         if (actualRegCount.get() != numRegs) {
511           if (tries == this.numRetries * this.retryLongerMultiplier - 1) {
512             throw new RegionOfflineException("Only " + actualRegCount.get() +
513               " of " + numRegs + " regions are online; retries exhausted.");
514           }
515           try { // Sleep
516             Thread.sleep(getPauseTime(tries));
517           } catch (InterruptedException e) {
518             throw new InterruptedIOException("Interrupted when opening" +
519               " regions; " + actualRegCount.get() + " of " + numRegs +
520               " regions processed so far");
521           }
522           if (actualRegCount.get() > prevRegCount) { // Making progress
523             prevRegCount = actualRegCount.get();
524             tries = -1;
525           }
526         } else {
527           doneWithMetaScan = true;
528           tries = -1;
529         }
530       } else if (isTableEnabled(desc.getTableName())) {
531         return;
532       } else {
533         try { // Sleep
534           Thread.sleep(getPauseTime(tries));
535         } catch (InterruptedException e) {
536           throw new InterruptedIOException("Interrupted when waiting" +
537             " for table to be enabled; meta scan was done");
538         }
539       }
540     }
541     throw new TableNotEnabledException(
542       "Retries exhausted while still waiting for table: "
543       + desc.getTableName() + " to be enabled");
544   }
545 
546   /**
547    * Creates a new table but does not block and wait for it to come online.
548    * Asynchronous operation.  To check if the table exists, use
549    * {@link #isTableAvailable} -- it is not safe to create an HTable
550    * instance for this table before it is available.
551    * Note: avoid passing an empty split key.
552    * @param desc table descriptor for table
553    *
554    * @throws IllegalArgumentException if the table name is invalid, if the split keys
555    * are repeated, or if a split key is an empty byte array.
556    * @throws MasterNotRunningException if master is not running
557    * @throws org.apache.hadoop.hbase.TableExistsException if the table already exists
558    * (when called concurrently from multiple threads, the table may have been created
559    * between the test for existence and the attempt at creation).
560    * @throws IOException
561    */
562   public void createTableAsync(
563     final HTableDescriptor desc, final byte [][] splitKeys)
564   throws IOException {
565     if(desc.getTableName() == null) {
566       throw new IllegalArgumentException("TableName cannot be null");
567     }
568     if(splitKeys != null && splitKeys.length > 0) {
569       Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR);
570       // Verify there are no duplicate split keys
571       byte [] lastKey = null;
572       for(byte [] splitKey : splitKeys) {
573         if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) {
574           throw new IllegalArgumentException(
575               "Empty split key must not be passed in the split keys.");
576         }
577         if(lastKey != null && Bytes.equals(splitKey, lastKey)) {
578           throw new IllegalArgumentException("All split keys must be unique, " +
579             "found duplicate: " + Bytes.toStringBinary(splitKey) +
580             ", " + Bytes.toStringBinary(lastKey));
581         }
582         lastKey = splitKey;
583       }
584     }
585 
586     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
587       @Override
588       public Void call() throws ServiceException {
589         CreateTableRequest request = RequestConverter.buildCreateTableRequest(desc, splitKeys);
590         masterAdmin.createTable(null, request);
591         return null;
592       }
593     });
594   }
595 
596   public void deleteTable(final String tableName) throws IOException {
597     deleteTable(TableName.valueOf(tableName));
598   }
599 
600   public void deleteTable(final byte[] tableName) throws IOException {
601     deleteTable(TableName.valueOf(tableName));
602   }
603 
604   /**
605    * Deletes a table.
606    * Synchronous operation.
607    *
608    * @param tableName name of table to delete
609    * @throws IOException if a remote or network exception occurs
610    */
611   public void deleteTable(final TableName tableName) throws IOException {
612     HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
613     boolean tableExists = true;
614 
615     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
616       @Override
617       public Void call() throws ServiceException {
618         DeleteTableRequest req = RequestConverter.buildDeleteTableRequest(tableName);
619         masterAdmin.deleteTable(null,req);
620         return null;
621       }
622     });
623 
624     // Wait until all regions deleted
625     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
626       try {
627 
628         Scan scan = MetaReader.getScanForTableName(tableName);
629         scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
630         ScanRequest request = RequestConverter.buildScanRequest(
631           firstMetaServer.getRegionInfo().getRegionName(), scan, 1, true);
632         Result[] values = null;
633         // Get a batch at a time.
634         ClientService.BlockingInterface server = connection.getClient(firstMetaServer
635             .getServerName());
636         PayloadCarryingRpcController controller = new PayloadCarryingRpcController();
637         try {
638           ScanResponse response = server.scan(controller, request);
639           values = ResponseConverter.getResults(controller.cellScanner(), response);
640         } catch (ServiceException se) {
641           throw ProtobufUtil.getRemoteException(se);
642         }
643 
644         // let us wait until .META. table is updated and
645         // HMaster removes the table from its HTableDescriptors
646         if (values == null || values.length == 0) {
647           tableExists = false;
648           GetTableDescriptorsResponse htds;
649           MasterMonitorKeepAliveConnection master = connection.getKeepAliveMasterMonitorService();
650           try {
651             GetTableDescriptorsRequest req =
652               RequestConverter.buildGetTableDescriptorsRequest(tableName);
653             htds = master.getTableDescriptors(null, req);
654           } catch (ServiceException se) {
655             throw ProtobufUtil.getRemoteException(se);
656           } finally {
657             master.close();
658           }
659           tableExists = !htds.getTableSchemaList().isEmpty();
660           if (!tableExists) {
661             break;
662           }
663         }
664       } catch (IOException ex) {
665         if(tries == numRetries - 1) {           // no more tries left
666           if (ex instanceof RemoteException) {
667             throw ((RemoteException) ex).unwrapRemoteException();
668           } else {
669             throw ex;
670           }
671         }
672       }
673       try {
674         Thread.sleep(getPauseTime(tries));
675       } catch (InterruptedException e) {
676         // continue
677       }
678     }
679 
680     if (tableExists) {
681       throw new IOException("Retries exhausted, it took too long to wait"+
682         " for the table " + tableName + " to be deleted.");
683     }
684     // Delete cached information to prevent clients from using old locations
685     this.connection.clearRegionCache(tableName);
686     LOG.info("Deleted " + tableName);
687   }
688 
689   /**
690    * Deletes tables matching the passed in pattern and waits on completion.
691    *
692    * Warning: Use this method carefully, there is no prompting and the effect is
693    * immediate. Consider using {@link #listTables(java.lang.String)} and
694    * {@link #deleteTable(byte[])}
695    *
696    * @param regex The regular expression to match table names against
697    * @return Table descriptors for tables that couldn't be deleted
698    * @throws IOException
699    * @see #deleteTables(java.util.regex.Pattern)
700    * @see #deleteTable(java.lang.String)
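   * <p>A brief sketch (the name prefix is illustrative); a table must be disabled
   * before it can be deleted:
   * <pre>{@code
   * for (HTableDescriptor table : admin.listTables("tmp_.*")) {
   *   if (admin.isTableEnabled(table.getTableName())) {
   *     admin.disableTable(table.getTableName());
   *   }
   * }
   * HTableDescriptor[] notDeleted = admin.deleteTables("tmp_.*");
   * }</pre>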
701    */
702   public HTableDescriptor[] deleteTables(String regex) throws IOException {
703     return deleteTables(Pattern.compile(regex));
704   }
705 
706   /**
707    * Delete tables matching the passed in pattern and wait on completion.
708    *
709    * Warning: Use this method carefully, there is no prompting and the effect is
710    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
711    * {@link #deleteTable(byte[])}
712    *
713    * @param pattern The pattern to match table names against
714    * @return Table descriptors for tables that couldn't be deleted
715    * @throws IOException
716    */
717   public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException {
718     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
719     for (HTableDescriptor table : listTables(pattern)) {
720       try {
721         deleteTable(table.getTableName());
722       } catch (IOException ex) {
723         LOG.info("Failed to delete table " + table.getTableName(), ex);
724         failed.add(table);
725       }
726     }
727     return failed.toArray(new HTableDescriptor[failed.size()]);
728   }
729 
730 
731   /**
732    * Enable a table.  May timeout.  Use {@link #enableTableAsync(byte[])}
733    * and {@link #isTableEnabled(byte[])} instead.
734    * The table has to be in disabled state for it to be enabled.
735    * @param tableName name of the table
736    * @throws IOException if a remote or network exception occurs
737    * Several types of IOException may be thrown:
738    * TableNotFoundException means the table doesn't exist;
739    * TableNotDisabledException means the table isn't in the disabled state.
740    * @see #isTableEnabled(byte[])
741    * @see #disableTable(byte[])
742    * @see #enableTableAsync(byte[])
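   * <p>For example:
   * <pre>{@code
   * if (admin.isTableDisabled(tableName)) {
   *   admin.enableTable(tableName);   // blocks until the table is enabled and available
   * }
   * }</pre>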
743    */
744   public void enableTable(final TableName tableName)
745   throws IOException {
746     enableTableAsync(tableName);
747 
748     // Wait until all regions are enabled
749     waitUntilTableIsEnabled(tableName);
750 
751     LOG.info("Enabled table " + tableName);
752   }
753 
754   public void enableTable(final byte[] tableName)
755   throws IOException {
756     enableTable(TableName.valueOf(tableName));
757   }
758 
759   public void enableTable(final String tableName)
760   throws IOException {
761     enableTable(TableName.valueOf(tableName));
762   }
763 
764   /**
765    * Wait for the table to be enabled and available
766    * If enabling the table exceeds the retry period, an exception is thrown.
767    * @param tableName name of the table
768    * @throws IOException if a remote or network exception occurs or
769    *    table is not enabled after the retries period.
770    */
771   private void waitUntilTableIsEnabled(final TableName tableName) throws IOException {
772     boolean enabled = false;
773     long start = EnvironmentEdgeManager.currentTimeMillis();
774     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
775       enabled = isTableEnabled(tableName) && isTableAvailable(tableName);
776       if (enabled) {
777         break;
778       }
779       long sleep = getPauseTime(tries);
780       if (LOG.isDebugEnabled()) {
781         LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
782           "enabled in " + tableName);
783       }
784       try {
785         Thread.sleep(sleep);
786       } catch (InterruptedException e) {
787         Thread.currentThread().interrupt();
788         // Do this conversion rather than let it out because do not want to
789         // change the method signature.
790         throw new IOException("Interrupted", e);
791       }
792     }
793     if (!enabled) {
794       long msec = EnvironmentEdgeManager.currentTimeMillis() - start;
795       throw new IOException("Table '" + tableName +
796         "' not yet enabled, after " + msec + "ms.");
797     }
798   }
799 
800   /**
801    * Brings a table on-line (enables it).  Method returns immediately though
802    * enable of table may take some time to complete, especially if the table
803    * is large (All regions are opened as part of enabling process).  Check
804    * {@link #isTableEnabled(byte[])} to learn when table is fully online.  If
805    * the table is taking too long to come online, check the server logs.
806    * @param tableName
807    * @throws IOException
808    * @since 0.90.0
809    */
810   public void enableTableAsync(final TableName tableName)
811   throws IOException {
812     TableName.isLegalFullyQualifiedTableName(tableName.getName());
813     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
814       @Override
815       public Void call() throws ServiceException {
816         LOG.info("Started enable of " + tableName);
817         EnableTableRequest req = RequestConverter.buildEnableTableRequest(tableName);
818         masterAdmin.enableTable(null,req);
819         return null;
820       }
821     });
822   }
823 
824   public void enableTableAsync(final byte[] tableName)
825   throws IOException {
826     enableTableAsync(TableName.valueOf(tableName));
827   }
828 
829   public void enableTableAsync(final String tableName)
830   throws IOException {
831     enableTableAsync(TableName.valueOf(tableName));
832   }
833 
834   /**
835    * Enable tables matching the passed in pattern and wait on completion.
836    *
837    * Warning: Use this method carefully, there is no prompting and the effect is
838    * immediate. Consider using {@link #listTables(java.lang.String)} and
839    * {@link #enableTable(byte[])}
840    *
841    * @param regex The regular expression to match table names against
842    * @throws IOException
843    * @see #enableTables(java.util.regex.Pattern)
844    * @see #enableTable(java.lang.String)
845    */
846   public HTableDescriptor[] enableTables(String regex) throws IOException {
847     return enableTables(Pattern.compile(regex));
848   }
849 
850   /**
851    * Enable tables matching the passed in pattern and wait on completion.
852    *
853    * Warning: Use this method carefully, there is no prompting and the effect is
854    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
855    * {@link #enableTable(byte[])}
856    *
857    * @param pattern The pattern to match table names against
858    * @throws IOException
859    */
860   public HTableDescriptor[] enableTables(Pattern pattern) throws IOException {
861     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
862     for (HTableDescriptor table : listTables(pattern)) {
863       if (isTableDisabled(table.getTableName())) {
864         try {
865           enableTable(table.getTableName());
866         } catch (IOException ex) {
867           LOG.info("Failed to enable table " + table.getTableName(), ex);
868           failed.add(table);
869         }
870       }
871     }
872     return failed.toArray(new HTableDescriptor[failed.size()]);
873   }
874 
875   /**
876    * Starts the disable of a table.  If it is being served, the master
877    * will tell the servers to stop serving it.  This method returns immediately.
878    * The disable of a table can take some time if the table is large (all
879    * regions are closed as part of table disable operation).
880    * Call {@link #isTableDisabled(byte[])} to check for when disable completes.
881    * If the disable is taking too long, check the server logs.
882    * @param tableName name of table
883    * @throws IOException if a remote or network exception occurs
884    * @see #isTableDisabled(byte[])
885    * @see #isTableEnabled(byte[])
886    * @since 0.90.0
887    */
888   public void disableTableAsync(final TableName tableName) throws IOException {
889     TableName.isLegalFullyQualifiedTableName(tableName.getName());
890     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
891       @Override
892       public Void call() throws ServiceException {
893         LOG.info("Started disable of " + tableName);
894         DisableTableRequest req = RequestConverter.buildDisableTableRequest(tableName);
895         masterAdmin.disableTable(null,req);
896         return null;
897       }
898     });
899   }
900 
901   public void disableTableAsync(final byte[] tableName) throws IOException {
902     disableTableAsync(TableName.valueOf(tableName));
903   }
904 
905   public void disableTableAsync(final String tableName) throws IOException {
906     disableTableAsync(TableName.valueOf(tableName));
907   }
908 
909   /**
910    * Disable table and wait on completion.  May timeout eventually.  Use
911    * {@link #disableTableAsync(byte[])} and {@link #isTableDisabled(String)}
912    * instead.
913    * The table has to be in enabled state for it to be disabled.
914    * @param tableName
915    * @throws IOException
916    * Several types of IOException may be thrown:
917    * TableNotFoundException means the table doesn't exist;
918    * TableNotEnabledException means the table isn't in the enabled state.
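   * <p>For example:
   * <pre>{@code
   * admin.disableTable(tableName);    // blocks until all regions are offline
   * // ... perform work that requires a disabled table, e.g. a snapshot restore ...
   * admin.enableTable(tableName);
   * }</pre>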
919    */
920   public void disableTable(final TableName tableName)
921   throws IOException {
922     disableTableAsync(tableName);
923     // Wait until table is disabled
924     boolean disabled = false;
925     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
926       disabled = isTableDisabled(tableName);
927       if (disabled) {
928         break;
929       }
930       long sleep = getPauseTime(tries);
931       if (LOG.isDebugEnabled()) {
932         LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
933           "disabled in " + tableName);
934       }
935       try {
936         Thread.sleep(sleep);
937       } catch (InterruptedException e) {
938         // Do this conversion rather than let it out because do not want to
939         // change the method signature.
940         Thread.currentThread().interrupt();
941         throw new IOException("Interrupted", e);
942       }
943     }
944     if (!disabled) {
945       throw new RegionException("Retries exhausted, it took too long to wait"+
946         " for the table " + tableName + " to be disabled.");
947     }
948     LOG.info("Disabled " + tableName);
949   }
950 
951   public void disableTable(final byte[] tableName)
952   throws IOException {
953     disableTable(TableName.valueOf(tableName));
954   }
955 
956   public void disableTable(final String tableName)
957   throws IOException {
958     disableTable(TableName.valueOf(tableName));
959   }
960 
961   /**
962    * Disable tables matching the passed in pattern and wait on completion.
963    *
964    * Warning: Use this method carefully, there is no prompting and the effect is
965    * immediate. Consider using {@link #listTables(java.lang.String)} and
966    * {@link #disableTable(byte[])}
967    *
968    * @param regex The regular expression to match table names against
969    * @return Table descriptors for tables that couldn't be disabled
970    * @throws IOException
971    * @see #disableTables(java.util.regex.Pattern)
972    * @see #disableTable(java.lang.String)
973    */
974   public HTableDescriptor[] disableTables(String regex) throws IOException {
975     return disableTables(Pattern.compile(regex));
976   }
977 
978   /**
979    * Disable tables matching the passed in pattern and wait on completion.
980    *
981    * Warning: Use this method carefully, there is no prompting and the effect is
982    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
983    * {@link #disableTable(byte[])}
984    *
985    * @param pattern The pattern to match table names against
986    * @return Table descriptors for tables that couldn't be disabled
987    * @throws IOException
988    */
989   public HTableDescriptor[] disableTables(Pattern pattern) throws IOException {
990     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
991     for (HTableDescriptor table : listTables(pattern)) {
992       if (isTableEnabled(table.getTableName())) {
993         try {
994           disableTable(table.getTableName());
995         } catch (IOException ex) {
996           LOG.info("Failed to disable table " + table.getTableName(), ex);
997           failed.add(table);
998         }
999       }
1000     }
1001     return failed.toArray(new HTableDescriptor[failed.size()]);
1002   }
1003 
1004   /**
1005    * @param tableName name of table to check
1006    * @return true if table is on-line
1007    * @throws IOException if a remote or network exception occurs
1008    */
1009   public boolean isTableEnabled(TableName tableName) throws IOException {
1010     return connection.isTableEnabled(tableName);
1011   }
1012 
1013   public boolean isTableEnabled(byte[] tableName) throws IOException {
1014     return isTableEnabled(TableName.valueOf(tableName));
1015   }
1016 
1017   public boolean isTableEnabled(String tableName) throws IOException {
1018     return isTableEnabled(TableName.valueOf(tableName));
1019   }
1020 
1021 
1022 
1023   /**
1024    * @param tableName name of table to check
1025    * @return true if table is off-line
1026    * @throws IOException if a remote or network exception occurs
1027    */
1028   public boolean isTableDisabled(TableName tableName) throws IOException {
1029     return connection.isTableDisabled(tableName);
1030   }
1031 
1032   public boolean isTableDisabled(byte[] tableName) throws IOException {
1033     return isTableDisabled(TableName.valueOf(tableName));
1034   }
1035 
1036   public boolean isTableDisabled(String tableName) throws IOException {
1037     return isTableDisabled(TableName.valueOf(tableName));
1038   }
1039 
1040   /**
1041    * @param tableName name of table to check
1042    * @return true if all regions of the table are available
1043    * @throws IOException if a remote or network exception occurs
1044    */
1045   public boolean isTableAvailable(TableName tableName) throws IOException {
1046     return connection.isTableAvailable(tableName);
1047   }
1048 
1049   public boolean isTableAvailable(byte[] tableName) throws IOException {
1050     return isTableAvailable(TableName.valueOf(tableName));
1051   }
1052 
1053   public boolean isTableAvailable(String tableName) throws IOException {
1054     return isTableAvailable(TableName.valueOf(tableName));
1055   }
1056 
1057   /**
1058    * Use this API to check if the table has been created with the specified number of
1059    * split keys that were used while creating the given table.
1060    * Note: if this API is used after a table's regions have split, it may return
1061    * false.
1062    * @param tableName
1063    *          name of table to check
1064    * @param splitKeys
1065    *          keys to check if the table has been created with all split keys
1066    * @throws IOException
1067    *           if a remote or network exception occurs
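   * <p>A brief sketch (the split keys are illustrative); {@code desc} is the
   * descriptor the table was created with:
   * <pre>{@code
   * byte[][] splitKeys = new byte[][] { Bytes.toBytes("g"), Bytes.toBytes("n") };
   * admin.createTable(desc, splitKeys);
   * boolean ready = admin.isTableAvailable(desc.getTableName(), splitKeys);
   * }</pre>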
1068    */
1069   public boolean isTableAvailable(TableName tableName,
1070                                   byte[][] splitKeys) throws IOException {
1071     return connection.isTableAvailable(tableName, splitKeys);
1072   }
1073 
1074   public boolean isTableAvailable(byte[] tableName,
1075                                   byte[][] splitKeys) throws IOException {
1076     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1077   }
1078 
1079   public boolean isTableAvailable(String tableName,
1080                                   byte[][] splitKeys) throws IOException {
1081     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1082   }
1083 
1084   /**
1085    * Get the status of an alter command - indicates how many regions have received
1086    * the updated schema. Asynchronous operation.
1087    *
1088    * @param tableName TableName instance
1089    * @return Pair indicating the number of regions updated: Pair.getFirst() is the
1090    *         number of regions that are yet to be updated; Pair.getSecond() is the
1091    *         total number of regions of the table
1092    * @throws IOException
1093    *           if a remote or network exception occurs
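   * <p>For example, to poll the progress of an alter:
   * <pre>{@code
   * Pair<Integer, Integer> status = admin.getAlterStatus(tableName);
   * int yetToUpdate = status.getFirst();
   * int totalRegions = status.getSecond();
   * }</pre>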
1094    */
1095   public Pair<Integer, Integer> getAlterStatus(final TableName tableName)
1096   throws IOException {
1097     return executeCallable(new MasterMonitorCallable<Pair<Integer, Integer>>(getConnection()) {
1098       @Override
1099       public Pair<Integer, Integer> call() throws ServiceException {
1100         GetSchemaAlterStatusRequest req = RequestConverter
1101             .buildGetSchemaAlterStatusRequest(tableName);
1102         GetSchemaAlterStatusResponse ret = masterMonitor.getSchemaAlterStatus(null, req);
1103         Pair<Integer, Integer> pair = new Pair<Integer, Integer>(Integer.valueOf(ret
1104             .getYetToUpdateRegions()), Integer.valueOf(ret.getTotalRegions()));
1105         return pair;
1106       }
1107     });
1108   }
1109 
1110   /**
1111    * Get the status of an alter command - indicates how many regions have received
1112    * the updated schema. Asynchronous operation.
1113    *
1114    * @param tableName
1115    *          name of the table to get the status of
1116    * @return Pair indicating the number of regions updated: Pair.getFirst() is the
1117    *         number of regions that are yet to be updated; Pair.getSecond() is the
1118    *         total number of regions of the table
1119    * @throws IOException
1120    *           if a remote or network exception occurs
1121    */
1122   public Pair<Integer, Integer> getAlterStatus(final byte[] tableName)
1123    throws IOException {
1124     return getAlterStatus(TableName.valueOf(tableName));
1125   }
1126 
1127   /**
1128    * Add a column to an existing table.
1129    * Asynchronous operation.
1130    *
1131    * @param tableName name of the table to add column to
1132    * @param column column descriptor of column to be added
1133    * @throws IOException if a remote or network exception occurs
1134    */
1135   public void addColumn(final byte[] tableName, HColumnDescriptor column)
1136   throws IOException {
1137     addColumn(TableName.valueOf(tableName), column);
1138   }
1139 
1140 
1141   /**
1142    * Add a column to an existing table.
1143    * Asynchronous operation.
1144    *
1145    * @param tableName name of the table to add column to
1146    * @param column column descriptor of column to be added
1147    * @throws IOException if a remote or network exception occurs
1148    */
1149   public void addColumn(final String tableName, HColumnDescriptor column)
1150   throws IOException {
1151     addColumn(TableName.valueOf(tableName), column);
1152   }
1153 
1154   /**
1155    * Add a column to an existing table.
1156    * Asynchronous operation.
1157    *
1158    * @param tableName name of the table to add column to
1159    * @param column column descriptor of column to be added
1160    * @throws IOException if a remote or network exception occurs
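   * <p>For example (the family name is illustrative):
   * <pre>{@code
   * admin.addColumn(tableName, new HColumnDescriptor("new_family"));
   * }</pre>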
1161    */
1162   public void addColumn(final TableName tableName, final HColumnDescriptor column)
1163   throws IOException {
1164     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
1165       @Override
1166       public Void call() throws ServiceException {
1167         AddColumnRequest req = RequestConverter.buildAddColumnRequest(tableName, column);
1168         masterAdmin.addColumn(null,req);
1169         return null;
1170       }
1171     });
1172   }
1173 
1174   /**
1175    * Delete a column from a table.
1176    * Asynchronous operation.
1177    *
1178    * @param tableName name of table
1179    * @param columnName name of column to be deleted
1180    * @throws IOException if a remote or network exception occurs
1181    */
1182   public void deleteColumn(final byte[] tableName, final String columnName)
1183   throws IOException {
1184     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1185   }
1186 
1187   /**
1188    * Delete a column from a table.
1189    * Asynchronous operation.
1190    *
1191    * @param tableName name of table
1192    * @param columnName name of column to be deleted
1193    * @throws IOException if a remote or network exception occurs
1194    */
1195   public void deleteColumn(final String tableName, final String columnName)
1196   throws IOException {
1197     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1198   }
1199 
1200   /**
1201    * Delete a column from a table.
1202    * Asynchronous operation.
1203    *
1204    * @param tableName name of table
1205    * @param columnName name of column to be deleted
1206    * @throws IOException if a remote or network exception occurs
1207    */
1208   public void deleteColumn(final TableName tableName, final byte [] columnName)
1209   throws IOException {
1210     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
1211       @Override
1212       public Void call() throws ServiceException {
1213         DeleteColumnRequest req = RequestConverter.buildDeleteColumnRequest(tableName, columnName);
1214         masterAdmin.deleteColumn(null,req);
1215         return null;
1216       }
1217     });
1218   }
1219 
1220   /**
1221    * Modify an existing column family on a table.
1222    * Asynchronous operation.
1223    *
1224    * @param tableName name of table
1225    * @param descriptor new column descriptor to use
1226    * @throws IOException if a remote or network exception occurs
1227    */
1228   public void modifyColumn(final String tableName, HColumnDescriptor descriptor)
1229   throws IOException {
1230     modifyColumn(TableName.valueOf(tableName), descriptor);
1231   }
1232 
1233   /**
1234    * Modify an existing column family on a table.
1235    * Asynchronous operation.
1236    *
1237    * @param tableName name of table
1238    * @param descriptor new column descriptor to use
1239    * @throws IOException if a remote or network exception occurs
1240    */
1241   public void modifyColumn(final byte[] tableName, HColumnDescriptor descriptor)
1242   throws IOException {
1243     modifyColumn(TableName.valueOf(tableName), descriptor);
1244   }
1245 
1246 
1247 
1248   /**
1249    * Modify an existing column family on a table.
1250    * Asynchronous operation.
1251    *
1252    * @param tableName name of table
1253    * @param descriptor new column descriptor to use
1254    * @throws IOException if a remote or network exception occurs
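   * <p>For example (the family name and setting are illustrative); note that the new
   * descriptor replaces the existing one for that family:
   * <pre>{@code
   * HColumnDescriptor family = new HColumnDescriptor("cf");
   * family.setMaxVersions(5);
   * admin.modifyColumn(tableName, family);
   * }</pre>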
1255    */
1256   public void modifyColumn(final TableName tableName, final HColumnDescriptor descriptor)
1257   throws IOException {
1258     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
1259       @Override
1260       public Void call() throws ServiceException {
1261         ModifyColumnRequest req = RequestConverter.buildModifyColumnRequest(tableName, descriptor);
1262         masterAdmin.modifyColumn(null,req);
1263         return null;
1264       }
1265     });
1266   }
1267 
1268   /**
1269    * Close a region. For expert-admins.  Runs close on the regionserver.  The
1270    * master will not be informed of the close.
1271    * @param regionname region name to close
1272    * @param serverName If supplied, we'll use this location rather than
1273    * the one currently in <code>.META.</code>
1274    * @throws IOException if a remote or network exception occurs
1275    */
1276   public void closeRegion(final String regionname, final String serverName)
1277   throws IOException {
1278     closeRegion(Bytes.toBytes(regionname), serverName);
1279   }
1280 
1281   /**
1282    * Close a region.  For expert-admins.  Runs close on the regionserver.  The
1283    * master will not be informed of the close.
1284    * @param regionname region name to close
1285    * @param serverName The servername of the regionserver.  If passed null we
1286    * will use servername found in the .META. table. A server name
1287    * is made of host, port and startcode.  Here is an example:
1288    * <code> host187.example.com,60020,1289493121758</code>
1289    * @throws IOException if a remote or network exception occurs
1290    */
1291   public void closeRegion(final byte [] regionname, final String serverName)
1292   throws IOException {
1293     CatalogTracker ct = getCatalogTracker();
1294     try {
1295       if (serverName != null) {
1296         Pair<HRegionInfo, ServerName> pair = MetaReader.getRegion(ct, regionname);
1297         if (pair == null || pair.getFirst() == null) {
1298           throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1299         } else {
1300           closeRegion(new ServerName(serverName), pair.getFirst());
1301         }
1302       } else {
1303         Pair<HRegionInfo, ServerName> pair = MetaReader.getRegion(ct, regionname);
1304         if (pair == null) {
1305           throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1306         } else if (pair.getSecond() == null) {
1307           throw new NoServerForRegionException(Bytes.toStringBinary(regionname));
1308         } else {
1309           closeRegion(pair.getSecond(), pair.getFirst());
1310         }
1311       }
1312     } finally {
1313       cleanupCatalogTracker(ct);
1314     }
1315   }
1316 
1317   /**
1318    * For expert-admins. Runs close on the regionserver. Closes a region based on
1319    * the encoded region name. The region server name is mandatory: the region is
1320    * closed only if it is found among the online regions of the specified
1321    * regionserver. The master will not be informed of the close. Note that the
1322    * regionname is the encoded regionname.
1323    *
1324    * @param encodedRegionName
1325    *          The encoded region name; i.e. the hash that makes up the region
1326    *          name suffix: e.g. if regionname is
1327    *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>
1328    *          , then the encoded region name is:
1329    *          <code>527db22f95c8a9e0116f0cc13c680396</code>.
1330    * @param serverName
1331    *          The servername of the regionserver. A server name is made of host,
1332    *          port and startcode. This is mandatory. Here is an example:
1333    *          <code> host187.example.com,60020,1289493121758</code>
1334    * @return true if the region was closed, false if not.
1335    * @throws IOException
1336    *           if a remote or network exception occurs
1337    */
1338   public boolean closeRegionWithEncodedRegionName(final String encodedRegionName,
1339       final String serverName) throws IOException {
1340     if (serverName == null || serverName.trim().isEmpty()) {
1341       throw new IllegalArgumentException(
1342           "The servername cannot be null or empty.");
1343     }
1344     ServerName sn = new ServerName(serverName);
1345     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1346     // Close the region without updating zk state.
1347     CloseRegionRequest request =
1348       RequestConverter.buildCloseRegionRequest(encodedRegionName, false);
1349     try {
1350       CloseRegionResponse response = admin.closeRegion(null, request);
1351       boolean isRegionClosed = response.getClosed();
1352       if (!isRegionClosed) {
1353         LOG.error("Not able to close the region " + encodedRegionName + ".");
1354       }
1355       return isRegionClosed;
1356     } catch (ServiceException se) {
1357       throw ProtobufUtil.getRemoteException(se);
1358     }
1359   }
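
       // Hedged example (assumes an "admin" instance; the values mirror the javadoc above):
       //
       //   boolean closed = admin.closeRegionWithEncodedRegionName(
       //       "527db22f95c8a9e0116f0cc13c680396",
       //       "host187.example.com,60020,1289493121758");
       //   // false (with an error logged) means the regionserver could not close the region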
1360 
1361   /**
1362    * Close a region.  For expert-admins.  Runs close on the regionserver.  The
1363    * master will not be informed of the close.
1364    * @param sn
1365    * @param hri
1366    * @throws IOException
1367    */
1368   public void closeRegion(final ServerName sn, final HRegionInfo hri)
1369   throws IOException {
1370     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1371     // Close the region without updating zk state.
1372     ProtobufUtil.closeRegion(admin, hri.getRegionName(), false);
1373   }
1374 
1375   /**
1376    * Get all the online regions on a region server.
1377    */
1378   public List<HRegionInfo> getOnlineRegions(
1379       final ServerName sn) throws IOException {
1380     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1381     return ProtobufUtil.getOnlineRegions(admin);
1382   }
1383 
1384   /**
1385    * Flush a table or an individual region.
1386    * Synchronous operation.
1387    *
1388    * @param tableNameOrRegionName table or region to flush
1389    * @throws IOException if a remote or network exception occurs
1390    * @throws InterruptedException
1391    */
1392   public void flush(final String tableNameOrRegionName)
1393   throws IOException, InterruptedException {
1394     flush(Bytes.toBytes(tableNameOrRegionName));
1395   }
1396 
1397   /**
1398    * Flush a table or an individual region.
1399    * Synchronous operation.
1400    *
1401    * @param tableNameOrRegionName table or region to flush
1402    * @throws IOException if a remote or network exception occurs
1403    * @throws InterruptedException
1404    */
1405   public void flush(final byte[] tableNameOrRegionName)
1406   throws IOException, InterruptedException {
1407     CatalogTracker ct = getCatalogTracker();
1408     try {
1409       Pair<HRegionInfo, ServerName> regionServerPair
1410         = getRegion(tableNameOrRegionName, ct);
1411       if (regionServerPair != null) {
1412         if (regionServerPair.getSecond() == null) {
1413           throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
1414         } else {
1415           flush(regionServerPair.getSecond(), regionServerPair.getFirst());
1416         }
1417       } else {
1418         final TableName tableName = checkTableExists(
1419             TableName.valueOf(tableNameOrRegionName), ct);
1420         List<Pair<HRegionInfo, ServerName>> pairs =
1421           MetaReader.getTableRegionsAndLocations(ct,
1422               tableName);
1423         for (Pair<HRegionInfo, ServerName> pair: pairs) {
1424           if (pair.getFirst().isOffline()) continue;
1425           if (pair.getSecond() == null) continue;
1426           try {
1427             flush(pair.getSecond(), pair.getFirst());
1428           } catch (NotServingRegionException e) {
1429             if (LOG.isDebugEnabled()) {
1430               LOG.debug("Trying to flush " + pair.getFirst() + ": " +
1431                 StringUtils.stringifyException(e));
1432             }
1433           }
1434         }
1435       }
1436     } finally {
1437       cleanupCatalogTracker(ct);
1438     }
1439   }
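
       // Sketch of a typical call (assumes an "admin" instance and a table "myTable"):
       //
       //   admin.flush("myTable");   // flushes every online region of the table; offline regions
       //                             // and regions without a known location are skipped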
1440 
1441   private void flush(final ServerName sn, final HRegionInfo hri)
1442   throws IOException {
1443     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1444     FlushRegionRequest request =
1445       RequestConverter.buildFlushRegionRequest(hri.getRegionName());
1446     try {
1447       admin.flushRegion(null, request);
1448     } catch (ServiceException se) {
1449       throw ProtobufUtil.getRemoteException(se);
1450     }
1451   }
1452 
1453   /**
1454    * Compact a table or an individual region.
1455    * Asynchronous operation.
1456    *
1457    * @param tableNameOrRegionName table or region to compact
1458    * @throws IOException if a remote or network exception occurs
1459    * @throws InterruptedException
1460    */
1461   public void compact(final String tableNameOrRegionName)
1462   throws IOException, InterruptedException {
1463     compact(Bytes.toBytes(tableNameOrRegionName));
1464   }
1465 
1466   /**
1467    * Compact a table or an individual region.
1468    * Asynchronous operation.
1469    *
1470    * @param tableNameOrRegionName table or region to compact
1471    * @throws IOException if a remote or network exception occurs
1472    * @throws InterruptedException
1473    */
1474   public void compact(final byte[] tableNameOrRegionName)
1475   throws IOException, InterruptedException {
1476     compact(tableNameOrRegionName, null, false);
1477   }
1478 
1479   /**
1480    * Compact a column family within a table or region.
1481    * Asynchronous operation.
1482    *
1483    * @param tableOrRegionName table or region to compact
1484    * @param columnFamily column family within a table or region
1485    * @throws IOException if a remote or network exception occurs
1486    * @throws InterruptedException
1487    */
1488   public void compact(String tableOrRegionName, String columnFamily)
1489     throws IOException,  InterruptedException {
1490     compact(Bytes.toBytes(tableOrRegionName), Bytes.toBytes(columnFamily));
1491   }
1492 
1493   /**
1494    * Compact a column family within a table or region.
1495    * Asynchronous operation.
1496    *
1497    * @param tableNameOrRegionName table or region to compact
1498    * @param columnFamily column family within a table or region
1499    * @throws IOException if a remote or network exception occurs
1500    * @throws InterruptedException
1501    */
1502   public void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
1503   throws IOException, InterruptedException {
1504     compact(tableNameOrRegionName, columnFamily, false);
1505   }
1506 
1507   /**
1508    * Major compact a table or an individual region.
1509    * Asynchronous operation.
1510    *
1511    * @param tableNameOrRegionName table or region to major compact
1512    * @throws IOException if a remote or network exception occurs
1513    * @throws InterruptedException
1514    */
1515   public void majorCompact(final String tableNameOrRegionName)
1516   throws IOException, InterruptedException {
1517     majorCompact(Bytes.toBytes(tableNameOrRegionName));
1518   }
1519 
1520   /**
1521    * Major compact a table or an individual region.
1522    * Asynchronous operation.
1523    *
1524    * @param tableNameOrRegionName table or region to major compact
1525    * @throws IOException if a remote or network exception occurs
1526    * @throws InterruptedException
1527    */
1528   public void majorCompact(final byte[] tableNameOrRegionName)
1529   throws IOException, InterruptedException {
1530     compact(tableNameOrRegionName, null, true);
1531   }
1532 
1533   /**
1534    * Major compact a column family within a table or region.
1535    * Asynchronous operation.
1536    *
1537    * @param tableNameOrRegionName table or region to major compact
1538    * @param columnFamily column family within a table or region
1539    * @throws IOException if a remote or network exception occurs
1540    * @throws InterruptedException
1541    */
1542   public void majorCompact(final String tableNameOrRegionName,
1543     final String columnFamily) throws IOException, InterruptedException {
1544     majorCompact(Bytes.toBytes(tableNameOrRegionName),
1545       Bytes.toBytes(columnFamily));
1546   }
1547 
1548   /**
1549    * Major compact a column family within a table or region.
1550    * Asynchronous operation.
1551    *
1552    * @param tableNameOrRegionName table or region to major compact
1553    * @param columnFamily column family within a table or region
1554    * @throws IOException if a remote or network exception occurs
1555    * @throws InterruptedException
1556    */
1557   public void majorCompact(final byte[] tableNameOrRegionName,
1558     final byte[] columnFamily) throws IOException, InterruptedException {
1559     compact(tableNameOrRegionName, columnFamily, true);
1560   }
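
       // Hedged examples of the compaction entry points above (assumes an "admin" instance):
       //
       //   admin.compact("myTable");              // request minor compactions for every region
       //   admin.majorCompact("myTable", "cf");   // request a major compaction of family "cf" only
       //
       // Both calls are asynchronous: they only queue the compaction on each regionserver.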
1561 
1562   /**
1563    * Compact a table or an individual region.
1564    * Asynchronous operation.
1565    *
1566    * @param tableNameOrRegionName table or region to compact
1567    * @param columnFamily column family within a table or region
1568    * @param major True if we are to do a major compaction.
1569    * @throws IOException if a remote or network exception occurs
1570    * @throws InterruptedException
1571    */
1572   private void compact(final byte[] tableNameOrRegionName,
1573     final byte[] columnFamily,final boolean major)
1574   throws IOException, InterruptedException {
1575     CatalogTracker ct = getCatalogTracker();
1576     try {
1577       Pair<HRegionInfo, ServerName> regionServerPair
1578         = getRegion(tableNameOrRegionName, ct);
1579       if (regionServerPair != null) {
1580         if (regionServerPair.getSecond() == null) {
1581           throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
1582         } else {
1583           compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
1584         }
1585       } else {
1586         final TableName tableName =
1587             checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
1588         List<Pair<HRegionInfo, ServerName>> pairs =
1589           MetaReader.getTableRegionsAndLocations(ct,
1590               tableName);
1591         for (Pair<HRegionInfo, ServerName> pair: pairs) {
1592           if (pair.getFirst().isOffline()) continue;
1593           if (pair.getSecond() == null) continue;
1594           try {
1595             compact(pair.getSecond(), pair.getFirst(), major, columnFamily);
1596           } catch (NotServingRegionException e) {
1597             if (LOG.isDebugEnabled()) {
1598               LOG.debug("Trying to" + (major ? " major" : "") + " compact " +
1599                 pair.getFirst() + ": " +
1600                 StringUtils.stringifyException(e));
1601             }
1602           }
1603         }
1604       }
1605     } finally {
1606       cleanupCatalogTracker(ct);
1607     }
1608   }
1609 
1610   private void compact(final ServerName sn, final HRegionInfo hri,
1611       final boolean major, final byte [] family)
1612   throws IOException {
1613     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1614     CompactRegionRequest request =
1615       RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family);
1616     try {
1617       admin.compactRegion(null, request);
1618     } catch (ServiceException se) {
1619       throw ProtobufUtil.getRemoteException(se);
1620     }
1621   }
1622 
1623   /**
1624    * Move the region <code>r</code> to <code>dest</code>.
1625    * @param encodedRegionName The encoded region name; i.e. the hash that makes
1626    * up the region name suffix: e.g. if regionname is
1627    * <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
1628    * then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
1629    * @param destServerName The servername of the destination regionserver.  If
1630    * passed the empty byte array we'll assign to a random server.  A server name
1631    * is made of host, port and startcode.  Here is an example:
1632    * <code> host187.example.com,60020,1289493121758</code>
1633    * @throws UnknownRegionException Thrown if we can't find a region named
1634    * <code>encodedRegionName</code>
1635    * @throws ZooKeeperConnectionException
1636    * @throws MasterNotRunningException
1637    */
1638   public void move(final byte [] encodedRegionName, final byte [] destServerName)
1639   throws HBaseIOException, MasterNotRunningException, ZooKeeperConnectionException {
1640     MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService();
1641     try {
1642       MoveRegionRequest request =
1643         RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName);
1644       stub.moveRegion(null,request);
1645     } catch (ServiceException se) {
1646       IOException ioe = ProtobufUtil.getRemoteException(se);
1647       if (ioe instanceof HBaseIOException) {
1648         throw (HBaseIOException)ioe;
1649       }
1650       LOG.error("Unexpected exception: " + se + " from calling HMaster.moveRegion");
1651     } catch (DeserializationException de) {
1652       LOG.error("Could not parse destination server name: " + de);
1653     } finally {
1654       stub.close();
1655     }
1656   }
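
       // Illustrative call (assumes an "admin" instance; the names mirror the javadoc example):
       //
       //   admin.move(Bytes.toBytes("527db22f95c8a9e0116f0cc13c680396"),
       //       Bytes.toBytes("host187.example.com,60020,1289493121758"));
       //   // passing HConstants.EMPTY_BYTE_ARRAY as the destination lets the master pick a server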
1657 
1658   /**
1659    * @param regionName
1660    *          Region name to assign.
1661    * @throws MasterNotRunningException
1662    * @throws ZooKeeperConnectionException
1663    * @throws IOException
1664    */
1665   public void assign(final byte[] regionName) throws MasterNotRunningException,
1666       ZooKeeperConnectionException, IOException {
1667     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
1668       @Override
1669       public Void call() throws ServiceException {
1670         AssignRegionRequest request = RequestConverter.buildAssignRegionRequest(regionName);
1671         masterAdmin.assignRegion(null,request);
1672         return null;
1673       }
1674     });
1675   }
1676 
1677   /**
1678    * Unassign a region from current hosting regionserver.  Region will then be
1679    * assigned to a regionserver chosen at random.  Region could be reassigned
1680    * back to the same server.  Use {@link #move(byte[], byte[])} if you want
1681    * to control the region movement.
1682    * @param regionName Region to unassign. Will clear any existing RegionPlan
1683    * if one found.
1684    * @param force If true, force the unassign (this will also remove the region
1685    * from regions-in-transition if present; if it results in a double assignment,
1686    * use hbck -fix to resolve; to be used by experts).
1687    * @throws MasterNotRunningException
1688    * @throws ZooKeeperConnectionException
1689    * @throws IOException
1690    */
1691   public void unassign(final byte [] regionName, final boolean force)
1692   throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
1693     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
1694       @Override
1695       public Void call() throws ServiceException {
1696         UnassignRegionRequest request =
1697           RequestConverter.buildUnassignRegionRequest(regionName, force);
1698         masterAdmin.unassignRegion(null,request);
1699         return null;
1700       }
1701     });
1702   }
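
       // Sketch (assumes an "admin" instance and the region name bytes in "regionName"):
       //
       //   admin.assign(regionName);            // ask the master to (re)assign the region
       //   admin.unassign(regionName, false);   // or unassign it and let the master pick a new server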
1703 
1704   /**
1705    * Special method, only used by hbck.
1706    */
1707   public void offline(final byte [] regionName)
1708   throws IOException {
1709     MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdminService();
1710     try {
1711       master.offlineRegion(null,RequestConverter.buildOfflineRegionRequest(regionName));
1712     } catch (ServiceException se) {
1713       throw ProtobufUtil.getRemoteException(se);
1714     } finally {
1715       master.close();
1716     }
1717   }
1718 
1719   /**
1720    * Turn the load balancer on or off.
1721    * @param on If true, enable balancer. If false, disable balancer.
1722    * @param synchronous If true, it waits until current balance() call, if outstanding, to return.
1723    * @return Previous balancer value
1724    */
1725   public boolean setBalancerRunning(final boolean on, final boolean synchronous)
1726   throws MasterNotRunningException, ZooKeeperConnectionException {
1727     MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService();
1728     try {
1729       SetBalancerRunningRequest req =
1730         RequestConverter.buildSetBalancerRunningRequest(on, synchronous);
1731       return stub.setBalancerRunning(null, req).getPrevBalanceValue();
1732     } catch (ServiceException se) {
1733       IOException ioe = ProtobufUtil.getRemoteException(se);
1734       if (ioe instanceof MasterNotRunningException) {
1735         throw (MasterNotRunningException)ioe;
1736       }
1737       if (ioe instanceof ZooKeeperConnectionException) {
1738         throw (ZooKeeperConnectionException)ioe;
1739       }
1740 
1741       // Throwing MasterNotRunningException even though not really valid in order to not
1742       // break interface by adding additional exception type.
1743       throw new MasterNotRunningException("Unexpected exception when calling balanceSwitch",se);
1744     } finally {
1745       stub.close();
1746     }
1747   }
1748 
1749   /**
1750    * Invoke the balancer.  Will run the balancer and, if there are regions to move,
1751    * go ahead and do the reassignments.  The balancer may refuse to run for various
1752    * reasons; check the master logs.
1753    * @return True if balancer ran, false otherwise.
1754    */
1755   public boolean balancer()
1756   throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException {
1757     MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService();
1758     try {
1759       return stub.balance(null,RequestConverter.buildBalanceRequest()).getBalancerRan();
1760     } finally {
1761       stub.close();
1762     }
1763   }
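
       // Hedged example (assumes an "admin" instance): enable the balancer and run one pass.
       //
       //   boolean previous = admin.setBalancerRunning(true, true); // true: wait for any in-flight balance()
       //   boolean ran = admin.balancer();                          // false if the balancer refused to run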
1764 
1765   /**
1766    * Enable/Disable the catalog janitor
1767    * @param enable if true enables the catalog janitor
1768    * @return the previous state
1769    * @throws ServiceException
1770    * @throws MasterNotRunningException
1771    */
1772   public boolean enableCatalogJanitor(boolean enable)
1773       throws ServiceException, MasterNotRunningException {
1774     MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService();
1775     try {
1776       return stub.enableCatalogJanitor(null,
1777           RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue();
1778     } finally {
1779       stub.close();
1780     }
1781   }
1782 
1783   /**
1784    * Ask for a scan of the catalog table
1785    * @return the number of entries cleaned
1786    * @throws ServiceException
1787    * @throws MasterNotRunningException
1788    */
1789   public int runCatalogScan() throws ServiceException, MasterNotRunningException {
1790     MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService();
1791     try {
1792       return stub.runCatalogScan(null,
1793           RequestConverter.buildCatalogScanRequest()).getScanResult();
1794     } finally {
1795       stub.close();
1796     }
1797   }
1798 
1799   /**
1800    * Query on the catalog janitor state (Enabled/Disabled?)
1801    * @throws ServiceException
1802    * @throws org.apache.hadoop.hbase.MasterNotRunningException
1803    */
1804   public boolean isCatalogJanitorEnabled() throws ServiceException, MasterNotRunningException {
1805     MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService();
1806     try {
1807       return stub.isCatalogJanitorEnabled(null,
1808           RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue();
1809     } finally {
1810       stub.close();
1811     }
1812   }
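
       // Sketch of a manual catalog-janitor cycle (assumes an "admin" instance):
       //
       //   boolean wasEnabled = admin.enableCatalogJanitor(false); // pause the background janitor
       //   int cleaned = admin.runCatalogScan();                   // run one catalog scan by hand
       //   admin.enableCatalogJanitor(wasEnabled);                 // restore the previous state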
1813 
1814   /**
1815    * Merge two regions. Asynchronous operation.
1816    * @param encodedNameOfRegionA encoded name of region a
1817    * @param encodedNameOfRegionB encoded name of region b
1818    * @param forcible true if do a compulsory merge, otherwise we will only merge
1819    *          two adjacent regions
1820    * @throws IOException
1821    */
1822   public void mergeRegions(final byte[] encodedNameOfRegionA,
1823       final byte[] encodedNameOfRegionB, final boolean forcible)
1824       throws IOException {
1825     MasterAdminKeepAliveConnection master = connection
1826         .getKeepAliveMasterAdminService();
1827     try {
1828       DispatchMergingRegionsRequest request = RequestConverter
1829           .buildDispatchMergingRegionsRequest(encodedNameOfRegionA,
1830               encodedNameOfRegionB, forcible);
1831       master.dispatchMergingRegions(null, request);
1832     } catch (ServiceException se) {
1833       IOException ioe = ProtobufUtil.getRemoteException(se);
1834       if (ioe instanceof UnknownRegionException) {
1835         throw (UnknownRegionException) ioe;
1836       }
1837       if (ioe instanceof MergeRegionException) {
1838         throw (MergeRegionException) ioe;
1839       }
1840       LOG.error("Unexpected exception: " + se
1841           + " from calling HMaster.dispatchMergingRegions");
1842     } catch (DeserializationException de) {
1843       LOG.error("Could not parse destination server name: " + de);
1844     } finally {
1845       master.close();
1846     }
1847   }
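
       // Illustrative call (assumes an "admin" instance and the encoded names of two adjacent regions):
       //
       //   admin.mergeRegions(Bytes.toBytes(encodedNameOfRegionA),
       //       Bytes.toBytes(encodedNameOfRegionB), false);  // false: do not force non-adjacent merges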
1848 
1849   /**
1850    * Split a table or an individual region.
1851    * Asynchronous operation.
1852    *
1853    * @param tableNameOrRegionName table or region to split
1854    * @throws IOException if a remote or network exception occurs
1855    * @throws InterruptedException
1856    */
1857   public void split(final String tableNameOrRegionName)
1858   throws IOException, InterruptedException {
1859     split(Bytes.toBytes(tableNameOrRegionName));
1860   }
1861 
1862   /**
1863    * Split a table or an individual region.  Implicitly finds an optimal split
1864    * point.  Asynchronous operation.
1865    *
1866    * @param tableNameOrRegionName table or region to split
1867    * @throws IOException if a remote or network exception occurs
1868    * @throws InterruptedException
1869    */
1870   public void split(final byte[] tableNameOrRegionName)
1871   throws IOException, InterruptedException {
1872     split(tableNameOrRegionName, null);
1873   }
1874 
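       /**
        * Split a table or an individual region at an explicit split point.
        * Asynchronous operation.
        *
        * @param tableNameOrRegionName table or region to split
        * @param splitPoint the explicit position to split on
        * @throws IOException if a remote or network exception occurs
        * @throws InterruptedException interrupt exception occurred
        */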
1875   public void split(final String tableNameOrRegionName,
1876     final String splitPoint) throws IOException, InterruptedException {
1877     split(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(splitPoint));
1878   }
1879 
1880   /**
1881    * Split a table or an individual region.
1882    * Asynchronous operation.
1883    *
1884    * @param tableNameOrRegionName table or region to split
1885    * @param splitPoint the explicit position to split on
1886    * @throws IOException if a remote or network exception occurs
1887    * @throws InterruptedException interrupt exception occurred
1888    */
1889   public void split(final byte[] tableNameOrRegionName,
1890       final byte [] splitPoint) throws IOException, InterruptedException {
1891     CatalogTracker ct = getCatalogTracker();
1892     try {
1893       Pair<HRegionInfo, ServerName> regionServerPair
1894         = getRegion(tableNameOrRegionName, ct);
1895       if (regionServerPair != null) {
1896         if (regionServerPair.getSecond() == null) {
1897             throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
1898         } else {
1899           split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
1900         }
1901       } else {
1902         final TableName tableName =
1903             checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
1904         List<Pair<HRegionInfo, ServerName>> pairs =
1905           MetaReader.getTableRegionsAndLocations(ct,
1906               tableName);
1907         for (Pair<HRegionInfo, ServerName> pair: pairs) {
1908           // May not be a server for a particular row
1909           if (pair.getSecond() == null) continue;
1910           HRegionInfo r = pair.getFirst();
1911           // check for parents
1912           if (r.isSplitParent()) continue;
1913           // if a split point given, only split that particular region
1914           if (splitPoint != null && !r.containsRow(splitPoint)) continue;
1915           // call out to region server to do split now
1916           split(pair.getSecond(), pair.getFirst(), splitPoint);
1917         }
1918       }
1919     } finally {
1920       cleanupCatalogTracker(ct);
1921     }
1922   }
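
       // Hedged examples (assumes an "admin" instance and a table "myTable"):
       //
       //   admin.split("myTable");             // split every eligible region at its optimal point
       //   admin.split("myTable", "row5000");  // split only the region containing "row5000", at that row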
1923 
1924   private void split(final ServerName sn, final HRegionInfo hri,
1925       byte[] splitPoint) throws IOException {
1926     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1927     ProtobufUtil.split(admin, hri, splitPoint);
1928   }
1929 
1930   /**
1931    * Modify an existing table, more IRB friendly version.
1932    * Asynchronous operation.  This means that it may be a while before your
1933    * schema change takes effect across all regions of the table.
1934    *
1935    * @param tableName name of table.
1936    * @param htd modified description of the table
1937    * @throws IOException if a remote or network exception occurs
1938    */
1939   public void modifyTable(final TableName tableName, final HTableDescriptor htd)
1940   throws IOException {
1941     if (!tableName.equals(htd.getTableName())) {
1942       throw new IllegalArgumentException("the specified table name '" + tableName +
1943         "' doesn't match with the HTD one: " + htd.getTableName());
1944     }
1945 
1946     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
1947       @Override
1948       public Void call() throws ServiceException {
1949         ModifyTableRequest request = RequestConverter.buildModifyTableRequest(tableName, htd);
1950         masterAdmin.modifyTable(null, request);
1951         return null;
1952       }
1953     });
1954   }
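
       // Sketch (assumes an "admin" instance and an HTableDescriptor "htd" for "myTable",
       // obtained elsewhere; the new max file size is purely illustrative):
       //
       //   htd.setMaxFileSize(4L * 1024 * 1024 * 1024);  // example schema change
       //   admin.modifyTable("myTable", htd);            // asynchronous; rolls out across all regions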
1955 
1956   public void modifyTable(final byte[] tableName, final HTableDescriptor htd)
1957   throws IOException {
1958     modifyTable(TableName.valueOf(tableName), htd);
1959   }
1960 
1961   public void modifyTable(final String tableName, final HTableDescriptor htd)
1962   throws IOException {
1963     modifyTable(TableName.valueOf(tableName), htd);
1964   }
1965 
1966   /**
1967    * @param tableNameOrRegionName Name of a table or name of a region.
1968    * @param ct A {@link CatalogTracker} instance (caller of this method usually has one).
1969    * @return a pair of HRegionInfo and ServerName if <code>tableNameOrRegionName</code> is
1970    *  a verified region name (we call {@link MetaReader#getRegion(CatalogTracker, byte[])});
1971    *  else null.
1972    * Throws an exception if <code>tableNameOrRegionName</code> is null.
1973    * @throws IOException
1974    */
1975   Pair<HRegionInfo, ServerName> getRegion(final byte[] tableNameOrRegionName,
1976       final CatalogTracker ct) throws IOException {
1977     if (tableNameOrRegionName == null) {
1978       throw new IllegalArgumentException("Pass a table name or region name");
1979     }
1980     Pair<HRegionInfo, ServerName> pair = MetaReader.getRegion(ct, tableNameOrRegionName);
1981     if (pair == null) {
1982       final AtomicReference<Pair<HRegionInfo, ServerName>> result =
1983         new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
1984       final String encodedName = Bytes.toString(tableNameOrRegionName);
1985       MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
1986         @Override
1987         public boolean processRow(Result data) throws IOException {
1988           HRegionInfo info = HRegionInfo.getHRegionInfo(data);
1989           if (info == null) {
1990             LOG.warn("No serialized HRegionInfo in " + data);
1991             return true;
1992           }
1993           if (!encodedName.equals(info.getEncodedName())) return true;
1994           ServerName sn = HRegionInfo.getServerName(data);
1995           result.set(new Pair<HRegionInfo, ServerName>(info, sn));
1996           return false; // found the region, stop
1997         }
1998       };
1999 
2000       MetaScanner.metaScan(conf, connection, visitor, null);
2001       pair = result.get();
2002     }
2003     return pair;
2004   }
2005 
2006   /**
2007    * Check if table exists or not
2008    * @param tableName Name of a table.
2009    * @param ct A {@link CatalogTracker} instance (caller of this method usually has one).
2010    * @return tableName instance
2011    * @throws IOException if a remote or network exception occurs.
2012    * @throws TableNotFoundException if table does not exist.
2013    */
2014   //TODO rename this method
2015   private TableName checkTableExists(
2016       final TableName tableName, CatalogTracker ct)
2017       throws IOException {
2018     if (!MetaReader.tableExists(ct, tableName)) {
2019       throw new TableNotFoundException(tableName);
2020     }
2021     return tableName;
2022   }
2023 
2024   /**
2025    * Shuts down the HBase cluster
2026    * @throws IOException if a remote or network exception occurs
2027    */
2028   public synchronized void shutdown() throws IOException {
2029     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
2030       @Override
2031       public Void call() throws ServiceException {
2032         masterAdmin.shutdown(null,ShutdownRequest.newBuilder().build());
2033         return null;
2034       }
2035     });
2036   }
2037 
2038   /**
2039    * Shuts down the current HBase master only.
2040    * Does not shutdown the cluster.
2041    * @see #shutdown()
2042    * @throws IOException if a remote or network exception occurs
2043    */
2044   public synchronized void stopMaster() throws IOException {
2045     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
2046       @Override
2047       public Void call() throws ServiceException {
2048         masterAdmin.stopMaster(null,StopMasterRequest.newBuilder().build());
2049         return null;
2050       }
2051     });
2052   }
2053 
2054   /**
2055    * Stop the designated regionserver
2056    * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
2057    * <code>example.org:1234</code>
2058    * @throws IOException if a remote or network exception occurs
2059    */
2060   public synchronized void stopRegionServer(final String hostnamePort)
2061   throws IOException {
2062     String hostname = Addressing.parseHostname(hostnamePort);
2063     int port = Addressing.parsePort(hostnamePort);
2064     AdminService.BlockingInterface admin =
2065       this.connection.getAdmin(new ServerName(hostname, port, 0));
2066     StopServerRequest request = RequestConverter.buildStopServerRequest(
2067       "Called by admin client " + this.connection.toString());
2068     try {
2069       admin.stopServer(null, request);
2070     } catch (ServiceException se) {
2071       throw ProtobufUtil.getRemoteException(se);
2072     }
2073   }
2074 
2075 
2076   /**
2077    * @return cluster status
2078    * @throws IOException if a remote or network exception occurs
2079    */
2080   public ClusterStatus getClusterStatus() throws IOException {
2081     return executeCallable(new MasterMonitorCallable<ClusterStatus>(getConnection()) {
2082       @Override
2083       public ClusterStatus call() throws ServiceException {
2084         GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest();
2085         return ClusterStatus.convert(masterMonitor.getClusterStatus(null,req).getClusterStatus());
2086       }
2087     });
2088   }
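
       // Illustrative use (assumes an "admin" instance):
       //
       //   ClusterStatus status = admin.getClusterStatus();
       //   int liveServers = status.getServersSize();   // number of live regionservers
       //   double avgLoad = status.getAverageLoad();    // average number of regions per server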
2089 
2090   private HRegionLocation getFirstMetaServerForTable(final TableName tableName)
2091   throws IOException {
2092     return connection.locateRegion(TableName.META_TABLE_NAME,
2093       HRegionInfo.createRegionName(tableName, null, HConstants.NINES, false));
2094   }
2095 
2096   /**
2097    * @return Configuration used by the instance.
2098    */
2099   public Configuration getConfiguration() {
2100     return this.conf;
2101   }
2102 
2103   /**
2104    * Create a new namespace
2105    * @param descriptor descriptor which describes the new namespace
2106    * @throws IOException
2107    */
2108   public void createNamespace(final NamespaceDescriptor descriptor) throws IOException {
2109     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
2110       @Override
2111       public Void call() throws Exception {
2112         masterAdmin.createNamespace(null,
2113             MasterAdminProtos.CreateNamespaceRequest.newBuilder()
2114                 .setNamespaceDescriptor(ProtobufUtil
2115                     .toProtoNamespaceDescriptor(descriptor)).build());
2116         return null;
2117       }
2118     });
2119   }
2120 
2121   /**
2122    * Modify an existing namespace
2123    * @param descriptor descriptor which describes the new namespace
2124    * @throws IOException
2125    */
2126   public void modifyNamespace(final NamespaceDescriptor descriptor) throws IOException {
2127     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
2128       @Override
2129       public Void call() throws Exception {
2130         masterAdmin.modifyNamespace(null,
2131             MasterAdminProtos.ModifyNamespaceRequest.newBuilder()
2132                 .setNamespaceDescriptor(ProtobufUtil
2133                     .toProtoNamespaceDescriptor(descriptor)).build());
2134         return null;
2135       }
2136     });
2137   }
2138 
2139   /**
2140    * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
2141    * @param name namespace name
2142    * @throws IOException
2143    */
2144   public void deleteNamespace(final String name) throws IOException {
2145     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
2146       @Override
2147       public Void call() throws Exception {
2148         masterAdmin.deleteNamespace(null,
2149             MasterAdminProtos.DeleteNamespaceRequest.newBuilder()
2150                 .setNamespaceName(name).build());
2151         return null;
2152       }
2153     });
2154   }
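
       // Sketch of a namespace lifecycle (assumes an "admin" instance; "my_ns" is illustrative):
       //
       //   admin.createNamespace(NamespaceDescriptor.create("my_ns").build());
       //   // ... create tables as "my_ns:tableName", use them, then drop them ...
       //   admin.deleteNamespace("my_ns");   // only succeeds once the namespace holds no tables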
2155 
2156   /**
2157    * Get a namespace descriptor by name
2158    * @param name name of namespace descriptor
2159    * @return A descriptor
2160    * @throws IOException
2161    */
2162   public NamespaceDescriptor getNamespaceDescriptor(final String name) throws IOException {
2163     return
2164         executeCallable(new MasterAdminCallable<NamespaceDescriptor>(getConnection()) {
2165           @Override
2166           public NamespaceDescriptor call() throws Exception {
2167             return ProtobufUtil.toNamespaceDescriptor(
2168               masterAdmin.getNamespaceDescriptor(null,
2169                   MasterAdminProtos.GetNamespaceDescriptorRequest.newBuilder()
2170                     .setNamespaceName(name).build()).getNamespaceDescriptor());
2171           }
2172         });
2173   }
2174 
2175   /**
2176    * List available namespace descriptors
2177    * @return List of descriptors
2178    * @throws IOException
2179    */
2180   public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
2181     return
2182         executeCallable(new MasterAdminCallable<NamespaceDescriptor[]>(getConnection()) {
2183           @Override
2184           public NamespaceDescriptor[] call() throws Exception {
2185             List<HBaseProtos.NamespaceDescriptor> list =
2186                 masterAdmin.listNamespaceDescriptors(null,
2187                     MasterAdminProtos.ListNamespaceDescriptorsRequest.newBuilder().build())
2188                     .getNamespaceDescriptorList();
2189             NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()];
2190             for(int i = 0; i < list.size(); i++) {
2191               res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i));
2192             }
2193             return res;
2194           }
2195         });
2196   }
2197 
2198   /**
2199    * Get list of table descriptors by namespace
2200    * @param name namespace name
2201    * @return A descriptor
2202    * @throws IOException
2203    */
2204   public HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException {
2205     return
2206         executeCallable(new MasterAdminCallable<HTableDescriptor[]>(getConnection()) {
2207           @Override
2208           public HTableDescriptor[] call() throws Exception {
2209             List<TableSchema> list =
2210                 masterAdmin.listTableDescriptorsByNamespace(null,
2211                     MasterAdminProtos.ListTableDescriptorsByNamespaceRequest.newBuilder()
2212                         .setNamespaceName(name).build())
2213                             .getTableSchemaList();
2214             HTableDescriptor[] res = new HTableDescriptor[list.size()];
2215             for(int i = 0; i < list.size(); i++) {
2217               res[i] = HTableDescriptor.convert(list.get(i));
2218             }
2219             return res;
2220           }
2221         });
2222   }
2223 
2224   /**
2225    * Get list of table names by namespace
2226    * @param name namespace name
2227    * @return The list of table names in the namespace
2228    * @throws IOException
2229    */
2230   public TableName[] listTableNamesByNamespace(final String name) throws IOException {
2231     return
2232         executeCallable(new MasterAdminCallable<TableName[]>(getConnection()) {
2233           @Override
2234           public TableName[] call() throws Exception {
2235             List<HBaseProtos.TableName> tableNames =
2236                 masterAdmin.listTableNamesByNamespace(null,
2237                     MasterAdminProtos.ListTableNamesByNamespaceRequest.newBuilder()
2238                         .setNamespaceName(name).build())
2239                 .getTableNameList();
2240             TableName[] result = new TableName[tableNames.size()];
2241             for (int i = 0; i < tableNames.size(); i++) {
2242               result[i] = ProtobufUtil.toTableName(tableNames.get(i));
2243             }
2244             return result;
2245           }
2246         });
2247   }
2248 
2249   /**
2250    * Check to see if HBase is running. Throw an exception if not.
2251    * We consider that HBase is running if ZooKeeper and Master are running.
2252    *
2253    * @param conf system configuration
2254    * @throws MasterNotRunningException if the master is not running
2255    * @throws ZooKeeperConnectionException if unable to connect to zookeeper
2256    */
2257   public static void checkHBaseAvailable(Configuration conf)
2258     throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException {
2259     Configuration copyOfConf = HBaseConfiguration.create(conf);
2260 
2261     // We set it to make it fail as soon as possible if HBase is not available
2262     copyOfConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
2263     copyOfConf.setInt("zookeeper.recovery.retry", 0);
2264 
2265     HConnectionManager.HConnectionImplementation connection
2266       = (HConnectionManager.HConnectionImplementation)
2267       HConnectionManager.getConnection(copyOfConf);
2268 
2269     try {
2270       // Check ZK first.
2271       // If the connection exists, we may have a connection to ZK that does
2272       //  not work anymore
2273       ZooKeeperKeepAliveConnection zkw = null;
2274       try {
2275         zkw = connection.getKeepAliveZooKeeperWatcher();
2276         zkw.getRecoverableZooKeeper().getZooKeeper().exists(
2277           zkw.baseZNode, false);
2278 
2279       } catch (IOException e) {
2280         throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
2281       } catch (InterruptedException e) {
2282         Thread.currentThread().interrupt();
2283         throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
2284       } catch (KeeperException e) {
2285         throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
2286       } finally {
2287         if (zkw != null) {
2288           zkw.close();
2289         }
2290       }
2291 
2292       // Check Master
2293       connection.isMasterRunning();
2294 
2295     } finally {
2296       connection.close();
2297     }
2298   }
2299 
2300   /**
2301    * Get the regions of a given table.
2302    *
2303    * @param tableName the name of the table
2304    * @return Ordered list of {@link HRegionInfo}.
2305    * @throws IOException
2306    */
2307   public List<HRegionInfo> getTableRegions(final TableName tableName)
2308   throws IOException {
2309     CatalogTracker ct = getCatalogTracker();
2310     List<HRegionInfo> regions = null;
2311     try {
2312       regions = MetaReader.getTableRegions(ct, tableName, true);
2313     } finally {
2314       cleanupCatalogTracker(ct);
2315     }
2316     return regions;
2317   }
2318 
2319   public List<HRegionInfo> getTableRegions(final byte[] tableName)
2320   throws IOException {
2321     return getTableRegions(TableName.valueOf(tableName));
2322   }
2323 
2324   @Override
2325   public void close() throws IOException {
2326     if (cleanupConnectionOnClose && this.connection != null) {
2327       this.connection.close();
2328     }
2329   }
2330 
2331   /**
2332    * Get tableDescriptors
2333    * @param tableNames List of table names
2334    * @return HTD[] the tableDescriptor
2335    * @throws IOException if a remote or network exception occurs
2336    */
2337   public HTableDescriptor[] getTableDescriptorsByTableName(List<TableName> tableNames)
2338   throws IOException {
2339     return this.connection.getHTableDescriptorsByTableName(tableNames);
2340   }
2341 
2342   /**
2343    * Get tableDescriptors
2344    * @param names List of table names
2345    * @return HTD[] the tableDescriptor
2346    * @throws IOException if a remote or network exception occurs
2347    */
2348   public HTableDescriptor[] getTableDescriptors(List<String> names)
2349   throws IOException {
2350     List<TableName> tableNames = new ArrayList<TableName>(names.size());
2351     for(String name : names) {
2352       tableNames.add(TableName.valueOf(name));
2353     }
2354     return getTableDescriptorsByTableName(tableNames);
2355   }
2356 
2357   /**
2358    * Roll the log writer. That is, start writing log messages to a new file.
2359    *
2360    * @param serverName
2361    *          The servername of the regionserver. A server name is made of host,
2362    *          port and startcode. This is mandatory. Here is an example:
2363    *          <code> host187.example.com,60020,1289493121758</code>
2364    * @return The regions to flush so that, next time through, the old logs can be
2365    * cleaned. Returns null if there is nothing to flush.  Names are the encoded
2366    * region names as returned by {@link HRegionInfo#getEncodedName()}
2367    * @throws IOException if a remote or network exception occurs
2368    * @throws FailedLogCloseException
2369    */
2370   public synchronized byte[][] rollHLogWriter(String serverName)
2371       throws IOException, FailedLogCloseException {
2372     ServerName sn = new ServerName(serverName);
2373     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2374     RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest();
2375     try {
2376       RollWALWriterResponse response = admin.rollWALWriter(null, request);
2377       int regionCount = response.getRegionToFlushCount();
2378       byte[][] regionsToFlush = new byte[regionCount][];
2379       for (int i = 0; i < regionCount; i++) {
2380         ByteString region = response.getRegionToFlush(i);
2381         regionsToFlush[i] = region.toByteArray();
2382       }
2383       return regionsToFlush;
2384     } catch (ServiceException se) {
2385       throw ProtobufUtil.getRemoteException(se);
2386     }
2387   }
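
       // Hedged example (assumes an "admin" instance; the server name mirrors the javadoc above):
       //
       //   byte[][] regions = admin.rollHLogWriter("host187.example.com,60020,1289493121758");
       //   if (regions != null) {
       //     // flush these regions so the regionserver can archive its old log files
       //   }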
2388 
2389   public String[] getMasterCoprocessors() {
2390     try {
2391       return getClusterStatus().getMasterCoprocessors();
2392     } catch (IOException e) {
2393       LOG.error("Could not getClusterStatus()",e);
2394       return null;
2395     }
2396   }
2397 
2398   /**
2399    * Get the current compaction state of a table or region.
2400    * It could be in a major compaction, a minor compaction, both, or none.
2401    *
2402    * @param tableNameOrRegionName table or region to check
2403    * @throws IOException if a remote or network exception occurs
2404    * @throws InterruptedException
2405    * @return the current compaction state
2406    */
2407   public CompactionState getCompactionState(final String tableNameOrRegionName)
2408       throws IOException, InterruptedException {
2409     return getCompactionState(Bytes.toBytes(tableNameOrRegionName));
2410   }
2411 
2412   /**
2413    * Get the current compaction state of a table or region.
2414    * It could be in a major compaction, a minor compaction, both, or none.
2415    *
2416    * @param tableNameOrRegionName table or region to check
2417    * @throws IOException if a remote or network exception occurs
2418    * @throws InterruptedException
2419    * @return the current compaction state
2420    */
2421   public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
2422       throws IOException, InterruptedException {
2423     CompactionState state = CompactionState.NONE;
2424     CatalogTracker ct = getCatalogTracker();
2425     try {
2426       Pair<HRegionInfo, ServerName> regionServerPair
2427         = getRegion(tableNameOrRegionName, ct);
2428       if (regionServerPair != null) {
2429         if (regionServerPair.getSecond() == null) {
2430           throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
2431         } else {
2432           ServerName sn = regionServerPair.getSecond();
2433           AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2434           GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
2435             regionServerPair.getFirst().getRegionName(), true);
2436           GetRegionInfoResponse response = admin.getRegionInfo(null, request);
2437           return response.getCompactionState();
2438         }
2439       } else {
2440         final TableName tableName =
2441             checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
2442         List<Pair<HRegionInfo, ServerName>> pairs =
2443           MetaReader.getTableRegionsAndLocations(ct, tableName);
2444         for (Pair<HRegionInfo, ServerName> pair: pairs) {
2445           if (pair.getFirst().isOffline()) continue;
2446           if (pair.getSecond() == null) continue;
2447           try {
2448             ServerName sn = pair.getSecond();
2449             AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2450             GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
2451               pair.getFirst().getRegionName(), true);
2452             GetRegionInfoResponse response = admin.getRegionInfo(null, request);
2453             switch (response.getCompactionState()) {
2454             case MAJOR_AND_MINOR:
2455               return CompactionState.MAJOR_AND_MINOR;
2456             case MAJOR:
2457               if (state == CompactionState.MINOR) {
2458                 return CompactionState.MAJOR_AND_MINOR;
2459               }
2460               state = CompactionState.MAJOR;
2461               break;
2462             case MINOR:
2463               if (state == CompactionState.MAJOR) {
2464                 return CompactionState.MAJOR_AND_MINOR;
2465               }
2466               state = CompactionState.MINOR;
2467               break;
2468             case NONE:
2469               default: // nothing, continue
2470             }
2471           } catch (NotServingRegionException e) {
2472             if (LOG.isDebugEnabled()) {
2473               LOG.debug("Trying to get compaction state of " +
2474                 pair.getFirst() + ": " +
2475                 StringUtils.stringifyException(e));
2476             }
2477           }
2478         }
2479       }
2480     } catch (ServiceException se) {
2481       throw ProtobufUtil.getRemoteException(se);
2482     } finally {
2483       cleanupCatalogTracker(ct);
2484     }
2485     return state;
2486   }
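
       // Sketch (assumes an "admin" instance and a table "myTable"):
       //
       //   if (admin.getCompactionState("myTable") == CompactionState.NONE) {
       //     admin.majorCompact("myTable");   // only request a major compaction when the table is idle
       //   }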
2487 
2488   /**
2489    * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
2490    * taken. If the table is disabled, an offline snapshot is taken.
2491    * <p>
2492    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2493    * snapshot with the same name (even a different type or with different parameters) will fail with
2494    * a {@link SnapshotCreationException} indicating the duplicate naming.
2495    * <p>
2496    * Snapshot names follow the same naming constraints as tables in HBase. See
2497    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
2498    * @param snapshotName name of the snapshot to be created
2499    * @param tableName name of the table for which snapshot is created
2500    * @throws IOException if a remote or network exception occurs
2501    * @throws SnapshotCreationException if snapshot creation failed
2502    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2503    */
2504   public void snapshot(final String snapshotName,
2505                        final TableName tableName) throws IOException,
2506       SnapshotCreationException, IllegalArgumentException {
2507     snapshot(snapshotName, tableName, SnapshotDescription.Type.FLUSH);
2508   }
2509 
2510   public void snapshot(final String snapshotName,
2511                        final String tableName) throws IOException,
2512       SnapshotCreationException, IllegalArgumentException {
2513     snapshot(snapshotName, TableName.valueOf(tableName),
2514         SnapshotDescription.Type.FLUSH);
2515   }
2516 
2517   /**
2519    * Create a timestamp consistent snapshot for the given table.
2521    * <p>
2522    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2523    * snapshot with the same name (even a different type or with different parameters) will fail with
2524    * a {@link SnapshotCreationException} indicating the duplicate naming.
2525    * <p>
2526    * Snapshot names follow the same naming constraints as tables in HBase.
2527    * @param snapshotName name of the snapshot to be created
2528    * @param tableName name of the table for which snapshot is created
2529    * @throws IOException if a remote or network exception occurs
2530    * @throws SnapshotCreationException if snapshot creation failed
2531    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2532    */
2533   public void snapshot(final byte[] snapshotName,
2534                        final TableName tableName) throws IOException,
2535       SnapshotCreationException, IllegalArgumentException {
2536     snapshot(Bytes.toString(snapshotName), tableName, SnapshotDescription.Type.FLUSH);
2537   }
2538 
2539   public void snapshot(final byte[] snapshotName,
2540                        final byte[] tableName) throws IOException,
2541       SnapshotCreationException, IllegalArgumentException {
2542     snapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName),
2543         SnapshotDescription.Type.FLUSH);
2544   }
2545 
2546   /**
2547    * Create typed snapshot of the table.
2548    * <p>
2549    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2550    * snapshot with the same name (even a different type or with different parameters) will fail with
2551    * a {@link SnapshotCreationException} indicating the duplicate naming.
2552    * <p>
2553    * Snapshot names follow the same naming constraints as tables in HBase. See
2554    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
2555    * <p>
2556    * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
2557    *          snapshots stored on the cluster
2558    * @param tableName name of the table to snapshot
2559    * @param type type of snapshot to take
2560    * @throws IOException if we fail to reach the master
2561    * @throws SnapshotCreationException if snapshot creation failed
2562    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2563    */
2564   public void snapshot(final String snapshotName,
2565                        final TableName tableName,
2566                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
2567       IllegalArgumentException {
2568     SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
2569     builder.setTable(tableName.getNameAsString());
2570     builder.setName(snapshotName);
2571     builder.setType(type);
2572     snapshot(builder.build());
2573   }
2574 
2575   public void snapshot(final String snapshotName,
2576                        final String tableName,
2577                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
2578       IllegalArgumentException {
2579     snapshot(snapshotName, TableName.valueOf(tableName), type);
2580   }
2581 
2582   public void snapshot(final String snapshotName,
2583                        final byte[] tableName,
2584                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
2585       IllegalArgumentException {
2586     snapshot(snapshotName, TableName.valueOf(tableName), type);
2587   }
2588 
2589   /**
2590    * Take a snapshot and wait for the server to complete that snapshot (blocking).
2591    * <p>
2592    * Only a single snapshot should be taken at a time for an instance of HBase, or results may be
2593    * undefined (you can tell multiple HBase clusters to snapshot at the same time, but only one at a
2594    * time for a single cluster).
2595    * <p>
2596    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
2597    * snapshot with the same name (even a different type or with different parameters) will fail with
2598    * a {@link SnapshotCreationException} indicating the duplicate naming.
2599    * <p>
2600    * Snapshot names follow the same naming constraints as tables in HBase. See
2601    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
2602    * <p>
2603    * You should probably use {@link #snapshot(String, String)} or {@link #snapshot(byte[], byte[])}
2604    * unless you are sure about the type of snapshot that you want to take.
2605    * @param snapshot snapshot to take
2606    * @throws IOException if we lose contact with the master.
2607    * @throws SnapshotCreationException if snapshot failed to be taken
2608    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2609    */
2610   public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException,
2611       IllegalArgumentException {
2612     // actually take the snapshot
2613     TakeSnapshotResponse response = takeSnapshotAsync(snapshot);
2614     final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot)
2615         .build();
2616     IsSnapshotDoneResponse done = null;
2617     long start = EnvironmentEdgeManager.currentTimeMillis();
2618     long max = response.getExpectedTimeout();
2619     long maxPauseTime = max / this.numRetries;
2620     int tries = 0;
2621     LOG.debug("Waiting a max of " + max + " ms for snapshot '" +
2622         ClientSnapshotDescriptionUtils.toString(snapshot) + "' to complete. (max " +
2623         maxPauseTime + " ms per retry)");
2624     while (tries == 0
2625         || ((EnvironmentEdgeManager.currentTimeMillis() - start) < max && !done.getDone())) {
2626       try {
2627         // sleep a backoff <= pauseTime amount
2628         long sleep = getPauseTime(tries++);
2629         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
2630         LOG.debug("(#" + tries + ") Sleeping: " + sleep +
2631           "ms while waiting for snapshot completion.");
2632         Thread.sleep(sleep);
2633 
2634       } catch (InterruptedException e) {
2635         LOG.debug("Interrupted while waiting for snapshot " + snapshot + " to complete");
2636         Thread.currentThread().interrupt();
2637       }
2638       LOG.debug("Getting current status of snapshot from master...");
2639       done = executeCallable(new MasterAdminCallable<IsSnapshotDoneResponse>(getConnection()) {
2640         @Override
2641         public IsSnapshotDoneResponse call() throws ServiceException {
2642           return masterAdmin.isSnapshotDone(null, request);
2643         }
2644       });
2645     }
2646     if (!done.getDone()) {
2647       throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
2648           + "' wasn't completed in the expected time: " + max + " ms", snapshot);
2649     }
2650   }
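
  /*
   * Usage sketch (illustrative only): building a SnapshotDescription by hand and passing it
   * to the blocking snapshot(SnapshotDescription) method above. The names are hypothetical
   * and "admin" is the HBaseAdmin instance from the previous sketch.
   *
   *   SnapshotDescription desc = SnapshotDescription.newBuilder()
   *       .setName("orders-20131015")
   *       .setTable("orders")
   *       .setType(SnapshotDescription.Type.FLUSH)
   *       .build();
   *   admin.snapshot(desc);   // returns only once the master reports the snapshot done
   */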
2651 
2652   /**
2653    * Take a snapshot without waiting for the server to complete that snapshot (asynchronous)
2654    * <p>
2655    * Only a single snapshot should be taken at a time, or results may be undefined.
2656    * @param snapshot snapshot to take
2657    * @return response from the server indicating the max time to wait for the snapshot
2658    * @throws IOException if the snapshot did not succeed or we lose contact with the master.
2659    * @throws SnapshotCreationException if snapshot creation failed
2660    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
2661    */
2662   public TakeSnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot) throws IOException,
2663       SnapshotCreationException {
2664     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
2665     final TakeSnapshotRequest request = TakeSnapshotRequest.newBuilder().setSnapshot(snapshot)
2666         .build();
2667     // run the snapshot on the master
2668     return executeCallable(new MasterAdminCallable<TakeSnapshotResponse>(getConnection()) {
2669       @Override
2670       public TakeSnapshotResponse call() throws ServiceException {
2671         return masterAdmin.snapshot(null, request);
2672       }
2673     });
2674   }
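
  /*
   * Usage sketch (illustrative only): the asynchronous variant returns immediately with the
   * master's expected timeout; the caller is responsible for polling (see isSnapshotFinished
   * below). "desc" is a SnapshotDescription built as in the previous sketch.
   *
   *   TakeSnapshotResponse resp = admin.takeSnapshotAsync(desc);
   *   long maxWaitMs = resp.getExpectedTimeout();
   */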
2675 
2676   /**
2677    * Check the current state of the passed snapshot.
2678    * <p>
2679    * There are three possible states:
2680    * <ol>
2681    * <li>running - returns <tt>false</tt></li>
2682    * <li>finished - returns <tt>true</tt></li>
2683    * <li>finished with error - throws the exception that caused the snapshot to fail</li>
2684    * </ol>
2685    * <p>
2686    * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
2687    * run/started since the snapshot you are checking, you will receive an
2688    * {@link UnknownSnapshotException}.
2689    * @param snapshot description of the snapshot to check
2690    * @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
2691    *         running
2692    * @throws IOException if we have a network issue
2693    * @throws HBaseSnapshotException if the snapshot failed
2694    * @throws UnknownSnapshotException if the requested snapshot is unknown
2695    */
2696   public boolean isSnapshotFinished(final SnapshotDescription snapshot)
2697       throws IOException, HBaseSnapshotException, UnknownSnapshotException {
2698 
2699     return executeCallable(new MasterAdminCallable<IsSnapshotDoneResponse>(getConnection()) {
2700       @Override
2701       public IsSnapshotDoneResponse call() throws ServiceException {
2702         return masterAdmin.isSnapshotDone(null,
2703           IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build());
2704       }
2705     }).getDone();
2706   }
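
  /*
   * Usage sketch (illustrative only): a simple client-side poll loop built on
   * takeSnapshotAsync and isSnapshotFinished. The 500 ms pause is an arbitrary choice for
   * this example; a real caller might derive its pauses from the expected timeout returned
   * by the asynchronous call.
   *
   *   admin.takeSnapshotAsync(desc);
   *   while (!admin.isSnapshotFinished(desc)) {
   *     Thread.sleep(500);
   *   }
   */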
2707 
2708   /**
2709    * Restore the specified snapshot on the original table. (The table must be disabled)
2710    * Before restoring the table, a new snapshot with the current table state is created.
2711    * In case of failure, the table will be rolled back to its original state.
2712    *
2713    * @param snapshotName name of the snapshot to restore
2714    * @throws IOException if a remote or network exception occurs
2715    * @throws RestoreSnapshotException if snapshot failed to be restored
2716    * @throws IllegalArgumentException if the restore request is formatted incorrectly
2717    */
2718   public void restoreSnapshot(final byte[] snapshotName)
2719       throws IOException, RestoreSnapshotException {
2720     restoreSnapshot(Bytes.toString(snapshotName));
2721   }
2722 
2723   /**
2724    * Restore the specified snapshot on the original table. (The table must be disabled)
2725    * Before restoring the table, a new snapshot with the current table state is created.
2726    * In case of failure, the table will be rolled back to its original state.
2727    *
2728    * @param snapshotName name of the snapshot to restore
2729    * @throws IOException if a remote or network exception occurs
2730    * @throws RestoreSnapshotException if snapshot failed to be restored
2731    * @throws IllegalArgumentException if the restore request is formatted incorrectly
2732    */
2733   public void restoreSnapshot(final String snapshotName)
2734       throws IOException, RestoreSnapshotException {
2735     String rollbackSnapshot = snapshotName + "-" + EnvironmentEdgeManager.currentTimeMillis();
2736 
2737     TableName tableName = null;
2738     for (SnapshotDescription snapshotInfo: listSnapshots()) {
2739       if (snapshotInfo.getName().equals(snapshotName)) {
2740         tableName = TableName.valueOf(snapshotInfo.getTable());
2741         break;
2742       }
2743     }
2744 
2745     if (tableName == null) {
2746       throw new RestoreSnapshotException(
2747         "Unable to find the table name for snapshot=" + snapshotName);
2748     }
2749 
2750     // Take a snapshot of the current state
2751     snapshot(rollbackSnapshot, tableName);
2752 
2753     // Restore snapshot
2754     try {
2755       internalRestoreSnapshot(snapshotName, tableName);
2756     } catch (IOException e) {
2757       // Try to rollback
2758       try {
2759         internalRestoreSnapshot(rollbackSnapshot, tableName);
2760       } catch (IOException ex) {
2761         String msg = "Failed to restore and rollback to snapshot=" + rollbackSnapshot;
2762         LOG.error(msg, ex);
2763         throw new RestoreSnapshotException(msg, ex);
2764       }
2765       String msg = "Restore snapshot=" + snapshotName +
2766         " failed. Rollback to snapshot=" + rollbackSnapshot + " succeeded.";
2767       LOG.error(msg, e);
2768       throw new RestoreSnapshotException(msg, e);
2769     }
2770   }
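
  /*
   * Usage sketch (illustrative only): restoring a snapshot onto its original table. The
   * table must be disabled first, as noted in the javadoc above; disableTable and
   * enableTable are assumed here to be the table-administration calls this class provides.
   *
   *   TableName orders = TableName.valueOf("orders");
   *   admin.disableTable(orders);
   *   admin.restoreSnapshot("orders-20131015");
   *   admin.enableTable(orders);
   */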
2771 
2772   /**
2773    * Create a new table by cloning the snapshot content.
2774    *
2775    * @param snapshotName name of the snapshot to be cloned
2776    * @param tableName name of the table where the snapshot will be restored
2777    * @throws IOException if a remote or network exception occurs
2778    * @throws TableExistsException if table to be created already exists
2779    * @throws RestoreSnapshotException if snapshot failed to be cloned
2780    * @throws IllegalArgumentException if the specified table does not have a valid name
2781    */
2782   public void cloneSnapshot(final byte[] snapshotName, final byte[] tableName)
2783       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
2784     cloneSnapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName));
2785   }
2786 
2787   /**
2788    * Create a new table by cloning the snapshot content.
2789    *
2790    * @param snapshotName name of the snapshot to be cloned
2791    * @param tableName name of the table where the snapshot will be restored
2792    * @throws IOException if a remote or network exception occurs
2793    * @throws TableExistsException if table to be created already exists
2794    * @throws RestoreSnapshotException if snapshot failed to be cloned
2795    * @throws IllegalArgumentException if the specified table does not have a valid name
2796    */
2797   public void cloneSnapshot(final byte[] snapshotName, final TableName tableName)
2798       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
2799     cloneSnapshot(Bytes.toString(snapshotName), tableName);
2800   }
2801 
2804   /**
2805    * Create a new table by cloning the snapshot content.
2806    *
2807    * @param snapshotName name of the snapshot to be cloned
2808    * @param tableName name of the table where the snapshot will be restored
2809    * @throws IOException if a remote or network exception occurs
2810    * @throws TableExistsException if table to be created already exists
2811    * @throws RestoreSnapshotException if snapshot failed to be cloned
2812    * @throws IllegalArgumentException if the specified table does not have a valid name
2813    */
2814   public void cloneSnapshot(final String snapshotName, final String tableName)
2815       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
2816     cloneSnapshot(snapshotName, TableName.valueOf(tableName));
2817   }
2818 
2819   /**
2820    * Create a new table by cloning the snapshot content.
2821    *
2822    * @param snapshotName name of the snapshot to be cloned
2823    * @param tableName name of the table where the snapshot will be restored
2824    * @throws IOException if a remote or network exception occurs
2825    * @throws TableExistsException if table to be created already exists
2826    * @throws RestoreSnapshotException if snapshot failed to be cloned
2827    * @throws IllegalArgumentException if the specified table does not have a valid name
2828    */
2829   public void cloneSnapshot(final String snapshotName, final TableName tableName)
2830       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
2831     if (tableExists(tableName)) {
2832       throw new TableExistsException("Table " + tableName + " already exists");
2833     }
2834     internalRestoreSnapshot(snapshotName, tableName);
2835     waitUntilTableIsEnabled(tableName);
2836   }
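
  /*
   * Usage sketch (illustrative only): cloning a snapshot into a brand-new table. The target
   * table must not exist yet, and the call blocks until the cloned table is enabled.
   *
   *   admin.cloneSnapshot("orders-20131015", TableName.valueOf("orders_copy"));
   */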
2837 
2838   /**
2839    * Execute Restore/Clone snapshot and wait for the server to complete (blocking).
2840    * To check if the cloned table exists, use {@link #isTableAvailable} -- it is not safe to
2841    * create an HTable instance for this table before it is available.
2842    * @param snapshotName snapshot to restore
2843    * @param tableName table name to restore the snapshot on
2844    * @throws IOException if a remote or network exception occurs
2845    * @throws RestoreSnapshotException if snapshot failed to be restored
2846    * @throws IllegalArgumentException if the restore request is formatted incorrectly
2847    */
2848   private void internalRestoreSnapshot(final String snapshotName,
2849       final TableName tableName)
2850       throws IOException, RestoreSnapshotException {
2851     SnapshotDescription snapshot = SnapshotDescription.newBuilder()
2852         .setName(snapshotName).setTable(tableName.getNameAsString()).build();
2853 
2854     // actually restore the snapshot
2855     internalRestoreSnapshotAsync(snapshot);
2856 
2857     final IsRestoreSnapshotDoneRequest request = IsRestoreSnapshotDoneRequest.newBuilder()
2858         .setSnapshot(snapshot).build();
2859     IsRestoreSnapshotDoneResponse done = IsRestoreSnapshotDoneResponse.newBuilder().buildPartial();
2860     final long maxPauseTime = 5000;
2861     int tries = 0;
2862     while (!done.getDone()) {
2863       try {
2864         // sleep a backoff <= pauseTime amount
2865         long sleep = getPauseTime(tries++);
2866         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
2867         LOG.debug("(#" + tries + ") Sleeping: " + sleep + " ms while waiting for snapshot restore to complete.");
2868         Thread.sleep(sleep);
2869       } catch (InterruptedException e) {
2870         LOG.debug("Interrupted while waiting for snapshot " + snapshot + " restore to complete");
2871         Thread.currentThread().interrupt();
2872       }
2873       LOG.debug("Getting current status of snapshot restore from master...");
2874       done = executeCallable(new MasterAdminCallable<IsRestoreSnapshotDoneResponse>(
2875           getConnection()) {
2876         @Override
2877         public IsRestoreSnapshotDoneResponse call() throws ServiceException {
2878           return masterAdmin.isRestoreSnapshotDone(null, request);
2879         }
2880       });
2881     }
2882     if (!done.getDone()) {
2883       throw new RestoreSnapshotException("Snapshot '" + snapshot.getName() + "' wasn't restored.");
2884     }
2885   }
2886 
2887   /**
2888    * Execute Restore/Clone snapshot without waiting for the server to complete (asynchronous).
2889    * <p>
2890    * Only a single snapshot should be restored at a time, or results may be undefined.
2891    * @param snapshot snapshot to restore
2892    * @return response from the server indicating the max time to wait for the snapshot
2893    * @throws IOException if a remote or network exception occurs
2894    * @throws RestoreSnapshotException if snapshot failed to be restored
2895    * @throws IllegalArgumentException if the restore request is formatted incorrectly
2896    */
2897   private RestoreSnapshotResponse internalRestoreSnapshotAsync(final SnapshotDescription snapshot)
2898       throws IOException, RestoreSnapshotException {
2899     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
2900 
2901     final RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot)
2902         .build();
2903 
2904     // run the snapshot restore on the master
2905     return executeCallable(new MasterAdminCallable<RestoreSnapshotResponse>(getConnection()) {
2906       @Override
2907       public RestoreSnapshotResponse call() throws ServiceException {
2908         return masterAdmin.restoreSnapshot(null, request);
2909       }
2910     });
2911   }
2912 
2913   /**
2914    * List completed snapshots.
2915    * @return a list of snapshot descriptors for completed snapshots
2916    * @throws IOException if a network error occurs
2917    */
2918   public List<SnapshotDescription> listSnapshots() throws IOException {
2919     return executeCallable(new MasterAdminCallable<List<SnapshotDescription>>(getConnection()) {
2920       @Override
2921       public List<SnapshotDescription> call() throws ServiceException {
2922         return masterAdmin.getCompletedSnapshots(null, ListSnapshotRequest.newBuilder().build())
2923             .getSnapshotsList();
2924       }
2925     });
2926   }
2927 
2928   /**
2929    * List all the completed snapshots matching the given regular expression.
2930    *
2931    * @param regex The regular expression to match against
2932    * @return a list of SnapshotDescription for the completed snapshots matching the regular expression
2933    * @throws IOException if a remote or network exception occurs
2934    */
2935   public List<SnapshotDescription> listSnapshots(String regex) throws IOException {
2936     return listSnapshots(Pattern.compile(regex));
2937   }
2938 
2939   /**
2940    * List all the completed snapshots matching the given pattern.
2941    *
2942    * @param pattern The compiled regular expression to match against
2943    * @return a list of SnapshotDescription for the completed snapshots matching the pattern
2944    * @throws IOException if a remote or network exception occurs
2945    */
2946   public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException {
2947     List<SnapshotDescription> matched = new LinkedList<SnapshotDescription>();
2948     List<SnapshotDescription> snapshots = listSnapshots();
2949     for (SnapshotDescription snapshot : snapshots) {
2950       if (pattern.matcher(snapshot.getName()).matches()) {
2951         matched.add(snapshot);
2952       }
2953     }
2954     return matched;
2955   }
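
  /*
   * Usage sketch (illustrative only): listing the completed snapshots whose names match a
   * pattern and printing their source tables. The pattern is hypothetical.
   *
   *   for (SnapshotDescription sd : admin.listSnapshots(Pattern.compile("orders-.*"))) {
   *     System.out.println(sd.getName() + " taken from table " + sd.getTable());
   *   }
   */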
2956 
2957   /**
2958    * Delete an existing snapshot.
2959    * @param snapshotName name of the snapshot
2960    * @throws IOException if a remote or network exception occurs
2961    */
2962   public void deleteSnapshot(final byte[] snapshotName) throws IOException {
2963     deleteSnapshot(Bytes.toString(snapshotName));
2964   }
2965 
2966   /**
2967    * Delete an existing snapshot.
2968    * @param snapshotName name of the snapshot
2969    * @throws IOException if a remote or network exception occurs
2970    */
2971   public void deleteSnapshot(final String snapshotName) throws IOException {
2972     // make sure the snapshot is possibly valid
2973     TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName));
2974     // do the delete
2975     executeCallable(new MasterAdminCallable<Void>(getConnection()) {
2976       @Override
2977       public Void call() throws ServiceException {
2978         masterAdmin.deleteSnapshot(null,
2979           DeleteSnapshotRequest.newBuilder().
2980             setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build());
2981         return null;
2982       }
2983     });
2984   }
2985 
2986   /**
2987    * Delete existing snapshots whose names match the pattern passed.
2988    * @param regex The regular expression to match against
2989    * @throws IOException if a remote or network exception occurs
2990    */
2991   public void deleteSnapshots(final String regex) throws IOException {
2992     deleteSnapshots(Pattern.compile(regex));
2993   }
2994 
2995   /**
2996    * Delete existing snapshots whose names match the pattern passed.
2997    * @param pattern pattern for names of the snapshot to match
2998    * @throws IOException if a remote or network exception occurs
2999    */
3000   public void deleteSnapshots(final Pattern pattern) throws IOException {
3001     List<SnapshotDescription> snapshots = listSnapshots(pattern);
3002     for (final SnapshotDescription snapshot : snapshots) {
3003       // do the delete
3004       executeCallable(new MasterAdminCallable<Void>(getConnection()) {
3005         @Override
3006         public Void call() throws ServiceException {
3007           this.masterAdmin.deleteSnapshot(null,
3008             DeleteSnapshotRequest.newBuilder().setSnapshot(snapshot).build());
3009           return null;
3010         }
3011       });
3012     }
3013   }
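
  /*
   * Usage sketch (illustrative only): deleting a single snapshot by name, or every completed
   * snapshot whose name matches a regular expression. The names are hypothetical.
   *
   *   admin.deleteSnapshot("orders-20131015");
   *   admin.deleteSnapshots("orders-2013.*");
   */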
3014 
3015   /**
3016    * @see #executeCallable(org.apache.hadoop.hbase.client.HBaseAdmin.MasterCallable)
3017    */
3018   abstract static class MasterAdminCallable<V> extends MasterCallable<V> {
3019     protected MasterAdminKeepAliveConnection masterAdmin;
3020 
3021     public MasterAdminCallable(final HConnection connection) {
3022       super(connection);
3023     }
3024 
3025     @Override
3026     public void prepare(boolean reload) throws IOException {
3027       this.masterAdmin = this.connection.getKeepAliveMasterAdminService();
3028     }
3029 
3030     @Override
3031     public void close() throws IOException {
3032       this.masterAdmin.close();
3033     }
3034   }
3035 
3036   /**
3037    * @see #executeCallable(org.apache.hadoop.hbase.client.HBaseAdmin.MasterCallable)
3038    */
3039   abstract static class MasterMonitorCallable<V> extends MasterCallable<V> {
3040     protected MasterMonitorKeepAliveConnection masterMonitor;
3041 
3042     public MasterMonitorCallable(final HConnection connection) {
3043       super(connection);
3044     }
3045 
3046     @Override
3047     public void prepare(boolean reload) throws IOException {
3048       this.masterMonitor = this.connection.getKeepAliveMasterMonitorService();
3049     }
3050 
3051     @Override
3052     public void close() throws IOException {
3053       this.masterMonitor.close();
3054     }
3055   }
3056 
3057   /**
3058    * Parent of {@link MasterMonitorCallable} and {@link MasterAdminCallable}.
3059    * Has common methods.
3060    * @param <V> the return type of the callable
3061    */
3062   abstract static class MasterCallable<V> implements RetryingCallable<V>, Closeable {
3063     protected HConnection connection;
3064 
3065     public MasterCallable(final HConnection connection) {
3066       this.connection = connection;
3067     }
3068 
3069     @Override
3070     public void throwable(Throwable t, boolean retrying) {
3071     }
3072 
3073     @Override
3074     public String getExceptionMessageAdditionalDetail() {
3075       return "";
3076     }
3077 
3078     @Override
3079     public long sleep(long pause, int tries) {
3080       return ConnectionUtils.getPauseTime(pause, tries);
3081     }
3082   }
3083 
3084   private <V> V executeCallable(MasterCallable<V> callable) throws IOException {
3085     RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller();
3086     try {
3087       return caller.callWithRetries(callable);
3088     } finally {
3089       callable.close();
3090     }
3091   }
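
  /*
   * Note on the pattern above (informal summary, not part of the original source): every
   * master RPC in this class is wrapped in a MasterAdminCallable or MasterMonitorCallable.
   * prepare() obtains a keep-alive master stub from the connection, executeCallable runs
   * call() through an RpcRetryingCaller so transient failures are retried with backoff, and
   * the stub is closed in the finally block regardless of the outcome. A typical wrapper is
   * the kind of anonymous subclass used by the snapshot methods above, e.g. (with "request"
   * standing in for a previously built protobuf request):
   *
   *   executeCallable(new MasterAdminCallable<IsSnapshotDoneResponse>(getConnection()) {
   *     @Override
   *     public IsSnapshotDoneResponse call() throws ServiceException {
   *       return masterAdmin.isSnapshotDone(null, request);
   *     }
   *   });
   */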
3092 
3093   /**
3094    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
3095    * connected to the active master.
3096    *
3097    * <p>
3098    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
3099    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
3100    * </p>
3101    *
3102    * <div style="background-color: #cccccc; padding: 2px">
3103    * <blockquote><pre>
3104    * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
3105    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
3106    * MyCallRequest request = MyCallRequest.newBuilder()
3107    *     ...
3108    *     .build();
3109    * MyCallResponse response = service.myCall(null, request);
3110    * </pre></blockquote></div>
3111    *
3112    * @return A MasterCoprocessorRpcChannel instance
3113    */
3114   public CoprocessorRpcChannel coprocessorService() {
3115     return new MasterCoprocessorRpcChannel(connection);
3116   }
3117 }