/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.tool.Canary;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;

/**
 * Facility for testing HBase. Replacement for the
 * old HBaseTestCase and HBaseClusterTestCase functionality.
 * Create an instance and keep it around while testing HBase.  This class is
 * meant to be your one-stop shop for anything you might need while testing.  Manages
 * only one cluster at a time.  The managed cluster can be an in-process
 * {@link MiniHBaseCluster} or a deployed cluster of type {@link DistributedHBaseCluster}.
 * Not all methods work with the real cluster.
 * Depends on log4j being on the classpath and on
 * hbase-site.xml for logging and test-run configuration.  It does not set
 * logging levels nor make changes to configuration parameters.
 * <p>To preserve test data directories, set the system property
 * "hbase.testing.preserve.testdir" to true.
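 * <p>A minimal usage sketch (assumes JUnit 4; the table and family names below
 * are illustrative only):
 * <pre>
 * private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 *
 * &#64;BeforeClass public static void setUp() throws Exception {
 *   TEST_UTIL.startMiniCluster();
 * }
 *
 * &#64;Test public void testSomething() throws Exception {
 *   HTable table = TEST_UTIL.createTable("testtable", "family");
 *   table.put(new Put(Bytes.toBytes("row"))
 *       .add(Bytes.toBytes("family"), Bytes.toBytes("q"), Bytes.toBytes("v")));
 *   table.close();
 * }
 *
 * &#64;AfterClass public static void tearDown() throws Exception {
 *   TEST_UTIL.shutdownMiniCluster();
 * }
 * </pre>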
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HBaseTestingUtility extends HBaseCommonTestingUtility {
  private MiniZooKeeperCluster zkCluster = null;

  /**
   * The default number of regions per regionserver when creating a pre-split
   * table.
   */
  private static int DEFAULT_REGIONS_PER_SERVER = 5;

  /**
   * Set if we were passed a zkCluster.  If so, we won't shut down zk as
   * part of general shutdown.
   */
  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** If there is a mini cluster running for this testing utility instance. */
  private boolean miniClusterRunning;

  private String hadoopLogDir;

  /** Directory (a subdirectory of dataTestDir) used by the dfs cluster, if any. */
  private File clusterTestDir = null;

  /** Directory on the test filesystem where we put the data for this instance of
   * HBaseTestingUtility. */
  private Path dataTestDirOnTestFS = null;

  /**
   * System property key to get the test directory value.
   * The name is what it is because mini dfs is hard-coded to put test data here.
   * It should NOT be used directly in HBase, as it's a property used by
   * mini dfs.
   * @deprecated can be used only with mini dfs
   */
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  /** Filesystem URI used for map-reduce mini-cluster setup */
  private static String FS_URI;

  /** A set of ports that have been claimed using {@link #randomFreePort()}. */
  private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();

  /** Compression algorithms to use in parameterized JUnit 4 tests */
  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  /** This is for unit tests parameterized with a single boolean. */
  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
      Arrays.asList(new Object[][] {
          { Boolean.FALSE },
          { Boolean.TRUE }
      });

  /** Compression algorithms to use in testing */
  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
      Compression.Algorithm.NONE, Compression.Algorithm.GZ
    };

  /**
   * Create all combinations of Bloom filters and compression algorithms for
   * testing.
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();
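
  /*
   * Illustrative sketch of consuming these combinations from a JUnit 4
   * parameterized test (the test class and method names here are examples,
   * not part of this utility):
   *
   *   @RunWith(Parameterized.class)
   *   public class TestBloomAndCompression {
   *     @Parameterized.Parameters
   *     public static Collection<Object[]> parameters() {
   *       return HBaseTestingUtility.BLOOM_AND_COMPRESSION_COMBINATIONS;
   *     }
   *
   *     private final Compression.Algorithm comprAlgo;
   *     private final BloomType bloomType;
   *
   *     public TestBloomAndCompression(Compression.Algorithm comprAlgo, BloomType bloomType) {
   *       this.comprAlgo = comprAlgo;
   *       this.bloomType = bloomType;
   *     }
   *   }
   */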

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    super(conf);

    // an hbase checksum verification failure will cause unit tests to fail
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }

  /**
   * Create an HBaseTestingUtility where all tmp files are written to the local test data dir.
   * It is needed to properly base FSUtils.getRootDir so that it drops temp files in the proper
   * test dir.  Use this when you aren't using a mini HDFS cluster.
   * @return HBaseTestingUtility that uses the local fs for temp files.
   */
  public static HBaseTestingUtility createLocalHTU() {
    Configuration c = HBaseConfiguration.create();
    return createLocalHTU(c);
  }

  /**
   * Create an HBaseTestingUtility where all tmp files are written to the local test data dir.
   * It is needed to properly base FSUtils.getRootDir so that it drops temp files in the proper
   * test dir.  Use this when you aren't using a mini HDFS cluster.
   * @param c Configuration (will be modified)
   * @return HBaseTestingUtility that uses the local fs for temp files.
   */
  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    HBaseTestingUtility htu = new HBaseTestingUtility(c);
    String dataTestDir = htu.getDataTestDir().toString();
    htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
    return htu;
  }
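
  /*
   * Local-filesystem sketch (illustrative): no mini DFS cluster is started,
   * and hbase.rootdir points under the local test data dir.
   *
   *   HBaseTestingUtility htu = HBaseTestingUtility.createLocalHTU();
   *   Path testDir = htu.getDataTestDir();  // rooted under the local test data dir
   */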

  /**
   * Returns this class's instance of {@link Configuration}.  Be careful how
   * you use the returned Configuration since {@link HConnection} instances
   * can be shared.  The Map of HConnections is keyed by the Configuration.  If
   * say, a Connection was being used against a cluster that had been shutdown,
   * see {@link #shutdownMiniCluster()}, then the Connection will no longer
   * be wholesome.  Rather than use the returned Configuration directly, it's
   * usually best to make a copy and use that.  Do
   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
   * @return Instance of Configuration.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  /**
   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
   * Give it a random name so we can have many concurrent tests running if
   * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}
   * System property, as that is what minidfscluster bases
   * its data dir on.  Modifying a System property is not the way to run concurrent
   * instances -- another instance could grab the temporary
   * value unintentionally -- but nothing can be done about it at the moment;
   * single instance only is how the minidfscluster works.
   *
   * We also create the underlying directory for
   *  hadoop.log.dir, mapred.local.dir and hadoop.tmp.dir, and set the values
   *  in the conf, and as a system property for hadoop.tmp.dir
   *
   * @return The calculated data test build directory, if newly-created.
   */
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
    //  we want our own value to ensure uniqueness on the same machine
    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
    createSubDir(
      "mapred.local.dir",
      testPath, "mapred-local-dir");

    return testPath;
  }

  private void createSubDirAndSystemProperty(
    String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // There is already a value set. So we do nothing but hope
      //  that there will be no conflicts
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          " Overriding the configuration value with the system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      // Ok, it's not set, so we create it as a subdirectory
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  /**
   * @return Where to write test data on the test filesystem; returns the working
   * directory for the test filesystem by default
   * @see #setupDataTestDirOnTestFS()
   * @see #getTestFileSystem()
   */
  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

  /**
   * @return Where the DFS cluster will write data on the local filesystem.
   * Creates it if it does not exist already.  A subdir of {@link #getBaseTestDir()}
   * @see #getTestFileSystem()
   */
  Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  /**
   * Creates a directory for the DFS cluster, under the test data directory.
   */
  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      return;
    }

    // Using randomUUID ensures that multiple clusters can be launched by
    //  the same test, if it stops and restarts them
    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
    // Have it cleaned up on exit
    boolean b = deleteOnExit();
    if (b) clusterTestDir.deleteOnExit();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()},
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()},
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   * @param subdirName name of the subdir to create under the base test dir
   */
  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

  /**
   * Sets up a path in the test filesystem to be used by tests.
   */
  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test dir on test fs already set up in "
          + dataTestDirOnTestFS.toString());
      return;
    }

    // The file system can be either local, mini dfs, or if the configuration
    // is supplied externally, it can be an external cluster FS. If it is a local
    // file system, the tests should use getBaseTestDir, otherwise, we can use
    // the working directory, and create a unique sub dir there
    FileSystem fs = getTestFileSystem();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      File dataTestDir = new File(getDataTestDir().toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
      dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
    } else {
      Path base = getBaseTestDirOnTestFS();
      String randomStr = UUID.randomUUID().toString();
      dataTestDirOnTestFS = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
    }
  }

  /**
   * Cleans the test data directory on the test filesystem.
   * @return True if we removed the test dirs
   * @throws IOException
   */
  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret) {
      dataTestDirOnTestFS = null;
    }
    return ret;
  }

  /**
   * Cleans a subdirectory under the test data directory on the test filesystem.
   * @return True if we removed the child
   * @throws IOException
   */
  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }

  /**
   * Start a minidfscluster.
   * @param servers How many DNs to start.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }
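
  /*
   * DFS-only sketch (illustrative): start HDFS without HBase, write under the
   * per-instance test dir, then shut it down.
   *
   *   MiniDFSCluster dfs = TEST_UTIL.startMiniDFSCluster(3);
   *   FileSystem fs = TEST_UTIL.getTestFileSystem();
   *   Path dir = TEST_UTIL.getDataTestDirOnTestFS("my-test");
   *   // ... exercise fs and dir ...
   *   TEST_UTIL.shutdownMiniDFSCluster();
   */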

  /**
   * Start a minidfscluster.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names, all instances of the
   * datanodes will have the same host name.
   * @param hosts hostnames the DNs should run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
  throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  /**
   * Start a minidfscluster.
   * Can only create one.
   * @param servers How many DNs to start.
   * @param hosts hostnames the DNs should run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
  throws Exception {
    createDirsAndSetProperties();

    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
        setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
        setLevel(org.apache.log4j.Level.ERROR);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // reset the test directory for the test file system
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, racks, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // reset the test directory for the test file system
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }

  /** This is used before starting HDFS and map-reduce mini-clusters */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapred.local.dir");
    createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
  }

  /**
   * Get the HBase setting for dfs.client.read.shortcircuit from the conf or a
   * system property.  This allows specifying the parameter on the command line.
   * If not set, the default is false.
   */
  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }
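
  // Illustrative: enable short-circuit reads for a test run from the command
  // line, e.g. via Maven:
  //   mvn test -Dhbase.tests.use.shortcircuit.reads=true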

  /** Enable short circuit reads, unless configured differently.
   * Sets both HBase and HDFS settings, including skipping the hdfs checksum checks.
   */
  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      // read short circuit, for hdfs
      conf.set("dfs.block.local-path-access.user", curUser);
      // read short circuit, for hbase
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      // Skip checking checksum, for the hdfs client and the datanode
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

  /**
   * Shuts down the instance created by a call to {@link #startMiniDFSCluster(int)}
   * or does nothing.
   * @throws IOException
   */
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }

  /**
   * Call this if you only want a zk cluster.
   * @see #startMiniCluster() if you want zk + dfs + hbase mini cluster.
   * @throws Exception
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }
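
  /*
   * ZK-only sketch (illustrative):
   *
   *   MiniZooKeeperCluster zk = TEST_UTIL.startMiniZKCluster();
   *   int clientPort = zk.getClientPort();  // point test watchers/clients here
   *   TEST_UTIL.shutdownMiniZKCluster();
   */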

  /**
   * Call this if you only want a zk cluster.
   * @param zooKeeperServerNum number of ZK servers to start.
   * @see #startMiniCluster() if you want zk + dfs + hbase mini cluster.
   * @throws Exception
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
    throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  /**
   * Start a mini ZK cluster. If the property "test.hbase.zookeeper.property.clientPort" is set,
   * the port mentioned is used as the default port for ZooKeeper.
   */
  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
  throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0) {
      // If there is a port in the config file, we use it.
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  /**
   * Shuts down the zk cluster created by a call to {@link #startMiniZKCluster(File)}
   * or does nothing.
   * @throws IOException
   * @see #startMiniZKCluster()
   */
  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @throws Exception
   * @return Mini hbase cluster instance created.
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under the System property test.build.data.
   * The directory is cleaned up on exit.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * datanodes and regionservers.  If numSlaves is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
   * you will get bind errors.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
  throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  /**
   * Start up a minicluster.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves)
  throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }

  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under the System property test.build.data.
   * The directory is cleaned up on exit.
   * @param numMasters Number of masters to start up.  We'll start this many
   * hbase masters.  If numMasters > 1, you can find the active/primary master
   * with {@link MiniHBaseCluster#getMaster()}.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * regionservers. If dataNodeHosts == null, this also indicates the number of
   * datanodes to start. If dataNodeHosts != null, the number of datanodes is
   * based on dataNodeHosts.length.
   * If numSlaves is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
   * you will get bind errors.
   * @param dataNodeHosts hostnames the DNs should run on.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names,
   * all instances of the datanodes will have the same host name.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
  }

  /**
   * Same as {@link #startMiniCluster(int, int)}, but with a custom number of datanodes.
   * @param numDataNodes Number of data nodes.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }

  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under the System property test.build.data.
   * The directory is cleaned up on exit.
   * @param numMasters Number of masters to start up.  We'll start this many
   * hbase masters.  If numMasters > 1, you can find the active/primary master
   * with {@link MiniHBaseCluster#getMaster()}.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * regionservers. If dataNodeHosts == null, this also indicates the number of
   * datanodes to start. If dataNodeHosts != null, the number of datanodes is
   * based on dataNodeHosts.length.
   * If numSlaves is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
   * you will get bind errors.
   * @param dataNodeHosts hostnames the DNs should run on.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names,
   * all instances of the datanodes will have the same host name.
   * @param masterClass The class to use as HMaster, or null for default
   * @param regionserverClass The class to use as HRegionServer, or null for
   * default
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
          throws Exception {
    return startMiniCluster(
        numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }

  /**
   * Same as {@link #startMiniCluster(int, int, String[], Class, Class)}, but with a custom
   * number of datanodes.
   * @param numDataNodes Number of data nodes.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
    Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
  throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // If we already put up a cluster, fail.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up the mini dfs cluster. This spews a bunch of warnings about missing
    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    // Start up a zk cluster.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Start the MiniHBaseCluster
    return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException {
    return startMiniHBaseCluster(numMasters, numSlaves, null, null);
  }

  /**
   * Starts up the mini hbase cluster.  Usually used after a call to
   * {@link #startMiniCluster(int, int)} when doing stepped startup of clusters.
   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
   * @param numMasters Number of masters to start.
   * @param numSlaves Number of region servers to start.
   * @return Reference to the mini hbase cluster.
   * @throws IOException
   * @throws InterruptedException
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
        final int numSlaves, Class<? extends HMaster> masterClass,
        Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
  throws IOException, InterruptedException {
    // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
    createRootDir();

    // These settings will make the master wait until this exact number of
    // region servers has connected.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster =
        new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
    // Don't leave here till we've done a successful scan of hbase:meta
    HTable t = new HTable(c, TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin(); // create the hbaseAdmin immediately
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster) this.hbaseCluster;
  }
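
  /*
   * Stepped-startup sketch (illustrative): bring up dfs and zk yourself, then
   * start just the hbase layer so it can be stopped and restarted independently.
   *
   *   TEST_UTIL.startMiniDFSCluster(1);
   *   TEST_UTIL.startMiniZKCluster();
   *   TEST_UTIL.startMiniHBaseCluster(1, 1);
   *   // ... test; optionally shutdownMiniHBaseCluster() then restartHBaseCluster(1) ...
   *   TEST_UTIL.shutdownMiniCluster();
   */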

  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
   * @param servers number of region servers
   * @throws IOException
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of hbase:meta
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  /**
   * @return Current mini hbase cluster. Only has something in it after a call
   * to {@link #startMiniCluster()}.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster) this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
                               MiniHBaseCluster.class.getName());
  }

  /**
   * Stops the mini hbase, zk, and hdfs clusters.
   * @throws IOException
   * @see #startMiniCluster(int)
   */
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }

  /**
   * Shuts down the HBase mini cluster.  Does not shut down zk or dfs if running.
   * @throws IOException
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }

    // unset the configuration for MIN and MAX RS to start
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shutdown zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }
  }

  /**
   * Returns the path to the default root dir the minicluster uses.
   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }

  /**
   * Creates an hbase rootdir in the user's home directory.  Also creates the
   * hbase version file.  Normally you won't make use of this method.  The root
   * hbasedir is created for you as part of mini cluster startup.  You'd only use
   * this method if you were doing manual operation.
   * @return Fully qualified path to the hbase root dir
   * @throws IOException
   */
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  /**
   * Flushes all caches in the mini hbase cluster.
   * @throws IOException
   */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /**
   * Flushes all caches for the given table in the mini hbase cluster.
   * @throws IOException
   */
  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /**
   * Compact all regions in the mini hbase cluster.
   * @throws IOException
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /**
   * Compact all of a table's regions in the mini hbase cluster.
   * @throws IOException
   */
  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(String tableName, String family)
  throws IOException {
    return createTable(TableName.valueOf(tableName), new String[]{family});
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException {
    return createTable(TableName.valueOf(tableName), new byte[][]{family});
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, String[] families)
  throws IOException {
    List<byte[]> fams = new ArrayList<byte[]>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[] family)
  throws IOException {
    return createTable(tableName, new byte[][]{family});
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  public HTable createTable(String tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }
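
  /*
   * Pre-split table sketch (illustrative names and keys):
   *
   *   HTable t = TEST_UTIL.createTable(Bytes.toBytes("presplit"),
   *       new byte[][] { Bytes.toBytes("f") },
   *       3,                        // versions
   *       Bytes.toBytes("aaa"),     // start key
   *       Bytes.toBytes("zzz"),     // end key
   *       10);                      // number of regions
   */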

  /**
   * Create a table.
   * @param htd
   * @param families
   * @param c Configuration to use
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
  throws IOException {
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms (they are on by default as of 0.95) but we disable them here because
      // tests have hard coded counts of what to expect in block cache, etc., and blooms being
      // on is interfering.
      hcd.setBloomFilterType(BloomType.NONE);
      htd.addFamily(hcd);
    }
    getHBaseAdmin().createTable(htd);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(c, htd.getTableName());
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param c Configuration to use
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c)
  throws IOException {
    return createTable(new HTableDescriptor(tableName), families, c);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param c Configuration to use
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms (they are on by default as of 0.95) but we disable them here because
      // tests have hard coded counts of what to expect in block cache, etc., and blooms being
      // on is interfering.
      hcd.setBloomFilterType(BloomType.NONE);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param c Configuration to use
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c, int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param c Configuration to use
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c, int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
  throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }
1302 
1303   /**
1304    * Create a table.
1305    * @param tableName
1306    * @param families
1307    * @param numVersions
1308    * @return An HTable instance for the created table.
1309    * @throws IOException
1310    */
1311   public HTable createTable(byte[] tableName, byte[][] families,
1312     int numVersions, int blockSize) throws IOException {
1313     return createTable(TableName.valueOf(tableName),
1314         families, numVersions, blockSize);
1315   }
1316 
1317   /**
1318    * Create a table.
1319    * @param tableName
1320    * @param families
1321    * @param numVersions
1322    * @return An HTable instance for the created table.
1323    * @throws IOException
1324    */
1325   public HTable createTable(TableName tableName, byte[][] families,
1326     int numVersions, int blockSize) throws IOException {
1327     HTableDescriptor desc = new HTableDescriptor(tableName);
1328     for (byte[] family : families) {
1329       HColumnDescriptor hcd = new HColumnDescriptor(family)
1330           .setMaxVersions(numVersions)
1331           .setBlocksize(blockSize);
1332       desc.addFamily(hcd);
1333     }
1334     getHBaseAdmin().createTable(desc);
1335     // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned
1336     waitUntilAllRegionsAssigned(tableName);
1337     return new HTable(new Configuration(getConfiguration()), tableName);
1338   }
1339 
1340   /**
1341    * Create a table.
1342    * @param tableName
1343    * @param families
1344    * @param numVersions
1345    * @return An HTable instance for the created table.
1346    * @throws IOException
1347    */
1348   public HTable createTable(byte[] tableName, byte[][] families,
1349       int[] numVersions)
1350   throws IOException {
1351     return createTable(TableName.valueOf(tableName), families, numVersions);
1352   }
1353 
1354   /**
1355    * Create a table.
1356    * @param tableName
1357    * @param families
1358    * @param numVersions
1359    * @return An HTable instance for the created table.
1360    * @throws IOException
1361    */
1362   public HTable createTable(TableName tableName, byte[][] families,
1363       int[] numVersions)
1364   throws IOException {
1365     HTableDescriptor desc = new HTableDescriptor(tableName);
1366     int i = 0;
1367     for (byte[] family : families) {
1368       HColumnDescriptor hcd = new HColumnDescriptor(family)
1369           .setMaxVersions(numVersions[i]);
1370       desc.addFamily(hcd);
1371       i++;
1372     }
1373     getHBaseAdmin().createTable(desc);
1374     // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned
1375     waitUntilAllRegionsAssigned(tableName);
1376     return new HTable(new Configuration(getConfiguration()), tableName);
1377   }
1378 
1379   /**
1380    * Create a table.
1381    * @param tableName
1382    * @param family
1383    * @param splitRows
1384    * @return An HTable instance for the created table.
1385    * @throws IOException
1386    */
1387   public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
1388     throws IOException{
1389     return createTable(TableName.valueOf(tableName), family, splitRows);
1390   }
1391 
  /**
   * Create a table.
   * @param tableName name of the table to create
   * @param family the single column family for the table
   * @param splitRows split keys to pre-split the table with
   * @return An HTable instance for the created table.
   * @throws IOException
   */
1400   public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
1401       throws IOException {
1402     HTableDescriptor desc = new HTableDescriptor(tableName);
1403     HColumnDescriptor hcd = new HColumnDescriptor(family);
1404     desc.addFamily(hcd);
1405     getHBaseAdmin().createTable(desc, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; wait until they are assigned.
1407     waitUntilAllRegionsAssigned(tableName);
1408     return new HTable(getConfiguration(), tableName);
1409   }
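
  // Usage sketch (hypothetical names; assumes a running mini cluster): a single
  // split key "m" pre-splits the table into two regions, [,"m") and ["m",).
  //
  //   HTable t = TEST_UTIL.createTable(TableName.valueOf("presplitDemo"),
  //       Bytes.toBytes("f"), new byte[][] { Bytes.toBytes("m") });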
1410 
  /**
   * Create a table.
   * @param tableName name of the table to create
   * @param families column families for the table
   * @param splitRows split keys to pre-split the table with
   * @return An HTable instance for the created table.
   * @throws IOException
   */
1419   public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
1420       throws IOException {
1421     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1422     for(byte[] family:families) {
1423       HColumnDescriptor hcd = new HColumnDescriptor(family);
1424       desc.addFamily(hcd);
1425     }
1426     getHBaseAdmin().createTable(desc, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; wait until they are assigned.
1428     waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
1429     return new HTable(getConfiguration(), tableName);
1430   }
1431 
1432   /**
1433    * Drop an existing table
1434    * @param tableName existing table
1435    */
1436   public void deleteTable(String tableName) throws IOException {
1437     deleteTable(TableName.valueOf(tableName));
1438   }
1439 
1440   /**
1441    * Drop an existing table
1442    * @param tableName existing table
1443    */
1444   public void deleteTable(byte[] tableName) throws IOException {
1445     deleteTable(TableName.valueOf(tableName));
1446   }
1447 
1448   /**
1449    * Drop an existing table
1450    * @param tableName existing table
1451    */
1452   public void deleteTable(TableName tableName) throws IOException {
1453     try {
1454       getHBaseAdmin().disableTable(tableName);
1455     } catch (TableNotEnabledException e) {
1456       LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1457     }
1458     getHBaseAdmin().deleteTable(tableName);
1459   }
1460 
1461   // ==========================================================================
1462   // Canned table and table descriptor creation
1463   // TODO replace HBaseTestCase
1464   
1465   public final static byte [] fam1 = Bytes.toBytes("colfamily11");
1466   public final static byte [] fam2 = Bytes.toBytes("colfamily21");
1467   public final static byte [] fam3 = Bytes.toBytes("colfamily31");
1468   public static final byte[][] COLUMNS = {fam1, fam2, fam3};
1469   private static final int MAXVERSIONS = 3;
1470   
1471   public static final char FIRST_CHAR = 'a';
1472   public static final char LAST_CHAR = 'z';
1473   public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
1474   public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1475 
  /**
   * Create a table descriptor of name <code>name</code> with {@link #COLUMNS} for
   * families.
   * @param name Name to give table.
   * @param minVersions Minimum number of versions to keep per column.
   * @param versions How many versions to allow per column.
   * @param ttl Time to live, in seconds.
   * @param keepDeleted Whether deleted cells should be retained.
   * @return Table descriptor.
   */
1483   public HTableDescriptor createTableDescriptor(final String name,
1484       final int minVersions, final int versions, final int ttl, boolean keepDeleted) {
1485     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
1486     for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1487       htd.addFamily(new HColumnDescriptor(cfName)
1488           .setMinVersions(minVersions)
1489           .setMaxVersions(versions)
1490           .setKeepDeletedCells(keepDeleted)
1491           .setBlockCacheEnabled(false)
1492           .setTimeToLive(ttl)
1493       );
1494     }
1495     return htd;
1496   }
1497 
  /**
   * Create a table descriptor of name <code>name</code> with {@link #COLUMNS} for
   * families.
   * @param name Name to give table.
   * @return Table descriptor.
   */
1504   public HTableDescriptor createTableDescriptor(final String name) {
1505     return createTableDescriptor(name,  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1506         MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1507   }
1508 
  /**
   * Create an HRegion that writes to the local tmp dirs
   * @param desc table descriptor
   * @param startKey region start key
   * @param endKey region end key
   * @return the created region
   * @throws IOException
   */
1517   public HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey,
1518       byte [] endKey)
1519   throws IOException {
1520     HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
1521     return createLocalHRegion(hri, desc);
1522   }
1523 
  /**
   * Create an HRegion that writes to the local tmp dirs
   * @param info region info
   * @param desc table descriptor
   * @return the created region
   * @throws IOException
   */
1531   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
1532     return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
1533   }
1534 
1535   /**
1536    * Create an HRegion that writes to the local tmp dirs with specified hlog
1537    * @param info regioninfo
1538    * @param desc table descriptor
1539    * @param hlog hlog for this region.
1540    * @return created hregion
1541    * @throws IOException
1542    */
1543   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog) throws IOException {
1544     return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
1545   }
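
  // Usage sketch (hypothetical table name): a local region needs no cluster,
  // but the caller must close it when finished.
  //
  //   HTableDescriptor htd = TEST_UTIL.createTableDescriptor("localDemo");
  //   HRegion region = TEST_UTIL.createLocalHRegion(htd,
  //       HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
  //   try {
  //     region.put(new Put(Bytes.toBytes("r")).add(fam1, null, Bytes.toBytes("v")));
  //   } finally {
  //     HRegion.closeHRegion(region);
  //   }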
1546 
1547   
1548   /**
1549    * @param tableName
1550    * @param startKey
1551    * @param stopKey
1552    * @param callingMethod
1553    * @param conf
   * @param isReadOnly
   * @param durability durability to apply to the table
   * @param hlog write-ahead log instance for the region, or null
   * @param families
1556    * @throws IOException
1557    * @return A region on which you must call
1558    *         {@link HRegion#closeHRegion(HRegion)} when done.
1559    */
1560   public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
1561       String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
1562       HLog hlog, byte[]... families) throws IOException {
1563     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
1564     htd.setReadOnly(isReadOnly);
1565     for (byte[] family : families) {
1566       HColumnDescriptor hcd = new HColumnDescriptor(family);
1567       // Set default to be three versions.
1568       hcd.setMaxVersions(Integer.MAX_VALUE);
1569       htd.addFamily(hcd);
1570     }
1571     htd.setDurability(durability);
1572     HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
1573     return createLocalHRegion(info, htd, hlog);
1574   }
1576   // ==========================================================================
1577 
  /**
   * Truncate an existing table by deleting all of its rows; the table's schema
   * and regions are preserved.
   * @param tableName existing table
   * @return an HTable for the (now empty) table
   * @throws IOException
   */
1584   public HTable truncateTable(byte[] tableName) throws IOException {
1585     return truncateTable(TableName.valueOf(tableName));
1586   }
1587 
  /**
   * Truncate an existing table by deleting all of its rows; the table's schema
   * and regions are preserved.
   * @param tableName existing table
   * @return an HTable for the (now empty) table
   * @throws IOException
   */
1594   public HTable truncateTable(TableName tableName) throws IOException {
1595     HTable table = new HTable(getConfiguration(), tableName);
1596     Scan scan = new Scan();
1597     ResultScanner resScan = table.getScanner(scan);
1598     for(Result res : resScan) {
1599       Delete del = new Delete(res.getRow());
1600       table.delete(del);
1601     }
1602     resScan = table.getScanner(scan);
1603     resScan.close();
1604     return table;
1605   }
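
  // Note: the truncation above is a row-by-row client-side delete, so the
  // table's schema and region boundaries are untouched. Sketch (hypothetical
  // table name):
  //
  //   HTable t = TEST_UTIL.truncateTable(Bytes.toBytes("demoTable"));
  //   // TEST_UTIL.countRows(t) now returns 0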
1606 
1607   /**
1608    * Load table with rows from 'aaa' to 'zzz'.
1609    * @param t Table
1610    * @param f Family
1611    * @return Count of rows loaded.
1612    * @throws IOException
1613    */
1614   public int loadTable(final HTable t, final byte[] f) throws IOException {
1615     t.setAutoFlush(false, true);
1616     byte[] k = new byte[3];
1617     int rowCount = 0;
1618     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1619       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1620         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1621           k[0] = b1;
1622           k[1] = b2;
1623           k[2] = b3;
1624           Put put = new Put(k);
1625           put.add(f, null, k);
1626           t.put(put);
1627           rowCount++;
1628         }
1629       }
1630     }
1631     t.flushCommits();
1632     return rowCount;
1633   }
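
  // Sketch: loadTable writes one row per three-letter key, so it returns
  // 26 * 26 * 26 = 17,576 rows (hypothetical table name; assumes the
  // single-family createTable overload defined earlier in this class).
  //
  //   HTable t = TEST_UTIL.createTable(Bytes.toBytes("loadDemo"), fam1);
  //   int rows = TEST_UTIL.loadTable(t, fam1);   // 17576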
1634 
1635   /**
1636    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1637    * @param t Table
1638    * @param f Array of Families to load
1639    * @return Count of rows loaded.
1640    * @throws IOException
1641    */
1642   public int loadTable(final HTable t, final byte[][] f) throws IOException {
1643     t.setAutoFlush(false, true);
1644     byte[] k = new byte[3];
1645     int rowCount = 0;
1646     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1647       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1648         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1649           k[0] = b1;
1650           k[1] = b2;
1651           k[2] = b3;
1652           Put put = new Put(k);
1653           for (int i = 0; i < f.length; i++) {
1654             put.add(f[i], null, k);
1655           }
1656           t.put(put);
1657           rowCount++;
1658         }
1659       }
1660     }
1661     t.flushCommits();
1662     return rowCount;
1663   }
1664 
1665   public int loadRegion(final HRegion r, final byte[] f) throws IOException {
1666     return loadRegion(r, f, false);
1667   }
1668 
1669   /**
1670    * Load region with rows from 'aaa' to 'zzz'.
1671    * @param r Region
1672    * @param f Family
1673    * @param flush flush the cache if true
1674    * @return Count of rows loaded.
1675    * @throws IOException
1676    */
1677   public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
1678   throws IOException {
1679     byte[] k = new byte[3];
1680     int rowCount = 0;
1681     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1682       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1683         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1684           k[0] = b1;
1685           k[1] = b2;
1686           k[2] = b3;
1687           Put put = new Put(k);
1688           put.add(f, null, k);
1689           if (r.getLog() == null) put.setDurability(Durability.SKIP_WAL);
1690 
1691           int preRowCount = rowCount;
1692           int pause = 10;
1693           int maxPause = 1000;
1694           while (rowCount == preRowCount) {
1695             try {
1696               r.put(put);
1697               rowCount++;
1698             } catch (RegionTooBusyException e) {
1699               pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
1700               Threads.sleep(pause);
1701             }
1702           }
1703         }
1704       }
1705       if (flush) {
1706         r.flushcache();
1707       }
1708     }
1709     return rowCount;
1710   }
1711 
1712   public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow) throws IOException {
1713     for (int i = startRow; i < endRow; i++) {
1714       byte[] data = Bytes.toBytes(String.valueOf(i));
1715       Put put = new Put(data);
1716       put.add(f, null, data);
1717       t.put(put);
1718     }
1719   }
1720 
1721   /**
1722    * Return the number of rows in the given table.
1723    */
1724   public int countRows(final HTable table) throws IOException {
1725     Scan scan = new Scan();
1726     ResultScanner results = table.getScanner(scan);
1727     int count = 0;
1728     for (@SuppressWarnings("unused") Result res : results) {
1729       count++;
1730     }
1731     results.close();
1732     return count;
1733   }
1734 
1735   public int countRows(final HTable table, final byte[]... families) throws IOException {
1736     Scan scan = new Scan();
1737     for (byte[] family: families) {
1738       scan.addFamily(family);
1739     }
1740     ResultScanner results = table.getScanner(scan);
1741     int count = 0;
1742     for (@SuppressWarnings("unused") Result res : results) {
1743       count++;
1744     }
1745     results.close();
1746     return count;
1747   }
1748 
1749   /**
   * Return an MD5 digest over the row keys of a table.
1751    */
1752   public String checksumRows(final HTable table) throws Exception {
1753     Scan scan = new Scan();
1754     ResultScanner results = table.getScanner(scan);
1755     MessageDigest digest = MessageDigest.getInstance("MD5");
1756     for (Result res : results) {
1757       digest.update(res.getRow());
1758     }
1759     results.close();
    // MessageDigest#toString() does not render the digest value; format the
    // computed MD5 bytes instead so distinct row sets yield distinct checksums.
    return Bytes.toStringBinary(digest.digest());
1761   }
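
  // Sketch: compare a table's row keys before and after an operation such as
  // a compaction, split, or cluster restart (hypothetical table t):
  //
  //   String before = TEST_UTIL.checksumRows(t);
  //   // ... perform the operation under test ...
  //   assertTrue(before.equals(TEST_UTIL.checksumRows(t)));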
1762 
1763   /**
   * Creates many regions, with split keys running from "aaa" to "zzz".
1765    *
1766    * @param table  The table to use for the data.
1767    * @param columnFamily  The family to insert the data into.
1768    * @return count of regions created.
1769    * @throws IOException When creating the regions fails.
1770    */
1771   public int createMultiRegions(HTable table, byte[] columnFamily)
1772   throws IOException {
1773     return createMultiRegions(getConfiguration(), table, columnFamily);
1774   }
1775 
1776   public static final byte[][] KEYS = {
1777     HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
1778     Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
1779     Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
1780     Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
1781     Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
1782     Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
1783     Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
1784     Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
1785     Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
1786   };
1787 
1788   public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
1789       Bytes.toBytes("bbb"),
1790       Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
1791       Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
1792       Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
1793       Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
1794       Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
1795       Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
1796       Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
1797       Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
1798   };
1799 
1800   /**
   * Creates many regions, with split keys running from "aaa" to "zzz".
1802    * @param c Configuration to use.
1803    * @param table  The table to use for the data.
1804    * @param columnFamily  The family to insert the data into.
1805    * @return count of regions created.
1806    * @throws IOException When creating the regions fails.
1807    */
1808   public int createMultiRegions(final Configuration c, final HTable table,
1809       final byte[] columnFamily)
1810   throws IOException {
1811     return createMultiRegions(c, table, columnFamily, KEYS);
1812   }
1813 
  /**
   * Creates the specified number of regions in the specified table.
   * @param c configuration to use
   * @param table the table to split into regions
   * @param family the family to ensure exists in the table
   * @param numRegions number of regions to create; must be at least 3
   * @return count of regions created
   * @throws IOException
   */
1823   public int createMultiRegions(final Configuration c, final HTable table,
1824       final byte [] family, int numRegions)
1825   throws IOException {
1826     if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1827     byte [] startKey = Bytes.toBytes("aaaaa");
1828     byte [] endKey = Bytes.toBytes("zzzzz");
1829     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1830     byte [][] regionStartKeys = new byte[splitKeys.length+1][];
1831     for (int i=0;i<splitKeys.length;i++) {
1832       regionStartKeys[i+1] = splitKeys[i];
1833     }
1834     regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
1835     return createMultiRegions(c, table, family, regionStartKeys);
1836   }
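
  // Worked example of the split arithmetic above: for numRegions = 5,
  // Bytes.split("aaaaa", "zzzzz", 2) returns 4 keys (the two endpoints plus
  // 2 interior splits); prepending HConstants.EMPTY_BYTE_ARRAY yields 5
  // region start keys, hence 5 regions.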
1837 
1838   @SuppressWarnings("deprecation")
1839   public int createMultiRegions(final Configuration c, final HTable table,
1840       final byte[] columnFamily, byte [][] startKeys)
1841   throws IOException {
1842     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
1843     HTable meta = new HTable(c, TableName.META_TABLE_NAME);
1844     HTableDescriptor htd = table.getTableDescriptor();
1845     if(!htd.hasFamily(columnFamily)) {
1846       HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
1847       htd.addFamily(hcd);
1848     }
1849     // remove empty region - this is tricky as the mini cluster during the test
1850     // setup already has the "<tablename>,,123456789" row with an empty start
1851     // and end key. Adding the custom regions below adds those blindly,
1852     // including the new start region from empty to "bbb". lg
1853     List<byte[]> rows = getMetaTableRows(htd.getTableName());
1854     String regionToDeleteInFS = table
1855         .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
1856         .getRegionInfo().getEncodedName();
1857     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
1858     // add custom ones
1859     int count = 0;
1860     for (int i = 0; i < startKeys.length; i++) {
1861       int j = (i + 1) % startKeys.length;
1862       HRegionInfo hri = new HRegionInfo(table.getName(),
1863         startKeys[i], startKeys[j]);
1864       MetaEditor.addRegionToMeta(meta, hri);
1865       newRegions.add(hri);
1866       count++;
1867     }
1868     // see comment above, remove "old" (or previous) single region
1869     for (byte[] row : rows) {
1870       LOG.info("createMultiRegions: deleting meta row -> " +
1871         Bytes.toStringBinary(row));
1872       meta.delete(new Delete(row));
1873     }
1874     // remove the "old" region from FS
1875     Path tableDir = new Path(getDefaultRootDirPath().toString()
1876         + System.getProperty("file.separator") + htd.getTableName()
1877         + System.getProperty("file.separator") + regionToDeleteInFS);
1878     FileSystem.get(c).delete(tableDir);
1879     // flush cache of regions
1880     HConnection conn = table.getConnection();
1881     conn.clearRegionCache();
1882     // assign all the new regions IF table is enabled.
1883     HBaseAdmin admin = getHBaseAdmin();
1884     if (admin.isTableEnabled(table.getTableName())) {
1885       for(HRegionInfo hri : newRegions) {
1886         admin.assign(hri.getRegionName());
1887       }
1888     }
1889 
1890     meta.close();
1891 
1892     return count;
1893   }
1894 
1895   /**
1896    * Create rows in hbase:meta for regions of the specified table with the specified
1897    * start keys.  The first startKey should be a 0 length byte array if you
1898    * want to form a proper range of regions.
1899    * @param conf
1900    * @param htd
1901    * @param startKeys
1902    * @return list of region info for regions added to meta
1903    * @throws IOException
1904    */
1905   public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
1906       final HTableDescriptor htd, byte [][] startKeys)
1907   throws IOException {
1908     HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
1909     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
1910     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
1911     // add custom ones
1912     for (int i = 0; i < startKeys.length; i++) {
1913       int j = (i + 1) % startKeys.length;
1914       HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
1915           startKeys[j]);
1916       MetaEditor.addRegionToMeta(meta, hri);
1917       newRegions.add(hri);
1918     }
1919 
1920     meta.close();
1921     return newRegions;
1922   }
1923 
1924   /**
1925    * Returns all rows from the hbase:meta table.
1926    *
1927    * @throws IOException When reading the rows fails.
1928    */
1929   public List<byte[]> getMetaTableRows() throws IOException {
1930     // TODO: Redo using MetaReader class
1931     HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
1932     List<byte[]> rows = new ArrayList<byte[]>();
1933     ResultScanner s = t.getScanner(new Scan());
1934     for (Result result : s) {
1935       LOG.info("getMetaTableRows: row -> " +
1936         Bytes.toStringBinary(result.getRow()));
1937       rows.add(result.getRow());
1938     }
1939     s.close();
1940     t.close();
1941     return rows;
1942   }
1943 
1944   /**
1945    * Returns all rows from the hbase:meta table for a given user table
1946    *
1947    * @throws IOException When reading the rows fails.
1948    */
1949   public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
1950     // TODO: Redo using MetaReader.
1951     HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
1952     List<byte[]> rows = new ArrayList<byte[]>();
1953     ResultScanner s = t.getScanner(new Scan());
1954     for (Result result : s) {
1955       HRegionInfo info = HRegionInfo.getHRegionInfo(result);
1956       if (info == null) {
1957         LOG.error("No region info for row " + Bytes.toString(result.getRow()));
1958         // TODO figure out what to do for this new hosed case.
1959         continue;
1960       }
1961 
1962       if (info.getTable().equals(tableName)) {
1963         LOG.info("getMetaTableRows: row -> " +
1964             Bytes.toStringBinary(result.getRow()) + info);
1965         rows.add(result.getRow());
1966       }
1967     }
1968     s.close();
1969     t.close();
1970     return rows;
1971   }
1972 
1973   /**
1974    * Tool to get the reference to the region server object that holds the
1975    * region of the specified user table.
1976    * It first searches for the meta rows that contain the region of the
1977    * specified table, then gets the index of that RS, and finally retrieves
1978    * the RS's reference.
1979    * @param tableName user table to lookup in hbase:meta
1980    * @return region server that holds it, null if the row doesn't exist
1981    * @throws IOException
1982    * @throws InterruptedException
1983    */
1984   public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
1985       throws IOException, InterruptedException {
1986     return getRSForFirstRegionInTable(TableName.valueOf(tableName));
1987   }
1988   /**
1989    * Tool to get the reference to the region server object that holds the
1990    * region of the specified user table.
1991    * It first searches for the meta rows that contain the region of the
1992    * specified table, then gets the index of that RS, and finally retrieves
1993    * the RS's reference.
1994    * @param tableName user table to lookup in hbase:meta
1995    * @return region server that holds it, null if the row doesn't exist
1996    * @throws IOException
1997    */
1998   public HRegionServer getRSForFirstRegionInTable(TableName tableName)
1999       throws IOException, InterruptedException {
2000     List<byte[]> metaRows = getMetaTableRows(tableName);
2001     if (metaRows == null || metaRows.isEmpty()) {
2002       return null;
2003     }
2004     LOG.debug("Found " + metaRows.size() + " rows for table " +
2005       tableName);
2006     byte [] firstrow = metaRows.get(0);
2007     LOG.debug("FirstRow=" + Bytes.toString(firstrow));
2008     long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2009       HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2010     int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2011       HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // HBASE_CLIENT_PAUSE is expressed in milliseconds, so size the retry interval accordingly.
    RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MILLISECONDS);
2013     while(retrier.shouldRetry()) {
2014       int index = getMiniHBaseCluster().getServerWith(firstrow);
2015       if (index != -1) {
2016         return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2017       }
2018       // Came back -1.  Region may not be online yet.  Sleep a while.
2019       retrier.sleepUntilNextRetry();
2020     }
2021     return null;
2022   }
2023 
2024   /**
2025    * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>s.
2027    *
2028    * @throws IOException When starting the cluster fails.
2029    */
2030   public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2031     startMiniMapReduceCluster(2);
2032     return mrCluster;
2033   }
2034 
2035   /**
   * TaskTracker has a bug where changing the hadoop.log.dir system property
2037    * will not change its internal static LOG_DIR variable.
2038    */
2039   private void forceChangeTaskLogDir() {
2040     Field logDirField;
2041     try {
2042       logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2043       logDirField.setAccessible(true);
2044 
2045       Field modifiersField = Field.class.getDeclaredField("modifiers");
2046       modifiersField.setAccessible(true);
2047       modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2048 
2049       logDirField.set(null, new File(hadoopLogDir, "userlogs"));
2050     } catch (SecurityException e) {
2051       throw new RuntimeException(e);
    } catch (NoSuchFieldException e) {
      throw new RuntimeException(e);
2055     } catch (IllegalArgumentException e) {
2056       throw new RuntimeException(e);
2057     } catch (IllegalAccessException e) {
2058       throw new RuntimeException(e);
2059     }
2060   }
2061 
2062   /**
2063    * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
2064    * filesystem.
   * @param servers  The number of <code>TaskTracker</code>s to start.
2066    * @throws IOException When starting the cluster fails.
2067    */
2068   private void startMiniMapReduceCluster(final int servers) throws IOException {
2069     if (mrCluster != null) {
2070       throw new IllegalStateException("MiniMRCluster is already running");
2071     }
2072     LOG.info("Starting mini mapreduce cluster...");
2073     setupClusterTestDir();
2074     createDirsAndSetProperties();
2075 
2076     forceChangeTaskLogDir();
2077 
2078     //// hadoop2 specific settings
2079     // Tests were failing because this process used 6GB of virtual memory and was getting killed.
    // We raise the usable virtual memory ratio so that processes don't get killed.
2081     conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2082 
2083     // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
2084     // this avoids the problem by disabling speculative task execution in tests.
2085     conf.setBoolean("mapreduce.map.speculative", false);
2086     conf.setBoolean("mapreduce.reduce.speculative", false);
2087     ////
2088 
2089     // Allow the user to override FS URI for this map-reduce cluster to use.
2090     mrCluster = new MiniMRCluster(servers,
2091       FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
2092       null, null, new JobConf(this.conf));
2093     JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2094     if (jobConf == null) {
2095       jobConf = mrCluster.createJobConf();
2096     }
2097 
2098     jobConf.set("mapred.local.dir",
2099       conf.get("mapred.local.dir")); //Hadoop MiniMR overwrites this while it should not
2100     LOG.info("Mini mapreduce cluster started");
2101 
2102     // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
2103     // Our HBase MR jobs need several of these settings in order to properly run.  So we copy the
2104     // necessary config properties here.  YARN-129 required adding a few properties.
2105     conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
2106     // this for mrv2 support; mr1 ignores this
2107     conf.set("mapreduce.framework.name", "yarn");
2108     conf.setBoolean("yarn.is.minicluster", true);
2109     String rmAddress = jobConf.get("yarn.resourcemanager.address");
2110     if (rmAddress != null) {
2111       conf.set("yarn.resourcemanager.address", rmAddress);
2112     }
2113     String schedulerAddress =
2114       jobConf.get("yarn.resourcemanager.scheduler.address");
2115     if (schedulerAddress != null) {
2116       conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2117     }
2118   }
2119 
2120   /**
2121    * Stops the previously started <code>MiniMRCluster</code>.
2122    */
2123   public void shutdownMiniMapReduceCluster() {
2124     LOG.info("Stopping mini mapreduce cluster...");
2125     if (mrCluster != null) {
2126       mrCluster.shutdown();
2127       mrCluster = null;
2128     }
2129     // Restore configuration to point to local jobtracker
2130     conf.set("mapred.job.tracker", "local");
2131     LOG.info("Mini mapreduce cluster stopped");
2132   }
2133 
2134   /**
   * Create a stubbed out RegionServerServices, mainly for getting FS.
2136    */
2137   public RegionServerServices createMockRegionServerService() throws IOException { 
2138     return createMockRegionServerService((ServerName)null);
2139   }
2140 
2141   /**
   * Create a stubbed out RegionServerServices, mainly for getting FS.
2143    * This version is used by TestTokenAuthentication
2144    */
2145   public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
2146     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2147     rss.setFileSystem(getTestFileSystem());
2148     rss.setRpcServer(rpc);
2149     return rss;
2150   }
2151 
2152   /**
   * Create a stubbed out RegionServerServices, mainly for getting FS.
2154    * This version is used by TestOpenRegionHandler
2155    */
2156   public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2157     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2158     rss.setFileSystem(getTestFileSystem());
2159     return rss;
2160   }
2161 
2162   /**
2163    * Switches the logger for the given class to DEBUG level.
2164    *
2165    * @param clazz  The class for which to switch to debug logging.
2166    */
2167   public void enableDebug(Class<?> clazz) {
2168     Log l = LogFactory.getLog(clazz);
2169     if (l instanceof Log4JLogger) {
2170       ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2171     } else if (l instanceof Jdk14Logger) {
2172       ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2173     }
2174   }
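
  // Usage sketch: turn on DEBUG logging for a noisy class under test.
  //
  //   TEST_UTIL.enableDebug(HRegion.class);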
2175 
2176   /**
2177    * Expire the Master's session
2178    * @throws Exception
2179    */
2180   public void expireMasterSession() throws Exception {
2181     HMaster master = getMiniHBaseCluster().getMaster();
2182     expireSession(master.getZooKeeper(), false);
2183   }
2184 
2185   /**
2186    * Expire a region server's session
2187    * @param index which RS
2188    * @throws Exception
2189    */
2190   public void expireRegionServerSession(int index) throws Exception {
2191     HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2192     expireSession(rs.getZooKeeper(), false);
2193     decrementMinRegionServerCount();
2194   }
2195 
2196   private void decrementMinRegionServerCount() {
    // decrement the count for this.conf, for newly spawned master
2198     // this.hbaseCluster shares this configuration too
2199     decrementMinRegionServerCount(getConfiguration());
2200 
2201     // each master thread keeps a copy of configuration
2202     for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2203       decrementMinRegionServerCount(master.getMaster().getConfiguration());
2204     }
2205   }
2206 
2207   private void decrementMinRegionServerCount(Configuration conf) {
2208     int currentCount = conf.getInt(
2209         ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2210     if (currentCount != -1) {
2211       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
2212           Math.max(currentCount - 1, 1));
2213     }
2214   }
2215 
2216   public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
2217    expireSession(nodeZK, false);
2218   }
2219 
2220   @Deprecated
2221   public void expireSession(ZooKeeperWatcher nodeZK, Server server)
2222     throws Exception {
2223     expireSession(nodeZK, false);
2224   }
2225 
2226   /**
2227    * Expire a ZooKeeper session as recommended in ZooKeeper documentation
2228    * http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A4
2229    * There are issues when doing this:
2230    * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
2231    * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
2232    *
2233    * @param nodeZK - the ZK watcher to expire
2234    * @param checkStatus - true to check if we can create an HTable with the
2235    *                    current configuration.
2236    */
2237   public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
2238     throws Exception {
2239     Configuration c = new Configuration(this.conf);
2240     String quorumServers = ZKConfig.getZKQuorumServersString(c);
2241     ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
2242     byte[] password = zk.getSessionPasswd();
2243     long sessionID = zk.getSessionId();
2244 
2245     // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
2246     //  so we create a first watcher to be sure that the
2247     //  event was sent. We expect that if our watcher receives the event
    //  other watchers on the same machine will get it as well.
2249     // When we ask to close the connection, ZK does not close it before
    //  we receive all the events, so we don't have to capture the event, just
2251     //  closing the connection should be enough.
2252     ZooKeeper monitor = new ZooKeeper(quorumServers,
2253       1000, new org.apache.zookeeper.Watcher(){
2254       @Override
2255       public void process(WatchedEvent watchedEvent) {
2256         LOG.info("Monitor ZKW received event="+watchedEvent);
2257       }
2258     } , sessionID, password);
2259 
2260     // Making it expire
2261     ZooKeeper newZK = new ZooKeeper(quorumServers,
2262         1000, EmptyWatcher.instance, sessionID, password);
2263 
    // Ensure that we have a connection to the server before closing down; otherwise
    // the close session event may be consumed while we are still in the CONNECTING state.
2266     long start = System.currentTimeMillis();
2267     while (newZK.getState() != States.CONNECTED
2268          && System.currentTimeMillis() - start < 1000) {
2269        Thread.sleep(1);
2270     }
2271     newZK.close();
2272     LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
2273 
2274     // Now closing & waiting to be sure that the clients get it.
2275     monitor.close();
2276 
2277     if (checkStatus) {
2278       new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
2279     }
2280   }
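
  // Usage sketch: force the active master to lose its ZooKeeper session, then
  // assert on whatever recovery behavior the test expects.
  //
  //   TEST_UTIL.expireMasterSession();
  //   // ... e.g. wait for a backup master to take over ...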
2281 
2282   /**
2283    * Get the Mini HBase cluster.
2284    *
2285    * @return hbase cluster
2286    * @see #getHBaseClusterInterface()
2287    */
2288   public MiniHBaseCluster getHBaseCluster() {
2289     return getMiniHBaseCluster();
2290   }
2291 
2292   /**
2293    * Returns the HBaseCluster instance.
   * <p>The returned object can be any of the subclasses of HBaseCluster, and tests
   * referring to this should not assume that the cluster is a mini cluster or a
   * distributed one. If the test only works on a mini cluster, the more specific
   * method {@link #getMiniHBaseCluster()} can be used instead without the
   * need to type-cast.
2299    */
2300   public HBaseCluster getHBaseClusterInterface() {
2301     //implementation note: we should rename this method as #getHBaseCluster(),
2302     //but this would require refactoring 90+ calls.
2303     return hbaseCluster;
2304   }
2305 
2306   /**
   * Returns an HBaseAdmin instance.
   * This instance is shared between HBaseTestingUtility instance users.
   * Closing it has no effect; it will be closed automatically when the
   * cluster shuts down.
2311    *
2312    * @return The HBaseAdmin instance.
2313    * @throws IOException
2314    */
2315   public synchronized HBaseAdmin getHBaseAdmin()
2316   throws IOException {
2317     if (hbaseAdmin == null){
2318       hbaseAdmin = new HBaseAdminForTests(getConfiguration());
2319     }
2320     return hbaseAdmin;
2321   }
2322 
2323   private HBaseAdminForTests hbaseAdmin = null;
2324   private static class HBaseAdminForTests extends HBaseAdmin {
2325     public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
2326         ZooKeeperConnectionException, IOException {
2327       super(c);
2328     }
2329 
2330     @Override
2331     public synchronized void close() throws IOException {
2332       LOG.warn("close() called on HBaseAdmin instance returned from HBaseTestingUtility.getHBaseAdmin()");
2333     }
2334 
2335     private synchronized void close0() throws IOException {
2336       super.close();
2337     }
2338   }
2339 
2340   /**
2341    * Returns a ZooKeeperWatcher instance.
2342    * This instance is shared between HBaseTestingUtility instance users.
   * Don't close it; it will be closed automatically when the
   * cluster shuts down.
2345    *
2346    * @return The ZooKeeperWatcher instance.
2347    * @throws IOException
2348    */
2349   public synchronized ZooKeeperWatcher getZooKeeperWatcher()
2350     throws IOException {
2351     if (zooKeeperWatcher == null) {
2352       zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
2353         new Abortable() {
2354         @Override public void abort(String why, Throwable e) {
2355           throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
2356         }
2357         @Override public boolean isAborted() {return false;}
2358       });
2359     }
2360     return zooKeeperWatcher;
2361   }
2362   private ZooKeeperWatcher zooKeeperWatcher;
2363 
2364 
2365 
2366   /**
2367    * Closes the named region.
2368    *
2369    * @param regionName  The region to close.
2370    * @throws IOException
2371    */
2372   public void closeRegion(String regionName) throws IOException {
2373     closeRegion(Bytes.toBytes(regionName));
2374   }
2375 
2376   /**
2377    * Closes the named region.
2378    *
2379    * @param regionName  The region to close.
2380    * @throws IOException
2381    */
2382   public void closeRegion(byte[] regionName) throws IOException {
2383     getHBaseAdmin().closeRegion(regionName, null);
2384   }
2385 
2386   /**
2387    * Closes the region containing the given row.
2388    *
2389    * @param row  The row to find the containing region.
2390    * @param table  The table to find the region.
2391    * @throws IOException
2392    */
2393   public void closeRegionByRow(String row, HTable table) throws IOException {
2394     closeRegionByRow(Bytes.toBytes(row), table);
2395   }
2396 
2397   /**
2398    * Closes the region containing the given row.
2399    *
2400    * @param row  The row to find the containing region.
2401    * @param table  The table to find the region.
2402    * @throws IOException
2403    */
2404   public void closeRegionByRow(byte[] row, HTable table) throws IOException {
2405     HRegionLocation hrl = table.getRegionLocation(row);
2406     closeRegion(hrl.getRegionInfo().getRegionName());
2407   }
2408 
  /**
2410    * Retrieves a splittable region randomly from tableName
2411    *
2412    * @param tableName name of table
2413    * @param maxAttempts maximum number of attempts, unlimited for value of -1
2414    * @return the HRegion chosen, null if none was found within limit of maxAttempts
2415    */
2416   public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
2417     List<HRegion> regions = getHBaseCluster().getRegions(tableName);
2418     int regCount = regions.size();
2419     Set<Integer> attempted = new HashSet<Integer>();
2420     int idx;
2421     int attempts = 0;
2422     do {
2423       regions = getHBaseCluster().getRegions(tableName);
2424       if (regCount != regions.size()) {
2425         // if there was region movement, clear attempted Set
2426         attempted.clear();
2427       }
2428       regCount = regions.size();
      // There is a chance that, before we get the regions for the table from an RS,
      // a region is already going for CLOSE. This may happen because online schema change is enabled.
2431       if (regCount > 0) {
2432         idx = random.nextInt(regCount);
2433         // if we have just tried this region, there is no need to try again
2434         if (attempted.contains(idx))
2435           continue;
2436         try {
2437           regions.get(idx).checkSplit();
2438           return regions.get(idx);
2439         } catch (Exception ex) {
2440           LOG.warn("Caught exception", ex);
2441           attempted.add(idx);
2442         }
2443       }
2444       attempts++;
2445     } while (maxAttempts == -1 || attempts < maxAttempts);
2446     return null;
2447   }
2448 
2449   public MiniZooKeeperCluster getZkCluster() {
2450     return zkCluster;
2451   }
2452 
2453   public void setZkCluster(MiniZooKeeperCluster zkCluster) {
2454     this.passedZkCluster = true;
2455     this.zkCluster = zkCluster;
2456     conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
2457   }
2458 
2459   public MiniDFSCluster getDFSCluster() {
2460     return dfsCluster;
2461   }
2462 
2463   public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
2464     if (dfsCluster != null && dfsCluster.isClusterUp()) {
2465       throw new IOException("DFSCluster is already running! Shut it down first.");
2466     }
2467     this.dfsCluster = cluster;
2468   }
2469 
2470   public FileSystem getTestFileSystem() throws IOException {
2471     return HFileSystem.get(conf);
2472   }
2473 
2474   /**
   * Wait until all regions in a table have been assigned.  Waits up to the default
   * timeout (30 seconds) before giving up.
2477    * @param table Table to wait on.
2478    * @throws InterruptedException
2479    * @throws IOException
2480    */
2481   public void waitTableAvailable(byte[] table)
2482       throws InterruptedException, IOException {
2483     waitTableAvailable(getHBaseAdmin(), table, 30000);
2484   }
2485 
2486   public void waitTableAvailable(HBaseAdmin admin, byte[] table)
2487       throws InterruptedException, IOException {
2488     waitTableAvailable(admin, table, 30000);
2489   }
2490 
2491   /**
2492    * Wait until all regions in a table have been assigned
2493    * @param table Table to wait on.
2494    * @param timeoutMillis Timeout.
2495    * @throws InterruptedException
2496    * @throws IOException
2497    */
2498   public void waitTableAvailable(byte[] table, long timeoutMillis)
2499   throws InterruptedException, IOException {
2500     waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
2501   }
2502 
2503   public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
2504   throws InterruptedException, IOException {
2505     long startWait = System.currentTimeMillis();
2506     while (!admin.isTableAvailable(table)) {
2507       assertTrue("Timed out waiting for table to become available " +
2508         Bytes.toStringBinary(table),
2509         System.currentTimeMillis() - startWait < timeoutMillis);
2510       Thread.sleep(200);
2511     }
    // Finally make sure all regions are fully open and online out on the cluster. Regions may be
    // in the hbase:meta table and almost open on all regionservers, but setting the region
    // online in the regionserver is the very last thing done and can take a little while to happen.
    // Below we do a get.  The get will retry if it hits a NotServingRegionException or a
    // RegionOpeningException.  It is crude, but once it succeeds all regions will be online.
2517     try {
2518       Canary.sniff(admin, TableName.valueOf(table));
2519     } catch (Exception e) {
2520       throw new IOException(e);
2521     }
2522   }
2523 
  /**
   * Waits for a table to be 'enabled'.  Enabled means that the table is set as 'enabled' and
   * all of its regions have been assigned.  Will time out after the default period (30 seconds).
   * @see #waitTableAvailable(byte[])
   * @param table Table to wait on.
   * @throws InterruptedException
   * @throws IOException
   */
2533   public void waitTableEnabled(byte[] table)
2534       throws InterruptedException, IOException {
2535     waitTableEnabled(getHBaseAdmin(), table, 30000);
2536   }
2537 
2538   public void waitTableEnabled(HBaseAdmin admin, byte[] table)
2539       throws InterruptedException, IOException {
2540     waitTableEnabled(admin, table, 30000);
2541   }
2542 
2543   /**
2544    * Waits for a table to be 'enabled'.  Enabled means that table is set as 'enabled' and the
2545    * regions have been all assigned.
2546    * @see #waitTableAvailable(byte[])
2547    * @param table Table to wait on.
2548    * @param timeoutMillis Time to wait on it being marked enabled.
2549    * @throws InterruptedException
2550    * @throws IOException
2551    */
2552   public void waitTableEnabled(byte[] table, long timeoutMillis)
2553   throws InterruptedException, IOException {
2554     waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
2555   }
2556 
2557   public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
2558   throws InterruptedException, IOException {
2559     long startWait = System.currentTimeMillis();
2560     waitTableAvailable(admin, table, timeoutMillis);
    // Compare elapsed time against startWait so the enabled check shares the
    // same timeout budget as the availability wait above.
    while (!admin.isTableEnabled(table)) {
      assertTrue("Timed out waiting for table to become available and enabled " +
         Bytes.toStringBinary(table),
         System.currentTimeMillis() - startWait < timeoutMillis);
2566       Thread.sleep(200);
2567     }
2568     LOG.debug("REMOVE AFTER table=" + Bytes.toString(table) + ", isTableAvailable=" +
2569         admin.isTableAvailable(table) +
2570         ", isTableEnabled=" + admin.isTableEnabled(table));
2571   }
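
  // Usage sketch (hypothetical TableName tn): after enabling a table through
  // the shared admin, block until it is available and marked enabled.
  //
  //   TEST_UTIL.getHBaseAdmin().enableTable(tn);
  //   TEST_UTIL.waitTableEnabled(tn.getName(), 30000);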
2572 
2573   /**
2574    * Make sure that at least the specified number of region servers
2575    * are running
2576    * @param num minimum number of region servers that should be running
2577    * @return true if we started some servers
2578    * @throws IOException
2579    */
2580   public boolean ensureSomeRegionServersAvailable(final int num)
2581       throws IOException {
2582     boolean startedServer = false;
2583     MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
2584     for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
2585       LOG.info("Started new server=" + hbaseCluster.startRegionServer());
2586       startedServer = true;
2587     }
2588 
2589     return startedServer;
2590   }
2591 
2592 
2593   /**
2594    * Make sure that at least the specified number of region servers
2595    * are running. We don't count the ones that are currently stopping or are
2596    * stopped.
2597    * @param num minimum number of region servers that should be running
2598    * @return true if we started some servers
2599    * @throws IOException
2600    */
2601   public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
2602     throws IOException {
2603     boolean startedServer = ensureSomeRegionServersAvailable(num);
2604 
2605     int nonStoppedServers = 0;
2606     for (JVMClusterUtil.RegionServerThread rst :
2607       getMiniHBaseCluster().getRegionServerThreads()) {
2608 
2609       HRegionServer hrs = rst.getRegionServer();
2610       if (hrs.isStopping() || hrs.isStopped()) {
2611         LOG.info("A region server is stopped or stopping:"+hrs);
2612       } else {
2613         nonStoppedServers++;
2614       }
2615     }
2616     for (int i=nonStoppedServers; i<num; ++i) {
2617       LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
2618       startedServer = true;
2619     }
2620     return startedServer;
2621   }
2622 
2623 
2624   /**
   * This method clones the passed <code>c</code> configuration, setting a new
   * user into the clone.  Use it when getting new instances of FileSystem.  Only
2627    * works for DistributedFileSystem.
2628    * @param c Initial configuration
2629    * @param differentiatingSuffix Suffix to differentiate this user from others.
2630    * @return A new configuration instance with a different user set into it.
2631    * @throws IOException
2632    */
2633   public static User getDifferentUser(final Configuration c,
2634     final String differentiatingSuffix)
2635   throws IOException {
2636     FileSystem currentfs = FileSystem.get(c);
2637     if (!(currentfs instanceof DistributedFileSystem)) {
2638       return User.getCurrent();
2639     }
2640     // Else distributed filesystem.  Make a new instance per daemon.  Below
2641     // code is taken from the AppendTestUtil over in hdfs.
2642     String username = User.getCurrent().getName() +
2643       differentiatingSuffix;
2644     User user = User.createUserForTesting(c, username,
2645         new String[]{"supergroup"});
2646     return user;
2647   }
2648 
2649   /**
   * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append it's hard-coded to 5 and
2651    * makes tests linger.  Here is the exception you'll see:
2652    * <pre>
2653    * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/hlog.1276627923013 block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683 failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
2654    * </pre>
2655    * @param stream A DFSClient.DFSOutputStream.
2656    * @param max
2657    * @throws NoSuchFieldException
2658    * @throws SecurityException
2659    * @throws IllegalAccessException
2660    * @throws IllegalArgumentException
2661    */
2662   public static void setMaxRecoveryErrorCount(final OutputStream stream,
2663       final int max) {
2664     try {
2665       Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
2666       for (Class<?> clazz: clazzes) {
2667         String className = clazz.getSimpleName();
2668         if (className.equals("DFSOutputStream")) {
2669           if (clazz.isInstance(stream)) {
2670             Field maxRecoveryErrorCountField =
2671               stream.getClass().getDeclaredField("maxRecoveryErrorCount");
2672             maxRecoveryErrorCountField.setAccessible(true);
2673             maxRecoveryErrorCountField.setInt(stream, max);
2674             break;
2675           }
2676         }
2677       }
2678     } catch (Exception e) {
2679       LOG.info("Could not set max recovery field", e);
2680     }
2681   }
2682 
2683   /**
2684    * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, up to 60 seconds. This means all regions have been deployed, the
   * master has been informed, and hbase:meta has been updated with each region's
   * deployed server.
2688    * @param tableName the table name
2689    * @throws IOException
2690    */
2691   public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
2692     waitUntilAllRegionsAssigned(tableName, 60000);
2693   }
2694 
2695   /**
2696    * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, or until timeout.  This means all regions have been deployed, the
   * master has been informed, and hbase:meta has been updated with each region's
   * deployed server.
2700    * @param tableName the table name
2701    * @param timeout timeout, in milliseconds
2702    * @throws IOException
2703    */
2704   public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
2705       throws IOException {
2706     final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
2707     try {
2708       waitFor(timeout, 200, true, new Predicate<IOException>() {
2709         @Override
2710         public boolean evaluate() throws IOException {
2711           boolean allRegionsAssigned = true;
2712           Scan scan = new Scan();
2713           scan.addFamily(HConstants.CATALOG_FAMILY);
2714           ResultScanner s = meta.getScanner(scan);
2715           try {
2716             Result r;
2717             while ((r = s.next()) != null) {
2718               byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
2719               HRegionInfo info = HRegionInfo.parseFromOrNull(b);
2720               if (info != null && info.getTable().equals(tableName)) {
2721                 b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
2722                 allRegionsAssigned &= (b != null);
2723               }
2724             }
2725           } finally {
2726             s.close();
2727           }
2728           return allRegionsAssigned;
2729         }
2730       });
2731     } finally {
2732       meta.close();
2733     }
2734   }
2735 
2736   /**
2737    * Do a small get/scan against one store. This is required because store
2738    * has no actual methods of querying itself, and relies on StoreScanner.
2739    */
2740   public static List<Cell> getFromStoreFile(HStore store,
2741                                                 Get get) throws IOException {
2742     MultiVersionConsistencyControl.resetThreadReadPoint();
2743     Scan scan = new Scan(get);
2744     InternalScanner scanner = (InternalScanner) store.getScanner(scan,
2745         scan.getFamilyMap().get(store.getFamily().getName()));
2746 
2747     List<Cell> result = new ArrayList<Cell>();
2748     scanner.next(result);
2749     if (!result.isEmpty()) {
2750       // verify that we are on the row we want:
2751       Cell kv = result.get(0);
2752       if (!CellUtil.matchingRow(kv, get.getRow())) {
2753         result.clear();
2754       }
2755     }
2756     scanner.close();
2757     return result;
2758   }
2759 
2760   /**
   * Create region split keys between startKey and endKey
   *
   * @param startKey first key of the range
   * @param endKey last key of the range
   * @param numRegions the number of regions to be created. It has to be greater than 3.
   * @return array of region start keys; the first entry is the empty byte array
2767    */
2768   public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
2769     assertTrue(numRegions>3);
2770     byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
2771     byte [][] result = new byte[tmpSplitKeys.length+1][];
2772     for (int i=0;i<tmpSplitKeys.length;i++) {
2773       result[i+1] = tmpSplitKeys[i];
2774     }
2775     result[0] = HConstants.EMPTY_BYTE_ARRAY;
2776     return result;
2777   }
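
  // Usage sketch (hypothetical keys): pre-split a new table into 5 regions.
  //
  //   byte[][] startKeys = TEST_UTIL.getRegionSplitStartKeys(
  //       Bytes.toBytes("aaaaa"), Bytes.toBytes("zzzzz"), 5);
  //   // startKeys.length == 5; startKeys[0] is HConstants.EMPTY_BYTE_ARRAY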
2778 
2779   /**
2780    * Do a small get/scan against one store. This is required because store
2781    * has no actual methods of querying itself, and relies on StoreScanner.
2782    */
2783   public static List<Cell> getFromStoreFile(HStore store,
2784                                                 byte [] row,
2785                                                 NavigableSet<byte[]> columns
2786                                                 ) throws IOException {
2787     Get get = new Get(row);
2788     Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
2789     s.put(store.getFamily().getName(), columns);
2790 
2791     return getFromStoreFile(store,get);
2792   }
2793 
2794   /**
2795    * Gets a ZooKeeperWatcher.
2796    * @param TEST_UTIL
2797    */
2798   public static ZooKeeperWatcher getZooKeeperWatcher(
2799       HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
2800       IOException {
2801     ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
2802         "unittest", new Abortable() {
2803           boolean aborted = false;
2804 
2805           @Override
2806           public void abort(String why, Throwable e) {
2807             aborted = true;
2808             throw new RuntimeException("Fatal ZK error, why=" + why, e);
2809           }
2810 
2811           @Override
2812           public boolean isAborted() {
2813             return aborted;
2814           }
2815         });
2816     return zkw;
2817   }
2818 
  /**
   * Creates a znode with OPENED state, forcing it through the
   * OFFLINE -> OPENING -> OPENED transitions.
   * @param TEST_UTIL the test utility providing the ZooKeeper configuration
   * @param region the region whose assignment znode is created
   * @param serverName the server the znode records as opening the region
   * @return the {@link ZooKeeperWatcher} used to create the znode
   * @throws IOException
   * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
   * @throws KeeperException
   * @throws NodeExistsException
   */
2830   public static ZooKeeperWatcher createAndForceNodeToOpenedState(
2831       HBaseTestingUtility TEST_UTIL, HRegion region,
2832       ServerName serverName) throws ZooKeeperConnectionException,
2833       IOException, KeeperException, NodeExistsException {
2834     ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
2835     ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
2836     int version = ZKAssign.transitionNodeOpening(zkw, region
2837         .getRegionInfo(), serverName);
2838     ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
2839         version);
2840     return zkw;
2841   }
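
  // Illustrative sketch: tests exercising assignment typically force the znode
  // to OPENED and close the watcher afterwards; region and serverName are
  // assumed to come from a running mini cluster.
  private static void exampleForceOpenedState(HBaseTestingUtility util,
      HRegion region, ServerName serverName) throws Exception {
    ZooKeeperWatcher zkw = createAndForceNodeToOpenedState(util, region, serverName);
    try {
      // The znode now reflects the OFFLINE -> OPENING -> OPENED sequence.
    } finally {
      zkw.close();
    }
  }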
2842 
2843   public static void assertKVListsEqual(String additionalMsg,
2844       final List<? extends Cell> expected,
2845       final List<? extends Cell> actual) {
2846     final int eLen = expected.size();
2847     final int aLen = actual.size();
2848     final int minLen = Math.min(eLen, aLen);
2849 
2850     int i;
2851     for (i = 0; i < minLen
2852         && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
2853         ++i) {}
2854 
2855     if (additionalMsg == null) {
2856       additionalMsg = "";
2857     }
2858     if (!additionalMsg.isEmpty()) {
2859       additionalMsg = ". " + additionalMsg;
2860     }
2861 
2862     if (eLen != aLen || i != minLen) {
2863       throw new AssertionError(
2864           "Expected and actual KV arrays differ at position " + i + ": " +
2865           safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
2866           safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
2867     }
2868   }
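
  // Illustrative sketch (hypothetical cells): comparing scanner output against
  // an expected list; the assertion passes because the lists are identical.
  private static void exampleAssertKVListsEqual() {
    List<KeyValue> expected = new ArrayList<KeyValue>();
    expected.add(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), Bytes.toBytes("value")));
    List<KeyValue> actual = new ArrayList<KeyValue>(expected);
    assertKVListsEqual("sanity check", expected, actual);
  }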
2869 
2870   private static <T> String safeGetAsStr(List<T> lst, int i) {
2871     if (0 <= i && i < lst.size()) {
2872       return lst.get(i).toString();
2873     } else {
2874       return "<out_of_range>";
2875     }
2876   }
2877 
2878   public String getClusterKey() {
2879     return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
2880         + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
2881         + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
2882             HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
2883   }
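
  // For example (hypothetical values): with quorum "zk1,zk2,zk3", client port
  // 2181 and the default parent znode, getClusterKey() returns
  // "zk1,zk2,zk3:2181:/hbase".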
2884 
2885   /** Creates a random table with the given parameters */
2886   public HTable createRandomTable(String tableName,
2887       final Collection<String> families,
2888       final int maxVersions,
2889       final int numColsPerRow,
2890       final int numFlushes,
2891       final int numRegions,
2892       final int numRowsPerFlush)
2893       throws IOException, InterruptedException {
2894 
2895     LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
2896         " regions, " + numFlushes + " storefiles per region, " +
2897         numRowsPerFlush + " rows per flush, maxVersions=" +  maxVersions +
2898         "\n");
2899 
2900     final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
2901     final int numCF = families.size();
2902     final byte[][] cfBytes = new byte[numCF][];
2903     {
2904       int cfIndex = 0;
2905       for (String cf : families) {
2906         cfBytes[cfIndex++] = Bytes.toBytes(cf);
2907       }
2908     }
2909 
2910     final int actualStartKey = 0;
2911     final int actualEndKey = Integer.MAX_VALUE;
2912     final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
2913     final int splitStartKey = actualStartKey + keysPerRegion;
2914     final int splitEndKey = actualEndKey - keysPerRegion;
2915     final String keyFormat = "%08x";
2916     final HTable table = createTable(tableName, cfBytes,
2917         maxVersions,
2918         Bytes.toBytes(String.format(keyFormat, splitStartKey)),
2919         Bytes.toBytes(String.format(keyFormat, splitEndKey)),
2920         numRegions);
2921 
2922     if (hbaseCluster != null) {
2923       getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
2924     }
2925 
2926     for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
2927       for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
2928         final byte[] row = Bytes.toBytes(String.format(keyFormat,
2929             actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
2930 
2931         Put put = new Put(row);
2932         Delete del = new Delete(row);
2933         for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
2934           final byte[] cf = cfBytes[rand.nextInt(numCF)];
2935           final long ts = rand.nextInt();
2936           final byte[] qual = Bytes.toBytes("col" + iCol);
2937           if (rand.nextBoolean()) {
2938             final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
2939                 "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
2940                 ts + "_random_" + rand.nextLong());
2941             put.add(cf, qual, ts, value);
2942           } else if (rand.nextDouble() < 0.8) {
2943             del.deleteColumn(cf, qual, ts);
2944           } else {
2945             del.deleteColumns(cf, qual, ts);
2946           }
2947         }
2948 
2949         if (!put.isEmpty()) {
2950           table.put(put);
2951         }
2952 
2953         if (!del.isEmpty()) {
2954           table.delete(del);
2955         }
2956       }
2957       LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
2958       table.flushCommits();
2959       if (hbaseCluster != null) {
2960         getMiniHBaseCluster().flushcache(table.getName());
2961       }
2962     }
2963 
2964     return table;
2965   }
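
  // Illustrative sketch (hypothetical parameters): a small random table with
  // two families, up to three versions per cell, and four pre-split regions.
  private HTable exampleCreateRandomTable() throws IOException,
      InterruptedException {
    return createRandomTable("randomTestTable",
        Arrays.asList("cf1", "cf2"),
        3,   // maxVersions
        10,  // numColsPerRow
        2,   // numFlushes
        4,   // numRegions
        20); // numRowsPerFlush
  }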
2966 
2967   private static final int MIN_RANDOM_PORT = 0xc000;
2968   private static final int MAX_RANDOM_PORT = 0xfffe;
2969   private static Random random = new Random();
2970 
  /**
   * Returns a random port in the IANA dynamic/private range. These ports
   * cannot be registered with IANA and are intended for dynamic allocation
   * (see http://bit.ly/dynports).
   */
2975   public static int randomPort() {
2976     return MIN_RANDOM_PORT
2977         + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
2978   }
2979 
  /**
   * Returns a random free port and marks that port as taken. Not thread-safe.
   * Expected to be called from single-threaded test setup code.
   */
2984   public static int randomFreePort() {
2985     int port = 0;
    do {
      port = randomPort();
      if (takenRandomPorts.contains(port)) {
        port = 0; // already handed out; reset so the loop retries
        continue;
      }
2991       takenRandomPorts.add(port);
2992 
2993       try {
2994         ServerSocket sock = new ServerSocket(port);
2995         sock.close();
2996       } catch (IOException ex) {
2997         port = 0;
2998       }
2999     } while (port == 0);
3000     return port;
3001   }
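
  // Illustrative sketch: reserving a free port for a test-local service; the
  // configuration key below is hypothetical.
  private static void exampleReservePort(Configuration conf) {
    int port = randomFreePort();
    conf.setInt("example.test.service.port", port);
  }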
3002 
3003 
3004   public static String randomMultiCastAddress() {
3005     return "226.1.1." + random.nextInt(254);
3006   }
3007 
3008 
3009 
3010   public static void waitForHostPort(String host, int port)
3011       throws IOException {
3012     final int maxTimeMs = 10000;
3013     final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3014     IOException savedException = null;
3015     LOG.info("Waiting for server at " + host + ":" + port);
3016     for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3017       try {
3018         Socket sock = new Socket(InetAddress.getByName(host), port);
3019         sock.close();
3020         savedException = null;
3021         LOG.info("Server at " + host + ":" + port + " is available");
3022         break;
3023       } catch (UnknownHostException e) {
3024         throw new IOException("Failed to look up " + host, e);
3025       } catch (IOException e) {
3026         savedException = e;
3027       }
3028       Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3029     }
3030 
3031     if (savedException != null) {
3032       throw savedException;
3033     }
3034   }
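
  // Illustrative sketch (hypothetical endpoint): block until a test service
  // accepts connections, or rethrow the last connect failure after the retry
  // budget of ten seconds is spent.
  private static void exampleWaitForService() throws IOException {
    waitForHostPort("127.0.0.1", 8080);
  }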
3035 
3036   /**
3037    * Creates a pre-split table for load testing. If the table already exists,
3038    * logs a warning and continues.
3039    * @return the number of regions the table was split into
3040    */
3041   public static int createPreSplitLoadTestTable(Configuration conf,
3042       TableName tableName, byte[] columnFamily, Algorithm compression,
3043       DataBlockEncoding dataBlockEncoding) throws IOException {
3044     HTableDescriptor desc = new HTableDescriptor(tableName);
3045     HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
3046     hcd.setDataBlockEncoding(dataBlockEncoding);
3047     hcd.setCompressionType(compression);
3048     return createPreSplitLoadTestTable(conf, desc, hcd);
3049   }
3050 
3051   /**
3052    * Creates a pre-split table for load testing. If the table already exists,
3053    * logs a warning and continues.
3054    * @return the number of regions the table was split into
3055    */
3056   public static int createPreSplitLoadTestTable(Configuration conf,
3057       HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
3058     if (!desc.hasFamily(hcd.getName())) {
3059       desc.addFamily(hcd);
3060     }
3061 
3062     int totalNumberOfRegions = 0;
3063     HBaseAdmin admin = new HBaseAdmin(conf);
3064     try {
      // Create a table with pre-split regions. The number of splits is:
      //    (number of region servers) * (regions per region server).
3068       int numberOfServers = admin.getClusterStatus().getServers().size();
3069       if (numberOfServers == 0) {
3070         throw new IllegalStateException("No live regionservers");
3071       }
3072 
3073       totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
3074       LOG.info("Number of live regionservers: " + numberOfServers + ", " +
3075           "pre-splitting table into " + totalNumberOfRegions + " regions " +
3076           "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");
3077 
3078       byte[][] splits = new RegionSplitter.HexStringSplit().split(
3079           totalNumberOfRegions);
3080 
3081       admin.createTable(desc, splits);
3082     } catch (MasterNotRunningException e) {
3083       LOG.error("Master not running", e);
3084       throw new IOException(e);
3085     } catch (TableExistsException e) {
3086       LOG.warn("Table " + desc.getTableName() +
3087           " already exists, continuing");
3088     } finally {
3089       admin.close();
3090     }
3091     return totalNumberOfRegions;
3092   }
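
  // Illustrative sketch (hypothetical table and family names): creating a
  // GZ-compressed, prefix-encoded load test table.
  private static void exampleCreateLoadTestTable(Configuration conf)
      throws IOException {
    int regions = createPreSplitLoadTestTable(conf,
        TableName.valueOf("loadTestTable"), Bytes.toBytes("load_cf"),
        Algorithm.GZ, DataBlockEncoding.PREFIX);
    LOG.info("Table pre-split into " + regions + " regions");
  }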
3093 
3094   public static int getMetaRSPort(Configuration conf) throws IOException {
3095     HTable table = new HTable(conf, TableName.META_TABLE_NAME);
3096     HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
3097     table.close();
3098     return hloc.getPort();
3099   }
3100 
  /**
   * Due to an asynchronous race, a region may not yet appear in a region
   * server's online-region list even after its assignment znode has been
   * deleted and the new assignment recorded in the master. Retries until the
   * region shows up on the given server or the timeout expires.
   */
3107   public void assertRegionOnServer(
3108       final HRegionInfo hri, final ServerName server,
3109       final long timeout) throws IOException, InterruptedException {
3110     long timeoutTime = System.currentTimeMillis() + timeout;
3111     while (true) {
3112       List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3113       if (regions.contains(hri)) return;
3114       long now = System.currentTimeMillis();
3115       if (now > timeoutTime) break;
3116       Thread.sleep(10);
3117     }
3118     fail("Could not find region " + hri.getRegionNameAsString()
3119       + " on server " + server);
3120   }
3121 
3122   /**
3123    * Check to make sure the region is open on the specified
3124    * region server, but not on any other one.
3125    */
3126   public void assertRegionOnlyOnServer(
3127       final HRegionInfo hri, final ServerName server,
3128       final long timeout) throws IOException, InterruptedException {
3129     long timeoutTime = System.currentTimeMillis() + timeout;
3130     while (true) {
3131       List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3132       if (regions.contains(hri)) {
3133         List<JVMClusterUtil.RegionServerThread> rsThreads =
3134           getHBaseCluster().getLiveRegionServerThreads();
3135         for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
3136           HRegionServer rs = rsThread.getRegionServer();
3137           if (server.equals(rs.getServerName())) {
3138             continue;
3139           }
3140           Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
3141           for (HRegion r: hrs) {
3142             assertTrue("Region should not be double assigned",
3143               r.getRegionId() != hri.getRegionId());
3144           }
3145         }
3146         return; // good, we are happy
3147       }
3148       long now = System.currentTimeMillis();
3149       if (now > timeoutTime) break;
3150       Thread.sleep(10);
3151     }
3152     fail("Could not find region " + hri.getRegionNameAsString()
3153       + " on server " + server);
3154   }
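
  // Illustrative sketch (hypothetical values): after moving a region, a test
  // might assert it landed on the destination server, and only there, within
  // ten seconds:
  //
  //   assertRegionOnServer(hri, destServer, 10000);
  //   assertRegionOnlyOnServer(hri, destServer, 10000);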
3155 
3156   public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
3157       throws IOException {
3158     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
3159     htd.addFamily(hcd);
3160     HRegionInfo info =
3161         new HRegionInfo(TableName.valueOf(tableName), null, null, false);
3162     HRegion region =
3163         HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
3164     return region;
3165   }
3166 
3167   public void setFileSystemURI(String fsURI) {
3168     FS_URI = fsURI;
3169   }
3170 
3171   /**
3172    * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
3173    */
3174   public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
3175       throws E {
3176     return Waiter.waitFor(this.conf, timeout, predicate);
3177   }
3178 
3179   /**
3180    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
3181    */
3182   public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
3183       throws E {
3184     return Waiter.waitFor(this.conf, timeout, interval, predicate);
3185   }
3186 
3187   /**
3188    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
3189    */
3190   public <E extends Exception> long waitFor(long timeout, long interval,
3191       boolean failIfTimeout, Predicate<E> predicate) throws E {
3192     return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
3193   }
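
  // Illustrative sketch: waiting up to 30 seconds, polling every second, for a
  // hypothetical condition to become true.
  public void exampleWaitFor() throws Exception {
    waitFor(30000, 1000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return getHBaseAdmin().isTableAvailable(TableName.META_TABLE_NAME);
      }
    });
  }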
3194 
  /**
   * Returns a {@link Predicate} for checking that there are no regions in
   * transition in the master.
   */
3198   public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
3199     return new Waiter.Predicate<Exception>() {
3200       @Override
3201       public boolean evaluate() throws Exception {
3202         final RegionStates regionStates = getMiniHBaseCluster().getMaster()
3203             .getAssignmentManager().getRegionStates();
3204         return !regionStates.isRegionsInTransition();
3205       }
3206     };
3207   }
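
  // Illustrative sketch: a typical use of the predicate above, waiting up to
  // 60 seconds for all regions in transition to settle.
  public void exampleWaitForNoRegionsInTransition() throws Exception {
    waitFor(60000, predicateNoRegionsInTransition());
  }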
3208 
3209 }