1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase;
19  
20  import static org.junit.Assert.assertTrue;
21  import static org.junit.Assert.fail;
22  
23  import java.io.File;
24  import java.io.IOException;
25  import java.io.OutputStream;
26  import java.lang.reflect.Field;
27  import java.lang.reflect.Modifier;
28  import java.net.InetAddress;
29  import java.net.ServerSocket;
30  import java.net.Socket;
31  import java.net.UnknownHostException;
32  import java.security.MessageDigest;
33  import java.util.ArrayList;
34  import java.util.Arrays;
35  import java.util.Collection;
36  import java.util.Collections;
37  import java.util.HashSet;
38  import java.util.List;
39  import java.util.Map;
40  import java.util.NavigableSet;
41  import java.util.Random;
42  import java.util.Set;
43  import java.util.UUID;
44  import java.util.concurrent.TimeUnit;
45  
46  import org.apache.commons.logging.Log;
47  import org.apache.commons.logging.LogFactory;
48  import org.apache.commons.logging.impl.Jdk14Logger;
49  import org.apache.commons.logging.impl.Log4JLogger;
50  import org.apache.hadoop.classification.InterfaceAudience;
51  import org.apache.hadoop.classification.InterfaceStability;
52  import org.apache.hadoop.conf.Configuration;
53  import org.apache.hadoop.fs.FileSystem;
54  import org.apache.hadoop.fs.Path;
55  import org.apache.hadoop.hbase.Waiter.Predicate;
56  import org.apache.hadoop.hbase.catalog.MetaEditor;
57  import org.apache.hadoop.hbase.client.Delete;
58  import org.apache.hadoop.hbase.client.Durability;
59  import org.apache.hadoop.hbase.client.Get;
60  import org.apache.hadoop.hbase.client.HBaseAdmin;
61  import org.apache.hadoop.hbase.client.HConnection;
62  import org.apache.hadoop.hbase.client.HTable;
63  import org.apache.hadoop.hbase.client.Put;
64  import org.apache.hadoop.hbase.client.Result;
65  import org.apache.hadoop.hbase.client.ResultScanner;
66  import org.apache.hadoop.hbase.client.Scan;
67  import org.apache.hadoop.hbase.fs.HFileSystem;
68  import org.apache.hadoop.hbase.io.compress.Compression;
69  import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
70  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
71  import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
72  import org.apache.hadoop.hbase.io.hfile.HFile;
73  import org.apache.hadoop.hbase.ipc.RpcServerInterface;
74  import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
75  import org.apache.hadoop.hbase.master.HMaster;
76  import org.apache.hadoop.hbase.master.RegionStates;
77  import org.apache.hadoop.hbase.master.ServerManager;
78  import org.apache.hadoop.hbase.regionserver.BloomType;
79  import org.apache.hadoop.hbase.regionserver.HRegion;
80  import org.apache.hadoop.hbase.regionserver.HRegionServer;
81  import org.apache.hadoop.hbase.regionserver.HStore;
82  import org.apache.hadoop.hbase.regionserver.InternalScanner;
83  import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
84  import org.apache.hadoop.hbase.regionserver.RegionServerServices;
85  import org.apache.hadoop.hbase.regionserver.wal.HLog;
86  import org.apache.hadoop.hbase.security.User;
87  import org.apache.hadoop.hbase.tool.Canary;
88  import org.apache.hadoop.hbase.util.Bytes;
89  import org.apache.hadoop.hbase.util.FSUtils;
90  import org.apache.hadoop.hbase.util.JVMClusterUtil;
91  import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
92  import org.apache.hadoop.hbase.util.RegionSplitter;
93  import org.apache.hadoop.hbase.util.RetryCounter;
94  import org.apache.hadoop.hbase.util.Threads;
95  import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
96  import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
97  import org.apache.hadoop.hbase.zookeeper.ZKAssign;
98  import org.apache.hadoop.hbase.zookeeper.ZKConfig;
99  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
100 import org.apache.hadoop.hdfs.DFSClient;
101 import org.apache.hadoop.hdfs.DistributedFileSystem;
102 import org.apache.hadoop.hdfs.MiniDFSCluster;
103 import org.apache.hadoop.mapred.JobConf;
104 import org.apache.hadoop.mapred.MiniMRCluster;
105 import org.apache.hadoop.mapred.TaskLog;
106 import org.apache.zookeeper.KeeperException;
107 import org.apache.zookeeper.KeeperException.NodeExistsException;
108 import org.apache.zookeeper.WatchedEvent;
109 import org.apache.zookeeper.ZooKeeper;
110 import org.apache.zookeeper.ZooKeeper.States;
111 
112 /**
113  * Facility for testing HBase. Replacement for
114  * old HBaseTestCase and HBaseClusterTestCase functionality.
115  * Create an instance and keep it around while testing HBase.  This class is
116  * meant to be your one-stop shop for anything you might need while testing.  Manages
117  * one cluster at a time only. Managed cluster can be an in-process
118  * {@link MiniHBaseCluster}, or a deployed cluster of type {@link DistributedHBaseCluster}.
119  * Not all methods work with the real cluster.
120  * Depends on log4j being on classpath and
121  * hbase-site.xml for logging and test-run configuration.  It does not set
122  * logging levels nor make changes to configuration parameters.
123  * <p>To preserve test data directories, set the system property "hbase.testing.preserve.testdir"
124  * to true.
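 * <p>A minimal usage sketch (hypothetical test code; the table and family names
 * below are made up for illustration):
 * <pre>
 *   HBaseTestingUtility util = new HBaseTestingUtility();
 *   util.startMiniCluster();
 *   try {
 *     HTable table = util.createTable(TableName.valueOf("exampleTable"), Bytes.toBytes("f"));
 *     table.close();
 *   } finally {
 *     util.shutdownMiniCluster();
 *   }
 * </pre>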
125  */
126 @InterfaceAudience.Public
127 @InterfaceStability.Evolving
128 public class HBaseTestingUtility extends HBaseCommonTestingUtility {
129   private MiniZooKeeperCluster zkCluster = null;
130 
131   /**
132    * The default number of regions per regionserver when creating a pre-split
133    * table.
134    */
135   private static int DEFAULT_REGIONS_PER_SERVER = 5;
136 
137   /**
138    * Set if we were passed a zkCluster.  If so, we won't shutdown zk as
139    * part of general shutdown.
140    */
141   private boolean passedZkCluster = false;
142   private MiniDFSCluster dfsCluster = null;
143 
144   private HBaseCluster hbaseCluster = null;
145   private MiniMRCluster mrCluster = null;
146 
147   /** If there is a mini cluster running for this testing utility instance. */
148   private boolean miniClusterRunning;
149 
150   private String hadoopLogDir;
151 
152   /** Directory (a subdirectory of dataTestDir) used by the dfs cluster if any */
153   private File clusterTestDir = null;
154 
155   /** Directory on test filesystem where we put the data for this instance of
156     * HBaseTestingUtility */
157   private Path dataTestDirOnTestFS = null;
158 
159   /**
160    * System property key to get test directory value.
161    * Name is as it is because the mini dfs cluster is hard-coded to put its test data here.
162    * It should NOT be used directly in HBase, as it's a property used in
163    *  mini dfs.
164    *  @deprecated can be used only with mini dfs
165    */
166   @Deprecated
167   private static final String TEST_DIRECTORY_KEY = "test.build.data";
168 
169   /** Filesystem URI used for map-reduce mini-cluster setup */
170   private static String FS_URI;
171 
172   /** A set of ports that have been claimed using {@link #randomFreePort()}. */
173   private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();
174 
175   /** Compression algorithms to use in parameterized JUnit 4 tests */
176   public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
177     Arrays.asList(new Object[][] {
178       { Compression.Algorithm.NONE },
179       { Compression.Algorithm.GZ }
180     });
181 
182   /** This is for unit tests parameterized with a single boolean. */
183   public static final List<Object[]> BOOLEAN_PARAMETERIZED =
184       Arrays.asList(new Object[][] {
185           { Boolean.FALSE },
186           { Boolean.TRUE }
187       });
188 
189   /** Compression algorithms to use in testing */
190   public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS ={
191       Compression.Algorithm.NONE, Compression.Algorithm.GZ
192     };
193 
194   /**
195    * Create all combinations of Bloom filters and compression algorithms for
196    * testing.
197    */
198   private static List<Object[]> bloomAndCompressionCombinations() {
199     List<Object[]> configurations = new ArrayList<Object[]>();
200     for (Compression.Algorithm comprAlgo :
201          HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
202       for (BloomType bloomType : BloomType.values()) {
203         configurations.add(new Object[] { comprAlgo, bloomType });
204       }
205     }
206     return Collections.unmodifiableList(configurations);
207   }
208 
209   public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
210       bloomAndCompressionCombinations();
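  // Illustrative sketch (hypothetical test class, not part of this file): the
  // parameterized constants above are meant to feed JUnit 4's Parameterized
  // runner, e.g.:
  //
  //   @RunWith(Parameterized.class)
  //   public class TestWithBloomAndCompression {
  //     private final Compression.Algorithm comprAlgo;
  //     private final BloomType bloomType;
  //
  //     @Parameterized.Parameters
  //     public static Collection<Object[]> data() {
  //       return HBaseTestingUtility.BLOOM_AND_COMPRESSION_COMBINATIONS;
  //     }
  //
  //     public TestWithBloomAndCompression(Compression.Algorithm comprAlgo, BloomType bloomType) {
  //       this.comprAlgo = comprAlgo;
  //       this.bloomType = bloomType;
  //     }
  //   }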
211 
212   public HBaseTestingUtility() {
213     this(HBaseConfiguration.create());
214   }
215 
216   public HBaseTestingUtility(Configuration conf) {
217     super(conf);
218 
219     // a hbase checksum verification failure will cause unit tests to fail
220     ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
221   }
222 
223   /**
224    * Create an HBaseTestingUtility where all tmp files are written to the local test data dir.
225    * This is needed to properly base FSUtil.getRootDirs so that temp files land in the proper
226    * test dir.  Use this when you aren't using a mini HDFS cluster.
227    * @return HBaseTestingUtility that uses local fs for temp files.
228    */
229   public static HBaseTestingUtility createLocalHTU() {
230     Configuration c = HBaseConfiguration.create();
231     return createLocalHTU(c);
232   }
233 
234   /**
235    * Create an HBaseTestingUtility where all tmp files are written to the local test data dir.
236    * This is needed to properly base FSUtil.getRootDirs so that temp files land in the proper
237    * test dir.  Use this when you aren't using a mini HDFS cluster.
238    * @param c Configuration (will be modified)
239    * @return HBaseTestingUtility that uses local fs for temp files.
240    */
241   public static HBaseTestingUtility createLocalHTU(Configuration c) {
242     HBaseTestingUtility htu = new HBaseTestingUtility(c);
243     String dataTestDir = htu.getDataTestDir().toString();
244     htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
245     LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
246     return htu;
247   }
248 
249   /**
250    * Returns this class's instance of {@link Configuration}.  Be careful how
251    * you use the returned Configuration since {@link HConnection} instances
252    * can be shared.  The Map of HConnections is keyed by the Configuration.  If
253    * say, a Connection was being used against a cluster that had been shutdown,
254    * see {@link #shutdownMiniCluster()}, then the Connection will no longer
255    * be wholesome.  Rather than using the returned Configuration directly, it's usually best to
256    * make a copy and use that.  Do
257    * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
258    * @return Instance of Configuration.
259    */
260   @Override
261   public Configuration getConfiguration() {
262     return super.getConfiguration();
263   }
264 
265   public void setHBaseCluster(HBaseCluster hbaseCluster) {
266     this.hbaseCluster = hbaseCluster;
267   }
268 
269   /**
270    * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
271    * Give it a random name so we can have many concurrent tests running if
272    * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}
273    * System property, as it's what minidfscluster bases
274    * its data dir on.  Modifying a System property is not the way to do concurrent
275    * instances -- another instance could grab the temporary
276    * value unintentionally -- but nothing can be done about it at the moment;
277    * the minidfscluster only works as a single instance.
278    *
279    * We also create the underlying directory for
280    *  hadoop.log.dir, mapred.local.dir and hadoop.tmp.dir, and set the values
281    *  in the conf, and as a system property for hadoop.tmp.dir
282    *
283    * @return The calculated data test build directory, if newly-created.
284    */
285   @Override
286   protected Path setupDataTestDir() {
287     Path testPath = super.setupDataTestDir();
288     if (null == testPath) {
289       return null;
290     }
291 
292     createSubDirAndSystemProperty(
293       "hadoop.log.dir",
294       testPath, "hadoop-log-dir");
295 
296     // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
297     //  we want our own value to ensure uniqueness on the same machine
298     createSubDirAndSystemProperty(
299       "hadoop.tmp.dir",
300       testPath, "hadoop-tmp-dir");
301 
302     // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
303     createSubDir(
304       "mapred.local.dir",
305       testPath, "mapred-local-dir");
306 
307     return testPath;
308   }
309 
310   private void createSubDirAndSystemProperty(
311     String propertyName, Path parent, String subDirName){
312 
313     String sysValue = System.getProperty(propertyName);
314 
315     if (sysValue != null) {
316       // There is already a value set. So we do nothing but hope
317       //  that there will be no conflicts
318       LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
319         sysValue + " so I do NOT create it in " + parent);
320       String confValue = conf.get(propertyName);
321       if (confValue != null && !confValue.endsWith(sysValue)){
322        LOG.warn(
323          propertyName + " property value differs in configuration and system: "+
324          "Configuration="+confValue+" while System="+sysValue+
325          " Overriding the configuration value with the system value."
326        );
327       }
328       conf.set(propertyName, sysValue);
329     } else {
330       // Ok, it's not set, so we create it as a subdirectory
331       createSubDir(propertyName, parent, subDirName);
332       System.setProperty(propertyName, conf.get(propertyName));
333     }
334   }
335 
336   /**
337    * @return Where to write test data on the test filesystem; the working directory
338    * of the test filesystem by default
339    * @see #setupDataTestDirOnTestFS()
340    * @see #getTestFileSystem()
341    */
342   private Path getBaseTestDirOnTestFS() throws IOException {
343     FileSystem fs = getTestFileSystem();
344     return new Path(fs.getWorkingDirectory(), "test-data");
345   }
346 
347   /**
348    * @return Where the DFS cluster will write data on the local filesystem.
349    * Creates it if it does not exist already.  A subdir of {@link #getBaseTestDir()}
350    * @see #getTestFileSystem()
351    */
352   Path getClusterTestDir() {
353     if (clusterTestDir == null){
354       setupClusterTestDir();
355     }
356     return new Path(clusterTestDir.getAbsolutePath());
357   }
358 
359   /**
360    * Creates a directory for the DFS cluster, under the test data
361    */
362   private void setupClusterTestDir() {
363     if (clusterTestDir != null) {
364       return;
365     }
366 
367     // Using randomUUID ensures that multiple clusters can be launched by
368     //  the same test, if it stops & starts them
369     Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
370     clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
371     // Have it cleaned up on exit
372     boolean b = deleteOnExit();
373     if (b) clusterTestDir.deleteOnExit();
374     conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
375     LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
376   }
377 
378   /**
379    * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
380    * to write temporary test data. Call this method after setting up the mini dfs cluster
381    * if the test relies on it.
382    * @return a unique path in the test filesystem
383    */
384   public Path getDataTestDirOnTestFS() throws IOException {
385     if (dataTestDirOnTestFS == null) {
386       setupDataTestDirOnTestFS();
387     }
388 
389     return dataTestDirOnTestFS;
390   }
391 
392   /**
393    * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
394    * to write temporary test data. Call this method after setting up the mini dfs cluster
395    * if the test relies on it.
396    * @return a unique path in the test filesystem
397    * @param subdirName name of the subdir to create under the base test dir
398    */
399   public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
400     return new Path(getDataTestDirOnTestFS(), subdirName);
401   }
402 
403   /**
404    * Sets up a path in test filesystem to be used by tests
405    */
406   private void setupDataTestDirOnTestFS() throws IOException {
407     if (dataTestDirOnTestFS != null) {
408       LOG.warn("Data test on test fs dir already setup in "
409           + dataTestDirOnTestFS.toString());
410       return;
411     }
412 
413     //The file system can be either local, mini dfs, or if the configuration
414     //is supplied externally, it can be an external cluster FS. If it is a local
415     //file system, the tests should use getBaseTestDir, otherwise, we can use
416     //the working directory, and create a unique sub dir there
417     FileSystem fs = getTestFileSystem();
418     if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
419       File dataTestDir = new File(getDataTestDir().toString());
420       if (deleteOnExit()) dataTestDir.deleteOnExit();
421       dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
422     } else {
423       Path base = getBaseTestDirOnTestFS();
424       String randomStr = UUID.randomUUID().toString();
425       dataTestDirOnTestFS = new Path(base, randomStr);
426       if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
427     }
428   }
429 
430   /**
431    * Cleans the test data directory on the test filesystem.
432    * @return True if we removed the test dirs
433    * @throws IOException
434    */
435   public boolean cleanupDataTestDirOnTestFS() throws IOException {
436     boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
437     if (ret)
438       dataTestDirOnTestFS = null;
439     return ret;
440   }
441 
442   /**
443    * Cleans a subdirectory under the test data directory on the test filesystem.
444    * @return True if we removed the child
445    * @throws IOException
446    */
447   public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
448     Path cpath = getDataTestDirOnTestFS(subdirName);
449     return getTestFileSystem().delete(cpath, true);
450   }
451 
452   /**
453    * Start a minidfscluster.
454    * @param servers How many DNs to start.
455    * @throws Exception
456    * @see {@link #shutdownMiniDFSCluster()}
457    * @return The mini dfs cluster created.
458    */
459   public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
460     return startMiniDFSCluster(servers, null);
461   }
462 
463   /**
464    * Start a minidfscluster.
465    * This is useful if you want to run datanodes on distinct hosts for things
466    * like HDFS block location verification.
467    * If you start MiniDFSCluster without host names, all instances of the
468    * datanodes will have the same host name.
469    * @param hosts hostnames of DNs to run on.
470    * @throws Exception
471    * @see {@link #shutdownMiniDFSCluster()}
472    * @return The mini dfs cluster created.
473    */
474   public MiniDFSCluster startMiniDFSCluster(final String hosts[])
475   throws Exception {
476     if (hosts != null && hosts.length != 0) {
477       return startMiniDFSCluster(hosts.length, hosts);
478     } else {
479       return startMiniDFSCluster(1, null);
480     }
481   }
482 
483   /**
484    * Start a minidfscluster.
485    * Can only create one.
486    * @param servers How many DNs to start.
487    * @param hosts hostnames of DNs to run on.
488    * @throws Exception
489    * @see {@link #shutdownMiniDFSCluster()}
490    * @return The mini dfs cluster created.
491    */
492   public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
493   throws Exception {
494     createDirsAndSetProperties();
495 
496     // Error level to skip some warnings specific to the minicluster. See HBASE-4709
497     org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
498         setLevel(org.apache.log4j.Level.ERROR);
499     org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
500         setLevel(org.apache.log4j.Level.ERROR);
501 
502 
503     this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
504       true, null, null, hosts, null);
505 
506     // Set this just-started cluster as our filesystem.
507     FileSystem fs = this.dfsCluster.getFileSystem();
508     FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
509 
510     // Wait for the cluster to be totally up
511     this.dfsCluster.waitClusterUp();
512 
513     //reset the test directory for test file system
514     dataTestDirOnTestFS = null;
515 
516     return this.dfsCluster;
517   }
518 
519 
520   public MiniDFSCluster startMiniDFSCluster(int servers, final  String racks[], String hosts[])
521       throws Exception {
522     createDirsAndSetProperties();
523     this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
524         true, null, racks, hosts, null);
525 
526     // Set this just-started cluster as our filesystem.
527     FileSystem fs = this.dfsCluster.getFileSystem();
528     FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
529 
530     // Wait for the cluster to be totally up
531     this.dfsCluster.waitClusterUp();
532 
533     //reset the test directory for test file system
534     dataTestDirOnTestFS = null;
535 
536     return this.dfsCluster;
537   }
538 
539   public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
540     createDirsAndSetProperties();
541     dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
542         null, null, null);
543     return dfsCluster;
544   }
545 
546   /** This is used before starting HDFS and map-reduce mini-clusters */
547   private void createDirsAndSetProperties() throws IOException {
548     setupClusterTestDir();
549     System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
550     createDirAndSetProperty("cache_data", "test.cache.data");
551     createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
552     hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
553     createDirAndSetProperty("mapred_local", "mapred.local.dir");
554     createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
555     enableShortCircuit();
556 
557     Path root = getDataTestDirOnTestFS("hadoop");
558     conf.set(MapreduceTestingShim.getMROutputDirProp(),
559       new Path(root, "mapred-output-dir").toString());
560     conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
561     conf.set("mapreduce.jobtracker.staging.root.dir",
562       new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
563     conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
564   }
565 
566 
567   /**
568    *  Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
569    *  This allows specifying this parameter on the command line.
570    *  If not set, the default is false.
571    */
572   public boolean isReadShortCircuitOn(){
573     final String propName = "hbase.tests.use.shortcircuit.reads";
574     String readOnProp = System.getProperty(propName);
575     if (readOnProp != null){
576       return  Boolean.parseBoolean(readOnProp);
577     } else {
578       return conf.getBoolean(propName, false);
579     }
580   }
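  // Illustrative examples (hypothetical invocations, not from this file): the
  // property can be passed on the command line, e.g.
  //   mvn test -Dhbase.tests.use.shortcircuit.reads=true
  // or set on the configuration before the cluster is started:
  //   util.getConfiguration().setBoolean("hbase.tests.use.shortcircuit.reads", true);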
581 
582   /** Enable the short circuit read, unless configured differently.
583    * Set both HBase and HDFS settings, including skipping the hdfs checksum checks.
584    */
585   private void enableShortCircuit() {
586     if (isReadShortCircuitOn()) {
587       String curUser = System.getProperty("user.name");
588       LOG.info("read short circuit is ON for user " + curUser);
589       // read short circuit, for hdfs
590       conf.set("dfs.block.local-path-access.user", curUser);
591       // read short circuit, for hbase
592       conf.setBoolean("dfs.client.read.shortcircuit", true);
593       // Skip checking checksum, for the hdfs client and the datanode
594       conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
595     } else {
596       LOG.info("read short circuit is OFF");
597     }
598   }
599 
600   private String createDirAndSetProperty(final String relPath, String property) {
601     String path = getDataTestDir(relPath).toString();
602     System.setProperty(property, path);
603     conf.set(property, path);
604     new File(path).mkdirs();
605     LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
606     return path;
607   }
608 
609   /**
610    * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
611    * or does nothing.
612    * @throws IOException
613    */
614   public void shutdownMiniDFSCluster() throws IOException {
615     if (this.dfsCluster != null) {
616       // The below throws an exception per dn, AsynchronousCloseException.
617       this.dfsCluster.shutdown();
618       dfsCluster = null;
619       dataTestDirOnTestFS = null;
620       FSUtils.setFsDefault(this.conf, new Path("file:///"));
621     }
622   }
623 
624   /**
625    * Call this if you only want a zk cluster.
626    * @see #startMiniCluster() if you want zk + dfs + hbase mini cluster.
627    * @throws Exception
628    * @see #shutdownMiniZKCluster()
629    * @return zk cluster started.
630    */
631   public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
632     return startMiniZKCluster(1);
633   }
634 
635   /**
636    * Call this if you only want a zk cluster.
637    * @param zooKeeperServerNum
638    * @see #startMiniCluster() if you want zk + dfs + hbase mini cluster.
639    * @throws Exception
640    * @see #shutdownMiniZKCluster()
641    * @return zk cluster started.
642    */
643   public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
644       throws Exception {
645     setupClusterTestDir();
646     return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
647   }
648 
649   private MiniZooKeeperCluster startMiniZKCluster(final File dir)
650     throws Exception {
651     return startMiniZKCluster(dir,1);
652   }
653 
654   /**
655    * Start a mini ZK cluster. If the property "test.hbase.zookeeper.property.clientPort" is set
656    *  the port mentioned is used as the default port for ZooKeeper.
657    */
658   private MiniZooKeeperCluster startMiniZKCluster(final File dir,
659       int zooKeeperServerNum)
660   throws Exception {
661     if (this.zkCluster != null) {
662       throw new IOException("Cluster already running at " + dir);
663     }
664     this.passedZkCluster = false;
665     this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
666     final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
667     if (defPort > 0){
668       // If there is a port in the config file, we use it.
669       this.zkCluster.setDefaultClientPort(defPort);
670     }
671     int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
672     this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
673       Integer.toString(clientPort));
674     return this.zkCluster;
675   }
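  // Illustrative example (hypothetical port, not from this file): pin the mini
  // ZK cluster to a fixed client port by setting the property on the
  // configuration before startup:
  //   util.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort", 21818);
  //   util.startMiniZKCluster();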
676 
677   /**
678    * Shuts down zk cluster created by call to {@link #startMiniZKCluster(File)}
679    * or does nothing.
680    * @throws IOException
681    * @see #startMiniZKCluster()
682    */
683   public void shutdownMiniZKCluster() throws IOException {
684     if (this.zkCluster != null) {
685       this.zkCluster.shutdown();
686       this.zkCluster = null;
687     }
688   }
689 
690   /**
691    * Start up a minicluster of hbase, dfs, and zookeeper.
692    * @throws Exception
693    * @return Mini hbase cluster instance created.
694    * @see {@link #shutdownMiniCluster()}
695    */
696   public MiniHBaseCluster startMiniCluster() throws Exception {
697     return startMiniCluster(1, 1);
698   }
699 
700   /**
701    * Start up a minicluster of hbase, dfs, and zookeeper (an already-running zk cluster is reused).
702    * Modifies Configuration.  Homes the cluster data directory under a random
703    * subdirectory in a directory under System property test.build.data.
704    * Directory is cleaned up on exit.
705    * @param numSlaves Number of slaves to start up.  We'll start this many
706    * datanodes and regionservers.  If numSlaves is > 1, then make sure
707    * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
708    * you will get bind errors.
709    * @throws Exception
710    * @see {@link #shutdownMiniCluster()}
711    * @return Mini hbase cluster instance created.
712    */
713   public MiniHBaseCluster startMiniCluster(final int numSlaves)
714   throws Exception {
715     return startMiniCluster(1, numSlaves);
716   }
717 
718 
719   /**
720    * Start up a mini HBase cluster.
721    * @throws Exception
722    * @see {@link #shutdownMiniCluster()}
723    * @return Mini hbase cluster instance created.
724    */
725   public MiniHBaseCluster startMiniCluster(final int numMasters,
726     final int numSlaves)
727   throws Exception {
728     return startMiniCluster(numMasters, numSlaves, null);
729   }
730 
731   /**
732    * Start up a minicluster of hbase, dfs, and zookeeper (an already-running zk cluster is reused).
733    * Modifies Configuration.  Homes the cluster data directory under a random
734    * subdirectory in a directory under System property test.build.data.
735    * Directory is cleaned up on exit.
736    * @param numMasters Number of masters to start up.  We'll start this many
737    * hbase masters.  If numMasters > 1, you can find the active/primary master
738    * with {@link MiniHBaseCluster#getMaster()}.
739    * @param numSlaves Number of slaves to start up.  We'll start this many
740    * regionservers. If dataNodeHosts == null, this also indicates the number of
741    * datanodes to start. If dataNodeHosts != null, the number of datanodes is
742    * based on dataNodeHosts.length.
743    * If numSlaves is > 1, then make sure
744    * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
745    * you will get bind errors.
746    * @param dataNodeHosts hostnames of DNs to run on.
747    * This is useful if you want to run datanodes on distinct hosts for things
748    * like HDFS block location verification.
749    * If you start MiniDFSCluster without host names,
750    * all instances of the datanodes will have the same host name.
751    * @throws Exception
752    * @see {@link #shutdownMiniCluster()}
753    * @return Mini hbase cluster instance created.
754    */
755   public MiniHBaseCluster startMiniCluster(final int numMasters,
756       final int numSlaves, final String[] dataNodeHosts) throws Exception {
757     return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
758   }
759 
760   /**
761    * Same as {@link #startMiniCluster(int, int)}, but with custom number of datanodes.
762    * @param numDataNodes Number of data nodes.
763    */
764   public MiniHBaseCluster startMiniCluster(final int numMasters,
765       final int numSlaves, final int numDataNodes) throws Exception {
766     return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
767   }
768 
769   /**
770    * Start up a minicluster of hbase, dfs, and zookeeper (an already-running zk cluster is reused).
771    * Modifies Configuration.  Homes the cluster data directory under a random
772    * subdirectory in a directory under System property test.build.data.
773    * Directory is cleaned up on exit.
774    * @param numMasters Number of masters to start up.  We'll start this many
775    * hbase masters.  If numMasters > 1, you can find the active/primary master
776    * with {@link MiniHBaseCluster#getMaster()}.
777    * @param numSlaves Number of slaves to start up.  We'll start this many
778    * regionservers. If dataNodeHosts == null, this also indicates the number of
779    * datanodes to start. If dataNodeHosts != null, the number of datanodes is
780    * based on dataNodeHosts.length.
781    * If numSlaves is > 1, then make sure
782    * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
783    * you will get bind errors.
784    * @param dataNodeHosts hostnames of DNs to run on.
785    * This is useful if you want to run datanodes on distinct hosts for things
786    * like HDFS block location verification.
787    * If you start MiniDFSCluster without host names,
788    * all instances of the datanodes will have the same host name.
789    * @param masterClass The class to use as HMaster, or null for default
790    * @param regionserverClass The class to use as HRegionServer, or null for
791    * default
792    * @throws Exception
793    * @see {@link #shutdownMiniCluster()}
794    * @return Mini hbase cluster instance created.
795    */
796   public MiniHBaseCluster startMiniCluster(final int numMasters,
797       final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
798       Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
799           throws Exception {
800     return startMiniCluster(
801         numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
802   }
803 
804   /**
805    * Same as {@link #startMiniCluster(int, int, String[], Class, Class)}, but with custom
806    * number of datanodes.
807    * @param numDataNodes Number of data nodes.
808    */
809   public MiniHBaseCluster startMiniCluster(final int numMasters,
810     final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
811     Class<? extends HMaster> masterClass,
812     Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
813   throws Exception {
814     if (dataNodeHosts != null && dataNodeHosts.length != 0) {
815       numDataNodes = dataNodeHosts.length;
816     }
817 
818     LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
819         numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");
820 
821     // If we already put up a cluster, fail.
822     if (miniClusterRunning) {
823       throw new IllegalStateException("A mini-cluster is already running");
824     }
825     miniClusterRunning = true;
826 
827     setupClusterTestDir();
828     System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());
829 
830     // Bring up mini dfs cluster. This spews a bunch of warnings about missing
831     // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
832     startMiniDFSCluster(numDataNodes, dataNodeHosts);
833 
834     // Start up a zk cluster.
835     if (this.zkCluster == null) {
836       startMiniZKCluster(clusterTestDir);
837     }
838 
839     // Start the MiniHBaseCluster
840     return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
841   }
842 
843   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
844       throws IOException, InterruptedException{
845     return startMiniHBaseCluster(numMasters, numSlaves, null, null);
846   }
847 
848   /**
849    * Starts up mini hbase cluster.  Usually used after call to
850    * {@link #startMiniCluster(int, int)} when doing stepped startup of clusters.
851    * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
852    * @param numMasters
853    * @param numSlaves
854    * @return Reference to the hbase mini hbase cluster.
855    * @throws IOException
856    * @throws InterruptedException
857    * @see {@link #startMiniCluster()}
858    */
859   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
860         final int numSlaves, Class<? extends HMaster> masterClass,
861         Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
862   throws IOException, InterruptedException {
863     // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
864     createRootDir();
865 
866     // These settings will make the master wait until this exact number of
867     // region servers are connected.
868     if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
869       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
870     }
871     if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
872       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
873     }
874 
875     Configuration c = new Configuration(this.conf);
876     this.hbaseCluster =
877         new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
878     // Don't leave here till we've done a successful scan of the hbase:meta
879     HTable t = new HTable(c, TableName.META_TABLE_NAME);
880     ResultScanner s = t.getScanner(new Scan());
881     while (s.next() != null) {
882       continue;
883     }
884     s.close();
885     t.close();
886 
887     getHBaseAdmin(); // create immediately the hbaseAdmin
888     LOG.info("Minicluster is up");
889     return (MiniHBaseCluster)this.hbaseCluster;
890   }
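  /**
   * Illustrative sketch only (a hypothetical helper, not part of the original
   * class): a stepped startup equivalent to {@link #startMiniCluster()}, for
   * tests that need a hook between bringing up dfs/zk and hbase.
   */
  private MiniHBaseCluster exampleSteppedStartup() throws Exception {
    startMiniDFSCluster(1);              // dfs first; it becomes the default filesystem
    startMiniZKCluster();                // then a single-node zookeeper ensemble
    return startMiniHBaseCluster(1, 1);  // finally one master and one regionserver
  }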
891 
892   /**
893    * Starts the hbase cluster up again after shutting it down previously in a
894    * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
895    * @param servers number of region servers
896    * @throws IOException
897    */
898   public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
899     this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
900     // Don't leave here till we've done a successful scan of the hbase:meta
901     HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
902     ResultScanner s = t.getScanner(new Scan());
903     while (s.next() != null) {
904       // do nothing
905     }
906     LOG.info("HBase has been restarted");
907     s.close();
908     t.close();
909   }
910 
911   /**
912    * @return Current mini hbase cluster. Only has something in it after a call
913    * to {@link #startMiniCluster()}.
914    * @see #startMiniCluster()
915    */
916   public MiniHBaseCluster getMiniHBaseCluster() {
917     if (this.hbaseCluster instanceof MiniHBaseCluster) {
918       return (MiniHBaseCluster)this.hbaseCluster;
919     }
920     throw new RuntimeException(hbaseCluster + " not an instance of " +
921                                MiniHBaseCluster.class.getName());
922   }
923 
924   /**
925    * Stops mini hbase, zk, and hdfs clusters.
926    * @throws IOException
927    * @see {@link #startMiniCluster(int)}
928    */
929   public void shutdownMiniCluster() throws Exception {
930     LOG.info("Shutting down minicluster");
931     shutdownMiniHBaseCluster();
932     if (!this.passedZkCluster){
933       shutdownMiniZKCluster();
934     }
935     shutdownMiniDFSCluster();
936 
937     cleanupTestDir();
938     miniClusterRunning = false;
939     LOG.info("Minicluster is down");
940   }
941 
942   /**
943    * Shutdown HBase mini cluster.  Does not shutdown zk or dfs if running.
944    * @throws IOException
945    */
946   public void shutdownMiniHBaseCluster() throws IOException {
947     if (hbaseAdmin != null) {
948       hbaseAdmin.close0();
949       hbaseAdmin = null;
950     }
951 
952     if (zooKeeperWatcher != null) {
953       zooKeeperWatcher.close();
954       zooKeeperWatcher = null;
955     }
956 
957     // unset the configuration for MIN and MAX RS to start
958     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
959     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
960     if (this.hbaseCluster != null) {
961       this.hbaseCluster.shutdown();
962       // Wait till hbase is down before going on to shutdown zk.
963       this.hbaseCluster.waitUntilShutDown();
964       this.hbaseCluster = null;
965     }
966   }
967 
968   /**
969    * Returns the path to the default root dir the minicluster uses.
970    * Note: this does not cause the root dir to be created.
971    * @return Fully qualified path for the default hbase root dir
972    * @throws IOException
973    */
974   public Path getDefaultRootDirPath() throws IOException {
975     FileSystem fs = FileSystem.get(this.conf);
976     return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
977   }
978 
979   /**
980    * Creates an hbase rootdir in user home directory.  Also creates hbase
981    * version file.  Normally you won't make use of this method.  Root hbasedir
982    * is created for you as part of mini cluster startup.  You'd only use this
983    * method if you were doing manual operation.
984    * @return Fully qualified path to hbase root dir
985    * @throws IOException
986    */
987   public Path createRootDir() throws IOException {
988     FileSystem fs = FileSystem.get(this.conf);
989     Path hbaseRootdir = getDefaultRootDirPath();
990     FSUtils.setRootDir(this.conf, hbaseRootdir);
991     fs.mkdirs(hbaseRootdir);
992     FSUtils.setVersion(fs, hbaseRootdir);
993     return hbaseRootdir;
994   }
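  // Illustrative example (hypothetical usage, not from this file): for tests
  // that exercise filesystem-level code paths without a full minicluster:
  //   Path hbaseRootdir = util.createRootDir();  // sets hbase.rootdir and writes the version file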
995 
996   /**
997    * Flushes all caches in the mini hbase cluster
998    * @throws IOException
999    */
1000   public void flush() throws IOException {
1001     getMiniHBaseCluster().flushcache();
1002   }
1003 
1004   /**
1005    * Flushes all caches in the mini hbase cluster
1006    * @throws IOException
1007    */
1008   public void flush(TableName tableName) throws IOException {
1009     getMiniHBaseCluster().flushcache(tableName);
1010   }
1011 
1012   /**
1013    * Compact all regions in the mini hbase cluster
1014    * @throws IOException
1015    */
1016   public void compact(boolean major) throws IOException {
1017     getMiniHBaseCluster().compact(major);
1018   }
1019 
1020   /**
1021    * Compact all of a table's regions in the mini hbase cluster
1022    * @throws IOException
1023    */
1024   public void compact(TableName tableName, boolean major) throws IOException {
1025     getMiniHBaseCluster().compact(tableName, major);
1026   }
1027 
1028   /**
1029    * Create a table.
1030    * @param tableName
1031    * @param family
1032    * @return An HTable instance for the created table.
1033    * @throws IOException
1034    */
1035   public HTable createTable(String tableName, String family)
1036   throws IOException{
1037     return createTable(TableName.valueOf(tableName), new String[]{family});
1038   }
1039 
1040   /**
1041    * Create a table.
1042    * @param tableName
1043    * @param family
1044    * @return An HTable instance for the created table.
1045    * @throws IOException
1046    */
1047   public HTable createTable(byte[] tableName, byte[] family)
1048   throws IOException{
1049     return createTable(TableName.valueOf(tableName), new byte[][]{family});
1050   }
1051 
1052   /**
1053    * Create a table.
1054    * @param tableName
1055    * @param families
1056    * @return An HTable instance for the created table.
1057    * @throws IOException
1058    */
1059   public HTable createTable(TableName tableName, String[] families)
1060   throws IOException {
1061     List<byte[]> fams = new ArrayList<byte[]>(families.length);
1062     for (String family : families) {
1063       fams.add(Bytes.toBytes(family));
1064     }
1065     return createTable(tableName, fams.toArray(new byte[0][]));
1066   }
1067 
1068   /**
1069    * Create a table.
1070    * @param tableName
1071    * @param family
1072    * @return An HTable instance for the created table.
1073    * @throws IOException
1074    */
1075   public HTable createTable(TableName tableName, byte[] family)
1076   throws IOException{
1077     return createTable(tableName, new byte[][]{family});
1078   }
1079 
1080 
1081   /**
1082    * Create a table.
1083    * @param tableName
1084    * @param families
1085    * @return An HTable instance for the created table.
1086    * @throws IOException
1087    */
1088   public HTable createTable(byte[] tableName, byte[][] families)
1089   throws IOException {
1090     return createTable(tableName, families,
1091         new Configuration(getConfiguration()));
1092   }
1093 
1094   /**
1095    * Create a table.
1096    * @param tableName
1097    * @param families
1098    * @return An HTable instance for the created table.
1099    * @throws IOException
1100    */
1101   public HTable createTable(TableName tableName, byte[][] families)
1102   throws IOException {
1103     return createTable(tableName, families,
1104         new Configuration(getConfiguration()));
1105   }
1106 
1107   public HTable createTable(byte[] tableName, byte[][] families,
1108       int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
1109     return createTable(TableName.valueOf(tableName), families, numVersions,
1110         startKey, endKey, numRegions);
1111   }
1112 
1113   public HTable createTable(String tableName, byte[][] families,
1114       int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
1115     return createTable(TableName.valueOf(tableName), families, numVersions,
1116         startKey, endKey, numRegions);
1117   }
1118 
1119   public HTable createTable(TableName tableName, byte[][] families,
1120       int numVersions, byte[] startKey, byte[] endKey, int numRegions)
1121   throws IOException{
1122     HTableDescriptor desc = new HTableDescriptor(tableName);
1123     for (byte[] family : families) {
1124       HColumnDescriptor hcd = new HColumnDescriptor(family)
1125           .setMaxVersions(numVersions);
1126       desc.addFamily(hcd);
1127     }
1128     getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
1129     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1130     waitUntilAllRegionsAssigned(tableName);
1131     return new HTable(getConfiguration(), tableName);
1132   }
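  /**
   * Illustrative sketch only (hypothetical helper and names, not part of the
   * original class): create a pre-split table with ten regions spanning the
   * key range from "aaa" to "zzz".
   */
  private HTable examplePreSplitTable() throws IOException {
    return createTable(TableName.valueOf("exampleSplits"),
        new byte[][] { Bytes.toBytes("f") },
        3,                     // keep up to three versions per cell
        Bytes.toBytes("aaa"),  // start key of the first non-boundary region
        Bytes.toBytes("zzz"),  // end key of the last non-boundary region
        10);                   // total number of regions
  }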
1133 
1134   /**
1135    * Create a table.
1136    * @param htd
1137    * @param families
1138    * @param c Configuration to use
1139    * @return An HTable instance for the created table.
1140    * @throws IOException
1141    */
1142   public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
1143   throws IOException {
1144     for(byte[] family : families) {
1145       HColumnDescriptor hcd = new HColumnDescriptor(family);
1146       // Disable blooms (they are on by default as of 0.95) but we disable them here because
1147       // tests have hard coded counts of what to expect in block cache, etc., and blooms being
1148       // on is interfering.
1149       hcd.setBloomFilterType(BloomType.NONE);
1150       htd.addFamily(hcd);
1151     }
1152     getHBaseAdmin().createTable(htd);
1153     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1154     waitUntilAllRegionsAssigned(htd.getTableName());
1155     return new HTable(c, htd.getTableName());
1156   }
1157 
1158   /**
1159    * Create a table.
1160    * @param tableName
1161    * @param families
1162    * @param c Configuration to use
1163    * @return An HTable instance for the created table.
1164    * @throws IOException
1165    */
1166   public HTable createTable(TableName tableName, byte[][] families,
1167       final Configuration c)
1168   throws IOException {
1169     return createTable(new HTableDescriptor(tableName), families, c);
1170   }
1171 
1172   /**
1173    * Create a table.
1174    * @param tableName
1175    * @param families
1176    * @param c Configuration to use
1177    * @return An HTable instance for the created table.
1178    * @throws IOException
1179    */
1180   public HTable createTable(byte[] tableName, byte[][] families,
1181       final Configuration c)
1182   throws IOException {
1183     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1184     for(byte[] family : families) {
1185       HColumnDescriptor hcd = new HColumnDescriptor(family);
1186       // Disable blooms (they are on by default as of 0.95) but we disable them here because
1187       // tests have hard coded counts of what to expect in block cache, etc., and blooms being
1188       // on is interfering.
1189       hcd.setBloomFilterType(BloomType.NONE);
1190       desc.addFamily(hcd);
1191     }
1192     getHBaseAdmin().createTable(desc);
1193     return new HTable(c, tableName);
1194   }
1195 
1196   /**
1197    * Create a table.
1198    * @param tableName
1199    * @param families
1200    * @param c Configuration to use
1201    * @param numVersions
1202    * @return An HTable instance for the created table.
1203    * @throws IOException
1204    */
1205   public HTable createTable(TableName tableName, byte[][] families,
1206       final Configuration c, int numVersions)
1207   throws IOException {
1208     HTableDescriptor desc = new HTableDescriptor(tableName);
1209     for(byte[] family : families) {
1210       HColumnDescriptor hcd = new HColumnDescriptor(family)
1211           .setMaxVersions(numVersions);
1212       desc.addFamily(hcd);
1213     }
1214     getHBaseAdmin().createTable(desc);
1215     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1216     waitUntilAllRegionsAssigned(tableName);
1217     return new HTable(c, tableName);
1218   }
1219 
1220   /**
1221    * Create a table.
1222    * @param tableName
1223    * @param families
1224    * @param c Configuration to use
1225    * @param numVersions
1226    * @return An HTable instance for the created table.
1227    * @throws IOException
1228    */
1229   public HTable createTable(byte[] tableName, byte[][] families,
1230       final Configuration c, int numVersions)
1231   throws IOException {
1232     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1233     for(byte[] family : families) {
1234       HColumnDescriptor hcd = new HColumnDescriptor(family)
1235           .setMaxVersions(numVersions);
1236       desc.addFamily(hcd);
1237     }
1238     getHBaseAdmin().createTable(desc);
1239     return new HTable(c, tableName);
1240   }
1241 
1242   /**
1243    * Create a table.
1244    * @param tableName
1245    * @param family
1246    * @param numVersions
1247    * @return An HTable instance for the created table.
1248    * @throws IOException
1249    */
1250   public HTable createTable(byte[] tableName, byte[] family, int numVersions)
1251   throws IOException {
1252     return createTable(tableName, new byte[][]{family}, numVersions);
1253   }
1254 
1255   /**
1256    * Create a table.
1257    * @param tableName
1258    * @param family
1259    * @param numVersions
1260    * @return An HTable instance for the created table.
1261    * @throws IOException
1262    */
1263   public HTable createTable(TableName tableName, byte[] family, int numVersions)
1264   throws IOException {
1265     return createTable(tableName, new byte[][]{family}, numVersions);
1266   }
1267 
1268   /**
1269    * Create a table.
1270    * @param tableName
1271    * @param families
1272    * @param numVersions
1273    * @return An HTable instance for the created table.
1274    * @throws IOException
1275    */
1276   public HTable createTable(byte[] tableName, byte[][] families,
1277       int numVersions)
1278   throws IOException {
1279     return createTable(TableName.valueOf(tableName), families, numVersions);
1280   }
1281 
1282   /**
1283    * Create a table.
1284    * @param tableName
1285    * @param families
1286    * @param numVersions
1287    * @return An HTable instance for the created table.
1288    * @throws IOException
1289    */
1290   public HTable createTable(TableName tableName, byte[][] families,
1291       int numVersions)
1292   throws IOException {
1293     HTableDescriptor desc = new HTableDescriptor(tableName);
1294     for (byte[] family : families) {
1295       HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1296       desc.addFamily(hcd);
1297     }
1298     getHBaseAdmin().createTable(desc);
1299     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1300     waitUntilAllRegionsAssigned(tableName);
1301     return new HTable(new Configuration(getConfiguration()), tableName);
1302   }
1303 
1304   /**
1305    * Create a table.
1306    * @param tableName
1307    * @param families
1308    * @param numVersions
1309    * @return An HTable instance for the created table.
1310    * @throws IOException
1311    */
1312   public HTable createTable(byte[] tableName, byte[][] families,
1313     int numVersions, int blockSize) throws IOException {
1314     return createTable(TableName.valueOf(tableName),
1315         families, numVersions, blockSize);
1316   }
1317 
1318   /**
1319    * Create a table.
1320    * @param tableName
1321    * @param families
1322    * @param numVersions
1323    * @return An HTable instance for the created table.
1324    * @throws IOException
1325    */
1326   public HTable createTable(TableName tableName, byte[][] families,
1327     int numVersions, int blockSize) throws IOException {
1328     HTableDescriptor desc = new HTableDescriptor(tableName);
1329     for (byte[] family : families) {
1330       HColumnDescriptor hcd = new HColumnDescriptor(family)
1331           .setMaxVersions(numVersions)
1332           .setBlocksize(blockSize);
1333       desc.addFamily(hcd);
1334     }
1335     getHBaseAdmin().createTable(desc);
1336     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1337     waitUntilAllRegionsAssigned(tableName);
1338     return new HTable(new Configuration(getConfiguration()), tableName);
1339   }
1340 
1341   /**
1342    * Create a table.
1343    * @param tableName
1344    * @param families
1345    * @param numVersions
1346    * @return An HTable instance for the created table.
1347    * @throws IOException
1348    */
1349   public HTable createTable(byte[] tableName, byte[][] families,
1350       int[] numVersions)
1351   throws IOException {
1352     return createTable(TableName.valueOf(tableName), families, numVersions);
1353   }
1354 
1355   /**
1356    * Create a table.
1357    * @param tableName
1358    * @param families
1359    * @param numVersions
1360    * @return An HTable instance for the created table.
1361    * @throws IOException
1362    */
1363   public HTable createTable(TableName tableName, byte[][] families,
1364       int[] numVersions)
1365   throws IOException {
1366     HTableDescriptor desc = new HTableDescriptor(tableName);
1367     int i = 0;
1368     for (byte[] family : families) {
1369       HColumnDescriptor hcd = new HColumnDescriptor(family)
1370           .setMaxVersions(numVersions[i]);
1371       desc.addFamily(hcd);
1372       i++;
1373     }
1374     getHBaseAdmin().createTable(desc);
1375     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1376     waitUntilAllRegionsAssigned(tableName);
1377     return new HTable(new Configuration(getConfiguration()), tableName);
1378   }
1379 
1380   /**
1381    * Create a table.
1382    * @param tableName
1383    * @param family
1384    * @param splitRows
1385    * @return An HTable instance for the created table.
1386    * @throws IOException
1387    */
1388   public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
1389     throws IOException{
1390     return createTable(TableName.valueOf(tableName), family, splitRows);
1391   }
1392 
  /**
   * Create a table.
   * @param tableName the table name
   * @param family the single column family to create
   * @param splitRows the split points for the initial regions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
1401   public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
1402       throws IOException {
1403     HTableDescriptor desc = new HTableDescriptor(tableName);
1404     HColumnDescriptor hcd = new HColumnDescriptor(family);
1405     desc.addFamily(hcd);
1406     getHBaseAdmin().createTable(desc, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1408     waitUntilAllRegionsAssigned(tableName);
1409     return new HTable(getConfiguration(), tableName);
1410   }
1411 
  /**
   * Create a table.
   * @param tableName the table name
   * @param families the column families to create
   * @param splitRows the split points for the initial regions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
1420   public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
1421       throws IOException {
1422     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1423     for(byte[] family:families) {
1424       HColumnDescriptor hcd = new HColumnDescriptor(family);
1425       desc.addFamily(hcd);
1426     }
1427     getHBaseAdmin().createTable(desc, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1429     waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
1430     return new HTable(getConfiguration(), tableName);
1431   }
1432 
1433   /**
1434    * Drop an existing table
1435    * @param tableName existing table
1436    */
1437   public void deleteTable(String tableName) throws IOException {
1438     deleteTable(TableName.valueOf(tableName));
1439   }
1440 
1441   /**
1442    * Drop an existing table
1443    * @param tableName existing table
1444    */
1445   public void deleteTable(byte[] tableName) throws IOException {
1446     deleteTable(TableName.valueOf(tableName));
1447   }
1448 
1449   /**
1450    * Drop an existing table
1451    * @param tableName existing table
1452    */
1453   public void deleteTable(TableName tableName) throws IOException {
1454     try {
1455       getHBaseAdmin().disableTable(tableName);
1456     } catch (TableNotEnabledException e) {
1457       LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1458     }
1459     getHBaseAdmin().deleteTable(tableName);
1460   }
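  // Typical teardown sketch (hypothetical test code). deleteTable disables the
  // table first when needed, so callers don't have to:
  //
  //   @After
  //   public void tearDown() throws Exception {
  //     util.deleteTable(TableName.valueOf("demoTable"));
  //   }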
1461 
1462   // ==========================================================================
1463   // Canned table and table descriptor creation
1464   // TODO replace HBaseTestCase
1465   
1466   public final static byte [] fam1 = Bytes.toBytes("colfamily11");
1467   public final static byte [] fam2 = Bytes.toBytes("colfamily21");
1468   public final static byte [] fam3 = Bytes.toBytes("colfamily31");
1469   public static final byte[][] COLUMNS = {fam1, fam2, fam3};
1470   private static final int MAXVERSIONS = 3;
1471   
1472   public static final char FIRST_CHAR = 'a';
1473   public static final char LAST_CHAR = 'z';
1474   public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
1475   public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1476 
  /**
   * Create a table descriptor for a table of name <code>name</code>, using
   * {@link #COLUMNS} for families.
   * @param name Name to give table.
   * @param minVersions Minimum versions to keep per column family.
   * @param versions How many versions to allow per column.
   * @param ttl Time to live, in seconds.
   * @param keepDeleted Whether to retain deleted cells.
   * @return Table descriptor.
   */
1484   public HTableDescriptor createTableDescriptor(final String name,
1485       final int minVersions, final int versions, final int ttl, boolean keepDeleted) {
1486     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
1487     for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1488       htd.addFamily(new HColumnDescriptor(cfName)
1489           .setMinVersions(minVersions)
1490           .setMaxVersions(versions)
1491           .setKeepDeletedCells(keepDeleted)
1492           .setBlockCacheEnabled(false)
1493           .setTimeToLive(ttl)
1494       );
1495     }
1496     return htd;
1497   }
1498 
  /**
   * Create a table descriptor for a table of name <code>name</code>, using
   * {@link #COLUMNS} for families and default values for versions and TTL.
   * @param name Name to give table.
   * @return Table descriptor.
   */
1505   public HTableDescriptor createTableDescriptor(final String name) {
1506     return createTableDescriptor(name,  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1507         MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1508   }
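  // Sketch of pairing a canned descriptor with a local region (names are
  // illustrative; the descriptor uses fam1/fam2/fam3 as families):
  //
  //   HTableDescriptor htd = util.createTableDescriptor("testLocalRegion");
  //   HRegion region = util.createLocalHRegion(htd,
  //       HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
  //   try {
  //     // exercise the region directly, without a cluster
  //   } finally {
  //     HRegion.closeHRegion(region);
  //   }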
1509 
  /**
   * Create an HRegion that writes to the local tmp dirs
   * @param desc the table descriptor
   * @param startKey the region start key
   * @param endKey the region end key
   * @return the created HRegion
   * @throws IOException
   */
1518   public HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey,
1519       byte [] endKey)
1520   throws IOException {
1521     HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
1522     return createLocalHRegion(hri, desc);
1523   }
1524 
  /**
   * Create an HRegion that writes to the local tmp dirs
   * @param info the region info
   * @param desc the table descriptor
   * @return the created HRegion
   * @throws IOException
   */
1532   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
1533     return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
1534   }
1535 
1536   /**
1537    * Create an HRegion that writes to the local tmp dirs with specified hlog
1538    * @param info regioninfo
1539    * @param desc table descriptor
1540    * @param hlog hlog for this region.
1541    * @return created hregion
1542    * @throws IOException
1543    */
1544   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog) throws IOException {
1545     return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
1546   }
1547 
1548   
  /**
   * @param tableName the table name
   * @param startKey the region start key
   * @param stopKey the region stop key
   * @param callingMethod name of the calling method (currently unused)
   * @param conf configuration to use (currently unused)
   * @param isReadOnly whether the table should be read-only
   * @param durability the durability setting for the table
   * @param hlog the hlog for this region
   * @param families the column families to create
   * @throws IOException
   * @return A region on which you must call
   *         {@link HRegion#closeHRegion(HRegion)} when done.
   */
1561   public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
1562       String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
1563       HLog hlog, byte[]... families) throws IOException {
1564     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
1565     htd.setReadOnly(isReadOnly);
1566     for (byte[] family : families) {
1567       HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Keep all versions so tests can see every write.
      hcd.setMaxVersions(Integer.MAX_VALUE);
1570       htd.addFamily(hcd);
1571     }
1572     htd.setDurability(durability);
1573     HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
1574     return createLocalHRegion(info, htd, hlog);
1575   }
  // ==========================================================================
1578 
  /**
   * Truncate an existing table: remove all of its rows, keeping the table itself.
   * @param tableName existing table
   * @return HTable for the truncated table
   * @throws IOException
   */
1585   public HTable truncateTable(byte[] tableName) throws IOException {
1586     return truncateTable(TableName.valueOf(tableName));
1587   }
1588 
  /**
   * Truncate an existing table: remove all of its rows, keeping the table itself.
   * @param tableName existing table
   * @return HTable for the truncated table
   * @throws IOException
   */
1595   public HTable truncateTable(TableName tableName) throws IOException {
1596     HTable table = new HTable(getConfiguration(), tableName);
1597     Scan scan = new Scan();
1598     ResultScanner resScan = table.getScanner(scan);
1599     for(Result res : resScan) {
1600       Delete del = new Delete(res.getRow());
1601       table.delete(del);
1602     }
    resScan.close();
1605     return table;
1606   }
1607 
1608   /**
1609    * Load table with rows from 'aaa' to 'zzz'.
1610    * @param t Table
1611    * @param f Family
1612    * @return Count of rows loaded.
1613    * @throws IOException
1614    */
1615   public int loadTable(final HTable t, final byte[] f) throws IOException {
1616     return loadTable(t, new byte[][] {f});
1617   }
1618 
1619   /**
1620    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1621    * @param t Table
1622    * @param f Array of Families to load
1623    * @return Count of rows loaded.
1624    * @throws IOException
1625    */
1626   public int loadTable(final HTable t, final byte[][] f) throws IOException {
1627     return loadTable(t, f, null);
1628   }
1629 
1630   /**
1631    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1632    * @param t Table
1633    * @param f Array of Families to load
1634    * @param value the values of the cells. If null is passed, the row key is used as value
1635    * @return Count of rows loaded.
1636    * @throws IOException
1637    */
1638   public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
1639     t.setAutoFlush(false);
1640     int rowCount = 0;
1641     for (byte[] row : HBaseTestingUtility.ROWS) {
1642       Put put = new Put(row);
1643       for (int i = 0; i < f.length; i++) {
1644         put.add(f[i], null, value != null ? value : row);
1645       }
1646       t.put(put);
1647       rowCount++;
1648     }
1649     t.flushCommits();
1650     return rowCount;
1651   }
1652 
  /** Tracks and validates table rows
   * generated with {@link HBaseTestingUtility#loadTable(HTable, byte[])}
   */
1656   public static class SeenRowTracker {
1657     int dim = 'z' - 'a' + 1;
1658     int[][][] seenRows = new int[dim][dim][dim]; //count of how many times the row is seen
1659     byte[] startRow;
1660     byte[] stopRow;
1661 
1662     public SeenRowTracker(byte[] startRow, byte[] stopRow) {
1663       this.startRow = startRow;
1664       this.stopRow = stopRow;
1665     }
1666 
1667     void reset() {
1668       for (byte[] row : ROWS) {
1669         seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
1670       }
1671     }
1672 
1673     int i(byte b) {
1674       return b - 'a';
1675     }
1676 
1677     public void addRow(byte[] row) {
1678       seenRows[i(row[0])][i(row[1])][i(row[2])]++;
1679     }
1680 
    /** Validate that all the rows between startRow and stopRow are seen exactly once, and
     * that all other rows are seen zero times
     */
1684     public void validate() {
1685       for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1686         for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1687           for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1688             int count = seenRows[i(b1)][i(b2)][i(b3)];
1689             int expectedCount = 0;
1690             if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
1691                 && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
1692               expectedCount = 1;
1693             }
1694             if (count != expectedCount) {
1695               String row = new String(new byte[] {b1,b2,b3});
1696               throw new RuntimeException("Row:" + row + " has a seen count of " + count + " instead of " + expectedCount);
1697             }
1698           }
1699         }
1700       }
1701     }
1702   }
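  // Sketch of using SeenRowTracker together with loadTable to verify a scan
  // sees each loaded row exactly once (table/family names are illustrative):
  //
  //   util.loadTable(t, Bytes.toBytes("f1"));
  //   SeenRowTracker tracker =
  //       new SeenRowTracker(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"));
  //   for (Result r : t.getScanner(new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("zzz")))) {
  //     tracker.addRow(r.getRow());
  //   }
  //   tracker.validate(); // throws RuntimeException on any mismatch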
1703 
1704   public int loadRegion(final HRegion r, final byte[] f) throws IOException {
1705     return loadRegion(r, f, false);
1706   }
1707 
1708   /**
1709    * Load region with rows from 'aaa' to 'zzz'.
1710    * @param r Region
1711    * @param f Family
1712    * @param flush flush the cache if true
1713    * @return Count of rows loaded.
1714    * @throws IOException
1715    */
1716   public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
1717   throws IOException {
1718     byte[] k = new byte[3];
1719     int rowCount = 0;
1720     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1721       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1722         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1723           k[0] = b1;
1724           k[1] = b2;
1725           k[2] = b3;
1726           Put put = new Put(k);
1727           put.add(f, null, k);
1728           if (r.getLog() == null) put.setDurability(Durability.SKIP_WAL);
1729 
1730           int preRowCount = rowCount;
1731           int pause = 10;
1732           int maxPause = 1000;
1733           while (rowCount == preRowCount) {
1734             try {
1735               r.put(put);
1736               rowCount++;
1737             } catch (RegionTooBusyException e) {
1738               pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
1739               Threads.sleep(pause);
1740             }
1741           }
1742         }
1743       }
1744       if (flush) {
1745         r.flushcache();
1746       }
1747     }
1748     return rowCount;
1749   }
1750 
1751   public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow) throws IOException {
1752     for (int i = startRow; i < endRow; i++) {
1753       byte[] data = Bytes.toBytes(String.valueOf(i));
1754       Put put = new Put(data);
1755       put.add(f, null, data);
1756       t.put(put);
1757     }
1758   }
1759 
1760   /**
1761    * Return the number of rows in the given table.
1762    */
1763   public int countRows(final HTable table) throws IOException {
1764     Scan scan = new Scan();
1765     ResultScanner results = table.getScanner(scan);
1766     int count = 0;
1767     for (@SuppressWarnings("unused") Result res : results) {
1768       count++;
1769     }
1770     results.close();
1771     return count;
1772   }
1773 
1774   public int countRows(final HTable table, final byte[]... families) throws IOException {
1775     Scan scan = new Scan();
1776     for (byte[] family: families) {
1777       scan.addFamily(family);
1778     }
1779     ResultScanner results = table.getScanner(scan);
1780     int count = 0;
1781     for (@SuppressWarnings("unused") Result res : results) {
1782       count++;
1783     }
1784     results.close();
1785     return count;
1786   }
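  // Sketch: loadTable returns the number of rows written, which should match
  // what countRows sees afterwards (hypothetical assertion in a test):
  //
  //   int loaded = util.loadTable(t, Bytes.toBytes("f1"));
  //   assertEquals(loaded, util.countRows(t, Bytes.toBytes("f1")));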
1787 
1788   /**
1789    * Return an md5 digest of the entire contents of a table.
1790    */
1791   public String checksumRows(final HTable table) throws Exception {
1792     Scan scan = new Scan();
1793     ResultScanner results = table.getScanner(scan);
1794     MessageDigest digest = MessageDigest.getInstance("MD5");
1795     for (Result res : results) {
1796       digest.update(res.getRow());
1797     }
1798     results.close();
    // MessageDigest#toString() does not render the digest; return the MD5 bytes as hex.
    return new java.math.BigInteger(1, digest.digest()).toString(16);
1800   }
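  // Sketch: comparing two tables' contents by row-key checksum (illustrative
  // test snippet; assumes both HTables exist and are populated):
  //
  //   String before = util.checksumRows(source);
  //   // ... copy or restore data into target ...
  //   String after = util.checksumRows(target);
  //   assertEquals(before, after);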
1801 
1802   /**
   * Creates many regions, covering the row space "aaa" to "zzz".
1804    *
1805    * @param table  The table to use for the data.
1806    * @param columnFamily  The family to insert the data into.
1807    * @return count of regions created.
1808    * @throws IOException When creating the regions fails.
1809    */
1810   public int createMultiRegions(HTable table, byte[] columnFamily)
1811   throws IOException {
1812     return createMultiRegions(getConfiguration(), table, columnFamily);
1813   }
1814 
1815   /** All the row values for the data loaded by {@link #loadTable(HTable, byte[])} */
1816   public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
1817   static {
1818     int i = 0;
1819     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1820       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1821         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1822           ROWS[i][0] = b1;
1823           ROWS[i][1] = b2;
1824           ROWS[i][2] = b3;
1825           i++;
1826         }
1827       }
1828     }
1829   }
1830 
1831   public static final byte[][] KEYS = {
1832     HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
1833     Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
1834     Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
1835     Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
1836     Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
1837     Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
1838     Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
1839     Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
1840     Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
1841   };
1842 
1843   public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
1844       Bytes.toBytes("bbb"),
1845       Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
1846       Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
1847       Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
1848       Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
1849       Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
1850       Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
1851       Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
1852       Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
1853   };
1854 
1855   /**
   * Creates many regions, covering the row space "aaa" to "zzz".
1857    * @param c Configuration to use.
1858    * @param table  The table to use for the data.
1859    * @param columnFamily  The family to insert the data into.
1860    * @return count of regions created.
1861    * @throws IOException When creating the regions fails.
1862    */
1863   public int createMultiRegions(final Configuration c, final HTable table,
1864       final byte[] columnFamily)
1865   throws IOException {
1866     return createMultiRegions(c, table, columnFamily, KEYS);
1867   }
1868 
  /**
   * Creates the specified number of regions in the specified table.
   * @param c Configuration to use.
   * @param table The table to create regions in.
   * @param family The family to create in the table.
   * @param numRegions The number of regions to create; must be at least 3.
   * @return count of regions created.
   * @throws IOException
   */
1878   public int createMultiRegions(final Configuration c, final HTable table,
1879       final byte [] family, int numRegions)
1880   throws IOException {
1881     if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1882     byte [] startKey = Bytes.toBytes("aaaaa");
1883     byte [] endKey = Bytes.toBytes("zzzzz");
1884     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1885     byte [][] regionStartKeys = new byte[splitKeys.length+1][];
1886     for (int i=0;i<splitKeys.length;i++) {
1887       regionStartKeys[i+1] = splitKeys[i];
1888     }
1889     regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
1890     return createMultiRegions(c, table, family, regionStartKeys);
1891   }
1892 
1893   @SuppressWarnings("deprecation")
1894   public int createMultiRegions(final Configuration c, final HTable table,
1895       final byte[] columnFamily, byte [][] startKeys)
1896   throws IOException {
1897     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
1898     HTable meta = new HTable(c, TableName.META_TABLE_NAME);
1899     HTableDescriptor htd = table.getTableDescriptor();
1900     if(!htd.hasFamily(columnFamily)) {
1901       HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
1902       htd.addFamily(hcd);
1903     }
1904     // remove empty region - this is tricky as the mini cluster during the test
1905     // setup already has the "<tablename>,,123456789" row with an empty start
1906     // and end key. Adding the custom regions below adds those blindly,
1907     // including the new start region from empty to "bbb". lg
1908     List<byte[]> rows = getMetaTableRows(htd.getTableName());
1909     String regionToDeleteInFS = table
1910         .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
1911         .getRegionInfo().getEncodedName();
1912     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
1913     // add custom ones
1914     int count = 0;
1915     for (int i = 0; i < startKeys.length; i++) {
1916       int j = (i + 1) % startKeys.length;
1917       HRegionInfo hri = new HRegionInfo(table.getName(),
1918         startKeys[i], startKeys[j]);
1919       MetaEditor.addRegionToMeta(meta, hri);
1920       newRegions.add(hri);
1921       count++;
1922     }
1923     // see comment above, remove "old" (or previous) single region
1924     for (byte[] row : rows) {
1925       LOG.info("createMultiRegions: deleting meta row -> " +
1926         Bytes.toStringBinary(row));
1927       meta.delete(new Delete(row));
1928     }
1929     // remove the "old" region from FS
    // Build an FS path (always '/'-separated, regardless of the local OS).
    Path tableDir = new Path(getDefaultRootDirPath().toString()
        + Path.SEPARATOR + htd.getTableName()
        + Path.SEPARATOR + regionToDeleteInFS);
    FileSystem.get(c).delete(tableDir, true);
1934     // flush cache of regions
1935     HConnection conn = table.getConnection();
1936     conn.clearRegionCache();
1937     // assign all the new regions IF table is enabled.
1938     HBaseAdmin admin = getHBaseAdmin();
1939     if (admin.isTableEnabled(table.getTableName())) {
1940       for(HRegionInfo hri : newRegions) {
1941         admin.assign(hri.getRegionName());
1942       }
1943     }
1944 
1945     meta.close();
1946 
1947     return count;
1948   }
1949 
1950   /**
1951    * Create rows in hbase:meta for regions of the specified table with the specified
1952    * start keys.  The first startKey should be a 0 length byte array if you
1953    * want to form a proper range of regions.
   * @param conf Configuration to use.
   * @param htd Descriptor of the table the regions belong to.
   * @param startKeys Start keys of the regions to add.
1957    * @return list of region info for regions added to meta
1958    * @throws IOException
1959    */
1960   public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
1961       final HTableDescriptor htd, byte [][] startKeys)
1962   throws IOException {
1963     HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
1964     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
1965     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
1966     // add custom ones
1967     for (int i = 0; i < startKeys.length; i++) {
1968       int j = (i + 1) % startKeys.length;
1969       HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
1970           startKeys[j]);
1971       MetaEditor.addRegionToMeta(meta, hri);
1972       newRegions.add(hri);
1973     }
1974 
1975     meta.close();
1976     return newRegions;
1977   }
1978 
1979   /**
1980    * Returns all rows from the hbase:meta table.
1981    *
1982    * @throws IOException When reading the rows fails.
1983    */
1984   public List<byte[]> getMetaTableRows() throws IOException {
1985     // TODO: Redo using MetaReader class
1986     HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
1987     List<byte[]> rows = new ArrayList<byte[]>();
1988     ResultScanner s = t.getScanner(new Scan());
1989     for (Result result : s) {
1990       LOG.info("getMetaTableRows: row -> " +
1991         Bytes.toStringBinary(result.getRow()));
1992       rows.add(result.getRow());
1993     }
1994     s.close();
1995     t.close();
1996     return rows;
1997   }
1998 
1999   /**
2000    * Returns all rows from the hbase:meta table for a given user table
2001    *
2002    * @throws IOException When reading the rows fails.
2003    */
2004   public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2005     // TODO: Redo using MetaReader.
2006     HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2007     List<byte[]> rows = new ArrayList<byte[]>();
2008     ResultScanner s = t.getScanner(new Scan());
2009     for (Result result : s) {
2010       HRegionInfo info = HRegionInfo.getHRegionInfo(result);
2011       if (info == null) {
2012         LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2013         // TODO figure out what to do for this new hosed case.
2014         continue;
2015       }
2016 
2017       if (info.getTable().equals(tableName)) {
2018         LOG.info("getMetaTableRows: row -> " +
2019             Bytes.toStringBinary(result.getRow()) + info);
2020         rows.add(result.getRow());
2021       }
2022     }
2023     s.close();
2024     t.close();
2025     return rows;
2026   }
2027 
2028   /**
2029    * Tool to get the reference to the region server object that holds the
2030    * region of the specified user table.
2031    * It first searches for the meta rows that contain the region of the
2032    * specified table, then gets the index of that RS, and finally retrieves
2033    * the RS's reference.
2034    * @param tableName user table to lookup in hbase:meta
2035    * @return region server that holds it, null if the row doesn't exist
2036    * @throws IOException
2037    * @throws InterruptedException
2038    */
2039   public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
2040       throws IOException, InterruptedException {
2041     return getRSForFirstRegionInTable(TableName.valueOf(tableName));
2042   }
2043   /**
2044    * Tool to get the reference to the region server object that holds the
2045    * region of the specified user table.
2046    * It first searches for the meta rows that contain the region of the
2047    * specified table, then gets the index of that RS, and finally retrieves
2048    * the RS's reference.
2049    * @param tableName user table to lookup in hbase:meta
2050    * @return region server that holds it, null if the row doesn't exist
2051    * @throws IOException
2052    */
2053   public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2054       throws IOException, InterruptedException {
2055     List<byte[]> metaRows = getMetaTableRows(tableName);
2056     if (metaRows == null || metaRows.isEmpty()) {
2057       return null;
2058     }
2059     LOG.debug("Found " + metaRows.size() + " rows for table " +
2060       tableName);
2061     byte [] firstrow = metaRows.get(0);
2062     LOG.debug("FirstRow=" + Bytes.toString(firstrow));
2063     long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2064       HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2065     int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2066       HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // HBASE_CLIENT_PAUSE is in milliseconds.
    RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MILLISECONDS);
2068     while(retrier.shouldRetry()) {
2069       int index = getMiniHBaseCluster().getServerWith(firstrow);
2070       if (index != -1) {
2071         return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2072       }
2073       // Came back -1.  Region may not be online yet.  Sleep a while.
2074       retrier.sleepUntilNextRetry();
2075     }
2076     return null;
2077   }
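  // Sketch: locating the region server that carries the first region of a
  // table, e.g. to kill it in a recovery test (illustrative only):
  //
  //   HRegionServer rs = util.getRSForFirstRegionInTable(tableName);
  //   if (rs != null) {
  //     rs.abort("test-induced failure");
  //   }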
2078 
2079   /**
   * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>s.
2082    *
2083    * @throws IOException When starting the cluster fails.
2084    */
2085   public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2086     startMiniMapReduceCluster(2);
2087     return mrCluster;
2088   }
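  // Sketch of bracketing a MapReduce test with the mini MR cluster (assumes
  // the HBase mini cluster is already up; job setup is elided):
  //
  //   util.startMiniMapReduceCluster();
  //   try {
  //     // configure and submit the MR job against util.getConfiguration()
  //   } finally {
  //     util.shutdownMiniMapReduceCluster();
  //   }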
2089 
  /**
   * The TaskTracker has a bug where changing the hadoop.log.dir system property
   * will not change its internal static LOG_DIR variable.
   */
2094   private void forceChangeTaskLogDir() {
2095     Field logDirField;
2096     try {
2097       logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2098       logDirField.setAccessible(true);
2099 
2100       Field modifiersField = Field.class.getDeclaredField("modifiers");
2101       modifiersField.setAccessible(true);
2102       modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2103 
2104       logDirField.set(null, new File(hadoopLogDir, "userlogs"));
2105     } catch (SecurityException e) {
2106       throw new RuntimeException(e);
    } catch (NoSuchFieldException e) {
      throw new RuntimeException(e);
2110     } catch (IllegalArgumentException e) {
2111       throw new RuntimeException(e);
2112     } catch (IllegalAccessException e) {
2113       throw new RuntimeException(e);
2114     }
2115   }
2116 
2117   /**
2118    * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
2119    * filesystem.
   * @param servers  The number of <code>TaskTracker</code>s to start.
2121    * @throws IOException When starting the cluster fails.
2122    */
2123   private void startMiniMapReduceCluster(final int servers) throws IOException {
2124     if (mrCluster != null) {
2125       throw new IllegalStateException("MiniMRCluster is already running");
2126     }
2127     LOG.info("Starting mini mapreduce cluster...");
2128     setupClusterTestDir();
2129     createDirsAndSetProperties();
2130 
2131     forceChangeTaskLogDir();
2132 
2133     //// hadoop2 specific settings
    // Tests were failing because this process used 6GB of virtual memory and was getting killed.
    // We raise the usable virtual memory so that processes don't get killed.
2136     conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2137 
2138     // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
2139     // this avoids the problem by disabling speculative task execution in tests.
2140     conf.setBoolean("mapreduce.map.speculative", false);
2141     conf.setBoolean("mapreduce.reduce.speculative", false);
2142     ////
2143 
2144     // Allow the user to override FS URI for this map-reduce cluster to use.
2145     mrCluster = new MiniMRCluster(servers,
2146       FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
2147       null, null, new JobConf(this.conf));
2148     JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2149     if (jobConf == null) {
2150       jobConf = mrCluster.createJobConf();
2151     }
2152 
2153     jobConf.set("mapred.local.dir",
2154       conf.get("mapred.local.dir")); //Hadoop MiniMR overwrites this while it should not
2155     LOG.info("Mini mapreduce cluster started");
2156 
2157     // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
2158     // Our HBase MR jobs need several of these settings in order to properly run.  So we copy the
2159     // necessary config properties here.  YARN-129 required adding a few properties.
2160     conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
2161     // this for mrv2 support; mr1 ignores this
2162     conf.set("mapreduce.framework.name", "yarn");
2163     conf.setBoolean("yarn.is.minicluster", true);
2164     String rmAddress = jobConf.get("yarn.resourcemanager.address");
2165     if (rmAddress != null) {
2166       conf.set("yarn.resourcemanager.address", rmAddress);
2167     }
2168     String historyAddress = jobConf.get("mapreduce.jobhistory.address");
2169     if (historyAddress != null) {
2170       conf.set("mapreduce.jobhistory.address", historyAddress);
2171     }
2172     String schedulerAddress =
2173       jobConf.get("yarn.resourcemanager.scheduler.address");
2174     if (schedulerAddress != null) {
2175       conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2176     }
2177   }
2178 
2179   /**
2180    * Stops the previously started <code>MiniMRCluster</code>.
2181    */
2182   public void shutdownMiniMapReduceCluster() {
2183     LOG.info("Stopping mini mapreduce cluster...");
2184     if (mrCluster != null) {
2185       mrCluster.shutdown();
2186       mrCluster = null;
2187     }
2188     // Restore configuration to point to local jobtracker
2189     conf.set("mapred.job.tracker", "local");
2190     LOG.info("Mini mapreduce cluster stopped");
2191   }
2192 
  /**
   * Create a stubbed-out RegionServerServices, mainly for getting at the FS.
   */
2196   public RegionServerServices createMockRegionServerService() throws IOException { 
2197     return createMockRegionServerService((ServerName)null);
2198   }
2199 
  /**
   * Create a stubbed-out RegionServerServices, mainly for getting at the FS.
   * This version is used by TestTokenAuthentication.
   */
2204   public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
2205     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2206     rss.setFileSystem(getTestFileSystem());
2207     rss.setRpcServer(rpc);
2208     return rss;
2209   }
2210 
  /**
   * Create a stubbed-out RegionServerServices, mainly for getting at the FS.
   * This version is used by TestOpenRegionHandler.
   */
2215   public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2216     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2217     rss.setFileSystem(getTestFileSystem());
2218     return rss;
2219   }
2220 
2221   /**
2222    * Switches the logger for the given class to DEBUG level.
2223    *
2224    * @param clazz  The class for which to switch to debug logging.
2225    */
2226   public void enableDebug(Class<?> clazz) {
2227     Log l = LogFactory.getLog(clazz);
2228     if (l instanceof Log4JLogger) {
2229       ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2230     } else if (l instanceof Jdk14Logger) {
2231       ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2232     }
2233   }
2234 
2235   /**
2236    * Expire the Master's session
2237    * @throws Exception
2238    */
2239   public void expireMasterSession() throws Exception {
2240     HMaster master = getMiniHBaseCluster().getMaster();
2241     expireSession(master.getZooKeeper(), false);
2242   }
2243 
2244   /**
2245    * Expire a region server's session
2246    * @param index which RS
2247    * @throws Exception
2248    */
2249   public void expireRegionServerSession(int index) throws Exception {
2250     HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2251     expireSession(rs.getZooKeeper(), false);
2252     decrementMinRegionServerCount();
2253   }
2254 
2255   private void decrementMinRegionServerCount() {
    // decrement the count for this.conf, for the newly spawned master
2257     // this.hbaseCluster shares this configuration too
2258     decrementMinRegionServerCount(getConfiguration());
2259 
2260     // each master thread keeps a copy of configuration
2261     for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2262       decrementMinRegionServerCount(master.getMaster().getConfiguration());
2263     }
2264   }
2265 
2266   private void decrementMinRegionServerCount(Configuration conf) {
2267     int currentCount = conf.getInt(
2268         ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2269     if (currentCount != -1) {
2270       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
2271           Math.max(currentCount - 1, 1));
2272     }
2273   }
2274 
2275   public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
2276    expireSession(nodeZK, false);
2277   }
2278 
2279   @Deprecated
2280   public void expireSession(ZooKeeperWatcher nodeZK, Server server)
2281     throws Exception {
2282     expireSession(nodeZK, false);
2283   }
2284 
2285   /**
2286    * Expire a ZooKeeper session as recommended in ZooKeeper documentation
2287    * http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A4
2288    * There are issues when doing this:
2289    * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
2290    * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
2291    *
2292    * @param nodeZK - the ZK watcher to expire
2293    * @param checkStatus - true to check if we can create an HTable with the
2294    *                    current configuration.
2295    */
2296   public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
2297     throws Exception {
2298     Configuration c = new Configuration(this.conf);
2299     String quorumServers = ZKConfig.getZKQuorumServersString(c);
2300     ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
2301     byte[] password = zk.getSessionPasswd();
2302     long sessionID = zk.getSessionId();
2303 
    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
    //  so we create a first watcher to be sure that the
    //  event was sent. We expect that if our watcher receives the event,
    //  other watchers on the same machine will get it as well.
    // When we ask to close the connection, ZK does not close it before
    //  we receive all the events, so we don't have to capture the event;
    //  just closing the connection should be enough.
2311     ZooKeeper monitor = new ZooKeeper(quorumServers,
2312       1000, new org.apache.zookeeper.Watcher(){
2313       @Override
2314       public void process(WatchedEvent watchedEvent) {
2315         LOG.info("Monitor ZKW received event="+watchedEvent);
2316       }
2317     } , sessionID, password);
2318 
2319     // Making it expire
2320     ZooKeeper newZK = new ZooKeeper(quorumServers,
2321         1000, EmptyWatcher.instance, sessionID, password);
2322 
    // Ensure that we have a connection to the server before closing down; otherwise
    // the close-session event may be swallowed before we reach the CONNECTING state.
2325     long start = System.currentTimeMillis();
2326     while (newZK.getState() != States.CONNECTED
2327          && System.currentTimeMillis() - start < 1000) {
2328        Thread.sleep(1);
2329     }
2330     newZK.close();
2331     LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
2332 
2333     // Now closing & waiting to be sure that the clients get it.
2334     monitor.close();
2335 
2336     if (checkStatus) {
2337       new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
2338     }
2339   }
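  // Sketch: forcing master failover in a test by expiring the active master's
  // ZooKeeper session (illustrative; a backup master must be running for a
  // new master to take over):
  //
  //   util.expireMasterSession();
  //   util.getMiniHBaseCluster().waitForActiveAndReadyMaster();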
2340 
2341   /**
2342    * Get the Mini HBase cluster.
2343    *
2344    * @return hbase cluster
2345    * @see #getHBaseClusterInterface()
2346    */
2347   public MiniHBaseCluster getHBaseCluster() {
2348     return getMiniHBaseCluster();
2349   }
2350 
2351   /**
2352    * Returns the HBaseCluster instance.
   * <p>The returned object can be any of the subclasses of HBaseCluster, and the
   * tests referring to this should not assume that the cluster is a mini cluster or a
   * distributed one. If the test only works on a mini cluster, then the specific
   * method {@link #getMiniHBaseCluster()} can be used instead w/o the
2357    * need to type-cast.
2358    */
2359   public HBaseCluster getHBaseClusterInterface() {
2360     //implementation note: we should rename this method as #getHBaseCluster(),
2361     //but this would require refactoring 90+ calls.
2362     return hbaseCluster;
2363   }
2364 
2365   /**
   * Returns an HBaseAdmin instance.
   * This instance is shared between HBaseTestingUtility instance users.
   * Closing it has no effect; it will be closed automatically when the
   * cluster shuts down.
2370    *
2371    * @return The HBaseAdmin instance.
2372    * @throws IOException
2373    */
2374   public synchronized HBaseAdmin getHBaseAdmin()
2375   throws IOException {
2376     if (hbaseAdmin == null){
2377       hbaseAdmin = new HBaseAdminForTests(getConfiguration());
2378     }
2379     return hbaseAdmin;
2380   }
2381 
2382   private HBaseAdminForTests hbaseAdmin = null;
2383   private static class HBaseAdminForTests extends HBaseAdmin {
2384     public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
2385         ZooKeeperConnectionException, IOException {
2386       super(c);
2387     }
2388 
2389     @Override
2390     public synchronized void close() throws IOException {
2391       LOG.warn("close() called on HBaseAdmin instance returned from HBaseTestingUtility.getHBaseAdmin()");
2392     }
2393 
2394     private synchronized void close0() throws IOException {
2395       super.close();
2396     }
2397   }
2398 
2399   /**
   * Returns a ZooKeeperWatcher instance.
   * This instance is shared between HBaseTestingUtility instance users.
   * Don't close it; it will be closed automatically when the
   * cluster shuts down.
2404    *
2405    * @return The ZooKeeperWatcher instance.
2406    * @throws IOException
2407    */
2408   public synchronized ZooKeeperWatcher getZooKeeperWatcher()
2409     throws IOException {
2410     if (zooKeeperWatcher == null) {
2411       zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
2412         new Abortable() {
2413         @Override public void abort(String why, Throwable e) {
2414           throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
2415         }
2416         @Override public boolean isAborted() {return false;}
2417       });
2418     }
2419     return zooKeeperWatcher;
2420   }
2421   private ZooKeeperWatcher zooKeeperWatcher;
2422 
2423 
2424 
2425   /**
2426    * Closes the named region.
2427    *
2428    * @param regionName  The region to close.
2429    * @throws IOException
2430    */
2431   public void closeRegion(String regionName) throws IOException {
2432     closeRegion(Bytes.toBytes(regionName));
2433   }
2434 
2435   /**
2436    * Closes the named region.
2437    *
2438    * @param regionName  The region to close.
2439    * @throws IOException
2440    */
2441   public void closeRegion(byte[] regionName) throws IOException {
2442     getHBaseAdmin().closeRegion(regionName, null);
2443   }
2444 
2445   /**
2446    * Closes the region containing the given row.
2447    *
2448    * @param row  The row to find the containing region.
2449    * @param table  The table to find the region.
2450    * @throws IOException
2451    */
2452   public void closeRegionByRow(String row, HTable table) throws IOException {
2453     closeRegionByRow(Bytes.toBytes(row), table);
2454   }
2455 
2456   /**
2457    * Closes the region containing the given row.
2458    *
2459    * @param row  The row to find the containing region.
2460    * @param table  The table to find the region.
2461    * @throws IOException
2462    */
2463   public void closeRegionByRow(byte[] row, HTable table) throws IOException {
2464     HRegionLocation hrl = table.getRegionLocation(row);
2465     closeRegion(hrl.getRegionInfo().getRegionName());
2466   }
2467 
  /**
2469    * Retrieves a splittable region randomly from tableName
2470    *
2471    * @param tableName name of table
2472    * @param maxAttempts maximum number of attempts, unlimited for value of -1
2473    * @return the HRegion chosen, null if none was found within limit of maxAttempts
2474    */
2475   public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
2476     List<HRegion> regions = getHBaseCluster().getRegions(tableName);
2477     int regCount = regions.size();
2478     Set<Integer> attempted = new HashSet<Integer>();
2479     int idx;
2480     int attempts = 0;
2481     do {
2482       regions = getHBaseCluster().getRegions(tableName);
2483       if (regCount != regions.size()) {
2484         // if there was region movement, clear attempted Set
2485         attempted.clear();
2486       }
2487       regCount = regions.size();
      // There is a chance that, before we get the region for the table from an RS, the region
      // may be going through CLOSE.  This can happen when online schema change is enabled.
2490       if (regCount > 0) {
2491         idx = random.nextInt(regCount);
2492         // if we have just tried this region, there is no need to try again
2493         if (attempted.contains(idx))
2494           continue;
2495         try {
2496           regions.get(idx).checkSplit();
2497           return regions.get(idx);
2498         } catch (Exception ex) {
2499           LOG.warn("Caught exception", ex);
2500           attempted.add(idx);
2501         }
2502       }
2503       attempts++;
2504     } while (maxAttempts == -1 || attempts < maxAttempts);
2505     return null;
2506   }
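  // Sketch: picking a splittable region and asking the admin to split it
  // (illustrative; the split itself completes asynchronously):
  //
  //   HRegion region = util.getSplittableRegion(tableName, 10);
  //   if (region != null) {
  //     util.getHBaseAdmin().split(region.getRegionName());
  //   }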
2507 
2508   public MiniZooKeeperCluster getZkCluster() {
2509     return zkCluster;
2510   }
2511 
2512   public void setZkCluster(MiniZooKeeperCluster zkCluster) {
2513     this.passedZkCluster = true;
2514     this.zkCluster = zkCluster;
2515     conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
2516   }
2517 
2518   public MiniDFSCluster getDFSCluster() {
2519     return dfsCluster;
2520   }
2521 
2522   public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
2523     if (dfsCluster != null && dfsCluster.isClusterUp()) {
2524       throw new IOException("DFSCluster is already running! Shut it down first.");
2525     }
2526     this.dfsCluster = cluster;
2527   }
2528 
2529   public FileSystem getTestFileSystem() throws IOException {
2530     return HFileSystem.get(conf);
2531   }
2532 
2533   /**
   * Wait until all regions in a table have been assigned.  Waits the default timeout
   * (30 seconds) before giving up.
2536    * @param table Table to wait on.
2537    * @throws InterruptedException
2538    * @throws IOException
2539    */
2540   public void waitTableAvailable(byte[] table)
2541       throws InterruptedException, IOException {
2542     waitTableAvailable(getHBaseAdmin(), table, 30000);
2543   }
2544 
2545   public void waitTableAvailable(HBaseAdmin admin, byte[] table)
2546       throws InterruptedException, IOException {
2547     waitTableAvailable(admin, table, 30000);
2548   }
2549 
2550   /**
2551    * Wait until all regions in a table have been assigned
2552    * @param table Table to wait on.
2553    * @param timeoutMillis Timeout.
2554    * @throws InterruptedException
2555    * @throws IOException
2556    */
2557   public void waitTableAvailable(byte[] table, long timeoutMillis)
2558   throws InterruptedException, IOException {
2559     waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
2560   }
2561 
2562   public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
2563   throws InterruptedException, IOException {
2564     long startWait = System.currentTimeMillis();
2565     while (!admin.isTableAvailable(table)) {
2566       assertTrue("Timed out waiting for table to become available " +
2567         Bytes.toStringBinary(table),
2568         System.currentTimeMillis() - startWait < timeoutMillis);
2569       Thread.sleep(200);
2570     }
    // Finally make sure all regions are fully open and online out on the cluster. Regions may be
    // in the hbase:meta table and almost open on all regionservers, but setting the region
    // online in the regionserver is the very last thing done and can take a little while to happen.
    // Below we do a get.  The get will retry on a NotServingRegionException or a
    // RegionOpeningException.  It is crass but when done all will be online.
2576     try {
2577       Canary.sniff(admin, TableName.valueOf(table));
2578     } catch (Exception e) {
2579       throw new IOException(e);
2580     }
2581   }
2582 
  /**
   * Waits for a table to be 'enabled'.  Enabled means that the table is set as 'enabled' and
   * its regions have all been assigned.  Will timeout after the default period (30 seconds).
   * @see #waitTableAvailable(byte[])
   * @param table Table to wait on.
   * @throws InterruptedException
   * @throws IOException
   */
2592   public void waitTableEnabled(byte[] table)
2593       throws InterruptedException, IOException {
2594     waitTableEnabled(getHBaseAdmin(), table, 30000);
2595   }
2596 
2597   public void waitTableEnabled(HBaseAdmin admin, byte[] table)
2598       throws InterruptedException, IOException {
2599     waitTableEnabled(admin, table, 30000);
2600   }
2601 
2602   /**
2603    * Waits for a table to be 'enabled'.  Enabled means that table is set as 'enabled' and the
2604    * regions have been all assigned.
2605    * @see #waitTableAvailable(byte[])
2606    * @param table Table to wait on.
2607    * @param timeoutMillis Time to wait on it being marked enabled.
2608    * @throws InterruptedException
2609    * @throws IOException
2610    */
2611   public void waitTableEnabled(byte[] table, long timeoutMillis)
2612   throws InterruptedException, IOException {
2613     waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
2614   }
2615 
2616   public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
2617   throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    waitTableAvailable(admin, table, timeoutMillis);
    // Budget the timeout across both the 'available' and 'enabled' checks.
    while (!admin.isTableEnabled(table)) {
      assertTrue("Timed out waiting for table to become available and enabled " +
         Bytes.toStringBinary(table),
         System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
2627     LOG.debug("REMOVE AFTER table=" + Bytes.toString(table) + ", isTableAvailable=" +
2628         admin.isTableAvailable(table) +
2629         ", isTableEnabled=" + admin.isTableEnabled(table));
2630   }
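  // Sketch: a common pattern after re-enabling a table via the admin
  // (illustrative; 30 seconds is the default used by the no-timeout variants):
  //
  //   admin.enableTable(tableName);
  //   util.waitTableEnabled(Bytes.toBytes("demoTable"), 30000);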
2631 
2632   /**
2633    * Make sure that at least the specified number of region servers
2634    * are running
2635    * @param num minimum number of region servers that should be running
2636    * @return true if we started some servers
2637    * @throws IOException
2638    */
2639   public boolean ensureSomeRegionServersAvailable(final int num)
2640       throws IOException {
2641     boolean startedServer = false;
2642     MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
2643     for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
2644       LOG.info("Started new server=" + hbaseCluster.startRegionServer());
2645       startedServer = true;
2646     }
2647 
2648     return startedServer;
2649   }
2650 
2651 
2652   /**
2653    * Make sure that at least the specified number of region servers
2654    * are running. We don't count the ones that are currently stopping or are
2655    * stopped.
2656    * @param num minimum number of region servers that should be running
2657    * @return true if we started some servers
2658    * @throws IOException
2659    */
2660   public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
2661     throws IOException {
2662     boolean startedServer = ensureSomeRegionServersAvailable(num);
2663 
2664     int nonStoppedServers = 0;
2665     for (JVMClusterUtil.RegionServerThread rst :
2666       getMiniHBaseCluster().getRegionServerThreads()) {
2667 
2668       HRegionServer hrs = rst.getRegionServer();
2669       if (hrs.isStopping() || hrs.isStopped()) {
2670         LOG.info("A region server is stopped or stopping:"+hrs);
2671       } else {
2672         nonStoppedServers++;
2673       }
2674     }
2675     for (int i=nonStoppedServers; i<num; ++i) {
2676       LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
2677       startedServer = true;
2678     }
2679     return startedServer;
2680   }
2681 
2682 
2683   /**
   * This method clones the passed <code>c</code> configuration, setting a new
   * user into the clone.  Use it when getting new instances of FileSystem.  Only
   * works for DistributedFileSystem.
2687    * @param c Initial configuration
2688    * @param differentiatingSuffix Suffix to differentiate this user from others.
2689    * @return A new configuration instance with a different user set into it.
2690    * @throws IOException
2691    */
2692   public static User getDifferentUser(final Configuration c,
2693     final String differentiatingSuffix)
2694   throws IOException {
2695     FileSystem currentfs = FileSystem.get(c);
2696     if (!(currentfs instanceof DistributedFileSystem)) {
2697       return User.getCurrent();
2698     }
2699     // Else distributed filesystem.  Make a new instance per daemon.  Below
2700     // code is taken from the AppendTestUtil over in hdfs.
2701     String username = User.getCurrent().getName() +
2702       differentiatingSuffix;
2703     User user = User.createUserForTesting(c, username,
2704         new String[]{"supergroup"});
2705     return user;
2706   }
2707 
2708   /**
   * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append, it's hard-coded to 5 and
   * makes tests linger.  Here is the exception you'll see:
2711    * <pre>
2712    * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/hlog.1276627923013 block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683 failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
2713    * </pre>
2714    * @param stream A DFSClient.DFSOutputStream.
2715    * @param max
2716    * @throws NoSuchFieldException
2717    * @throws SecurityException
2718    * @throws IllegalAccessException
2719    * @throws IllegalArgumentException
2720    */
2721   public static void setMaxRecoveryErrorCount(final OutputStream stream,
2722       final int max) {
2723     try {
2724       Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
2725       for (Class<?> clazz: clazzes) {
2726         String className = clazz.getSimpleName();
2727         if (className.equals("DFSOutputStream")) {
2728           if (clazz.isInstance(stream)) {
2729             Field maxRecoveryErrorCountField =
2730               stream.getClass().getDeclaredField("maxRecoveryErrorCount");
2731             maxRecoveryErrorCountField.setAccessible(true);
2732             maxRecoveryErrorCountField.setInt(stream, max);
2733             break;
2734           }
2735         }
2736       }
2737     } catch (Exception e) {
2738       LOG.info("Could not set max recovery field", e);
2739     }
2740   }
2741 
2742   /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, up to 60 seconds. This means all regions have been deployed, the
   * master has been informed, and hbase:meta has been updated with each region's
   * deployed server.
2747    * @param tableName the table name
2748    * @throws IOException
2749    */
2750   public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
2751     waitUntilAllRegionsAssigned(tableName, 60000);
2752   }
2753 
2754   /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, or until timeout.  This means all regions have been deployed, the
   * master has been informed, and hbase:meta has been updated with each region's
   * deployed server.
2759    * @param tableName the table name
2760    * @param timeout timeout, in milliseconds
2761    * @throws IOException
2762    */
2763   public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
2764       throws IOException {
2765     final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
2766     try {
2767       waitFor(timeout, 200, true, new Predicate<IOException>() {
2768         @Override
2769         public boolean evaluate() throws IOException {
2770           boolean allRegionsAssigned = true;
2771           Scan scan = new Scan();
2772           scan.addFamily(HConstants.CATALOG_FAMILY);
2773           ResultScanner s = meta.getScanner(scan);
2774           try {
2775             Result r;
2776             while ((r = s.next()) != null) {
2777               byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
2778               HRegionInfo info = HRegionInfo.parseFromOrNull(b);
2779               if (info != null && info.getTable().equals(tableName)) {
2780                 b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
2781                 allRegionsAssigned &= (b != null);
2782               }
2783             }
2784           } finally {
2785             s.close();
2786           }
2787           return allRegionsAssigned;
2788         }
2789       });
2790     } finally {
2791       meta.close();
2792     }
2793   }
2794 
  /**
   * Do a small get/scan against one store. This is required because a store
   * has no actual methods for querying itself, and relies on StoreScanner.
   */
2799   public static List<Cell> getFromStoreFile(HStore store,
2800                                                 Get get) throws IOException {
2801     MultiVersionConsistencyControl.resetThreadReadPoint();
2802     Scan scan = new Scan(get);
2803     InternalScanner scanner = (InternalScanner) store.getScanner(scan,
2804         scan.getFamilyMap().get(store.getFamily().getName()));
2805 
2806     List<Cell> result = new ArrayList<Cell>();
2807     scanner.next(result);
2808     if (!result.isEmpty()) {
2809       // verify that we are on the row we want:
2810       Cell kv = result.get(0);
2811       if (!CellUtil.matchingRow(kv, get.getRow())) {
2812         result.clear();
2813       }
2814     }
2815     scanner.close();
2816     return result;
2817   }
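
  // A minimal usage sketch (hypothetical fragment): read a row back through a
  // store's scanner to confirm a write actually reached that store.
  public static void exampleReadRowFromStore(final HStore store, final byte[] row)
      throws IOException {
    List<Cell> cells = getFromStoreFile(store, new Get(row));
    assertTrue("expected the row to be present in the store", !cells.isEmpty());
  }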
2818 
2819   /**
2820    * Create region split keys between startKey and endKey.
2821    *
2822    * @param startKey the start key of the range to split
2823    * @param endKey the end key of the range to split
2824    * @param numRegions the number of regions to be created; it has to be greater than 3.
2825    * @return the region split start keys; the first entry is the empty byte array
2826    */
2827   public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
2828     assertTrue(numRegions > 3);
2829     byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
2830     byte[][] result = new byte[tmpSplitKeys.length + 1][];
2831     for (int i = 0; i < tmpSplitKeys.length; i++) {
2832       result[i + 1] = tmpSplitKeys[i];
2833     }
2834     result[0] = HConstants.EMPTY_BYTE_ARRAY;
2835     return result;
2836   }
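
  // A minimal usage sketch: for numRegions = 5 this yields five start keys,
  // the first being empty so the first region is unbounded below.
  public void exampleSplitStartKeys() {
    byte[][] starts =
        getRegionSplitStartKeys(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 5);
    assertTrue(starts.length == 5);
    assertTrue(starts[0].length == 0); // HConstants.EMPTY_BYTE_ARRAY
  }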
2837 
2838   /**
2839    * Do a small get/scan against one store. This is required because the store
2840    * has no methods of querying itself directly and relies on StoreScanner.
2841    */
2842   public static List<Cell> getFromStoreFile(HStore store,
2843                                                 byte [] row,
2844                                                 NavigableSet<byte[]> columns
2845                                                 ) throws IOException {
2846     Get get = new Get(row);
2847     Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
2848     s.put(store.getFamily().getName(), columns);
2849 
2850     return getFromStoreFile(store,get);
2851   }
2852 
2853   /**
2854    * Gets a ZooKeeperWatcher.
2855    * @param TEST_UTIL the testing utility whose configuration is used to connect
2856    */
2857   public static ZooKeeperWatcher getZooKeeperWatcher(
2858       HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
2859       IOException {
2860     ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
2861         "unittest", new Abortable() {
2862           boolean aborted = false;
2863 
2864           @Override
2865           public void abort(String why, Throwable e) {
2866             aborted = true;
2867             throw new RuntimeException("Fatal ZK error, why=" + why, e);
2868           }
2869 
2870           @Override
2871           public boolean isAborted() {
2872             return aborted;
2873           }
2874         });
2875     return zkw;
2876   }
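
  // A minimal usage sketch (hypothetical fragment): obtain a watcher whose
  // Abortable fails the test on any fatal ZK error, and close it when done.
  public static void exampleWithWatcher(final HBaseTestingUtility util)
      throws ZooKeeperConnectionException, IOException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(util);
    try {
      // ... inspect or mutate znodes through zkw here ...
    } finally {
      zkw.close();
    }
  }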
2877 
2878   /**
2879    * Creates a znode with OPENED state.
2880    * @param TEST_UTIL the testing utility to get a ZooKeeperWatcher from
2881    * @param region the region whose assignment znode is transitioned to OPENED
2882    * @param serverName the server the region is marked as opened on
2883    * @return the ZooKeeperWatcher that was used
2884    * @throws IOException
2885    * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
2886    * @throws KeeperException
2887    * @throws NodeExistsException
2888    */
2889   public static ZooKeeperWatcher createAndForceNodeToOpenedState(
2890       HBaseTestingUtility TEST_UTIL, HRegion region,
2891       ServerName serverName) throws ZooKeeperConnectionException,
2892       IOException, KeeperException, NodeExistsException {
2893     ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
2894     ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
2895     int version = ZKAssign.transitionNodeOpening(zkw, region
2896         .getRegionInfo(), serverName);
2897     ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
2898         version);
2899     return zkw;
2900   }
2901 
2902   public static void assertKVListsEqual(String additionalMsg,
2903       final List<? extends Cell> expected,
2904       final List<? extends Cell> actual) {
2905     final int eLen = expected.size();
2906     final int aLen = actual.size();
2907     final int minLen = Math.min(eLen, aLen);
2908 
2909     int i;
2910     for (i = 0; i < minLen
2911         && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
2912         ++i) {}
2913 
2914     if (additionalMsg == null) {
2915       additionalMsg = "";
2916     }
2917     if (!additionalMsg.isEmpty()) {
2918       additionalMsg = ". " + additionalMsg;
2919     }
2920 
2921     if (eLen != aLen || i != minLen) {
2922       throw new AssertionError(
2923           "Expected and actual KV arrays differ at position " + i + ": " +
2924           safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
2925           safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
2926     }
2927   }
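
  // A minimal usage sketch (hypothetical fragment): compare a scan's output
  // against the expected cells; on mismatch the assertion message names the
  // first differing position and both list lengths.
  public static void exampleCompareScanOutput(final List<Cell> expected,
      final List<Cell> actual) {
    assertKVListsEqual("checking scan output", expected, actual);
  }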
2928 
2929   private static <T> String safeGetAsStr(List<T> lst, int i) {
2930     if (0 <= i && i < lst.size()) {
2931       return lst.get(i).toString();
2932     } else {
2933       return "<out_of_range>";
2934     }
2935   }
2936 
2937   public String getClusterKey() {
2938     return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
2939         + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
2940         + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
2941             HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
2942   }
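
  // A minimal usage sketch: the returned key has the form
  // "<quorum>:<clientPort>:<znodeParent>", e.g. "localhost:2181:/hbase".
  public void exampleLogClusterKey() {
    LOG.info("Cluster key of this mini cluster: " + getClusterKey());
  }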
2943 
2944   /** Creates a random table with the given parameters */
2945   public HTable createRandomTable(String tableName,
2946       final Collection<String> families,
2947       final int maxVersions,
2948       final int numColsPerRow,
2949       final int numFlushes,
2950       final int numRegions,
2951       final int numRowsPerFlush)
2952       throws IOException, InterruptedException {
2953 
2954     LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
2955         " regions, " + numFlushes + " storefiles per region, " +
2956         numRowsPerFlush + " rows per flush, maxVersions=" +  maxVersions +
2957         "\n");
2958 
2959     final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
2960     final int numCF = families.size();
2961     final byte[][] cfBytes = new byte[numCF][];
2962     {
2963       int cfIndex = 0;
2964       for (String cf : families) {
2965         cfBytes[cfIndex++] = Bytes.toBytes(cf);
2966       }
2967     }
2968 
2969     final int actualStartKey = 0;
2970     final int actualEndKey = Integer.MAX_VALUE;
2971     final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
2972     final int splitStartKey = actualStartKey + keysPerRegion;
2973     final int splitEndKey = actualEndKey - keysPerRegion;
2974     final String keyFormat = "%08x";
2975     final HTable table = createTable(tableName, cfBytes,
2976         maxVersions,
2977         Bytes.toBytes(String.format(keyFormat, splitStartKey)),
2978         Bytes.toBytes(String.format(keyFormat, splitEndKey)),
2979         numRegions);
2980 
2981     if (hbaseCluster != null) {
2982       getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
2983     }
2984 
2985     for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
2986       for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
2987         final byte[] row = Bytes.toBytes(String.format(keyFormat,
2988             actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
2989 
2990         Put put = new Put(row);
2991         Delete del = new Delete(row);
2992         for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
2993           final byte[] cf = cfBytes[rand.nextInt(numCF)];
2994           final long ts = rand.nextInt();
2995           final byte[] qual = Bytes.toBytes("col" + iCol);
2996           if (rand.nextBoolean()) {
2997             final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
2998                 "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
2999                 ts + "_random_" + rand.nextLong());
3000             put.add(cf, qual, ts, value);
3001           } else if (rand.nextDouble() < 0.8) {
3002             del.deleteColumn(cf, qual, ts);
3003           } else {
3004             del.deleteColumns(cf, qual, ts);
3005           }
3006         }
3007 
3008         if (!put.isEmpty()) {
3009           table.put(put);
3010         }
3011 
3012         if (!del.isEmpty()) {
3013           table.delete(del);
3014         }
3015       }
3016       LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
3017       table.flushCommits();
3018       if (hbaseCluster != null) {
3019         getMiniHBaseCluster().flushcache(table.getName());
3020       }
3021     }
3022 
3023     return table;
3024   }
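
  // A minimal usage sketch (hypothetical fragment, parameter values arbitrary):
  // build a small randomized table with two column families.
  public HTable exampleRandomTable() throws IOException, InterruptedException {
    return createRandomTable("exampleRandomTable", Arrays.asList("cf1", "cf2"),
        3,    // maxVersions
        10,   // numColsPerRow
        2,    // numFlushes
        5,    // numRegions
        100); // numRowsPerFlush
  }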
3025 
3026   private static final int MIN_RANDOM_PORT = 0xc000;
3027   private static final int MAX_RANDOM_PORT = 0xfffe;
3028   private static Random random = new Random();
3029 
3030   /**
3031    * Returns a random port. These ports cannot be registered with IANA and are
3032    * intended for dynamic allocation (see http://bit.ly/dynports).
3033    */
3034   public static int randomPort() {
3035     return MIN_RANDOM_PORT
3036         + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
3037   }
3038 
3039   /**
3040    * Returns a random free port and marks that port as taken. Not thread-safe. Expected to be
3041    * called from single-threaded test setup code.
3042    */
3043   public static int randomFreePort() {
3044     int port = 0;
3045     do {
3046       port = randomPort();
3047       if (takenRandomPorts.contains(port)) {
3048         continue;
3049       }
3050       takenRandomPorts.add(port);
3051 
3052       try {
3053         ServerSocket sock = new ServerSocket(port);
3054         sock.close();
3055       } catch (IOException ex) {
3056         port = 0;
3057       }
3058     } while (port == 0);
3059     return port;
3060   }
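
  // A minimal usage sketch: give a second cluster's servers non-clashing ports
  // so two mini clusters can coexist on one host.
  public static void exampleRandomizePorts(final Configuration c) {
    c.setInt(HConstants.MASTER_INFO_PORT, randomFreePort());
    c.setInt(HConstants.REGIONSERVER_INFO_PORT, randomFreePort());
  }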
3061 
3062 
3063   public static String randomMultiCastAddress() {
3064     return "226.1.1." + random.nextInt(254);
3065   }
3066 
3067 
3068 
3069   public static void waitForHostPort(String host, int port)
3070       throws IOException {
3071     final int maxTimeMs = 10000;
3072     final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3073     IOException savedException = null;
3074     LOG.info("Waiting for server at " + host + ":" + port);
3075     for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3076       try {
3077         Socket sock = new Socket(InetAddress.getByName(host), port);
3078         sock.close();
3079         savedException = null;
3080         LOG.info("Server at " + host + ":" + port + " is available");
3081         break;
3082       } catch (UnknownHostException e) {
3083         throw new IOException("Failed to look up " + host, e);
3084       } catch (IOException e) {
3085         savedException = e;
3086       }
3087       Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3088     }
3089 
3090     if (savedException != null) {
3091       throw savedException;
3092     }
3093   }
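
  // A minimal usage sketch (hypothetical fragment): block until a server
  // started out-of-process is accepting connections before driving it.
  public static void exampleAwaitLocalServer(final int port) throws IOException {
    waitForHostPort("127.0.0.1", port); // retries for up to 10 seconds
  }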
3094 
3095   /**
3096    * Creates a pre-split table for load testing. If the table already exists,
3097    * logs a warning and continues.
3098    * @return the number of regions the table was split into
3099    */
3100   public static int createPreSplitLoadTestTable(Configuration conf,
3101       TableName tableName, byte[] columnFamily, Algorithm compression,
3102       DataBlockEncoding dataBlockEncoding) throws IOException {
3103     HTableDescriptor desc = new HTableDescriptor(tableName);
3104     HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
3105     hcd.setDataBlockEncoding(dataBlockEncoding);
3106     hcd.setCompressionType(compression);
3107     return createPreSplitLoadTestTable(conf, desc, hcd);
3108   }
3109 
3110   /**
3111    * Creates a pre-split table for load testing. If the table already exists,
3112    * logs a warning and continues.
3113    * @return the number of regions the table was split into
3114    */
3115   public static int createPreSplitLoadTestTable(Configuration conf,
3116       HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
3117     if (!desc.hasFamily(hcd.getName())) {
3118       desc.addFamily(hcd);
3119     }
3120 
3121     int totalNumberOfRegions = 0;
3122     HBaseAdmin admin = new HBaseAdmin(conf);
3123     try {
3124       // Create a table with pre-split regions.
3125       // The number of splits is set as:
3126       //    (region servers * regions per region server).
3127       int numberOfServers = admin.getClusterStatus().getServers().size();
3128       if (numberOfServers == 0) {
3129         throw new IllegalStateException("No live regionservers");
3130       }
3131 
3132       totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
3133       LOG.info("Number of live regionservers: " + numberOfServers + ", " +
3134           "pre-splitting table into " + totalNumberOfRegions + " regions " +
3135           "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");
3136 
3137       byte[][] splits = new RegionSplitter.HexStringSplit().split(
3138           totalNumberOfRegions);
3139 
3140       admin.createTable(desc, splits);
3141     } catch (MasterNotRunningException e) {
3142       LOG.error("Master not running", e);
3143       throw new IOException(e);
3144     } catch (TableExistsException e) {
3145       LOG.warn("Table " + desc.getTableName() +
3146           " already exists, continuing");
3147     } finally {
3148       admin.close();
3149     }
3150     return totalNumberOfRegions;
3151   }
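
  // A minimal usage sketch (hypothetical fragment): create the load-test table
  // with no compression or block encoding; the split count scales with the
  // number of live region servers.
  public static int exampleCreateLoadTestTable(final Configuration c) throws IOException {
    return createPreSplitLoadTestTable(c, TableName.valueOf("example_loadtest"),
        Bytes.toBytes("example_cf"), Algorithm.NONE, DataBlockEncoding.NONE);
  }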
3152 
3153   public static int getMetaRSPort(Configuration conf) throws IOException {
3154     HTable table = new HTable(conf, TableName.META_TABLE_NAME);
3155     HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
3156     table.close();
3157     return hloc.getPort();
3158   }
3159 
3160   /**
3161    * Due to an async racing issue, a region may not be in the online region
3162    * list of a region server yet after the assignment znode is deleted and
3163    * the new assignment is recorded in the master. This method retries until
3164    * the region shows up on the given server or the timeout elapses.
3165    */
3166   public void assertRegionOnServer(
3167       final HRegionInfo hri, final ServerName server,
3168       final long timeout) throws IOException, InterruptedException {
3169     long timeoutTime = System.currentTimeMillis() + timeout;
3170     while (true) {
3171       List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3172       if (regions.contains(hri)) return;
3173       long now = System.currentTimeMillis();
3174       if (now > timeoutTime) break;
3175       Thread.sleep(10);
3176     }
3177     fail("Could not find region " + hri.getRegionNameAsString()
3178       + " on server " + server);
3179   }
3180 
3181   /**
3182    * Check to make sure the region is open on the specified
3183    * region server, but not on any other one.
3184    */
3185   public void assertRegionOnlyOnServer(
3186       final HRegionInfo hri, final ServerName server,
3187       final long timeout) throws IOException, InterruptedException {
3188     long timeoutTime = System.currentTimeMillis() + timeout;
3189     while (true) {
3190       List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3191       if (regions.contains(hri)) {
3192         List<JVMClusterUtil.RegionServerThread> rsThreads =
3193           getHBaseCluster().getLiveRegionServerThreads();
3194         for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
3195           HRegionServer rs = rsThread.getRegionServer();
3196           if (server.equals(rs.getServerName())) {
3197             continue;
3198           }
3199           Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
3200           for (HRegion r: hrs) {
3201             assertTrue("Region should not be double assigned",
3202               r.getRegionId() != hri.getRegionId());
3203           }
3204         }
3205         return; // good, we are happy
3206       }
3207       long now = System.currentTimeMillis();
3208       if (now > timeoutTime) break;
3209       Thread.sleep(10);
3210     }
3211     fail("Could not find region " + hri.getRegionNameAsString()
3212       + " on server " + server);
3213   }
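
  // A minimal usage sketch (hypothetical fragment): after moving a region,
  // check that it landed on the target server and nowhere else; the 5 second
  // budget is arbitrary.
  public void exampleAssertRegionMoved(final HRegionInfo hri, final ServerName target)
      throws IOException, InterruptedException {
    assertRegionOnlyOnServer(hri, target, 5000);
  }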
3214 
3215   public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
3216       throws IOException {
3217     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
3218     htd.addFamily(hcd);
3219     HRegionInfo info =
3220         new HRegionInfo(TableName.valueOf(tableName), null, null, false);
3221     HRegion region =
3222         HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
3223     return region;
3224   }
3225 
3226   public void setFileSystemURI(String fsURI) {
3227     FS_URI = fsURI;
3228   }
3229 
3230   /**
3231    * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
3232    */
3233   public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
3234       throws E {
3235     return Waiter.waitFor(this.conf, timeout, predicate);
3236   }
3237 
3238   /**
3239    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
3240    */
3241   public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
3242       throws E {
3243     return Waiter.waitFor(this.conf, timeout, interval, predicate);
3244   }
3245 
3246   /**
3247    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
3248    */
3249   public <E extends Exception> long waitFor(long timeout, long interval,
3250       boolean failIfTimeout, Predicate<E> predicate) throws E {
3251     return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
3252   }
3253 
3254   /**
3255    * Returns a {@link Predicate} for checking that there are no regions in transition in the master
3256    */
3257   public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
3258     return new Waiter.Predicate<Exception>() {
3259       @Override
3260       public boolean evaluate() throws Exception {
3261         final RegionStates regionStates = getMiniHBaseCluster().getMaster()
3262             .getAssignmentManager().getRegionStates();
3263         return !regionStates.isRegionsInTransition();
3264       }
3265     };
3266   }
3267 
3268   /**
3269    * Returns a {@link Predicate} for checking that a table is enabled
3270    */
3271   public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
3272     return new Waiter.Predicate<Exception>() {
3273       @Override
3274       public boolean evaluate() throws Exception {
3275         return getHBaseAdmin().isTableEnabled(tableName);
3276       }
3277     };
3278   }
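
  // A minimal usage sketch (hypothetical fragment): combine the waitFor
  // wrappers with the canned predicates to reach a steady state before
  // asserting on region locations.
  public void exampleWaitForSteadyState(final TableName tableName) throws Exception {
    waitFor(60000, predicateNoRegionsInTransition());
    waitFor(30000, 200, predicateTableEnabled(tableName));
  }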
3279 }