/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.tool.Canary;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;

/**
 * Facility for testing HBase. Replacement for
 * old HBaseTestCase and HBaseClusterTestCase functionality.
 * Create an instance and keep it around for the duration of your HBase testing.
 * This class is meant to be your one-stop shop for anything you might need while
 * testing.  Manages one cluster at a time only. The managed cluster can be an
 * in-process {@link MiniHBaseCluster}, or a deployed cluster of type
 * {@link DistributedHBaseCluster}. Not all methods work with the real cluster.
 * Depends on log4j being on the classpath and on
 * hbase-site.xml for logging and test-run configuration.  It does not set
 * logging levels nor make changes to configuration parameters.
 * <p>To preserve test data directories, set the system property
 * "hbase.testing.preserve.testdir" to true.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HBaseTestingUtility extends HBaseCommonTestingUtility {
  private MiniZooKeeperCluster zkCluster = null;

  /**
   * The default number of regions per regionserver when creating a pre-split
   * table.
   */
  private static int DEFAULT_REGIONS_PER_SERVER = 5;

  /**
   * Set if we were passed a zkCluster.  If so, we won't shut down zk as
   * part of general shutdown.
   */
  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** If there is a mini cluster running for this testing utility instance. */
  private boolean miniClusterRunning;

  private String hadoopLogDir;

  /** Directory (a subdirectory of dataTestDir) used by the dfs cluster, if any. */
  private File clusterTestDir = null;

  /** Directory on the test filesystem where we put the data for this instance of
    * HBaseTestingUtility */
  private Path dataTestDirOnTestFS = null;

  /**
   * System property key to get the test directory value.
   * The name is what it is because the mini dfs cluster is hard-coded to put its
   * test data there.  It should NOT be used directly in HBase, as it's a property
   * internal to mini dfs.
   *  @deprecated can be used only with mini dfs
   */
  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  /** Filesystem URI used for map-reduce mini-cluster setup */
  private static String FS_URI;

  /** A set of ports that have been claimed using {@link #randomFreePort()}. */
  private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();

  /** Compression algorithms to use in parameterized JUnit 4 tests */
  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  /** This is for unit tests parameterized with a single boolean. */
  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
      Arrays.asList(new Object[][] {
          { Boolean.FALSE },
          { Boolean.TRUE }
      });

  /** This is for unit tests parameterized with all combinations of two booleans
   * (memstore TS on/off and tags on/off). */
  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
  /** Compression algorithms to use in testing */
  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
      Compression.Algorithm.NONE, Compression.Algorithm.GZ
    };

  /**
   * Create all combinations of Bloom filters and compression algorithms for
   * testing.
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  /**
   * Create all combinations of the memstoreTS and tags flags.
   */
  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

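  // Illustrative only (not part of the original source): one way these combination
  // lists are meant to be consumed is as JUnit 4 parameterized-test data, along
  // these lines (the class and member names here are hypothetical):
  //
  //   @RunWith(Parameterized.class)
  //   public class ExampleCompressionAndBloomTest {
  //     @Parameterized.Parameters
  //     public static Collection<Object[]> parameters() {
  //       return HBaseTestingUtility.BLOOM_AND_COMPRESSION_COMBINATIONS;
  //     }
  //     private final Compression.Algorithm comprAlgo;
  //     private final BloomType bloomType;
  //     public ExampleCompressionAndBloomTest(Compression.Algorithm comprAlgo,
  //         BloomType bloomType) {
  //       this.comprAlgo = comprAlgo;
  //       this.bloomType = bloomType;
  //     }
  //   }
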
  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    super(conf);

    // a hbase checksum verification failure will cause unit tests to fail
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }

  /**
   * Create an HBaseTestingUtility where all tmp files are written to the local test data dir.
   * It is needed to properly base FSUtils.getRootDir so that it drops temp files in the proper
   * test dir.  Use this when you aren't using a mini HDFS cluster.
   * @return HBaseTestingUtility that uses the local fs for temp files.
   */
  public static HBaseTestingUtility createLocalHTU() {
    Configuration c = HBaseConfiguration.create();
    return createLocalHTU(c);
  }

  /**
   * Create an HBaseTestingUtility where all tmp files are written to the local test data dir.
   * It is needed to properly base FSUtils.getRootDir so that it drops temp files in the proper
   * test dir.  Use this when you aren't using a mini HDFS cluster.
   * @param c Configuration (will be modified)
   * @return HBaseTestingUtility that uses the local fs for temp files.
   */
  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    HBaseTestingUtility htu = new HBaseTestingUtility(c);
    String dataTestDir = htu.getDataTestDir().toString();
    htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
    return htu;
  }
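
  // The following sketch is illustrative only and not part of the original class: it shows
  // the intended use of createLocalHTU when no mini HDFS cluster is needed. The method name
  // is made up for the example.
  private static void exampleLocalHTU() throws IOException {
    HBaseTestingUtility htu = HBaseTestingUtility.createLocalHTU();
    // hbase.rootdir now points at the local per-test data dir, so anything that
    // resolves the root dir through the configuration stays inside that dir.
    Path rootDir = FSUtils.getRootDir(htu.getConfiguration());
    LOG.debug("Example local root dir: " + rootDir);
  }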

  /**
   * Returns this class's instance of {@link Configuration}.  Be careful how
   * you use the returned Configuration, since {@link HConnection} instances
   * can be shared.  The Map of HConnections is keyed by the Configuration.  If
   * say, a Connection was being used against a cluster that had been shutdown,
   * see {@link #shutdownMiniCluster()}, then the Connection will no longer
   * be wholesome.  Rather than use the returned Configuration directly, it's
   * usually best to make a copy and use that.  Do
   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
   * @return Instance of Configuration.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  /**
   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
   * Give it a random name so we can have many concurrent tests running if
   * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}
   * System property, as that is what minidfscluster bases
   * its data dir on.  Modifying a System property is not the way to do concurrent
   * instances -- another instance could grab the temporary
   * value unintentionally -- but nothing can be done about it at the moment;
   * single instance only is how the minidfscluster works.
   *
   * We also create the underlying directories for
   *  hadoop.log.dir, mapred.local.dir and hadoop.tmp.dir, set the values
   *  in the conf, and set hadoop.tmp.dir as a system property as well.
   *
   * @return The calculated data test build directory, if newly-created.
   */
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
    //  we want our own value to ensure uniqueness on the same machine
    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
    createSubDir(
      "mapred.local.dir",
      testPath, "mapred-local-dir");

    return testPath;
  }

  private void createSubDirAndSystemProperty(
    String propertyName, Path parent, String subDirName){

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // There is already a value set, so we do nothing but hope
      //  that there will be no conflicts
      LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
        sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)){
       LOG.warn(
         propertyName + " property value differs in configuration and system: "+
         "Configuration="+confValue+" while System="+sysValue+
         " Overriding the configuration value with the system value."
       );
      }
      conf.set(propertyName, sysValue);
    } else {
      // Ok, it's not set, so we create it as a subdirectory
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  /**
   * @return Where to write test data on the test filesystem; returns the working directory
   * for the test filesystem by default
   * @see #setupDataTestDirOnTestFS()
   * @see #getTestFileSystem()
   */
  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

  /**
   * @return Where the DFS cluster will write data on the local filesystem.
   * Creates it if it does not exist already.  A subdir of {@link #getBaseTestDir()}
   * @see #getTestFileSystem()
   */
  Path getClusterTestDir() {
    if (clusterTestDir == null){
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  /**
   * Creates a directory for the DFS cluster, under the test data directory.
   */
  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      return;
    }

    // Using randomUUID ensures that multiple clusters can be launched by
    //  the same test, if it stops and starts them
    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
    // Have it cleaned up on exit
    boolean b = deleteOnExit();
    if (b) clusterTestDir.deleteOnExit();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   * @param subdirName name of the subdir to create under the base test dir
   */
  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

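  // Illustrative sketch, not part of the original class: writing a scratch file under
  // the per-test directory on the test filesystem (local FS or mini DFS, depending on
  // what has been set up). The method name and file name are made up for the example.
  private void exampleWriteToTestFS() throws IOException {
    Path scratch = getDataTestDirOnTestFS("scratch");   // unique per utility instance
    FileSystem fs = getTestFileSystem();
    OutputStream out = fs.create(new Path(scratch, "data.txt"));
    out.write(Bytes.toBytes("hello"));                  // goes to whichever FS is active
    out.close();
  }
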
  /**
   * Sets up a path in the test filesystem to be used by tests.
   */
  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test dir on test fs already set up in "
          + dataTestDirOnTestFS.toString());
      return;
    }

    //The file system can be either local, mini dfs, or if the configuration
    //is supplied externally, it can be an external cluster FS. If it is a local
    //file system, the tests should use getBaseTestDir, otherwise, we can use
    //the working directory, and create a unique sub dir there
    FileSystem fs = getTestFileSystem();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      File dataTestDir = new File(getDataTestDir().toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
      dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
    } else {
      Path base = getBaseTestDirOnTestFS();
      String randomStr = UUID.randomUUID().toString();
      dataTestDirOnTestFS = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
    }
  }

  /**
   * Cleans the test data directory on the test filesystem.
   * @return True if we removed the test dirs
   * @throws IOException
   */
  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret)
      dataTestDirOnTestFS = null;
    return ret;
  }

  /**
   * Cleans a subdirectory under the test data directory on the test filesystem.
   * @return True if we removed the child
   * @throws IOException
   */
  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }

  /**
   * Start a minidfscluster.
   * @param servers How many DNs to start.
   * @throws Exception
   * @see {@link #shutdownMiniDFSCluster()}
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  /**
   * Start a minidfscluster.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names, all instances of the
   * datanodes will have the same host name.
   * @param hosts hostnames the DNs should run on.
   * @throws Exception
   * @see {@link #shutdownMiniDFSCluster()}
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
  throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  /**
   * Start a minidfscluster.
   * Can only create one.
   * @param servers How many DNs to start.
   * @param hosts hostnames the DNs should run on.
   * @throws Exception
   * @see {@link #shutdownMiniDFSCluster()}
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
  throws Exception {
    createDirsAndSetProperties();

    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
        setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
        setLevel(org.apache.log4j.Level.ERROR);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // Reset the test directory for the test file system
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, racks, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // Reset the test directory for the test file system
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }

  /** This is used before starting HDFS and map-reduce mini-clusters */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapred.local.dir");
    createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
  }

  /**
   *  Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
   *  This allows the parameter to be specified on the command line.
   *  If neither is set, the default is false.
   */
  public boolean isReadShortCircuitOn(){
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null){
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }

  /** Enable short circuit reads, unless configured differently.
   * Sets both HBase and HDFS settings, including skipping the hdfs checksum checks.
   */
  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      // read short circuit, for hdfs
      conf.set("dfs.block.local-path-access.user", curUser);
      // read short circuit, for hbase
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      // Skip checking checksum, for the hdfs client and the datanode
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

  /**
   * Shuts down the instance created by a call to {@link #startMiniDFSCluster(int)},
   * or does nothing.
   * @throws IOException
   */
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }

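  // Illustrative only, not part of the original class: the DFS-only lifecycle, for tests
  // that need HDFS but no HBase cluster. The method name is made up for the example.
  private void exampleDfsOnlyLifecycle() throws Exception {
    MiniDFSCluster cluster = startMiniDFSCluster(3);  // three datanodes
    FileSystem fs = cluster.getFileSystem();
    fs.mkdirs(new Path(getDataTestDirOnTestFS(), "dfs-only-example"));
    shutdownMiniDFSCluster();
  }
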
  /**
   * Call this if you only want a zk cluster.
   * @see #startMiniCluster() if you want a zk + dfs + hbase mini cluster.
   * @throws Exception
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  /**
   * Call this if you only want a zk cluster.
   * @param zooKeeperServerNum
   * @see #startMiniCluster() if you want a zk + dfs + hbase mini cluster.
   * @throws Exception
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
    throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  /**
   * Start a mini ZK cluster. If the property "test.hbase.zookeeper.property.clientPort" is set,
   *  the port mentioned is used as the default port for ZooKeeper.
   */
  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
  throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0){
      // If there is a port in the config file, we use it.
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  /**
   * Shuts down the zk cluster created by a call to {@link #startMiniZKCluster(File)},
   * or does nothing.
   * @throws IOException
   * @see #startMiniZKCluster()
   */
  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }

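  // Illustrative only, not part of the original class: the ZooKeeper-only lifecycle.
  // Note that startup rewrites hbase.zookeeper.property.clientPort in the configuration
  // to whatever client port the mini cluster actually bound.
  private void exampleZkOnlyLifecycle() throws Exception {
    startMiniZKCluster();
    String clientPort = getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT);
    LOG.info("Example mini zk cluster is on client port " + clientPort);
    shutdownMiniZKCluster();
  }
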
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @throws Exception
   * @return Mini hbase cluster instance created.
   * @see {@link #shutdownMiniCluster()}
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

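  // A minimal end-to-end sketch, illustrative and not part of the original class:
  // start everything, write one row, read it back, and tear everything down. The
  // method, table, family, and row names are made up for the example.
  private static void exampleMiniClusterRoundTrip() throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      HTable table = util.createTable(Bytes.toBytes("example"), Bytes.toBytes("f"));
      Put put = new Put(Bytes.toBytes("row1"));
      put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v1"));
      table.put(put);
      Result result = table.get(new Get(Bytes.toBytes("row1")));
      assertTrue(!result.isEmpty());  // JUnit assert; statically imported above
      table.close();
    } finally {
      util.shutdownMiniCluster();
    }
  }
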
  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under the System property test.build.data.
   * Directory is cleaned up on exit.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * datanodes and regionservers.  If numSlaves is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
   * you will get bind errors.
   * @throws Exception
   * @see {@link #shutdownMiniCluster()}
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
  throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  /**
   * Start a minicluster.
   * @throws Exception
   * @see {@link #shutdownMiniCluster()}
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves)
  throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }

  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under the System property test.build.data.
   * Directory is cleaned up on exit.
   * @param numMasters Number of masters to start up.  We'll start this many
   * hbase masters.  If numMasters > 1, you can find the active/primary master
   * with {@link MiniHBaseCluster#getMaster()}.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * regionservers. If dataNodeHosts == null, this also indicates the number of
   * datanodes to start. If dataNodeHosts != null, the number of datanodes is
   * based on dataNodeHosts.length.
   * If numSlaves is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
   * you will get bind errors.
   * @param dataNodeHosts hostnames the DNs should run on.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names,
   * all instances of the datanodes will have the same host name.
   * @throws Exception
   * @see {@link #shutdownMiniCluster()}
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
  }

  /**
   * Same as {@link #startMiniCluster(int, int)}, but with a custom number of datanodes.
   * @param numDataNodes Number of data nodes.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }

  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under the System property test.build.data.
   * Directory is cleaned up on exit.
   * @param numMasters Number of masters to start up.  We'll start this many
   * hbase masters.  If numMasters > 1, you can find the active/primary master
   * with {@link MiniHBaseCluster#getMaster()}.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * regionservers. If dataNodeHosts == null, this also indicates the number of
   * datanodes to start. If dataNodeHosts != null, the number of datanodes is
   * based on dataNodeHosts.length.
   * If numSlaves is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
   * you will get bind errors.
   * @param dataNodeHosts hostnames the DNs should run on.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names,
   * all instances of the datanodes will have the same host name.
   * @param masterClass The class to use as HMaster, or null for default
   * @param regionserverClass The class to use as HRegionServer, or null for
   * default
   * @throws Exception
   * @see {@link #shutdownMiniCluster()}
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
          throws Exception {
    return startMiniCluster(
        numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }

  /**
   * Same as {@link #startMiniCluster(int, int, String[], Class, Class)}, but with a custom
   * number of datanodes.
   * @param numDataNodes Number of data nodes.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
    Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
  throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // If we already put up a cluster, fail.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    // Start up a zk cluster.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Start the MiniHBaseCluster
    return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException {
    return startMiniHBaseCluster(numMasters, numSlaves, null, null);
  }

  /**
   * Starts up the mini hbase cluster.  Usually used after a call to
   * {@link #startMiniCluster(int, int)} when doing stepped startup of clusters.
   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
   * @param numMasters
   * @param numSlaves
   * @return Reference to the hbase mini hbase cluster.
   * @throws IOException
   * @throws InterruptedException
   * @see {@link #startMiniCluster()}
   */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
        final int numSlaves, Class<? extends HMaster> masterClass,
        Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
  throws IOException, InterruptedException {
    // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
    createRootDir();

    // These settings will make the master wait until this exact number of
    // region servers is connected.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster =
        new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
    // Don't leave here till we've done a successful scan of the hbase:meta
    HTable t = new HTable(c, TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin(); // create the hbaseAdmin immediately
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster)this.hbaseCluster;
  }

  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
   * @param servers number of region servers
   * @throws IOException
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the hbase:meta
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

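  // Illustrative only, not part of the original class: bounce just HBase while dfs and
  // zk stay up, e.g. to verify that data survives an HBase restart.
  private void exampleHBaseRestart() throws Exception {
    shutdownMiniHBaseCluster();   // leaves the dfs and zk mini clusters running
    restartHBaseCluster(2);       // bring hbase back with two region servers
  }
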
  /**
   * @return Current mini hbase cluster. Only has something in it after a call
   * to {@link #startMiniCluster()}.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster)this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
                               MiniHBaseCluster.class.getName());
  }

  /**
   * Stops mini hbase, zk, and hdfs clusters.
   * @throws IOException
   * @see {@link #startMiniCluster(int)}
   */
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster){
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }

  /**
   * Shutdown the HBase mini cluster.  Does not shut down zk or dfs if running.
   * @throws IOException
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    // unset the configuration for MIN and MAX RS to start
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shutdown zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }

  /**
   * Returns the path to the default root dir the minicluster uses.
   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }

  /**
   * Creates an hbase rootdir in the user's home directory.  Also creates the hbase
   * version file.  Normally you won't make use of this method: the root hbasedir
   * is created for you as part of mini cluster startup.  You'd only use this
   * method if you were doing manual operation.
   * @return Fully qualified path to the hbase root dir
   * @throws IOException
   */
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  /**
   * Flushes all caches in the mini hbase cluster
   * @throws IOException
   */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /**
   * Flushes all caches of the given table in the mini hbase cluster
   * @throws IOException
   */
  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /**
   * Compact all regions in the mini hbase cluster
   * @throws IOException
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /**
   * Compact all of a table's regions in the mini hbase cluster
   * @throws IOException
   */
  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(String tableName, String family)
  throws IOException{
    return createTable(TableName.valueOf(tableName), new String[]{family});
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException{
    return createTable(TableName.valueOf(tableName), new byte[][]{family});
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, String[] families)
  throws IOException {
    List<byte[]> fams = new ArrayList<byte[]>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[] family)
  throws IOException{
    return createTable(tableName, new byte[][]{family});
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  public HTable createTable(String tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
  throws IOException{
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  /**
   * Create a table.
   * @param htd
   * @param families
   * @param c Configuration to use
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
  throws IOException {
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms (they are on by default as of 0.95) but we disable them here because
      // tests have hard coded counts of what to expect in block cache, etc., and blooms being
      // on is interfering.
      hcd.setBloomFilterType(BloomType.NONE);
      htd.addFamily(hcd);
    }
    getHBaseAdmin().createTable(htd);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(c, htd.getTableName());
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param c Configuration to use
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c)
  throws IOException {
    return createTable(new HTableDescriptor(tableName), families, c);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param c Configuration to use
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms (they are on by default as of 0.95) but we disable them here because
      // tests have hard coded counts of what to expect in block cache, etc., and blooms being
      // on is interfering.
      hcd.setBloomFilterType(BloomType.NONE);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param c Configuration to use
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c, int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param c Configuration to use
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c, int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
  throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @param blockSize
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions, int blockSize) throws IOException {
    return createTable(TableName.valueOf(tableName),
        families, numVersions, blockSize);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @param blockSize
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[][] families,
    int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
  throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[][] families,
      int[] numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

1393   /**
1394    * Create a table.
1395    * @param tableName name of the table to create
1396    * @param family the single column family for the new table
1397    * @param splitRows split points used to pre-split the table
1398    * @return An HTable instance for the created table.
1399    * @throws IOException
1400    */
1401   public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
1402     throws IOException{
1403     return createTable(TableName.valueOf(tableName), family, splitRows);
1404   }
1405 
1406   /**
1407    * Create a table.
1408    * @param tableName name of the table to create
1409    * @param family the single column family for the new table
1410    * @param splitRows split points used to pre-split the table
1411    * @return An HTable instance for the created table.
1412    * @throws IOException
1413    */
1414   public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
1415       throws IOException {
1416     HTableDescriptor desc = new HTableDescriptor(tableName);
1417     HColumnDescriptor hcd = new HColumnDescriptor(family);
1418     desc.addFamily(hcd);
1419     getHBaseAdmin().createTable(desc, splitRows);
1420     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1421     waitUntilAllRegionsAssigned(tableName);
1422     return new HTable(getConfiguration(), tableName);
1423   }
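
  // Usage sketch (names illustrative): pre-split a table at fixed boundaries so
  // a test starts with multiple regions; KEYS_FOR_HBA_CREATE_TABLE, defined
  // further down, is a ready-made set of split points.
  //
  //   HTable t = util.createTable(TableName.valueOf("exampleSplitTable"),
  //       Bytes.toBytes("cf"), KEYS_FOR_HBA_CREATE_TABLE);
  //   // createTable already waits until every region is assigned.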
1424 
1425   /**
1426    * Create a table.
1427    * @param tableName name of the table to create
1428    * @param families column families for the new table
1429    * @param splitRows split points used to pre-split the table
1430    * @return An HTable instance for the created table.
1431    * @throws IOException
1432    */
1433   public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
1434       throws IOException {
1435     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1436     for(byte[] family:families) {
1437       HColumnDescriptor hcd = new HColumnDescriptor(family);
1438       desc.addFamily(hcd);
1439     }
1440     getHBaseAdmin().createTable(desc, splitRows);
1441     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1442     waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
1443     return new HTable(getConfiguration(), tableName);
1444   }
1445 
1446   /**
1447    * Drop an existing table
1448    * @param tableName existing table
1449    */
1450   public void deleteTable(String tableName) throws IOException {
1451     deleteTable(TableName.valueOf(tableName));
1452   }
1453 
1454   /**
1455    * Drop an existing table
1456    * @param tableName existing table
1457    */
1458   public void deleteTable(byte[] tableName) throws IOException {
1459     deleteTable(TableName.valueOf(tableName));
1460   }
1461 
1462   /**
1463    * Drop an existing table
1464    * @param tableName existing table
1465    */
1466   public void deleteTable(TableName tableName) throws IOException {
1467     try {
1468       getHBaseAdmin().disableTable(tableName);
1469     } catch (TableNotEnabledException e) {
1470       LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1471     }
1472     getHBaseAdmin().deleteTable(tableName);
1473   }
1474 
1475   // ==========================================================================
1476   // Canned table and table descriptor creation
1477   // TODO replace HBaseTestCase
1478 
1479   public final static byte [] fam1 = Bytes.toBytes("colfamily11");
1480   public final static byte [] fam2 = Bytes.toBytes("colfamily21");
1481   public final static byte [] fam3 = Bytes.toBytes("colfamily31");
1482   public static final byte[][] COLUMNS = {fam1, fam2, fam3};
1483   private static final int MAXVERSIONS = 3;
1484 
1485   public static final char FIRST_CHAR = 'a';
1486   public static final char LAST_CHAR = 'z';
1487   public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
1488   public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1489 
1490   /**
1491    * Create a table descriptor named <code>name</code>, with {@link #COLUMNS} for
1492    * families, applying the given min/max versions, TTL and keep-deleted setting.
1493    * @param name Name to give the table.
1494    * @param versions Maximum number of versions to allow per column.
1495    * @return Table descriptor.
1496    */
1497   public HTableDescriptor createTableDescriptor(final String name,
1498       final int minVersions, final int versions, final int ttl, boolean keepDeleted) {
1499     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
1500     for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1501       htd.addFamily(new HColumnDescriptor(cfName)
1502           .setMinVersions(minVersions)
1503           .setMaxVersions(versions)
1504           .setKeepDeletedCells(keepDeleted)
1505           .setBlockCacheEnabled(false)
1506           .setTimeToLive(ttl)
1507       );
1508     }
1509     return htd;
1510   }
1511 
1512   /**
1513    * Create a table descriptor named <code>name</code> with {@link #COLUMNS} for
1514    * families and default version, TTL and keep-deleted settings.
1515    * @param name Name to give the table.
1516    * @return Table descriptor.
1517    */
1518   public HTableDescriptor createTableDescriptor(final String name) {
1519     return createTableDescriptor(name,  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1520         MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1521   }
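
  // Usage sketch (name and settings illustrative): build a descriptor with
  // non-default retention, e.g. up to three versions kept for a day with
  // deleted cells retained.
  //
  //   HTableDescriptor htd = util.createTableDescriptor("exampleRetention",
  //       1, 3, 24 * 60 * 60, true); // minVersions, maxVersions, ttl (s), keepDeleted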
1522 
1523   /**
1524    * Create an HRegion that writes to the local tmp dirs.
1525    * @param desc table descriptor
1526    * @param startKey first row key of the region
1527    * @param endKey last row key of the region, exclusive
1528    * @return the created region
1529    * @throws IOException
1530    */
1531   public HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey,
1532       byte [] endKey)
1533   throws IOException {
1534     HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
1535     return createLocalHRegion(hri, desc);
1536   }
1537 
1538   /**
1539    * Create an HRegion that writes to the local tmp dirs.
1540    * @param info region info
1541    * @param desc table descriptor
1542    * @return the created region
1543    * @throws IOException
1544    */
1545   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
1546     return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
1547   }
1548 
1549   /**
1550    * Create an HRegion that writes to the local tmp dirs with specified hlog
1551    * @param info regioninfo
1552    * @param desc table descriptor
1553    * @param hlog hlog for this region.
1554    * @return created hregion
1555    * @throws IOException
1556    */
1557   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog) throws IOException {
1558     return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
1559   }
1560 
1561 
1562   /**
1563    * @param tableName name of the table backing the region
1564    * @param startKey first row key of the region
1565    * @param stopKey last row key of the region, exclusive
1566    * @param callingMethod name of the calling method
1567    * @param conf configuration to use
1568    * @param isReadOnly whether the table should be marked read-only
1569    * @param families column families to create
1570    * @throws IOException
1571    * @return A region on which you must call
1572    *         {@link HRegion#closeHRegion(HRegion)} when done.
1573    */
1574   public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
1575       String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
1576       HLog hlog, byte[]... families) throws IOException {
1577     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
1578     htd.setReadOnly(isReadOnly);
1579     for (byte[] family : families) {
1580       HColumnDescriptor hcd = new HColumnDescriptor(family);
1581       // Set default to be three versions.
1582       hcd.setMaxVersions(Integer.MAX_VALUE);
1583       htd.addFamily(hcd);
1584     }
1585     htd.setDurability(durability);
1586     HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
1587     return createLocalHRegion(info, htd, hlog);
1588   }
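
  // Usage sketch (name illustrative): a local region pairs naturally with
  // createTableDescriptor above; the caller is responsible for closing it.
  //
  //   HTableDescriptor htd = util.createTableDescriptor("exampleLocal");
  //   HRegion region = util.createLocalHRegion(htd, null, null);
  //   try {
  //     // ... put/get/scan against the region ...
  //   } finally {
  //     HRegion.closeHRegion(region);
  //   }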
1589   //
1590   // ==========================================================================
1591 
1592   /**
1593    * Truncate a table, i.e. delete every row while keeping the schema.
1594    * @param tableName existing table
1595    * @return An HTable instance for the truncated table
1596    * @throws IOException
1597    */
1598   public HTable truncateTable(byte[] tableName) throws IOException {
1599     return truncateTable(TableName.valueOf(tableName));
1600   }
1601 
1602   /**
1603    * Truncate a table, i.e. delete every row while keeping the schema.
1604    * @param tableName existing table
1605    * @return An HTable instance for the truncated table
1606    * @throws IOException
1607    */
1608   public HTable truncateTable(TableName tableName) throws IOException {
1609     HTable table = new HTable(getConfiguration(), tableName);
1610     Scan scan = new Scan();
1611     ResultScanner resScan = table.getScanner(scan);
1612     for(Result res : resScan) {
1613       Delete del = new Delete(res.getRow());
1614       table.delete(del);
1615     }
1616     resScan.close();
1618     return table;
1619   }
1620 
1621   /**
1622    * Load table with rows from 'aaa' to 'zzz'.
1623    * @param t Table
1624    * @param f Family
1625    * @return Count of rows loaded.
1626    * @throws IOException
1627    */
1628   public int loadTable(final HTable t, final byte[] f) throws IOException {
1629     return loadTable(t, new byte[][] {f});
1630   }
1631 
1632   /**
1633    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1634    * @param t Table
1635    * @param f Array of Families to load
1636    * @return Count of rows loaded.
1637    * @throws IOException
1638    */
1639   public int loadTable(final HTable t, final byte[][] f) throws IOException {
1640     return loadTable(t, f, null);
1641   }
1642 
1643   /**
1644    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1645    * @param t Table
1646    * @param f Array of Families to load
1647    * @param value the values of the cells. If null is passed, the row key is used as value
1648    * @return Count of rows loaded.
1649    * @throws IOException
1650    */
1651   public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
1652     t.setAutoFlush(false);
1653     int rowCount = 0;
1654     for (byte[] row : HBaseTestingUtility.ROWS) {
1655       Put put = new Put(row);
1656       for (int i = 0; i < f.length; i++) {
1657         put.add(f[i], null, value != null ? value : row);
1658       }
1659       t.put(put);
1660       rowCount++;
1661     }
1662     t.flushCommits();
1663     return rowCount;
1664   }
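
  // Usage sketch: load then verify. loadTable writes one cell per family for
  // each of the 26^3 rows "aaa".."zzz", so the count returned should match
  // countRows (defined below) on a freshly created table t.
  //
  //   int loaded = util.loadTable(t, Bytes.toBytes("cf"));
  //   assertTrue(loaded == util.countRows(t)); // 17576 rows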
1665 
1666   /** Tracks and validates table rows
1667    * generated with {@link HBaseTestingUtility#loadTable(HTable, byte[])}.
1668    */
1669   public static class SeenRowTracker {
1670     int dim = 'z' - 'a' + 1;
1671     int[][][] seenRows = new int[dim][dim][dim]; //count of how many times the row is seen
1672     byte[] startRow;
1673     byte[] stopRow;
1674 
1675     public SeenRowTracker(byte[] startRow, byte[] stopRow) {
1676       this.startRow = startRow;
1677       this.stopRow = stopRow;
1678     }
1679 
1680     void reset() {
1681       for (byte[] row : ROWS) {
1682         seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
1683       }
1684     }
1685 
1686     int i(byte b) {
1687       return b - 'a';
1688     }
1689 
1690     public void addRow(byte[] row) {
1691       seenRows[i(row[0])][i(row[1])][i(row[2])]++;
1692     }
1693 
1694     /** Validate that all the rows between startRow and stopRow are seen exactly once, and
1695      * all other rows not at all.
1696      */
1697     public void validate() {
1698       for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1699         for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1700           for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1701             int count = seenRows[i(b1)][i(b2)][i(b3)];
1702             int expectedCount = 0;
1703             if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
1704                 && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
1705               expectedCount = 1;
1706             }
1707             if (count != expectedCount) {
1708               String row = new String(new byte[] {b1,b2,b3});
1709               throw new RuntimeException("Row:" + row + " has a seen count of " + count + " instead of " + expectedCount);
1710             }
1711           }
1712         }
1713       }
1714     }
1715   }
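
  // Usage sketch (row bounds illustrative): verify that a scan over
  // [startRow, stopRow) of a table loaded with loadTable returns each row
  // exactly once.
  //
  //   SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("aaa"),
  //       Bytes.toBytes("ccc"));
  //   Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  //   for (Result res : t.getScanner(scan)) {
  //     tracker.addRow(res.getRow());
  //   }
  //   tracker.validate(); // throws RuntimeException on a miscounted row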
1716 
1717   public int loadRegion(final HRegion r, final byte[] f) throws IOException {
1718     return loadRegion(r, f, false);
1719   }
1720 
1721   /**
1722    * Load region with rows from 'aaa' to 'zzz'.
1723    * @param r Region
1724    * @param f Family
1725    * @param flush flush the cache if true
1726    * @return Count of rows loaded.
1727    * @throws IOException
1728    */
1729   public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
1730   throws IOException {
1731     byte[] k = new byte[3];
1732     int rowCount = 0;
1733     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1734       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1735         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1736           k[0] = b1;
1737           k[1] = b2;
1738           k[2] = b3;
1739           Put put = new Put(k);
1740           put.add(f, null, k);
1741           if (r.getLog() == null) put.setDurability(Durability.SKIP_WAL);
1742 
1743           int preRowCount = rowCount;
1744           int pause = 10;
1745           int maxPause = 1000;
1746           while (rowCount == preRowCount) {
1747             try {
1748               r.put(put);
1749               rowCount++;
1750             } catch (RegionTooBusyException e) {
1751               pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
1752               Threads.sleep(pause);
1753             }
1754           }
1755         }
1756       }
1757       if (flush) {
1758         r.flushcache();
1759       }
1760     }
1761     return rowCount;
1762   }
1763 
1764   public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow) throws IOException {
1765     for (int i = startRow; i < endRow; i++) {
1766       byte[] data = Bytes.toBytes(String.valueOf(i));
1767       Put put = new Put(data);
1768       put.add(f, null, data);
1769       t.put(put);
1770     }
1771   }
1772 
1773   /**
1774    * Return the number of rows in the given table.
1775    */
1776   public int countRows(final HTable table) throws IOException {
1777     Scan scan = new Scan();
1778     ResultScanner results = table.getScanner(scan);
1779     int count = 0;
1780     for (@SuppressWarnings("unused") Result res : results) {
1781       count++;
1782     }
1783     results.close();
1784     return count;
1785   }
1786 
1787   public int countRows(final HTable table, final byte[]... families) throws IOException {
1788     Scan scan = new Scan();
1789     for (byte[] family: families) {
1790       scan.addFamily(family);
1791     }
1792     ResultScanner results = table.getScanner(scan);
1793     int count = 0;
1794     for (@SuppressWarnings("unused") Result res : results) {
1795       count++;
1796     }
1797     results.close();
1798     return count;
1799   }
1800 
1801   /**
1802    * Return an MD5 digest over all the row keys of a table.
1803    */
1804   public String checksumRows(final HTable table) throws Exception {
1805     Scan scan = new Scan();
1806     ResultScanner results = table.getScanner(scan);
1807     MessageDigest digest = MessageDigest.getInstance("MD5");
1808     for (Result res : results) {
1809       digest.update(res.getRow());
1810     }
1811     results.close();
1812     return Bytes.toStringBinary(digest.digest());
1813   }
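
  // Usage sketch: row-key checksums give a cheap before/after comparison, for
  // instance asserting that a flush did not lose rows (flush call illustrative).
  //
  //   String before = util.checksumRows(t);
  //   util.getHBaseAdmin().flush(t.getTableName());
  //   assertTrue(before.equals(util.checksumRows(t)));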
1814 
1815   /**
1816    * Creates many regions, named "aaa" to "zzz".
1817    *
1818    * @param table  The table to use for the data.
1819    * @param columnFamily  The family to insert the data into.
1820    * @return count of regions created.
1821    * @throws IOException When creating the regions fails.
1822    */
1823   public int createMultiRegions(HTable table, byte[] columnFamily)
1824   throws IOException {
1825     return createMultiRegions(getConfiguration(), table, columnFamily);
1826   }
1827 
1828   /** All the row values for the data loaded by {@link #loadTable(HTable, byte[])} */
1829   public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
1830   static {
1831     int i = 0;
1832     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1833       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1834         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1835           ROWS[i][0] = b1;
1836           ROWS[i][1] = b2;
1837           ROWS[i][2] = b3;
1838           i++;
1839         }
1840       }
1841     }
1842   }
1843 
1844   public static final byte[][] KEYS = {
1845     HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
1846     Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
1847     Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
1848     Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
1849     Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
1850     Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
1851     Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
1852     Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
1853     Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
1854   };
1855 
1856   public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
1857       Bytes.toBytes("bbb"),
1858       Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
1859       Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
1860       Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
1861       Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
1862       Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
1863       Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
1864       Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
1865       Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
1866   };
1867 
1868   /**
1869    * Creates many regions, named "aaa" to "zzz".
1870    * @param c Configuration to use.
1871    * @param table  The table to use for the data.
1872    * @param columnFamily  The family to insert the data into.
1873    * @return count of regions created.
1874    * @throws IOException When creating the regions fails.
1875    */
1876   public int createMultiRegions(final Configuration c, final HTable table,
1877       final byte[] columnFamily)
1878   throws IOException {
1879     return createMultiRegions(c, table, columnFamily, KEYS);
1880   }
1881 
1882   /**
1883    * Creates the specified number of regions in the specified table.
1884    * @param c configuration to use
1885    * @param table the table to add regions to
1886    * @param family the family to ensure exists in the table
1887    * @param numRegions the number of regions to create; must be at least 3
1888    * @return count of regions created
1889    * @throws IOException
1890    */
1891   public int createMultiRegions(final Configuration c, final HTable table,
1892       final byte [] family, int numRegions)
1893   throws IOException {
1894     if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1895     byte [] startKey = Bytes.toBytes("aaaaa");
1896     byte [] endKey = Bytes.toBytes("zzzzz");
1897     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1898     byte [][] regionStartKeys = new byte[splitKeys.length+1][];
1899     for (int i=0;i<splitKeys.length;i++) {
1900       regionStartKeys[i+1] = splitKeys[i];
1901     }
1902     regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
1903     return createMultiRegions(c, table, family, regionStartKeys);
1904   }
1905 
1906   @SuppressWarnings("deprecation")
1907   public int createMultiRegions(final Configuration c, final HTable table,
1908       final byte[] columnFamily, byte [][] startKeys)
1909   throws IOException {
1910     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
1911     HTable meta = new HTable(c, TableName.META_TABLE_NAME);
1912     HTableDescriptor htd = table.getTableDescriptor();
1913     if(!htd.hasFamily(columnFamily)) {
1914       HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
1915       htd.addFamily(hcd);
1916     }
1917     // remove empty region - this is tricky as the mini cluster during the test
1918     // setup already has the "<tablename>,,123456789" row with an empty start
1919     // and end key. Adding the custom regions below adds those blindly,
1920     // including the new start region from empty to "bbb". lg
1921     List<byte[]> rows = getMetaTableRows(htd.getTableName());
1922     String regionToDeleteInFS = table
1923         .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
1924         .getRegionInfo().getEncodedName();
1925     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
1926     // add custom ones
1927     int count = 0;
1928     for (int i = 0; i < startKeys.length; i++) {
1929       int j = (i + 1) % startKeys.length;
1930       HRegionInfo hri = new HRegionInfo(table.getName(),
1931         startKeys[i], startKeys[j]);
1932       MetaEditor.addRegionToMeta(meta, hri);
1933       newRegions.add(hri);
1934       count++;
1935     }
1936     // see comment above, remove "old" (or previous) single region
1937     for (byte[] row : rows) {
1938       LOG.info("createMultiRegions: deleting meta row -> " +
1939         Bytes.toStringBinary(row));
1940       meta.delete(new Delete(row));
1941     }
1942     // remove the "old" region from FS
1943     Path tableDir = new Path(getDefaultRootDirPath().toString()
1944         + System.getProperty("file.separator") + htd.getTableName()
1945         + System.getProperty("file.separator") + regionToDeleteInFS);
1946     FileSystem.get(c).delete(tableDir);
1947     // flush cache of regions
1948     HConnection conn = table.getConnection();
1949     conn.clearRegionCache();
1950     // assign all the new regions IF table is enabled.
1951     HBaseAdmin admin = getHBaseAdmin();
1952     if (admin.isTableEnabled(table.getTableName())) {
1953       for(HRegionInfo hri : newRegions) {
1954         admin.assign(hri.getRegionName());
1955       }
1956     }
1957 
1958     meta.close();
1959 
1960     return count;
1961   }
1962 
1963   /**
1964    * Create rows in hbase:meta for regions of the specified table with the specified
1965    * start keys.  The first startKey should be a 0 length byte array if you
1966    * want to form a proper range of regions.
1967    * @param conf configuration to use
1968    * @param htd descriptor of the table the regions belong to
1969    * @param startKeys the region start keys
1970    * @return list of region info for regions added to meta
1971    * @throws IOException
1972    */
1973   public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
1974       final HTableDescriptor htd, byte [][] startKeys)
1975   throws IOException {
1976     HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
1977     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
1978     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
1979     // add custom ones
1980     for (int i = 0; i < startKeys.length; i++) {
1981       int j = (i + 1) % startKeys.length;
1982       HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
1983           startKeys[j]);
1984       MetaEditor.addRegionToMeta(meta, hri);
1985       newRegions.add(hri);
1986     }
1987 
1988     meta.close();
1989     return newRegions;
1990   }
1991 
1992   /**
1993    * Returns all rows from the hbase:meta table.
1994    *
1995    * @throws IOException When reading the rows fails.
1996    */
1997   public List<byte[]> getMetaTableRows() throws IOException {
1998     // TODO: Redo using MetaReader class
1999     HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2000     List<byte[]> rows = new ArrayList<byte[]>();
2001     ResultScanner s = t.getScanner(new Scan());
2002     for (Result result : s) {
2003       LOG.info("getMetaTableRows: row -> " +
2004         Bytes.toStringBinary(result.getRow()));
2005       rows.add(result.getRow());
2006     }
2007     s.close();
2008     t.close();
2009     return rows;
2010   }
2011 
2012   /**
2013    * Returns all rows from the hbase:meta table for a given user table
2014    *
2015    * @throws IOException When reading the rows fails.
2016    */
2017   public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2018     // TODO: Redo using MetaReader.
2019     HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2020     List<byte[]> rows = new ArrayList<byte[]>();
2021     ResultScanner s = t.getScanner(new Scan());
2022     for (Result result : s) {
2023       HRegionInfo info = HRegionInfo.getHRegionInfo(result);
2024       if (info == null) {
2025         LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2026         // TODO figure out what to do for this new hosed case.
2027         continue;
2028       }
2029 
2030       if (info.getTable().equals(tableName)) {
2031         LOG.info("getMetaTableRows: row -> " +
2032             Bytes.toStringBinary(result.getRow()) + info);
2033         rows.add(result.getRow());
2034       }
2035     }
2036     s.close();
2037     t.close();
2038     return rows;
2039   }
2040 
2041   /**
2042    * Tool to get the reference to the region server object that holds the
2043    * region of the specified user table.
2044    * It first searches for the meta rows that contain the region of the
2045    * specified table, then gets the index of that RS, and finally retrieves
2046    * the RS's reference.
2047    * @param tableName user table to lookup in hbase:meta
2048    * @return region server that holds it, null if the row doesn't exist
2049    * @throws IOException
2050    * @throws InterruptedException
2051    */
2052   public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
2053       throws IOException, InterruptedException {
2054     return getRSForFirstRegionInTable(TableName.valueOf(tableName));
2055   }
2056   /**
2057    * Tool to get the reference to the region server object that holds the
2058    * region of the specified user table.
2059    * It first searches for the meta rows that contain the region of the
2060    * specified table, then gets the index of that RS, and finally retrieves
2061    * the RS's reference.
2062    * @param tableName user table to lookup in hbase:meta
2063    * @return region server that holds it, null if the row doesn't exist
2064    * @throws IOException
2065    */
2066   public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2067       throws IOException, InterruptedException {
2068     List<byte[]> metaRows = getMetaTableRows(tableName);
2069     if (metaRows == null || metaRows.isEmpty()) {
2070       return null;
2071     }
2072     LOG.debug("Found " + metaRows.size() + " rows for table " +
2073       tableName);
2074     byte [] firstrow = metaRows.get(0);
2075     LOG.debug("FirstRow=" + Bytes.toString(firstrow));
2076     long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2077       HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2078     int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2079       HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
2080     RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MILLISECONDS);
2081     while(retrier.shouldRetry()) {
2082       int index = getMiniHBaseCluster().getServerWith(firstrow);
2083       if (index != -1) {
2084         return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2085       }
2086       // Came back -1.  Region may not be online yet.  Sleep a while.
2087       retrier.sleepUntilNextRetry();
2088     }
2089     return null;
2090   }
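
  // Usage sketch (table name illustrative): locate and abort the server hosting
  // a table's first region, a common fault-injection step in recovery tests.
  //
  //   HRegionServer rs = util.getRSForFirstRegionInTable(
  //       TableName.valueOf("exampleTable"));
  //   if (rs != null) {
  //     rs.abort("test-induced failure");
  //   }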
2091 
2092   /**
2093    * Starts a <code>MiniMRCluster</code> with a default number of
2094    * <code>TaskTracker</code>s.
2095    *
2096    * @throws IOException When starting the cluster fails.
2097    */
2098   public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2099     startMiniMapReduceCluster(2);
2100     return mrCluster;
2101   }
2102 
2103   /**
2104    * Tasktracker has a bug where changing the hadoop.log.dir system property
2105    * will not change its internal static LOG_DIR variable.
2106    */
2107   private void forceChangeTaskLogDir() {
2108     Field logDirField;
2109     try {
2110       logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2111       logDirField.setAccessible(true);
2112 
2113       Field modifiersField = Field.class.getDeclaredField("modifiers");
2114       modifiersField.setAccessible(true);
2115       modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2116 
2117       logDirField.set(null, new File(hadoopLogDir, "userlogs"));
2118     } catch (SecurityException e) {
2119       throw new RuntimeException(e);
2120     } catch (NoSuchFieldException e) {
2122       throw new RuntimeException(e);
2123     } catch (IllegalArgumentException e) {
2124       throw new RuntimeException(e);
2125     } catch (IllegalAccessException e) {
2126       throw new RuntimeException(e);
2127     }
2128   }
2129 
2130   /**
2131    * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
2132    * filesystem.
2133    * @param servers  The number of <code>TaskTracker</code>s to start.
2134    * @throws IOException When starting the cluster fails.
2135    */
2136   private void startMiniMapReduceCluster(final int servers) throws IOException {
2137     if (mrCluster != null) {
2138       throw new IllegalStateException("MiniMRCluster is already running");
2139     }
2140     LOG.info("Starting mini mapreduce cluster...");
2141     setupClusterTestDir();
2142     createDirsAndSetProperties();
2143 
2144     forceChangeTaskLogDir();
2145 
2146     //// hadoop2 specific settings
2147     // Tests were failing because this process used 6GB of virtual memory and was getting killed.
2148     // we up the VM usable so that processes don't get killed.
2149     conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2150 
2151     // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
2152     // this avoids the problem by disabling speculative task execution in tests.
2153     conf.setBoolean("mapreduce.map.speculative", false);
2154     conf.setBoolean("mapreduce.reduce.speculative", false);
2155     ////
2156 
2157     // Allow the user to override FS URI for this map-reduce cluster to use.
2158     mrCluster = new MiniMRCluster(servers,
2159       FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
2160       null, null, new JobConf(this.conf));
2161     JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2162     if (jobConf == null) {
2163       jobConf = mrCluster.createJobConf();
2164     }
2165 
2166     jobConf.set("mapred.local.dir",
2167       conf.get("mapred.local.dir")); //Hadoop MiniMR overwrites this while it should not
2168     LOG.info("Mini mapreduce cluster started");
2169 
2170     // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
2171     // Our HBase MR jobs need several of these settings in order to properly run.  So we copy the
2172     // necessary config properties here.  YARN-129 required adding a few properties.
2173     conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
2174     // this for mrv2 support; mr1 ignores this
2175     conf.set("mapreduce.framework.name", "yarn");
2176     conf.setBoolean("yarn.is.minicluster", true);
2177     String rmAddress = jobConf.get("yarn.resourcemanager.address");
2178     if (rmAddress != null) {
2179       conf.set("yarn.resourcemanager.address", rmAddress);
2180     }
2181     String schedulerAddress =
2182       jobConf.get("yarn.resourcemanager.scheduler.address");
2183     if (schedulerAddress != null) {
2184       conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2185     }
2186   }
2187 
2188   /**
2189    * Stops the previously started <code>MiniMRCluster</code>.
2190    */
2191   public void shutdownMiniMapReduceCluster() {
2192     LOG.info("Stopping mini mapreduce cluster...");
2193     if (mrCluster != null) {
2194       mrCluster.shutdown();
2195       mrCluster = null;
2196     }
2197     // Restore configuration to point to local jobtracker
2198     conf.set("mapred.job.tracker", "local");
2199     LOG.info("Mini mapreduce cluster stopped");
2200   }
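
  // Usage sketch: the MR cluster is typically bracketed around a job, assuming
  // the HBase mini cluster is already up.
  //
  //   util.startMiniMapReduceCluster();
  //   try {
  //     // ... configure and submit a MapReduce job ...
  //   } finally {
  //     util.shutdownMiniMapReduceCluster();
  //   }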
2201 
2202   /**
2203    * Create a stubbed out RegionServerServices, mainly for getting the FS.
2204    */
2205   public RegionServerServices createMockRegionServerService() throws IOException {
2206     return createMockRegionServerService((ServerName)null);
2207   }
2208 
2209   /**
2210    * Create a stubbed out RegionServerServices, mainly for getting the FS.
2211    * This version is used by TestTokenAuthentication
2212    */
2213   public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
2214     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2215     rss.setFileSystem(getTestFileSystem());
2216     rss.setRpcServer(rpc);
2217     return rss;
2218   }
2219 
2220   /**
2221    * Create a stubbed out RegionServerServices, mainly for getting the FS.
2222    * This version is used by TestOpenRegionHandler
2223    */
2224   public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2225     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2226     rss.setFileSystem(getTestFileSystem());
2227     return rss;
2228   }
2229 
2230   /**
2231    * Switches the logger for the given class to DEBUG level.
2232    *
2233    * @param clazz  The class for which to switch to debug logging.
2234    */
2235   public void enableDebug(Class<?> clazz) {
2236     Log l = LogFactory.getLog(clazz);
2237     if (l instanceof Log4JLogger) {
2238       ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2239     } else if (l instanceof Jdk14Logger) {
2240       ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2241     }
2242   }
2243 
2244   /**
2245    * Expire the Master's session
2246    * @throws Exception
2247    */
2248   public void expireMasterSession() throws Exception {
2249     HMaster master = getMiniHBaseCluster().getMaster();
2250     expireSession(master.getZooKeeper(), false);
2251   }
2252 
2253   /**
2254    * Expire a region server's session
2255    * @param index which RS
2256    * @throws Exception
2257    */
2258   public void expireRegionServerSession(int index) throws Exception {
2259     HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2260     expireSession(rs.getZooKeeper(), false);
2261     decrementMinRegionServerCount();
2262   }
2263 
2264   private void decrementMinRegionServerCount() {
2265     // decrement the count for this.conf, for newly spawned master
2266     // this.hbaseCluster shares this configuration too
2267     decrementMinRegionServerCount(getConfiguration());
2268 
2269     // each master thread keeps a copy of configuration
2270     for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2271       decrementMinRegionServerCount(master.getMaster().getConfiguration());
2272     }
2273   }
2274 
2275   private void decrementMinRegionServerCount(Configuration conf) {
2276     int currentCount = conf.getInt(
2277         ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2278     if (currentCount != -1) {
2279       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
2280           Math.max(currentCount - 1, 1));
2281     }
2282   }
2283 
2284   public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
2285    expireSession(nodeZK, false);
2286   }
2287 
2288   @Deprecated
2289   public void expireSession(ZooKeeperWatcher nodeZK, Server server)
2290     throws Exception {
2291     expireSession(nodeZK, false);
2292   }
2293 
2294   /**
2295    * Expire a ZooKeeper session as recommended in ZooKeeper documentation
2296    * http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A4
2297    * There are issues when doing this:
2298    * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
2299    * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
2300    *
2301    * @param nodeZK - the ZK watcher to expire
2302    * @param checkStatus - true to check if we can create an HTable with the
2303    *                    current configuration.
2304    */
2305   public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
2306     throws Exception {
2307     Configuration c = new Configuration(this.conf);
2308     String quorumServers = ZKConfig.getZKQuorumServersString(c);
2309     ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
2310     byte[] password = zk.getSessionPasswd();
2311     long sessionID = zk.getSessionId();
2312 
2313     // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
2314     //  so we create a first watcher to be sure that the
2315     //  event was sent. We expect that if our watcher receives the event
2316     //  other watchers on the same machine will get it as well.
2317     // When we ask to close the connection, ZK does not close it before
2318     //  we receive all the events, so we don't have to capture the event; just
2319     //  closing the connection should be enough.
2320     ZooKeeper monitor = new ZooKeeper(quorumServers,
2321       1000, new org.apache.zookeeper.Watcher(){
2322       @Override
2323       public void process(WatchedEvent watchedEvent) {
2324         LOG.info("Monitor ZKW received event="+watchedEvent);
2325       }
2326     } , sessionID, password);
2327 
2328     // Making it expire
2329     ZooKeeper newZK = new ZooKeeper(quorumServers,
2330         1000, EmptyWatcher.instance, sessionID, password);
2331 
2332     // Ensure that we have a connection to the server before closing down; otherwise
2333     // the close session event will be eaten before we reach the CONNECTING state.
2334     long start = System.currentTimeMillis();
2335     while (newZK.getState() != States.CONNECTED
2336          && System.currentTimeMillis() - start < 1000) {
2337        Thread.sleep(1);
2338     }
2339     newZK.close();
2340     LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
2341 
2342     // Now closing & waiting to be sure that the clients get it.
2343     monitor.close();
2344 
2345     if (checkStatus) {
2346       new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
2347     }
2348   }
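
  // Usage sketch: force a master ZooKeeper session expiry, then wait for a
  // master to come back before continuing the test.
  //
  //   util.expireMasterSession();
  //   util.getMiniHBaseCluster().waitForActiveAndReadyMaster();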
2349 
2350   /**
2351    * Get the Mini HBase cluster.
2352    *
2353    * @return hbase cluster
2354    * @see #getHBaseClusterInterface()
2355    */
2356   public MiniHBaseCluster getHBaseCluster() {
2357     return getMiniHBaseCluster();
2358   }
2359 
2360   /**
2361    * Returns the HBaseCluster instance.
2362    * <p>Returned object can be any of the subclasses of HBaseCluster, and the
2363    * tests referring to this should not assume that the cluster is a mini cluster or a
2364    * distributed one. If the test only works on a mini cluster, then the specific
2365    * method {@link #getMiniHBaseCluster()} can be used instead w/o the
2366    * need to type-cast.
2367    */
2368   public HBaseCluster getHBaseClusterInterface() {
2369     //implementation note: we should rename this method as #getHBaseCluster(),
2370     //but this would require refactoring 90+ calls.
2371     return hbaseCluster;
2372   }
2373 
2374   /**
2375    * Returns an HBaseAdmin instance.
2376    * This instance is shared between HBaseTestingUtility instance users.
2377    * Closing it has no effect; it will be closed automatically when the
2378    * cluster shuts down.
2379    *
2380    * @return The HBaseAdmin instance.
2381    * @throws IOException
2382    */
2383   public synchronized HBaseAdmin getHBaseAdmin()
2384   throws IOException {
2385     if (hbaseAdmin == null){
2386       hbaseAdmin = new HBaseAdminForTests(getConfiguration());
2387     }
2388     return hbaseAdmin;
2389   }
2390 
2391   private HBaseAdminForTests hbaseAdmin = null;
2392   private static class HBaseAdminForTests extends HBaseAdmin {
2393     public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
2394         ZooKeeperConnectionException, IOException {
2395       super(c);
2396     }
2397 
2398     @Override
2399     public synchronized void close() throws IOException {
2400       LOG.warn("close() called on HBaseAdmin instance returned from HBaseTestingUtility.getHBaseAdmin()");
2401     }
2402 
2403     private synchronized void close0() throws IOException {
2404       super.close();
2405     }
2406   }
2407 
2408   /**
2409    * Returns a ZooKeeperWatcher instance.
2410    * This instance is shared between HBaseTestingUtility instance users.
2411    * Don't close it; it will be closed automatically when the
2412    * cluster shuts down.
2413    *
2414    * @return The ZooKeeperWatcher instance.
2415    * @throws IOException
2416    */
2417   public synchronized ZooKeeperWatcher getZooKeeperWatcher()
2418     throws IOException {
2419     if (zooKeeperWatcher == null) {
2420       zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
2421         new Abortable() {
2422         @Override public void abort(String why, Throwable e) {
2423           throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
2424         }
2425         @Override public boolean isAborted() {return false;}
2426       });
2427     }
2428     return zooKeeperWatcher;
2429   }
2430   private ZooKeeperWatcher zooKeeperWatcher;
2431 
2432 
2433 
2434   /**
2435    * Closes the named region.
2436    *
2437    * @param regionName  The region to close.
2438    * @throws IOException
2439    */
2440   public void closeRegion(String regionName) throws IOException {
2441     closeRegion(Bytes.toBytes(regionName));
2442   }
2443 
2444   /**
2445    * Closes the named region.
2446    *
2447    * @param regionName  The region to close.
2448    * @throws IOException
2449    */
2450   public void closeRegion(byte[] regionName) throws IOException {
2451     getHBaseAdmin().closeRegion(regionName, null);
2452   }
2453 
2454   /**
2455    * Closes the region containing the given row.
2456    *
2457    * @param row  The row to find the containing region.
2458    * @param table  The table to find the region.
2459    * @throws IOException
2460    */
2461   public void closeRegionByRow(String row, HTable table) throws IOException {
2462     closeRegionByRow(Bytes.toBytes(row), table);
2463   }
2464 
2465   /**
2466    * Closes the region containing the given row.
2467    *
2468    * @param row  The row to find the containing region.
2469    * @param table  The table to find the region.
2470    * @throws IOException
2471    */
2472   public void closeRegionByRow(byte[] row, HTable table) throws IOException {
2473     HRegionLocation hrl = table.getRegionLocation(row);
2474     closeRegion(hrl.getRegionInfo().getRegionName());
2475   }
2476 
2477   /*
2478    * Retrieves a splittable region randomly from tableName
2479    *
2480    * @param tableName name of table
2481    * @param maxAttempts maximum number of attempts, unlimited for value of -1
2482    * @return the HRegion chosen, null if none was found within limit of maxAttempts
2483    */
2484   public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
2485     List<HRegion> regions = getHBaseCluster().getRegions(tableName);
2486     int regCount = regions.size();
2487     Set<Integer> attempted = new HashSet<Integer>();
2488     int idx;
2489     int attempts = 0;
2490     do {
2491       regions = getHBaseCluster().getRegions(tableName);
2492       if (regCount != regions.size()) {
2493         // if there was region movement, clear attempted Set
2494         attempted.clear();
2495       }
2496       regCount = regions.size();
2497       // There are chances that before we get the region for the table from an RS the region may
2498       // be going for CLOSE.  This may be because online schema change is enabled
2499       if (regCount > 0) {
2500         idx = random.nextInt(regCount);
2501         // if we have just tried this region, there is no need to try again
2502         if (attempted.contains(idx))
2503           continue;
2504         try {
2505           regions.get(idx).checkSplit();
2506           return regions.get(idx);
2507         } catch (Exception ex) {
2508           LOG.warn("Caught exception", ex);
2509           attempted.add(idx);
2510         }
2511       }
2512       attempts++;
2513     } while (maxAttempts == -1 || attempts < maxAttempts);
2514     return null;
2515   }
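
  // Usage sketch (table name illustrative): pick a splittable region, retrying
  // selection up to five times, and ask the admin to split it.
  //
  //   HRegion region = util.getSplittableRegion(
  //       TableName.valueOf("exampleTable"), 5);
  //   if (region != null) {
  //     util.getHBaseAdmin().split(region.getRegionInfo().getRegionName());
  //   }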
2516 
2517   public MiniZooKeeperCluster getZkCluster() {
2518     return zkCluster;
2519   }
2520 
2521   public void setZkCluster(MiniZooKeeperCluster zkCluster) {
2522     this.passedZkCluster = true;
2523     this.zkCluster = zkCluster;
2524     conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
2525   }
2526 
2527   public MiniDFSCluster getDFSCluster() {
2528     return dfsCluster;
2529   }
2530 
2531   public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
2532     if (dfsCluster != null && dfsCluster.isClusterUp()) {
2533       throw new IOException("DFSCluster is already running! Shut it down first.");
2534     }
2535     this.dfsCluster = cluster;
2536   }
2537 
2538   public FileSystem getTestFileSystem() throws IOException {
2539     return HFileSystem.get(conf);
2540   }
2541 
2542   /**
2543    * Wait until all regions in a table have been assigned.  Waits up to the default
2544    * timeout (30 seconds) before giving up.
2545    * @param table Table to wait on.
2546    * @throws InterruptedException
2547    * @throws IOException
2548    */
2549   public void waitTableAvailable(byte[] table)
2550       throws InterruptedException, IOException {
2551     waitTableAvailable(getHBaseAdmin(), table, 30000);
2552   }
2553 
2554   public void waitTableAvailable(HBaseAdmin admin, byte[] table)
2555       throws InterruptedException, IOException {
2556     waitTableAvailable(admin, table, 30000);
2557   }
2558 
2559   /**
2560    * Wait until all regions in a table have been assigned
2561    * @param table Table to wait on.
2562    * @param timeoutMillis Timeout.
2563    * @throws InterruptedException
2564    * @throws IOException
2565    */
2566   public void waitTableAvailable(byte[] table, long timeoutMillis)
2567   throws InterruptedException, IOException {
2568     waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
2569   }
2570 
2571   public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
2572   throws InterruptedException, IOException {
2573     long startWait = System.currentTimeMillis();
2574     while (!admin.isTableAvailable(table)) {
2575       assertTrue("Timed out waiting for table to become available " +
2576         Bytes.toStringBinary(table),
2577         System.currentTimeMillis() - startWait < timeoutMillis);
2578       Thread.sleep(200);
2579     }
2580   }
2581 
2582   /**
2583    * Waits for a table to be 'enabled'.  Enabled means that table is set as 'enabled' and the
2584    * regions have been all assigned.  Will timeout after default period (30 seconds)
2585    * @see #waitTableAvailable(byte[])
2586    * @param table Table to wait on.
2588    * @throws InterruptedException
2589    * @throws IOException
2590    */
2591   public void waitTableEnabled(byte[] table)
2592       throws InterruptedException, IOException {
2593     waitTableEnabled(getHBaseAdmin(), table, 30000);
2594   }
2595 
2596   public void waitTableEnabled(HBaseAdmin admin, byte[] table)
2597       throws InterruptedException, IOException {
2598     waitTableEnabled(admin, table, 30000);
2599   }
2600 
2601   /**
2602    * Waits for a table to be 'enabled'.  Enabled means that table is set as 'enabled' and the
2603    * regions have been all assigned.
2604    * @see #waitTableAvailable(byte[])
2605    * @param table Table to wait on.
2606    * @param timeoutMillis Time to wait on it being marked enabled.
2607    * @throws InterruptedException
2608    * @throws IOException
2609    */
2610   public void waitTableEnabled(byte[] table, long timeoutMillis)
2611   throws InterruptedException, IOException {
2612     waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
2613   }
2614 
2615   public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
2616   throws InterruptedException, IOException {
2617     long startWait = System.currentTimeMillis();
2618     waitTableAvailable(admin, table, timeoutMillis);
2619     while (!admin.isTableEnabled(table)) {
2620       assertTrue("Timed out waiting for table to become available and enabled " +
2621          Bytes.toStringBinary(table),
2622          System.currentTimeMillis() - startWait < timeoutMillis);
2624       Thread.sleep(200);
2625     }
2626     // Finally make sure all regions are fully open and online out on the cluster. Regions may be
2627     // in the hbase:meta table and almost open on all regionservers, but setting the region
2628     // online in the regionserver is the very last thing done and can take a little while to happen.
2629     // Below we do a get.  The get will retry on a NotServingRegionException or a
2630     // RegionOpeningException.  It is crass, but when done all will be online.
2631     try {
2632       Canary.sniff(admin, TableName.valueOf(table));
2633     } catch (Exception e) {
2634       throw new IOException(e);
2635     }
2636   }
2637 
2638   /**
2639    * Make sure that at least the specified number of region servers
2640    * are running
2641    * @param num minimum number of region servers that should be running
2642    * @return true if we started some servers
2643    * @throws IOException
2644    */
2645   public boolean ensureSomeRegionServersAvailable(final int num)
2646       throws IOException {
2647     boolean startedServer = false;
2648     MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
2649     for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
2650       LOG.info("Started new server=" + hbaseCluster.startRegionServer());
2651       startedServer = true;
2652     }
2653 
2654     return startedServer;
2655   }
2656 
2657 
2658   /**
2659    * Make sure that at least the specified number of region servers
2660    * are running. We don't count the ones that are currently stopping or are
2661    * stopped.
2662    * @param num minimum number of region servers that should be running
2663    * @return true if we started some servers
2664    * @throws IOException
2665    */
2666   public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
2667     throws IOException {
2668     boolean startedServer = ensureSomeRegionServersAvailable(num);
2669 
2670     int nonStoppedServers = 0;
2671     for (JVMClusterUtil.RegionServerThread rst :
2672       getMiniHBaseCluster().getRegionServerThreads()) {
2673 
2674       HRegionServer hrs = rst.getRegionServer();
2675       if (hrs.isStopping() || hrs.isStopped()) {
2676         LOG.info("A region server is stopped or stopping:"+hrs);
2677       } else {
2678         nonStoppedServers++;
2679       }
2680     }
2681     for (int i=nonStoppedServers; i<num; ++i) {
2682       LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
2683       startedServer = true;
2684     }
2685     return startedServer;
2686   }
2687 
2688 
2689   /**
2690    * This method clones the passed <code>c</code> configuration setting a new
2691    * user into the clone.  Use it when getting new instances of FileSystem.  Only
2692    * works for DistributedFileSystem.
2693    * @param c Initial configuration
2694    * @param differentiatingSuffix Suffix to differentiate this user from others.
2695    * @return A new configuration instance with a different user set into it.
2696    * @throws IOException
2697    */
2698   public static User getDifferentUser(final Configuration c,
2699     final String differentiatingSuffix)
2700   throws IOException {
2701     FileSystem currentfs = FileSystem.get(c);
2702     if (!(currentfs instanceof DistributedFileSystem)) {
2703       return User.getCurrent();
2704     }
2705     // Else distributed filesystem.  Make a new instance per daemon.  Below
2706     // code is taken from the AppendTestUtil over in hdfs.
2707     String username = User.getCurrent().getName() +
2708       differentiatingSuffix;
2709     User user = User.createUserForTesting(c, username,
2710         new String[]{"supergroup"});
2711     return user;
2712   }
2713 
2714   /**
2715    * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append it's hard-coded to 5 and
2716    * makes tests linger.  Here is the exception you'll see:
2717    * <pre>
2718    * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/hlog.1276627923013 block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683 failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
2719    * </pre>
2720    * @param stream A DFSClient.DFSOutputStream.
2721    * @param max
2722    * @throws NoSuchFieldException
2723    * @throws SecurityException
2724    * @throws IllegalAccessException
2725    * @throws IllegalArgumentException
2726    */
2727   public static void setMaxRecoveryErrorCount(final OutputStream stream,
2728       final int max) {
2729     try {
2730       Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
2731       for (Class<?> clazz: clazzes) {
2732         String className = clazz.getSimpleName();
2733         if (className.equals("DFSOutputStream")) {
2734           if (clazz.isInstance(stream)) {
2735             Field maxRecoveryErrorCountField =
2736               stream.getClass().getDeclaredField("maxRecoveryErrorCount");
2737             maxRecoveryErrorCountField.setAccessible(true);
2738             maxRecoveryErrorCountField.setInt(stream, max);
2739             break;
2740           }
2741         }
2742       }
2743     } catch (Exception e) {
2744       LOG.info("Could not set max recovery field", e);
2745     }
2746   }
2747 
2748   /**
2749    * Wait until all regions for a table in hbase:meta have a non-empty
2750    * info:server, up to 60 seconds. This means all regions have been deployed,
2751    * the master has been informed, and hbase:meta has been updated with each
2752    * region's deployed server.
2753    * @param tableName the table name
2754    * @throws IOException
2755    */
2756   public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
2757     waitUntilAllRegionsAssigned(tableName, 60000);
2758   }
2759 
2760   /**
2761    * Wait until all regions for a table in hbase:meta have a non-empty
2762    * info:server, or until timeout.  This means all regions have been deployed,
2763    * the master has been informed, and hbase:meta has been updated with each
2764    * region's deployed server.
2765    * @param tableName the table name
2766    * @param timeout timeout, in milliseconds
2767    * @throws IOException
2768    */
2769   public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
2770       throws IOException {
2771     final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
2772     try {
2773       waitFor(timeout, 200, true, new Predicate<IOException>() {
2774         @Override
2775         public boolean evaluate() throws IOException {
2776           boolean allRegionsAssigned = true;
2777           Scan scan = new Scan();
2778           scan.addFamily(HConstants.CATALOG_FAMILY);
2779           ResultScanner s = meta.getScanner(scan);
2780           try {
2781             Result r;
2782             while ((r = s.next()) != null) {
2783               byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
2784               HRegionInfo info = HRegionInfo.parseFromOrNull(b);
2785               if (info != null && info.getTable().equals(tableName)) {
2786                 b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
2787                 allRegionsAssigned &= (b != null);
2788               }
2789             }
2790           } finally {
2791             s.close();
2792           }
2793           return allRegionsAssigned;
2794         }
2795       });
2796     } finally {
2797       meta.close();
2798     }
2799   }
2800 
2801   /**
2802    * Do a small get/scan against one store. This is required because the
2803    * store has no methods for querying itself directly; it relies on StoreScanner.
2804    */
2805   public static List<Cell> getFromStoreFile(HStore store,
2806                                                 Get get) throws IOException {
2807     Scan scan = new Scan(get);
2808     InternalScanner scanner = (InternalScanner) store.getScanner(scan,
2809         scan.getFamilyMap().get(store.getFamily().getName()),
2810         // originally MultiVersionConsistencyControl.resetThreadReadPoint() was called to set
2811         // readpoint 0.
2812         0);
2813 
2814     List<Cell> result = new ArrayList<Cell>();
2815     scanner.next(result);
2816     if (!result.isEmpty()) {
2817       // verify that we are on the row we want:
2818       Cell kv = result.get(0);
2819       if (!CellUtil.matchingRow(kv, get.getRow())) {
2820         result.clear();
2821       }
2822     }
2823     scanner.close();
2824     return result;
2825   }
2826 
2827   /**
2828    * Create region split start keys between startKey and endKey
2829    *
2830    * @param startKey the lower bound of the key range
2831    * @param endKey the upper bound of the key range
2832    * @param numRegions the number of regions to be created; it has to be greater than 3
2833    * @return an array of numRegions start keys, the first of which is the empty start key
2834    */
2835   public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
2836     assertTrue(numRegions > 3);
2837     byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
2838     byte [][] result = new byte[tmpSplitKeys.length + 1][];
2839     for (int i = 0; i < tmpSplitKeys.length; i++) {
2840       result[i + 1] = tmpSplitKeys[i];
2841     }
2842     result[0] = HConstants.EMPTY_BYTE_ARRAY;
2843     return result;
2844   }
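
       /*
        * A minimal illustration: ten region start keys spanning "aaa" to
        * "zzz"; the first entry is the empty start key of the first region.
        *
        *   byte [][] startKeys = util.getRegionSplitStartKeys(
        *       Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 10);
        *   // startKeys.length == 10; startKeys[0] == HConstants.EMPTY_BYTE_ARRAY
        */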
2845 
2846   /**
2847    * Do a small get/scan against one store. This is required because the
2848    * store has no methods for querying itself directly; it relies on StoreScanner.
2849    */
2850   public static List<Cell> getFromStoreFile(HStore store,
2851                                                 byte [] row,
2852                                                 NavigableSet<byte[]> columns
2853                                                 ) throws IOException {
2854     Get get = new Get(row);
2855     Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
2856     s.put(store.getFamily().getName(), columns);
2857 
2858     return getFromStoreFile(store, get);
2859   }
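
       /*
        * A minimal usage sketch, assuming a test holding an HStore reference
        * (for example from a freshly created HRegion): read one row's column
        * back out of the store without going through a table.
        *
        *   NavigableSet<byte[]> cols = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
        *   cols.add(Bytes.toBytes("qual"));
        *   List<Cell> cells = getFromStoreFile(store, Bytes.toBytes("row1"), cols);
        */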
2860 
2861   /**
2862    * Gets a ZooKeeperWatcher.
2863    * @param TEST_UTIL the testing utility whose configuration the watcher connects with
2864    */
2865   public static ZooKeeperWatcher getZooKeeperWatcher(
2866       HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
2867       IOException {
2868     ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
2869         "unittest", new Abortable() {
2870           boolean aborted = false;
2871 
2872           @Override
2873           public void abort(String why, Throwable e) {
2874             aborted = true;
2875             throw new RuntimeException("Fatal ZK error, why=" + why, e);
2876           }
2877 
2878           @Override
2879           public boolean isAborted() {
2880             return aborted;
2881           }
2882         });
2883     return zkw;
2884   }
2885 
2886   /**
2887    * Creates a znode with OPENED state.
2888    * @param TEST_UTIL the testing utility to get a ZooKeeper watcher from
2889    * @param region the region whose assignment znode is driven to OPENED
2890    * @param serverName the server the region is marked as opened on
2891    * @return the ZooKeeperWatcher that was used for the transitions
2892    * @throws IOException
2893    * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
2894    * @throws KeeperException
2895    * @throws NodeExistsException
2896    */
2897   public static ZooKeeperWatcher createAndForceNodeToOpenedState(
2898       HBaseTestingUtility TEST_UTIL, HRegion region,
2899       ServerName serverName) throws ZooKeeperConnectionException,
2900       IOException, KeeperException, NodeExistsException {
2901     ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
2902     ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
2903     int version = ZKAssign.transitionNodeOpening(zkw, region
2904         .getRegionInfo(), serverName);
2905     ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
2906         version);
2907     return zkw;
2908   }
2909 
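       /**
        * Asserts that the expected and actual lists of Cells match element by
        * element under {@link KeyValue#COMPARATOR}; on mismatch, the error
        * reports the first differing position and both list lengths.
        * @param additionalMsg extra context appended to the failure message; may be null
        */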
2910   public static void assertKVListsEqual(String additionalMsg,
2911       final List<? extends Cell> expected,
2912       final List<? extends Cell> actual) {
2913     final int eLen = expected.size();
2914     final int aLen = actual.size();
2915     final int minLen = Math.min(eLen, aLen);
2916 
2917     int i;
2918     for (i = 0; i < minLen
2919         && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
2920         ++i) {}
2921 
2922     if (additionalMsg == null) {
2923       additionalMsg = "";
2924     }
2925     if (!additionalMsg.isEmpty()) {
2926       additionalMsg = ". " + additionalMsg;
2927     }
2928 
2929     if (eLen != aLen || i != minLen) {
2930       throw new AssertionError(
2931           "Expected and actual KV arrays differ at position " + i + ": " +
2932           safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
2933           safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
2934     }
2935   }
2936 
2937   private static <T> String safeGetAsStr(List<T> lst, int i) {
2938     if (0 <= i && i < lst.size()) {
2939       return lst.get(i).toString();
2940     } else {
2941       return "<out_of_range>";
2942     }
2943   }
2944 
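       /**
        * @return a cluster key of the form
        * {@code <zk quorum>:<zk client port>:<znode parent>}, built from this
        * instance's configuration
        */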
2945   public String getClusterKey() {
2946     return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
2947         + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
2948         + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
2949             HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
2950   }
2951 
2952   /** Creates a random table with the given parameters */
2953   public HTable createRandomTable(String tableName,
2954       final Collection<String> families,
2955       final int maxVersions,
2956       final int numColsPerRow,
2957       final int numFlushes,
2958       final int numRegions,
2959       final int numRowsPerFlush)
2960       throws IOException, InterruptedException {
2961 
2962     LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
2963         " regions, " + numFlushes + " storefiles per region, " +
2964         numRowsPerFlush + " rows per flush, maxVersions=" +  maxVersions +
2965         "\n");
2966 
2967     final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
2968     final int numCF = families.size();
2969     final byte[][] cfBytes = new byte[numCF][];
2970     {
2971       int cfIndex = 0;
2972       for (String cf : families) {
2973         cfBytes[cfIndex++] = Bytes.toBytes(cf);
2974       }
2975     }
2976 
2977     final int actualStartKey = 0;
2978     final int actualEndKey = Integer.MAX_VALUE;
2979     final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
2980     final int splitStartKey = actualStartKey + keysPerRegion;
2981     final int splitEndKey = actualEndKey - keysPerRegion;
2982     final String keyFormat = "%08x";
2983     final HTable table = createTable(tableName, cfBytes,
2984         maxVersions,
2985         Bytes.toBytes(String.format(keyFormat, splitStartKey)),
2986         Bytes.toBytes(String.format(keyFormat, splitEndKey)),
2987         numRegions);
2988 
2989     if (hbaseCluster != null) {
2990       getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
2991     }
2992 
2993     for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
2994       for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
2995         final byte[] row = Bytes.toBytes(String.format(keyFormat,
2996             actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
2997 
2998         Put put = new Put(row);
2999         Delete del = new Delete(row);
3000         for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
3001           final byte[] cf = cfBytes[rand.nextInt(numCF)];
3002           final long ts = rand.nextInt();
3003           final byte[] qual = Bytes.toBytes("col" + iCol);
3004           if (rand.nextBoolean()) {
3005             final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
3006                 "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
3007                 ts + "_random_" + rand.nextLong());
3008             put.add(cf, qual, ts, value);
3009           } else if (rand.nextDouble() < 0.8) {
3010             del.deleteColumn(cf, qual, ts);
3011           } else {
3012             del.deleteColumns(cf, qual, ts);
3013           }
3014         }
3015 
3016         if (!put.isEmpty()) {
3017           table.put(put);
3018         }
3019 
3020         if (!del.isEmpty()) {
3021           table.delete(del);
3022         }
3023       }
3024       LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
3025       table.flushCommits();
3026       if (hbaseCluster != null) {
3027         getMiniHBaseCluster().flushcache(table.getName());
3028       }
3029     }
3030 
3031     return table;
3032   }
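
       /*
        * A minimal usage sketch, assuming a running mini cluster: a two
        * family table with up to three versions per cell, twenty columns per
        * row, and two flushes of one hundred rows each, pre-split into five
        * regions.
        *
        *   HTable t = util.createRandomTable("randomTable",
        *       Arrays.asList("cf1", "cf2"), 3, 20, 2, 5, 100);
        */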
3033 
3034   private static final int MIN_RANDOM_PORT = 0xc000;
3035   private static final int MAX_RANDOM_PORT = 0xfffe;
3036   private static Random random = new Random();
3037 
3038   /**
3039    * Returns a random port. These ports cannot be registered with IANA and are
3040    * intended for dynamic allocation (see http://bit.ly/dynports).
3041    */
3042   public static int randomPort() {
3043     return MIN_RANDOM_PORT
3044         + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
3045   }
3046 
3047   /**
3048    * Returns a random free port and marks that port as taken. Not thread-safe. Expected to be
3049    * called from single-threaded test setup code.
3050    */
3051   public static int randomFreePort() {
3052     int port = 0;
3053     do {
3054       port = randomPort();
3055       if (takenRandomPorts.contains(port)) {
3056         continue;
3057       }
3058       takenRandomPorts.add(port);
3059 
3060       try {
3061         ServerSocket sock = new ServerSocket(port);
3062         sock.close();
3063       } catch (IOException ex) {
3064         port = 0;
3065       }
3066     } while (port == 0);
3067     return port;
3068   }
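
       /*
        * A minimal usage sketch; HConstants.MASTER_PORT is used purely as an
        * illustration of wiring a free port into a test configuration.
        *
        *   conf.setInt(HConstants.MASTER_PORT, HBaseTestingUtility.randomFreePort());
        */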
3069 
3071   public static String randomMultiCastAddress() {
3072     return "226.1.1." + random.nextInt(254);
3073   }
3074 
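       /**
        * Waits up to ten seconds, probing at the socket retry interval, for a
        * server to accept connections on the given host and port; rethrows
        * the last connection error if the server never comes up.
        */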
3077   public static void waitForHostPort(String host, int port)
3078       throws IOException {
3079     final int maxTimeMs = 10000;
3080     final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3081     IOException savedException = null;
3082     LOG.info("Waiting for server at " + host + ":" + port);
3083     for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3084       try {
3085         Socket sock = new Socket(InetAddress.getByName(host), port);
3086         sock.close();
3087         savedException = null;
3088         LOG.info("Server at " + host + ":" + port + " is available");
3089         break;
3090       } catch (UnknownHostException e) {
3091         throw new IOException("Failed to look up " + host, e);
3092       } catch (IOException e) {
3093         savedException = e;
3094       }
3095       Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3096     }
3097 
3098     if (savedException != null) {
3099       throw savedException;
3100     }
3101   }
3102 
3103   /**
3104    * Creates a pre-split table for load testing. If the table already exists,
3105    * logs a warning and continues.
3106    * @return the number of regions the table was split into
3107    */
3108   public static int createPreSplitLoadTestTable(Configuration conf,
3109       TableName tableName, byte[] columnFamily, Algorithm compression,
3110       DataBlockEncoding dataBlockEncoding) throws IOException {
3111     HTableDescriptor desc = new HTableDescriptor(tableName);
3112     HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
3113     hcd.setDataBlockEncoding(dataBlockEncoding);
3114     hcd.setCompressionType(compression);
3115     return createPreSplitLoadTestTable(conf, desc, hcd);
3116   }
3117 
3118   /**
3119    * Creates a pre-split table for load testing. If the table already exists,
3120    * logs a warning and continues.
3121    * @return the number of regions the table was split into
3122    */
3123   public static int createPreSplitLoadTestTable(Configuration conf,
3124       HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
3125     if (!desc.hasFamily(hcd.getName())) {
3126       desc.addFamily(hcd);
3127     }
3128 
3129     int totalNumberOfRegions = 0;
3130     HBaseAdmin admin = new HBaseAdmin(conf);
3131     try {
3132       // Create a table with pre-split regions. The number of splits is:
3133       //   (number of region servers) * (regions per region server).
3135       int numberOfServers = admin.getClusterStatus().getServers().size();
3136       if (numberOfServers == 0) {
3137         throw new IllegalStateException("No live regionservers");
3138       }
3139 
3140       totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
3141       LOG.info("Number of live regionservers: " + numberOfServers + ", " +
3142           "pre-splitting table into " + totalNumberOfRegions + " regions " +
3143           "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");
3144 
3145       byte[][] splits = new RegionSplitter.HexStringSplit().split(
3146           totalNumberOfRegions);
3147 
3148       admin.createTable(desc, splits);
3149     } catch (MasterNotRunningException e) {
3150       LOG.error("Master not running", e);
3151       throw new IOException(e);
3152     } catch (TableExistsException e) {
3153       LOG.warn("Table " + desc.getTableName() +
3154           " already exists, continuing");
3155     } finally {
3156       admin.close();
3157     }
3158     return totalNumberOfRegions;
3159   }
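
       /*
        * A minimal usage sketch, assuming a reachable cluster and a
        * hypothetical table name: pre-split a load test table with no
        * compression or encoding.
        *
        *   int regions = HBaseTestingUtility.createPreSplitLoadTestTable(conf,
        *       TableName.valueOf("loadtest"), Bytes.toBytes("test_cf"),
        *       Compression.Algorithm.NONE, DataBlockEncoding.NONE);
        */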
3160 
3161   public static int getMetaRSPort(Configuration conf) throws IOException {
3162     HTable table = new HTable(conf, TableName.META_TABLE_NAME);
3163     HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
3164     table.close();
3165     return hloc.getPort();
3166   }
3167 
3168   /**
3169    * Due to an asynchronous race, a region may not yet appear in a region
3170    * server's online region list even after its assignment znode has been
3171    * deleted and the new assignment recorded in the master. This method
3172    * retries until the region shows up on the given server or it times out.
3173    */
3174   public void assertRegionOnServer(
3175       final HRegionInfo hri, final ServerName server,
3176       final long timeout) throws IOException, InterruptedException {
3177     long timeoutTime = System.currentTimeMillis() + timeout;
3178     while (true) {
3179       List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3180       if (regions.contains(hri)) return;
3181       long now = System.currentTimeMillis();
3182       if (now > timeoutTime) break;
3183       Thread.sleep(10);
3184     }
3185     fail("Could not find region " + hri.getRegionNameAsString()
3186       + " on server " + server);
3187   }
3188 
3189   /**
3190    * Check to make sure the region is open on the specified
3191    * region server, but not on any other one.
3192    */
3193   public void assertRegionOnlyOnServer(
3194       final HRegionInfo hri, final ServerName server,
3195       final long timeout) throws IOException, InterruptedException {
3196     long timeoutTime = System.currentTimeMillis() + timeout;
3197     while (true) {
3198       List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3199       if (regions.contains(hri)) {
3200         List<JVMClusterUtil.RegionServerThread> rsThreads =
3201           getHBaseCluster().getLiveRegionServerThreads();
3202         for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
3203           HRegionServer rs = rsThread.getRegionServer();
3204           if (server.equals(rs.getServerName())) {
3205             continue;
3206           }
3207           Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
3208           for (HRegion r: hrs) {
3209             assertTrue("Region should not be double assigned",
3210               r.getRegionId() != hri.getRegionId());
3211           }
3212         }
3213         return; // good, we are happy
3214       }
3215       long now = System.currentTimeMillis();
3216       if (now > timeoutTime) break;
3217       Thread.sleep(10);
3218     }
3219     fail("Could not find region " + hri.getRegionNameAsString()
3220       + " on server " + server);
3221   }
3222 
3223   public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
3224       throws IOException {
3225     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
3226     htd.addFamily(hcd);
3227     HRegionInfo info =
3228         new HRegionInfo(TableName.valueOf(tableName), null, null, false);
3229     HRegion region =
3230         HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
3231     return region;
3232   }
3233 
3234   public void setFileSystemURI(String fsURI) {
3235     FS_URI = fsURI;
3236   }
3237 
3238   /**
3239    * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
3240    */
3241   public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
3242       throws E {
3243     return Waiter.waitFor(this.conf, timeout, predicate);
3244   }
3245 
3246   /**
3247    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
3248    */
3249   public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
3250       throws E {
3251     return Waiter.waitFor(this.conf, timeout, interval, predicate);
3252   }
3253 
3254   /**
3255    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
3256    */
3257   public <E extends Exception> long waitFor(long timeout, long interval,
3258       boolean failIfTimeout, Predicate<E> predicate) throws E {
3259     return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
3260   }
3261 
3262   /**
3263    * Returns a {@link Predicate} for checking that there are no regions in transition in the master
3264    */
3265   public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
3266     return new Waiter.Predicate<Exception>() {
3267       @Override
3268       public boolean evaluate() throws Exception {
3269         final RegionStates regionStates = getMiniHBaseCluster().getMaster()
3270             .getAssignmentManager().getRegionStates();
3271         return !regionStates.isRegionsInTransition();
3272       }
3273     };
3274   }
3275 
3276   /**
3277    * Returns a {@link Predicate} for checking that the given table is enabled
3278    */
3279   public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
3280     return new Waiter.Predicate<Exception>() {
3281       @Override
3282       public boolean evaluate() throws Exception {
3283         return getHBaseAdmin().isTableEnabled(tableName);
3284       }
3285     };
3286   }
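
       /*
        * A minimal usage sketch combining the waitFor wrappers above with a
        * predicate, assuming a hypothetical table that was just enabled:
        * poll the master for up to thirty seconds.
        *
        *   util.waitFor(30000, util.predicateTableEnabled(TableName.valueOf("t1")));
        */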
3287 
3288   /**
3289    * Create a set of column descriptors covering every combination of the
3290    * available compression, data block encoding, and bloom filter types.
3291    * @return the list of column descriptors
3292    */
3293   public static List<HColumnDescriptor> generateColumnDescriptors() {
3294     return generateColumnDescriptors("");
3295   }
3296 
3297   /**
3298    * Create a set of column descriptors covering every combination of the
3299    * available compression, data block encoding, and bloom filter types.
3300    * @param prefix family names prefix
3301    * @return the list of column descriptors
3302    */
3303   public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
3304     List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
3305     long familyId = 0;
3306     for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
3307       for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
3308         for (BloomType bloomType: BloomType.values()) {
3309           String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
3310           HColumnDescriptor htd = new HColumnDescriptor(name);
3311           htd.setCompressionType(compressionType);
3312           htd.setDataBlockEncoding(encodingType);
3313           htd.setBloomFilterType(bloomType);
3314           htds.add(htd);
3315           familyId++;
3316         }
3317       }
3318     }
3319     return htds;
3320   }
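
       /*
        * A minimal usage sketch: enumerate every supported codec combination,
        * e.g. to build one test table that exercises all of them at once.
        *
        *   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("codecs"));
        *   for (HColumnDescriptor hcd : generateColumnDescriptors("t")) {
        *     htd.addFamily(hcd);
        *   }
        */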
3321 
3322   /**
3323    * Get supported compression algorithms.
3324    * @return supported compression algorithms.
3325    */
3326   public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
3327     String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
3328     List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
3329     for (String algoName : allAlgos) {
3330       try {
3331         Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
3332         algo.getCompressor();
3333         supportedAlgos.add(algo);
3334       } catch (Throwable t) {
3335         // this algo is not available
3336       }
3337     }
3338     return supportedAlgos.toArray(new Compression.Algorithm[0]);
3339   }
3340 }