1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase;
19  
20  import static org.junit.Assert.assertTrue;
21  import static org.junit.Assert.fail;
22  
23  import java.io.File;
24  import java.io.IOException;
25  import java.io.OutputStream;
26  import java.lang.reflect.Field;
27  import java.lang.reflect.Method;
28  import java.lang.reflect.Modifier;
29  import java.net.InetAddress;
30  import java.net.InetSocketAddress;
31  import java.net.ServerSocket;
32  import java.net.Socket;
33  import java.net.UnknownHostException;
34  import java.security.MessageDigest;
35  import java.util.ArrayList;
36  import java.util.Arrays;
37  import java.util.Collection;
38  import java.util.Collections;
39  import java.util.HashSet;
40  import java.util.List;
41  import java.util.Map;
42  import java.util.NavigableSet;
43  import java.util.Random;
44  import java.util.Set;
45  import java.util.UUID;
46  import java.util.concurrent.TimeUnit;
47  
48  import org.apache.commons.logging.Log;
49  import org.apache.commons.logging.LogFactory;
50  import org.apache.commons.logging.impl.Jdk14Logger;
51  import org.apache.commons.logging.impl.Log4JLogger;
52  import org.apache.hadoop.hbase.classification.InterfaceAudience;
53  import org.apache.hadoop.hbase.classification.InterfaceStability;
54  import org.apache.hadoop.conf.Configuration;
55  import org.apache.hadoop.fs.FileSystem;
56  import org.apache.hadoop.fs.Path;
57  import org.apache.hadoop.hbase.Waiter.Predicate;
58  import org.apache.hadoop.hbase.catalog.MetaEditor;
59  import org.apache.hadoop.hbase.client.Delete;
60  import org.apache.hadoop.hbase.client.Durability;
61  import org.apache.hadoop.hbase.client.Get;
62  import org.apache.hadoop.hbase.client.HBaseAdmin;
63  import org.apache.hadoop.hbase.client.HConnection;
64  import org.apache.hadoop.hbase.client.HConnectionManager;
65  import org.apache.hadoop.hbase.client.HTable;
66  import org.apache.hadoop.hbase.client.Put;
67  import org.apache.hadoop.hbase.client.Result;
68  import org.apache.hadoop.hbase.client.ResultScanner;
69  import org.apache.hadoop.hbase.client.Scan;
70  import org.apache.hadoop.hbase.fs.HFileSystem;
71  import org.apache.hadoop.hbase.io.compress.Compression;
72  import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
73  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
74  import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
75  import org.apache.hadoop.hbase.io.hfile.HFile;
76  import org.apache.hadoop.hbase.ipc.RpcServerInterface;
77  import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
78  import org.apache.hadoop.hbase.master.HMaster;
79  import org.apache.hadoop.hbase.master.RegionStates;
80  import org.apache.hadoop.hbase.master.ServerManager;
81  import org.apache.hadoop.hbase.regionserver.BloomType;
82  import org.apache.hadoop.hbase.regionserver.HRegion;
83  import org.apache.hadoop.hbase.regionserver.HRegionServer;
84  import org.apache.hadoop.hbase.regionserver.HStore;
85  import org.apache.hadoop.hbase.regionserver.InternalScanner;
86  import org.apache.hadoop.hbase.regionserver.RegionServerServices;
87  import org.apache.hadoop.hbase.regionserver.wal.HLog;
88  import org.apache.hadoop.hbase.security.User;
89  import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
90  import org.apache.hadoop.hbase.tool.Canary;
91  import org.apache.hadoop.hbase.util.Bytes;
92  import org.apache.hadoop.hbase.util.FSTableDescriptors;
93  import org.apache.hadoop.hbase.util.FSUtils;
94  import org.apache.hadoop.hbase.util.JVMClusterUtil;
95  import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
96  import org.apache.hadoop.hbase.util.RegionSplitter;
97  import org.apache.hadoop.hbase.util.RetryCounter;
98  import org.apache.hadoop.hbase.util.Threads;
99  import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
100 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
101 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
102 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
103 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
104 import org.apache.hadoop.hdfs.DFSClient;
105 import org.apache.hadoop.hdfs.DistributedFileSystem;
106 import org.apache.hadoop.hdfs.MiniDFSCluster;
107 import org.apache.hadoop.mapred.JobConf;
108 import org.apache.hadoop.mapred.MiniMRCluster;
109 import org.apache.hadoop.mapred.TaskLog;
110 import org.apache.hadoop.security.UserGroupInformation;
111 import org.apache.zookeeper.KeeperException;
112 import org.apache.zookeeper.KeeperException.NodeExistsException;
113 import org.apache.zookeeper.WatchedEvent;
114 import org.apache.zookeeper.ZooKeeper;
115 import org.apache.zookeeper.ZooKeeper.States;
116 
117 /**
118  * Facility for testing HBase. Replacement for
119  * old HBaseTestCase and HBaseClusterTestCase functionality.
120  * Create an instance and keep it around while testing HBase.  This class is
121  * meant to be your one-stop shop for anything you might need testing.  Manages
122  * one cluster at a time only. Managed cluster can be an in-process
123  * {@link MiniHBaseCluster}, or a deployed cluster of type {@link DistributedHBaseCluster}.
124  * Not all methods work with the real cluster.
125  * Depends on log4j being on classpath and
126  * hbase-site.xml for logging and test-run configuration.  It does not set
127  * logging levels nor make changes to configuration parameters.
128  * <p>To preserve test data directories, set the system property "hbase.testing.preserve.testdir"
129  * to true.
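     * <p>A minimal usage sketch (a hypothetical JUnit test; the table, family and row
     * names below are illustrative only):
     * <pre>{@code
     * HBaseTestingUtility util = new HBaseTestingUtility();
     * util.startMiniCluster();
     * HTable table = util.createTable(Bytes.toBytes("testtable"), Bytes.toBytes("f"));
     * Put put = new Put(Bytes.toBytes("row1"));
     * put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
     * table.put(put);
     * table.close();
     * util.shutdownMiniCluster();
     * }</pre>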
130  */
131 @InterfaceAudience.Public
132 @InterfaceStability.Evolving
133 public class HBaseTestingUtility extends HBaseCommonTestingUtility {
134   private MiniZooKeeperCluster zkCluster = null;
135 
136   public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
137   /**
138    * The default number of regions per regionserver when creating a pre-split
139    * table.
140    */
141   public static final int DEFAULT_REGIONS_PER_SERVER = 5;
142 
143   /**
144    * Set if we were passed a zkCluster.  If so, we won't shutdown zk as
145    * part of general shutdown.
146    */
147   private boolean passedZkCluster = false;
148   private MiniDFSCluster dfsCluster = null;
149 
150   private HBaseCluster hbaseCluster = null;
151   private MiniMRCluster mrCluster = null;
152 
153   /** If there is a mini cluster running for this testing utility instance. */
154   private boolean miniClusterRunning;
155 
156   private String hadoopLogDir;
157 
158   /** Directory (a subdirectory of dataTestDir) used by the dfs cluster if any */
159   private File clusterTestDir = null;
160 
161   /** Directory on test filesystem where we put the data for this instance of
162    * HBaseTestingUtility. */
163   private Path dataTestDirOnTestFS = null;
164 
165   /**
166    * System property key to get test directory value.
167    * Name is as it is because mini dfs has hard-codings to put test data here.
168    * It should NOT be used directly in HBase, as it's a property used in
169    *  mini dfs.
170    *  @deprecated can be used only with mini dfs
171    */
172   @Deprecated
173   private static final String TEST_DIRECTORY_KEY = "test.build.data";
174 
175   /** Filesystem URI used for map-reduce mini-cluster setup */
176   private static String FS_URI;
177 
178   /** A set of ports that have been claimed using {@link #randomFreePort()}. */
179   private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();
180 
181   /** Compression algorithms to use in parameterized JUnit 4 tests */
182   public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
183     Arrays.asList(new Object[][] {
184       { Compression.Algorithm.NONE },
185       { Compression.Algorithm.GZ }
186     });
187 
188   /** This is for unit tests parameterized with a single boolean. */
189   public static final List<Object[]> BOOLEAN_PARAMETERIZED =
190       Arrays.asList(new Object[][] {
191           { false },
192           { true }
193       });
194 
195   /** This is for unit tests parameterized with two booleans (memstoreTS and tags). */
196   public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
197   /** Compression algorithms to use in testing */
198   public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
199       Compression.Algorithm.NONE, Compression.Algorithm.GZ
200     };
201 
202   /**
203    * Create all combinations of Bloom filters and compression algorithms for
204    * testing.
205    */
206   private static List<Object[]> bloomAndCompressionCombinations() {
207     List<Object[]> configurations = new ArrayList<Object[]>();
208     for (Compression.Algorithm comprAlgo :
209          HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
210       for (BloomType bloomType : BloomType.values()) {
211         configurations.add(new Object[] { comprAlgo, bloomType });
212       }
213     }
214     return Collections.unmodifiableList(configurations);
215   }
216 
217   /**
218    * Create all combinations of memstoreTS and tags
219    */
220   private static List<Object[]> memStoreTSAndTagsCombination() {
221     List<Object[]> configurations = new ArrayList<Object[]>();
222     configurations.add(new Object[] { false, false });
223     configurations.add(new Object[] { false, true });
224     configurations.add(new Object[] { true, false });
225     configurations.add(new Object[] { true, true });
226     return Collections.unmodifiableList(configurations);
227   }
228 
229   public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
230       bloomAndCompressionCombinations();
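      // A sketch of how these combinations are typically consumed (assumes JUnit 4's
      // Parameterized runner; the test class below is hypothetical):
      //
      //   @RunWith(Parameterized.class)
      //   public class TestWithBloomAndCompression {
      //     @Parameterized.Parameters
      //     public static Collection<Object[]> data() {
      //       return HBaseTestingUtility.BLOOM_AND_COMPRESSION_COMBINATIONS;
      //     }
      //     private final Compression.Algorithm comprAlgo;
      //     private final BloomType bloomType;
      //     public TestWithBloomAndCompression(Compression.Algorithm a, BloomType b) {
      //       this.comprAlgo = a;
      //       this.bloomType = b;
      //     }
      //   }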
231 
232   public HBaseTestingUtility() {
233     this(HBaseConfiguration.create());
234   }
235 
236   public HBaseTestingUtility(Configuration conf) {
237     super(conf);
238 
239     // a hbase checksum verification failure will cause unit tests to fail
240     ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
241   }
242 
243   /**
244    * Create an HBaseTestingUtility where all tmp files are written to the local test data dir.
245    * It is needed to properly base FSUtils.getRootDir so that temp files are dropped in the proper
246    * test dir.  Use this when you aren't using a mini HDFS cluster.
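       * <p>A minimal sketch of typical use:
       * <pre>{@code
       * HBaseTestingUtility htu = HBaseTestingUtility.createLocalHTU();
       * Path testDir = htu.getDataTestDir();
       * }</pre>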
247    * @return HBaseTestingUtility that uses the local fs for temp files.
248    */
249   public static HBaseTestingUtility createLocalHTU() {
250     Configuration c = HBaseConfiguration.create();
251     return createLocalHTU(c);
252   }
253 
254   /**
255    * Create an HBaseTestingUtility where all tmp files are written to the local test data dir.
256    * It is needed to properly base FSUtils.getRootDir so that temp files are dropped in the proper
257    * test dir.  Use this when you aren't using a mini HDFS cluster.
258    * @param c Configuration (will be modified)
259    * @return HBaseTestingUtility that uses the local fs for temp files.
260    */
261   public static HBaseTestingUtility createLocalHTU(Configuration c) {
262     HBaseTestingUtility htu = new HBaseTestingUtility(c);
263     String dataTestDir = htu.getDataTestDir().toString();
264     htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
265     LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
266     return htu;
267   }
268 
269   /**
270    * Controls how many attempts we will make in the face of failures in HDFS.
271    * @deprecated to be removed with Hadoop 1.x support
272    */
273   @Deprecated
274   public void setHDFSClientRetry(final int retries) {
275     this.conf.setInt("hdfs.client.retries.number", retries);
276     if (0 == retries) {
277       makeDFSClientNonRetrying();
278     }
279   }
280 
281   /**
282    * Returns this class's instance of {@link Configuration}.  Be careful how
283    * you use the returned Configuration since {@link HConnection} instances
284    * can be shared.  The Map of HConnections is keyed by the Configuration.  If,
285    * say, a Connection was being used against a cluster that had been shut down
286    * (see {@link #shutdownMiniCluster()}), then the Connection will no longer
287    * be wholesome.  Rather than using the returned Configuration directly, it is
288    * usually best to make a copy and use that.  Do
289    * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
290    * @return Instance of Configuration.
291    */
292   @Override
293   public Configuration getConfiguration() {
294     return super.getConfiguration();
295   }
296 
297   public void setHBaseCluster(HBaseCluster hbaseCluster) {
298     this.hbaseCluster = hbaseCluster;
299   }
300 
301   /**
302    * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
303    * Give it a random name so we can have many concurrent tests running if
304    * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}
305    * System property, as it's what minidfscluster bases
306    * its data dir on.  Modifying a System property is not the way to do concurrent
307    * instances -- another instance could grab the temporary
308    * value unintentionally -- but there is nothing we can do about it at the moment;
309    * single instance only is how the minidfscluster works.
310    *
311    * We also create the underlying directory for
312    *  hadoop.log.dir, mapred.local.dir and hadoop.tmp.dir, and set the values
313    *  in the conf, and as a system property for hadoop.tmp.dir.
314    *
315    * @return The calculated data test build directory, if newly-created.
316    */
317   @Override
318   protected Path setupDataTestDir() {
319     Path testPath = super.setupDataTestDir();
320     if (null == testPath) {
321       return null;
322     }
323 
324     createSubDirAndSystemProperty(
325       "hadoop.log.dir",
326       testPath, "hadoop-log-dir");
327 
328     // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
329     //  we want our own value to ensure uniqueness on the same machine
330     createSubDirAndSystemProperty(
331       "hadoop.tmp.dir",
332       testPath, "hadoop-tmp-dir");
333 
334     // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
335     createSubDir(
336       "mapred.local.dir",
337       testPath, "mapred-local-dir");
338 
339     return testPath;
340   }
341 
342   private void createSubDirAndSystemProperty(
343     String propertyName, Path parent, String subDirName) {
344 
345     String sysValue = System.getProperty(propertyName);
346 
347     if (sysValue != null) {
348       // There is already a value set. So we do nothing but hope
349       //  that there will be no conflicts
350       LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
351         sysValue + " so I do NOT create it in " + parent);
352       String confValue = conf.get(propertyName);
353       if (confValue != null && !confValue.endsWith(sysValue)) {
354        LOG.warn(
355          propertyName + " property value differs in configuration and system: "+
356          "Configuration="+confValue+" while System="+sysValue+
357          " Overriding the configuration value with the system value."
358        );
359       }
360       conf.set(propertyName, sysValue);
361     } else {
362       // Ok, it's not set, so we create it as a subdirectory
363       createSubDir(propertyName, parent, subDirName);
364       System.setProperty(propertyName, conf.get(propertyName));
365     }
366   }
367 
368   /**
369    * @return Where to write test data on the test filesystem; Returns working directory
370    * for the test filesystem by default
371    * @see #setupDataTestDirOnTestFS()
372    * @see #getTestFileSystem()
373    */
374   private Path getBaseTestDirOnTestFS() throws IOException {
375     FileSystem fs = getTestFileSystem();
376     return new Path(fs.getWorkingDirectory(), "test-data");
377   }
378 
379   /**
380    * @return META table descriptor
381    */
382   public HTableDescriptor getMetaTableDescriptor() {
383     try {
384       return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
385     } catch (IOException e) {
386       throw new RuntimeException("Unable to create META table descriptor", e);
387     }
388   }
389 
390   /**
391    * @return Where the DFS cluster will write data on the local filesystem.
392    * Creates it if it does not exist already.  A subdir of {@link #getBaseTestDir()}
393    * @see #getTestFileSystem()
394    */
395   Path getClusterTestDir() {
396     if (clusterTestDir == null) {
397       setupClusterTestDir();
398     }
399     return new Path(clusterTestDir.getAbsolutePath());
400   }
401 
402   /**
403    * Creates a directory for the DFS cluster, under the test data directory.
404    */
405   private void setupClusterTestDir() {
406     if (clusterTestDir != null) {
407       return;
408     }
409 
410     // Using randomUUID ensures that multiple clusters can be launched by
411     //  the same test, if it stops and starts them
412     Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
413     clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
414     // Have it cleaned up on exit
415     boolean b = deleteOnExit();
416     if (b) clusterTestDir.deleteOnExit();
417     conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
418     LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
419   }
420 
421   /**
422    * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
423    * to write temporary test data. Call this method after setting up the mini dfs cluster
424    * if the test relies on it.
425    * @return a unique path in the test filesystem
426    */
427   public Path getDataTestDirOnTestFS() throws IOException {
428     if (dataTestDirOnTestFS == null) {
429       setupDataTestDirOnTestFS();
430     }
431 
432     return dataTestDirOnTestFS;
433   }
434 
435   /**
436    * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
437    * to write temporary test data. Call this method after setting up the mini dfs cluster
438    * if the test relies on it.
439    * @return a unique path in the test filesystem
440    * @param subdirName name of the subdir to create under the base test dir
441    */
442   public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
443     return new Path(getDataTestDirOnTestFS(), subdirName);
444   }
445 
446   /**
447    * Sets up a path in the test filesystem to be used by tests.
448    */
449   private void setupDataTestDirOnTestFS() throws IOException {
450     if (dataTestDirOnTestFS != null) {
451       LOG.warn("Data test dir on test fs already set up at "
452           + dataTestDirOnTestFS.toString());
453       return;
454     }
455 
456     //The file system can be either local, mini dfs, or if the configuration
457     //is supplied externally, it can be an external cluster FS. If it is a local
458     //file system, the tests should use getBaseTestDir, otherwise, we can use
459     //the working directory, and create a unique sub dir there
460     FileSystem fs = getTestFileSystem();
461     if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
462       File dataTestDir = new File(getDataTestDir().toString());
463       if (deleteOnExit()) dataTestDir.deleteOnExit();
464       dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
465     } else {
466       Path base = getBaseTestDirOnTestFS();
467       String randomStr = UUID.randomUUID().toString();
468       dataTestDirOnTestFS = new Path(base, randomStr);
469       if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
470     }
471   }
472 
473   /**
474    * Cleans the test data directory on the test filesystem.
475    * @return True if we removed the test dirs
476    * @throws IOException
477    */
478   public boolean cleanupDataTestDirOnTestFS() throws IOException {
479     boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
480     if (ret)
481       dataTestDirOnTestFS = null;
482     return ret;
483   }
484 
485   /**
486    * Cleans a subdirectory under the test data directory on the test filesystem.
487    * @return True if we removed child
488    * @throws IOException
489    */
490   public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
491     Path cpath = getDataTestDirOnTestFS(subdirName);
492     return getTestFileSystem().delete(cpath, true);
493   }
494 
495   /**
496    * Start a minidfscluster.
497    * @param servers How many DNs to start.
498    * @throws Exception
499    * @see #shutdownMiniDFSCluster()
500    * @return The mini dfs cluster created.
501    */
502   public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
503     return startMiniDFSCluster(servers, null);
504   }
505 
506   /**
507    * Start a minidfscluster.
508    * This is useful if you want to run datanodes on distinct hosts for things
509    * like HDFS block location verification.
510    * If you start MiniDFSCluster without host names, all instances of the
511    * datanodes will have the same host name.
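       * <p>For example (the hostnames are illustrative only):
       * <pre>{@code
       * MiniDFSCluster dfs = util.startMiniDFSCluster(
       *     new String[] { "host1.example.com", "host2.example.com" });
       * }</pre>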
512    * @param hosts hostnames the DNs should run on.
513    * @throws Exception
514    * @see #shutdownMiniDFSCluster()
515    * @return The mini dfs cluster created.
516    */
517   public MiniDFSCluster startMiniDFSCluster(final String hosts[])
518   throws Exception {
519     if (hosts != null && hosts.length != 0) {
520       return startMiniDFSCluster(hosts.length, hosts);
521     } else {
522       return startMiniDFSCluster(1, null);
523     }
524   }
525 
526   /**
527    * Start a minidfscluster.
528    * Can only create one.
529    * @param servers How many DNs to start.
530    * @param hosts hostnames the DNs should run on.
531    * @throws Exception
532    * @see #shutdownMiniDFSCluster()
533    * @return The mini dfs cluster created.
534    */
535   public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
536   throws Exception {
537     createDirsAndSetProperties();
538     try {
539       Method m = Class.forName("org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream")
540           .getMethod("setShouldSkipFsyncForTesting", new Class<?> []{ boolean.class });
541       m.invoke(null, new Object[] {true});
542     } catch (ClassNotFoundException e) {
543       LOG.info("EditLogFileOutputStream not found");
544     }
545 
546     // Error level to skip some warnings specific to the minicluster. See HBASE-4709
547     org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
548         setLevel(org.apache.log4j.Level.ERROR);
549     org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
550         setLevel(org.apache.log4j.Level.ERROR);
551 
552 
553     this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
554       true, null, null, hosts, null);
555 
556     // Set this just-started cluster as our filesystem.
557     setFs();
558 
559     // Wait for the cluster to be totally up
560     this.dfsCluster.waitClusterUp();
561 
562     //reset the test directory for test file system
563     dataTestDirOnTestFS = null;
564 
565     return this.dfsCluster;
566   }
567 
568   private void setFs() throws IOException {
569     if (this.dfsCluster == null) {
570       LOG.info("Skipping setting fs because dfsCluster is null");
571       return;
572     }
573     FileSystem fs = this.dfsCluster.getFileSystem();
574     FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
575   }
576 
577   public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
578       throws Exception {
579     createDirsAndSetProperties();
580     this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
581         true, null, racks, hosts, null);
582 
583     // Set this just-started cluster as our filesystem.
584     FileSystem fs = this.dfsCluster.getFileSystem();
585     FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
586 
587     // Wait for the cluster to be totally up
588     this.dfsCluster.waitClusterUp();
589 
590     //reset the test directory for test file system
591     dataTestDirOnTestFS = null;
592 
593     return this.dfsCluster;
594   }
595 
596   public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
597     createDirsAndSetProperties();
598     dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
599         null, null, null);
600     return dfsCluster;
601   }
602 
603   /** This is used before starting HDFS and map-reduce mini-clusters */
604   private void createDirsAndSetProperties() throws IOException {
605     setupClusterTestDir();
606     System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
607     createDirAndSetProperty("cache_data", "test.cache.data");
608     createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
609     hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
610     createDirAndSetProperty("mapred_local", "mapred.local.dir");
611     createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
612     enableShortCircuit();
613 
614     Path root = getDataTestDirOnTestFS("hadoop");
615     conf.set(MapreduceTestingShim.getMROutputDirProp(),
616       new Path(root, "mapred-output-dir").toString());
617     conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
618     conf.set("mapreduce.jobtracker.staging.root.dir",
619       new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
620     conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
621   }
622 
623 
624   /**
625    *  Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
626    *  This allows specifying this parameter on the command line.
627    *   If the system property is not set, the value is read from the conf (default false).
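       *   <p>For example (a sketch; the exact maven invocation will vary):
       *   {@code mvn test -Dhbase.tests.use.shortcircuit.reads=true}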
628    */
629   public boolean isReadShortCircuitOn() {
630     final String propName = "hbase.tests.use.shortcircuit.reads";
631     String readOnProp = System.getProperty(propName);
632     if (readOnProp != null) {
633       return Boolean.parseBoolean(readOnProp);
634     } else {
635       return conf.getBoolean(propName, false);
636     }
637   }
638 
639   /** Enable the short circuit read, unless configured differently.
640    * Set both HBase and HDFS settings, including skipping the hdfs checksum checks.
641    */
642   private void enableShortCircuit() {
643     if (isReadShortCircuitOn()) {
644       String curUser = System.getProperty("user.name");
645       LOG.info("read short circuit is ON for user " + curUser);
646       // read short circuit, for hdfs
647       conf.set("dfs.block.local-path-access.user", curUser);
648       // read short circuit, for hbase
649       conf.setBoolean("dfs.client.read.shortcircuit", true);
650       // Skip checking checksum, for the hdfs client and the datanode
651       conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
652     } else {
653       LOG.info("read short circuit is OFF");
654     }
655   }
656 
657   private String createDirAndSetProperty(final String relPath, String property) {
658     String path = getDataTestDir(relPath).toString();
659     System.setProperty(property, path);
660     conf.set(property, path);
661     new File(path).mkdirs();
662     LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
663     return path;
664   }
665 
666   /**
667    * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
668    * or does nothing.
669    * @throws IOException
670    */
671   public void shutdownMiniDFSCluster() throws IOException {
672     if (this.dfsCluster != null) {
673       // The below throws an exception per dn, AsynchronousCloseException.
674       this.dfsCluster.shutdown();
675       dfsCluster = null;
676       dataTestDirOnTestFS = null;
677       FSUtils.setFsDefault(this.conf, new Path("file:///"));
678     }
679   }
680 
681   /**
682    * Call this if you only want a zk cluster.
683    * @see #startMiniCluster() if you want zk + dfs + hbase mini cluster.
684    * @throws Exception
685    * @see #shutdownMiniZKCluster()
686    * @return zk cluster started.
687    */
688   public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
689     return startMiniZKCluster(1);
690   }
691 
692   /**
693    * Call this if you only want a zk cluster.
694    * @param zooKeeperServerNum number of ZooKeeper servers to start
695    * @see #startMiniCluster() if you want zk + dfs + hbase mini cluster.
696    * @throws Exception
697    * @see #shutdownMiniZKCluster()
698    * @return zk cluster started.
699    */
700   public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
701       throws Exception {
702     setupClusterTestDir();
703     return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
704   }
705 
706   private MiniZooKeeperCluster startMiniZKCluster(final File dir)
707     throws Exception {
708     return startMiniZKCluster(dir, 1);
709   }
710 
711   /**
712    * Start a mini ZK cluster. If the property "test.hbase.zookeeper.property.clientPort" is set,
713    *  the port mentioned is used as the default client port for ZooKeeper.
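       *  For example, pass {@code -Dtest.hbase.zookeeper.property.clientPort=21818}
       *  on the command line (the port value is illustrative only).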
714    */
715   private MiniZooKeeperCluster startMiniZKCluster(final File dir,
716       int zooKeeperServerNum)
717   throws Exception {
718     if (this.zkCluster != null) {
719       throw new IOException("Cluster already running at " + dir);
720     }
721     this.passedZkCluster = false;
722     this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
723     final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
724     if (defPort > 0) {
725       // If there is a port in the config file, we use it.
726       this.zkCluster.setDefaultClientPort(defPort);
727     }
728     int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
729     this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
730       Integer.toString(clientPort));
731     return this.zkCluster;
732   }
733 
734   /**
735    * Shuts down zk cluster created by call to {@link #startMiniZKCluster(File)}
736    * or does nothing.
737    * @throws IOException
738    * @see #startMiniZKCluster()
739    */
740   public void shutdownMiniZKCluster() throws IOException {
741     if (this.zkCluster != null) {
742       this.zkCluster.shutdown();
743       this.zkCluster = null;
744     }
745   }
746 
747   /**
748    * Start up a minicluster of hbase, dfs, and zookeeper.
749    * @throws Exception
750    * @return Mini hbase cluster instance created.
751    * @see #shutdownMiniCluster()
752    */
753   public MiniHBaseCluster startMiniCluster() throws Exception {
754     return startMiniCluster(1, 1);
755   }
756 
757   /**
758    * Start up a minicluster of hbase, optionally dfs, and zookeeper.
759    * Modifies Configuration.  Homes the cluster data directory under a random
760    * subdirectory in a directory under System property test.build.data.
761    * Directory is cleaned up on exit.
762    * @param numSlaves Number of slaves to start up.  We'll start this many
763    * datanodes and regionservers.  If numSlaves is > 1, then make sure
764    * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
765    * you will get bind errors.
766    * @throws Exception
767    * @see #shutdownMiniCluster()
768    * @return Mini hbase cluster instance created.
769    */
770   public MiniHBaseCluster startMiniCluster(final int numSlaves)
771   throws Exception {
772     return startMiniCluster(1, numSlaves);
773   }
774 
775 
776   /**
777    * Start the minicluster.
778    * @throws Exception
779    * @see #shutdownMiniCluster()
780    * @return Mini hbase cluster instance created.
781    */
782   public MiniHBaseCluster startMiniCluster(final int numMasters,
783     final int numSlaves)
784   throws Exception {
785     return startMiniCluster(numMasters, numSlaves, null);
786   }
787 
788   /**
789    * Start up a minicluster of hbase, optionally dfs, and zookeeper.
790    * Modifies Configuration.  Homes the cluster data directory under a random
791    * subdirectory in a directory under System property test.build.data.
792    * Directory is cleaned up on exit.
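       * <p>For example, a sketch that pins the datanodes to three illustrative hostnames:
       * <pre>{@code
       * // 1 master, 3 regionservers, 3 datanodes
       * util.startMiniCluster(1, 3, new String[] { "host1", "host2", "host3" });
       * }</pre>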
793    * @param numMasters Number of masters to start up.  We'll start this many
794    * hbase masters.  If numMasters > 1, you can find the active/primary master
795    * with {@link MiniHBaseCluster#getMaster()}.
796    * @param numSlaves Number of slaves to start up.  We'll start this many
797    * regionservers. If dataNodeHosts == null, this also indicates the number of
798    * datanodes to start. If dataNodeHosts != null, the number of datanodes is
799    * based on dataNodeHosts.length.
800    * If numSlaves is > 1, then make sure
801    * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
802    * you will get bind errors.
803    * @param dataNodeHosts hostnames DNs to run on.
804    * This is useful if you want to run datanodes on distinct hosts for things
805    * like HDFS block location verification.
806    * If you start MiniDFSCluster without host names,
807    * all instances of the datanodes will have the same host name.
808    * @throws Exception
809    * @see #shutdownMiniCluster()
810    * @return Mini hbase cluster instance created.
811    */
812   public MiniHBaseCluster startMiniCluster(final int numMasters,
813       final int numSlaves, final String[] dataNodeHosts) throws Exception {
814     return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
815   }
816 
817   /**
818    * Same as {@link #startMiniCluster(int, int)}, but with custom number of datanodes.
819    * @param numDataNodes Number of data nodes.
820    */
821   public MiniHBaseCluster startMiniCluster(final int numMasters,
822       final int numSlaves, final int numDataNodes) throws Exception {
823     return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
824   }
825 
826   /**
827    * Start up a minicluster of hbase, optionally dfs, and zookeeper.
828    * Modifies Configuration.  Homes the cluster data directory under a random
829    * subdirectory in a directory under System property test.build.data.
830    * Directory is cleaned up on exit.
831    * @param numMasters Number of masters to start up.  We'll start this many
832    * hbase masters.  If numMasters > 1, you can find the active/primary master
833    * with {@link MiniHBaseCluster#getMaster()}.
834    * @param numSlaves Number of slaves to start up.  We'll start this many
835    * regionservers. If dataNodeHosts == null, this also indicates the number of
836    * datanodes to start. If dataNodeHosts != null, the number of datanodes is
837    * based on dataNodeHosts.length.
838    * If numSlaves is > 1, then make sure
839    * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
840    * you will get bind errors.
841    * @param dataNodeHosts hostnames DNs to run on.
842    * This is useful if you want to run datanodes on distinct hosts for things
843    * like HDFS block location verification.
844    * If you start MiniDFSCluster without host names,
845    * all instances of the datanodes will have the same host name.
846    * @param masterClass The class to use as HMaster, or null for default
847    * @param regionserverClass The class to use as HRegionServer, or null for
848    * default
849    * @throws Exception
850    * @see #shutdownMiniCluster()
851    * @return Mini hbase cluster instance created.
852    */
853   public MiniHBaseCluster startMiniCluster(final int numMasters,
854       final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
855       Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
856           throws Exception {
857     return startMiniCluster(
858         numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
859   }
860 
861   /**
862    * Same as {@link #startMiniCluster(int, int, String[], Class, Class)}, but with custom
863    * number of datanodes.
864    * @param numDataNodes Number of data nodes.
865    */
866   public MiniHBaseCluster startMiniCluster(final int numMasters,
867     final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
868     Class<? extends HMaster> masterClass,
869     Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
870   throws Exception {
871     if (dataNodeHosts != null && dataNodeHosts.length != 0) {
872       numDataNodes = dataNodeHosts.length;
873     }
874 
875     LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
876         numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");
877 
878     // If we already put up a cluster, fail.
879     if (miniClusterRunning) {
880       throw new IllegalStateException("A mini-cluster is already running");
881     }
882     miniClusterRunning = true;
883 
884     setupClusterTestDir();
885     System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());
886 
887     // Bring up mini dfs cluster. This spews a bunch of warnings about missing
888     // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
889     if (this.dfsCluster == null) {
890       dfsCluster = startMiniDFSCluster(numDataNodes, dataNodeHosts);
891     }
892 
893     // Start up a zk cluster.
894     if (this.zkCluster == null) {
895       startMiniZKCluster(clusterTestDir);
896     }
897 
898     // Start the MiniHBaseCluster
899     return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
900   }
901 
902   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
903       throws IOException, InterruptedException {
904     return startMiniHBaseCluster(numMasters, numSlaves, null, null);
905   }
906 
907   /**
908    * Starts up mini hbase cluster.  Usually used after call to
909    * {@link #startMiniCluster(int, int)} when doing stepped startup of clusters.
910    * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
911    * @param numMasters number of masters to start
912    * @param numSlaves number of regionservers to start
913    * @return Reference to the hbase mini hbase cluster.
914    * @throws IOException
915    * @throws InterruptedException
916    * @see #startMiniCluster()
917    */
918   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
919         final int numSlaves, Class<? extends HMaster> masterClass,
920         Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
921   throws IOException, InterruptedException {
922     // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
923     createRootDir();
924 
925     // These settings will make the master wait until this exact number of
926     // region servers have connected.
927     if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
928       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
929     }
930     if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
931       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
932     }
933 
934     Configuration c = new Configuration(this.conf);
935     this.hbaseCluster =
936         new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
937     // Don't leave here till we've done a successful scan of the hbase:meta
938     HTable t = new HTable(c, TableName.META_TABLE_NAME);
939     ResultScanner s = t.getScanner(new Scan());
940     while (s.next() != null) {
941       continue;
942     }
943     s.close();
944     t.close();
945 
946     getHBaseAdmin(); // create the hbaseAdmin immediately
947     LOG.info("Minicluster is up");
948     return (MiniHBaseCluster)this.hbaseCluster;
949   }
950 
951   /**
952    * Starts the hbase cluster up again after shutting it down previously in a
953    * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
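       * <p>Typical sequence (sketch):
       * <pre>{@code
       * util.shutdownMiniHBaseCluster();  // dfs and zk stay up
       * util.restartHBaseCluster(2);      // bring hbase back with 2 regionservers
       * }</pre>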
954    * @param servers number of region servers
955    * @throws IOException
956    */
957   public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
958     this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
959     // Don't leave here till we've done a successful scan of the hbase:meta
960     HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
961     ResultScanner s = t.getScanner(new Scan());
962     while (s.next() != null) {
963       // do nothing
964     }
965     LOG.info("HBase has been restarted");
966     s.close();
967     t.close();
968   }
969 
970   /**
971    * @return Current mini hbase cluster. Only has something in it after a call
972    * to {@link #startMiniCluster()}.
973    * @see #startMiniCluster()
974    */
975   public MiniHBaseCluster getMiniHBaseCluster() {
976     if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
977       return (MiniHBaseCluster)this.hbaseCluster;
978     }
979     throw new RuntimeException(hbaseCluster + " not an instance of " +
980                                MiniHBaseCluster.class.getName());
981   }
982 
983   /**
984    * Stops mini hbase, zk, and hdfs clusters.
985    * @throws IOException
986    * @see #startMiniCluster(int)
987    */
988   public void shutdownMiniCluster() throws Exception {
989     LOG.info("Shutting down minicluster");
990     shutdownMiniHBaseCluster();
991     if (!this.passedZkCluster) {
992       shutdownMiniZKCluster();
993     }
994     shutdownMiniDFSCluster();
995 
996     cleanupTestDir();
997     miniClusterRunning = false;
998     LOG.info("Minicluster is down");
999   }
1000 
1001   /**
1002    * @return True if we removed the test dirs
1003    * @throws IOException
1004    */
1005   @Override
1006   public boolean cleanupTestDir() throws IOException {
1007     boolean ret = super.cleanupTestDir();
1008     if (deleteDir(this.clusterTestDir)) {
1009       this.clusterTestDir = null;
1010       return ret;
1011     }
1012     return false;
1013   }
1014 
1015   /**
1016    * Shuts down the HBase mini cluster.  Does not shut down zk or dfs if running.
1017    * @throws IOException
1018    */
1019   public void shutdownMiniHBaseCluster() throws IOException {
1020     if (hbaseAdmin != null) {
1021       hbaseAdmin.close0();
1022       hbaseAdmin = null;
1023     }
1024 
1025     // unset the configuration for MIN and MAX RS to start
1026     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
1027     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
1028     if (this.hbaseCluster != null) {
1029       this.hbaseCluster.shutdown();
1030       // Wait till hbase is down before going on to shutdown zk.
1031       this.hbaseCluster.waitUntilShutDown();
1032       this.hbaseCluster = null;
1033     }
1034 
1035     if (zooKeeperWatcher != null) {
1036       zooKeeperWatcher.close();
1037       zooKeeperWatcher = null;
1038     }
1039   }
1040 
1041   /**
1042    * Returns the path to the default root dir the minicluster uses.
1043    * Note: this does not cause the root dir to be created.
1044    * @return Fully qualified path for the default hbase root dir
1045    * @throws IOException
1046    */
1047   public Path getDefaultRootDirPath() throws IOException {
1048     FileSystem fs = FileSystem.get(this.conf);
1049     return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
1050   }
1051 
1052   /**
1053    * Creates an hbase rootdir in user home directory.  Also creates hbase
1054    * version file.  Normally you won't make use of this method.  The root hbase dir
1055    * is created for you as part of mini cluster startup.  You'd only use this
1056    * method if you were doing manual operations.
1057    * @return Fully qualified path to hbase root dir
1058    * @throws IOException
1059    */
1060   public Path createRootDir() throws IOException {
1061     FileSystem fs = FileSystem.get(this.conf);
1062     Path hbaseRootdir = getDefaultRootDirPath();
1063     FSUtils.setRootDir(this.conf, hbaseRootdir);
1064     fs.mkdirs(hbaseRootdir);
1065     FSUtils.setVersion(fs, hbaseRootdir);
1066     return hbaseRootdir;
1067   }
1068 
1069   /**
1070    * Flushes all caches in the mini hbase cluster
1071    * @throws IOException
1072    */
1073   public void flush() throws IOException {
1074     getMiniHBaseCluster().flushcache();
1075   }
1076 
1077   /**
1078    * Flushes all caches in the mini hbase cluster
1079    * @throws IOException
1080    */
1081   public void flush(TableName tableName) throws IOException {
1082     getMiniHBaseCluster().flushcache(tableName);
1083   }
1084 
1085   /**
1086    * Compact all regions in the mini hbase cluster
1087    * @throws IOException
1088    */
1089   public void compact(boolean major) throws IOException {
1090     getMiniHBaseCluster().compact(major);
1091   }
1092 
1093   /**
1094    * Compact all of a table's regions in the mini hbase cluster
1095    * @throws IOException
1096    */
1097   public void compact(TableName tableName, boolean major) throws IOException {
1098     getMiniHBaseCluster().compact(tableName, major);
1099   }
1100 
1101   /**
1102    * Create a table.
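        * <p>For example (table and family names are illustrative):
        * {@code HTable t = util.createTable("testtable", "f");}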
1103    * @param tableName
1104    * @param family
1105    * @return An HTable instance for the created table.
1106    * @throws IOException
1107    */
1108   public HTable createTable(String tableName, String family)
1109   throws IOException {
1110     return createTable(TableName.valueOf(tableName), new String[]{family});
1111   }
1112 
1113   /**
1114    * Create a table.
1115    * @param tableName
1116    * @param family
1117    * @return An HTable instance for the created table.
1118    * @throws IOException
1119    */
1120   public HTable createTable(byte[] tableName, byte[] family)
1121   throws IOException {
1122     return createTable(TableName.valueOf(tableName), new byte[][]{family});
1123   }
1124 
1125   /**
1126    * Create a table.
1127    * @param tableName
1128    * @param families
1129    * @return An HTable instance for the created table.
1130    * @throws IOException
1131    */
1132   public HTable createTable(TableName tableName, String[] families)
1133   throws IOException {
1134     List<byte[]> fams = new ArrayList<byte[]>(families.length);
1135     for (String family : families) {
1136       fams.add(Bytes.toBytes(family));
1137     }
1138     return createTable(tableName, fams.toArray(new byte[0][]));
1139   }
1140 
1141   /**
1142    * Create a table.
1143    * @param tableName
1144    * @param family
1145    * @return An HTable instance for the created table.
1146    * @throws IOException
1147    */
1148   public HTable createTable(TableName tableName, byte[] family)
1149   throws IOException {
1150     return createTable(tableName, new byte[][]{family});
1151   }
1152 
1153 
1154   /**
1155    * Create a table.
1156    * @param tableName
1157    * @param families
1158    * @return An HTable instance for the created table.
1159    * @throws IOException
1160    */
1161   public HTable createTable(byte[] tableName, byte[][] families)
1162   throws IOException {
1163     return createTable(tableName, families,
1164         new Configuration(getConfiguration()));
1165   }
1166 
1167   /**
1168    * Create a table.
1169    * @param tableName
1170    * @param families
1171    * @return An HTable instance for the created table.
1172    * @throws IOException
1173    */
1174   public HTable createTable(TableName tableName, byte[][] families)
1175   throws IOException {
1176     return createTable(tableName, families,
1177         new Configuration(getConfiguration()));
1178   }
1179 
1180   public HTable createTable(byte[] tableName, byte[][] families,
1181       int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
1182     return createTable(TableName.valueOf(tableName), families, numVersions,
1183         startKey, endKey, numRegions);
1184   }
1185 
1186   public HTable createTable(String tableName, byte[][] families,
1187       int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
1188     return createTable(TableName.valueOf(tableName), families, numVersions,
1189         startKey, endKey, numRegions);
1190   }
1191 
1192   public HTable createTable(TableName tableName, byte[][] families,
1193       int numVersions, byte[] startKey, byte[] endKey, int numRegions)
1194   throws IOException {
1195     HTableDescriptor desc = new HTableDescriptor(tableName);
1196     for (byte[] family : families) {
1197       HColumnDescriptor hcd = new HColumnDescriptor(family)
1198           .setMaxVersions(numVersions);
1199       desc.addFamily(hcd);
1200     }
1201     getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
1202     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1203     waitUntilAllRegionsAssigned(tableName);
1204     return new HTable(getConfiguration(), tableName);
1205   }
1206 
1207   /**
1208    * Create a table.
1209    * @param htd
1210    * @param families
1211    * @param c Configuration to use
1212    * @return An HTable instance for the created table.
1213    * @throws IOException
1214    */
1215   public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
1216   throws IOException {
1217     for(byte[] family : families) {
1218       HColumnDescriptor hcd = new HColumnDescriptor(family);
1219       // Disable blooms (they are on by default as of 0.95) but we disable them here because
1220       // tests have hard coded counts of what to expect in block cache, etc., and blooms being
1221       // on is interfering.
1222       hcd.setBloomFilterType(BloomType.NONE);
1223       htd.addFamily(hcd);
1224     }
1225     getHBaseAdmin().createTable(htd);
1226     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1227     waitUntilAllRegionsAssigned(htd.getTableName());
1228     return new HTable(c, htd.getTableName());
1229   }
1230 
1231   /**
1232    * Create a table.
1233    * @param htd
1234    * @param splitRows
1235    * @return An HTable instance for the created table.
1236    * @throws IOException
1237    */
1238   public HTable createTable(HTableDescriptor htd, byte[][] splitRows)
1239       throws IOException {
1240     getHBaseAdmin().createTable(htd, splitRows);
1241     // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned
1242     waitUntilAllRegionsAssigned(htd.getTableName());
1243     return new HTable(getConfiguration(), htd.getTableName());
1244   }
1245 
1246   /**
1247    * Create a table.
1248    * @param tableName
1249    * @param families
1250    * @param c Configuration to use
1251    * @return An HTable instance for the created table.
1252    * @throws IOException
1253    */
1254   public HTable createTable(TableName tableName, byte[][] families,
1255       final Configuration c)
1256   throws IOException {
1257     return createTable(new HTableDescriptor(tableName), families, c);
1258   }
1259 
1260   /**
1261    * Create a table.
1262    * @param tableName
1263    * @param families
1264    * @param c Configuration to use
1265    * @return An HTable instance for the created table.
1266    * @throws IOException
1267    */
1268   public HTable createTable(byte[] tableName, byte[][] families,
1269       final Configuration c)
1270   throws IOException {
1271     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1272     for(byte[] family : families) {
1273       HColumnDescriptor hcd = new HColumnDescriptor(family);
1274       // Disable blooms (they are on by default as of 0.95) but we disable them here because
1275       // tests have hard coded counts of what to expect in block cache, etc., and blooms being
1276       // on is interfering.
1277       hcd.setBloomFilterType(BloomType.NONE);
1278       desc.addFamily(hcd);
1279     }
1280     getHBaseAdmin().createTable(desc);
1281     return new HTable(c, tableName);
1282   }
1283 
1284   /**
1285    * Create a table.
1286    * @param tableName
1287    * @param families
1288    * @param c Configuration to use
1289    * @param numVersions
1290    * @return An HTable instance for the created table.
1291    * @throws IOException
1292    */
1293   public HTable createTable(TableName tableName, byte[][] families,
1294       final Configuration c, int numVersions)
1295   throws IOException {
1296     HTableDescriptor desc = new HTableDescriptor(tableName);
1297     for(byte[] family : families) {
1298       HColumnDescriptor hcd = new HColumnDescriptor(family)
1299           .setMaxVersions(numVersions);
1300       desc.addFamily(hcd);
1301     }
1302     getHBaseAdmin().createTable(desc);
1303     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1304     waitUntilAllRegionsAssigned(tableName);
1305     return new HTable(c, tableName);
1306   }
1307 
1308   /**
1309    * Create a table.
1310    * @param tableName
1311    * @param families
1312    * @param c Configuration to use
1313    * @param numVersions
1314    * @return An HTable instance for the created table.
1315    * @throws IOException
1316    */
1317   public HTable createTable(byte[] tableName, byte[][] families,
1318       final Configuration c, int numVersions)
1319   throws IOException {
1320     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1321     for(byte[] family : families) {
1322       HColumnDescriptor hcd = new HColumnDescriptor(family)
1323           .setMaxVersions(numVersions);
1324       desc.addFamily(hcd);
1325     }
1326     getHBaseAdmin().createTable(desc);
1327     return new HTable(c, tableName);
1328   }
1329 
1330   /**
1331    * Create a table.
1332    * @param tableName
1333    * @param family
1334    * @param numVersions
1335    * @return An HTable instance for the created table.
1336    * @throws IOException
1337    */
1338   public HTable createTable(byte[] tableName, byte[] family, int numVersions)
1339   throws IOException {
1340     return createTable(tableName, new byte[][]{family}, numVersions);
1341   }
1342 
1343   /**
1344    * Create a table.
1345    * @param tableName
1346    * @param family
1347    * @param numVersions
1348    * @return An HTable instance for the created table.
1349    * @throws IOException
1350    */
1351   public HTable createTable(TableName tableName, byte[] family, int numVersions)
1352   throws IOException {
1353     return createTable(tableName, new byte[][]{family}, numVersions);
1354   }
1355 
1356   /**
1357    * Create a table.
1358    * @param tableName
1359    * @param families
1360    * @param numVersions
1361    * @return An HTable instance for the created table.
1362    * @throws IOException
1363    */
1364   public HTable createTable(byte[] tableName, byte[][] families,
1365       int numVersions)
1366   throws IOException {
1367     return createTable(TableName.valueOf(tableName), families, numVersions);
1368   }
1369 
1370   /**
1371    * Create a table.
1372    * @param tableName
1373    * @param families
1374    * @param numVersions
1375    * @return An HTable instance for the created table.
1376    * @throws IOException
1377    */
1378   public HTable createTable(TableName tableName, byte[][] families,
1379       int numVersions)
1380   throws IOException {
1381     HTableDescriptor desc = new HTableDescriptor(tableName);
1382     for (byte[] family : families) {
1383       HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1384       desc.addFamily(hcd);
1385     }
1386     getHBaseAdmin().createTable(desc);
1387     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1388     waitUntilAllRegionsAssigned(tableName);
1389     return new HTable(new Configuration(getConfiguration()), tableName);
1390   }
1391 
1392   /**
1393    * Create a table.
1394    * @param tableName
1395    * @param families
1396    * @param numVersions
        * @param blockSize block size for the column families
1397    * @return An HTable instance for the created table.
1398    * @throws IOException
1399    */
1400   public HTable createTable(byte[] tableName, byte[][] families,
1401     int numVersions, int blockSize) throws IOException {
1402     return createTable(TableName.valueOf(tableName),
1403         families, numVersions, blockSize);
1404   }
1405 
1406   /**
1407    * Create a table.
1408    * @param tableName
1409    * @param families
1410    * @param numVersions
        * @param blockSize block size for the column families
1411    * @return An HTable instance for the created table.
1412    * @throws IOException
1413    */
1414   public HTable createTable(TableName tableName, byte[][] families,
1415     int numVersions, int blockSize) throws IOException {
1416     HTableDescriptor desc = new HTableDescriptor(tableName);
1417     for (byte[] family : families) {
1418       HColumnDescriptor hcd = new HColumnDescriptor(family)
1419           .setMaxVersions(numVersions)
1420           .setBlocksize(blockSize);
1421       desc.addFamily(hcd);
1422     }
1423     getHBaseAdmin().createTable(desc);
1424     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1425     waitUntilAllRegionsAssigned(tableName);
1426     return new HTable(new Configuration(getConfiguration()), tableName);
1427   }
1428 
1429   /**
1430    * Create a table.
1431    * @param tableName
1432    * @param families
1433    * @param numVersions
1434    * @return An HTable instance for the created table.
1435    * @throws IOException
1436    */
1437   public HTable createTable(byte[] tableName, byte[][] families,
1438       int[] numVersions)
1439   throws IOException {
1440     return createTable(TableName.valueOf(tableName), families, numVersions);
1441   }
1442 
1443   /**
1444    * Create a table.
1445    * @param tableName
1446    * @param families
1447    * @param numVersions
1448    * @return An HTable instance for the created table.
1449    * @throws IOException
1450    */
1451   public HTable createTable(TableName tableName, byte[][] families,
1452       int[] numVersions)
1453   throws IOException {
1454     HTableDescriptor desc = new HTableDescriptor(tableName);
1455     int i = 0;
1456     for (byte[] family : families) {
1457       HColumnDescriptor hcd = new HColumnDescriptor(family)
1458           .setMaxVersions(numVersions[i]);
1459       desc.addFamily(hcd);
1460       i++;
1461     }
1462     getHBaseAdmin().createTable(desc);
1463     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1464     waitUntilAllRegionsAssigned(tableName);
1465     return new HTable(new Configuration(getConfiguration()), tableName);
1466   }
1467 
1468   /**
1469    * Create a table.
1470    * @param tableName
1471    * @param family
1472    * @param splitRows
1473    * @return An HTable instance for the created table.
1474    * @throws IOException
1475    */
1476   public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
1477     throws IOException{
1478     return createTable(TableName.valueOf(tableName), family, splitRows);
1479   }
1480 
1481   /**
1482    * Create a table.
1483    * @param tableName
1484    * @param family
1485    * @param splitRows
1486    * @return An HTable instance for the created table.
1487    * @throws IOException
1488    */
1489   public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
1490       throws IOException {
1491     HTableDescriptor desc = new HTableDescriptor(tableName);
1492     HColumnDescriptor hcd = new HColumnDescriptor(family);
1493     desc.addFamily(hcd);
1494     getHBaseAdmin().createTable(desc, splitRows);
1495     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1496     waitUntilAllRegionsAssigned(tableName);
1497     return new HTable(getConfiguration(), tableName);
1498   }
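
  // Editor's note: a hedged sketch added for this write-up, not part of the original
  // class. It shows creating a pre-split table; the table name and the single split
  // point "m" are hypothetical.
  public void exampleCreatePreSplitTable() throws IOException {
    byte[][] splits = new byte[][] { Bytes.toBytes("m") };
    HTable table = createTable(TableName.valueOf("examplePreSplit"),
        Bytes.toBytes("cf"), splits);
    try {
      // One split point yields two regions: [empty, "m") and ["m", empty).
      assertTrue("Expected two regions", table.getStartKeys().length == 2);
    } finally {
      table.close();
    }
  }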
1499 
1500   /**
1501    * Create a table.
1502    * @param tableName
1503    * @param families
1504    * @param splitRows
1505    * @return An HTable instance for the created table.
1506    * @throws IOException
1507    */
1508   public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
1509       throws IOException {
1510     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1511     for(byte[] family:families) {
1512       HColumnDescriptor hcd = new HColumnDescriptor(family);
1513       desc.addFamily(hcd);
1514     }
1515     getHBaseAdmin().createTable(desc, splitRows);
1516     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1517     waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
1518     return new HTable(getConfiguration(), tableName);
1519   }
1520 
1521   /**
1522    * Drop an existing table
1523    * @param tableName existing table
1524    */
1525   public void deleteTable(String tableName) throws IOException {
1526     deleteTable(TableName.valueOf(tableName));
1527   }
1528 
1529   /**
1530    * Drop an existing table
1531    * @param tableName existing table
1532    */
1533   public void deleteTable(byte[] tableName) throws IOException {
1534     deleteTable(TableName.valueOf(tableName));
1535   }
1536 
1537   /**
1538    * Drop an existing table
1539    * @param tableName existing table
1540    */
1541   public void deleteTable(TableName tableName) throws IOException {
1542     try {
1543       getHBaseAdmin().disableTable(tableName);
1544     } catch (TableNotEnabledException e) {
1545       LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1546     }
1547     getHBaseAdmin().deleteTable(tableName);
1548   }
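
  // Editor's note: a minimal sketch added for this write-up, not part of the original
  // class. It guards the disable-then-delete pattern above with an existence check so
  // tests can clean up tables that may or may not have been created.
  public void deleteTableIfExists(TableName tableName) throws IOException {
    if (getHBaseAdmin().tableExists(tableName)) {
      deleteTable(tableName); // disables first if needed, then deletes
    }
  }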
1549 
1550   // ==========================================================================
1551   // Canned table and table descriptor creation
1552   // TODO replace HBaseTestCase
1553 
1554   public final static byte [] fam1 = Bytes.toBytes("colfamily11");
1555   public final static byte [] fam2 = Bytes.toBytes("colfamily21");
1556   public final static byte [] fam3 = Bytes.toBytes("colfamily31");
1557   public static final byte[][] COLUMNS = {fam1, fam2, fam3};
1558   private static final int MAXVERSIONS = 3;
1559 
1560   public static final char FIRST_CHAR = 'a';
1561   public static final char LAST_CHAR = 'z';
1562   public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
1563   public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1564 
1565   /**
1566    * Create a table descriptor for a table of name <code>name</code> with
1567    * {@link #COLUMNS} for families.
1568    * @param name Name to give table.
1569    * @param versions How many versions to allow per column.
1570    * @return Table descriptor.
1571    */
1572   public HTableDescriptor createTableDescriptor(final String name,
1573       final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
1574     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
1575     for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1576       htd.addFamily(new HColumnDescriptor(cfName)
1577           .setMinVersions(minVersions)
1578           .setMaxVersions(versions)
1579           .setKeepDeletedCells(keepDeleted)
1580           .setBlockCacheEnabled(false)
1581           .setTimeToLive(ttl)
1582       );
1583     }
1584     return htd;
1585   }
1586 
1587   /**
1588    * Create a table descriptor for a table of name <code>name</code> with
1589    * {@link #COLUMNS} for families, using the default versioning and TTL settings.
1590    * @param name Name to give table.
1591    * @return Table descriptor.
1592    */
1593   public HTableDescriptor createTableDescriptor(final String name) {
1594     return createTableDescriptor(name,  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1595         MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1596   }
1597 
1598   /**
1599    * Create an HRegion that writes to the local tmp dirs
1600    * @param desc
1601    * @param startKey
1602    * @param endKey
1603    * @return created hregion
1604    * @throws IOException
1605    */
1606   public HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey,
1607       byte [] endKey)
1608   throws IOException {
1609     HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
1610     return createLocalHRegion(hri, desc);
1611   }
1612 
1613   /**
1614    * Create an HRegion that writes to the local tmp dirs
1615    * @param info
1616    * @param desc
1617    * @return created hregion
1618    * @throws IOException
1619    */
1620   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
1621     return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
1622   }
1623 
1624   /**
1625    * Create an HRegion that writes to the local tmp dirs with specified hlog
1626    * @param info regioninfo
1627    * @param desc table descriptor
1628    * @param hlog hlog for this region.
1629    * @return created hregion
1630    * @throws IOException
1631    */
1632   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog) throws IOException {
1633     return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
1634   }
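
  // Editor's note: an illustrative sketch added for this write-up, not part of the
  // original class. It walks the create/use/close lifecycle of a local test region;
  // the "exampleLocal" table name is hypothetical.
  public void exampleLocalRegionRoundTrip() throws IOException {
    HTableDescriptor htd = createTableDescriptor("exampleLocal");
    // null start and end keys give the region the whole key space.
    HRegion region = createLocalHRegion(htd, null, null);
    try {
      byte[] row = Bytes.toBytes("r1");
      Put put = new Put(row);
      put.add(fam1, null, row);
      region.put(put);
    } finally {
      // Callers are responsible for closing local regions.
      HRegion.closeHRegion(region);
    }
  }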
1635 
1636 
1637   /**
1638    * @param tableName
1639    * @param startKey
1640    * @param stopKey
1641    * @param callingMethod
1642    * @param conf
1643    * @param isReadOnly
1644    * @param families
1645    * @throws IOException
1646    * @return A region on which you must call
1647    *         {@link HRegion#closeHRegion(HRegion)} when done.
1648    */
1649   public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
1650       String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
1651       HLog hlog, byte[]... families) throws IOException {
1652     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
1653     htd.setReadOnly(isReadOnly);
1654     for (byte[] family : families) {
1655       HColumnDescriptor hcd = new HColumnDescriptor(family);
1656       // Set default to be three versions.
1657       hcd.setMaxVersions(Integer.MAX_VALUE);
1658       htd.addFamily(hcd);
1659     }
1660     htd.setDurability(durability);
1661     HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
1662     return createLocalHRegion(info, htd, hlog);
1663   }
1664   //
1665   // ==========================================================================
1666 
1667   /**
1668    * Provide an existing table name to truncate
1669    * @param tableName existing table
1670    * @return HTable for the truncated table
1671    * @throws IOException
1672    */
1673   public HTable truncateTable(byte[] tableName) throws IOException {
1674     return truncateTable(TableName.valueOf(tableName));
1675   }
1676 
1677   /**
1678    * Provide an existing table name to truncate
1679    * @param tableName existing table
1680    * @return HTable for the truncated table
1681    * @throws IOException
1682    */
1683   public HTable truncateTable(TableName tableName) throws IOException {
1684     HTable table = new HTable(getConfiguration(), tableName);
1685     Scan scan = new Scan();
1686     ResultScanner resScan = table.getScanner(scan);
1687     for(Result res : resScan) {
1688       Delete del = new Delete(res.getRow());
1689       table.delete(del);
1690     }
1693     return table;
1694   }
1695 
1696   /**
1697    * Load table with rows from 'aaa' to 'zzz'.
1698    * @param t Table
1699    * @param f Family
1700    * @return Count of rows loaded.
1701    * @throws IOException
1702    */
1703   public int loadTable(final HTable t, final byte[] f) throws IOException {
1704     return loadTable(t, new byte[][] {f});
1705   }
1706 
1707   /**
1708    * Load table with rows from 'aaa' to 'zzz'.
1709    * @param t Table
1710    * @param f Family
        * @param writeToWAL true to write edits to the WAL, false to skip the WAL
1711    * @return Count of rows loaded.
1712    * @throws IOException
1713    */
1714   public int loadTable(final HTable t, final byte[] f, boolean writeToWAL) throws IOException {
1715     return loadTable(t, new byte[][] {f}, null, writeToWAL);
1716   }
1717 
1718   /**
1719    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1720    * @param t Table
1721    * @param f Array of Families to load
1722    * @return Count of rows loaded.
1723    * @throws IOException
1724    */
1725   public int loadTable(final HTable t, final byte[][] f) throws IOException {
1726     return loadTable(t, f, null);
1727   }
1728 
1729   /**
1730    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1731    * @param t Table
1732    * @param f Array of Families to load
1733    * @param value the values of the cells. If null is passed, the row key is used as value
1734    * @return Count of rows loaded.
1735    * @throws IOException
1736    */
1737   public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
1738     return loadTable(t, f, value, true);
1739   }
1740 
1741   /**
1742    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1743    * @param t Table
1744    * @param f Array of Families to load
1745    * @param value the values of the cells. If null is passed, the row key is used as value
        * @param writeToWAL true to write edits to the WAL, false to skip the WAL
1746    * @return Count of rows loaded.
1747    * @throws IOException
1748    */
1749   public int loadTable(final HTable t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException {
1750     t.setAutoFlush(false);
1751     int rowCount = 0;
1752     for (byte[] row : HBaseTestingUtility.ROWS) {
1753       Put put = new Put(row);
1754       put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
1755       for (int i = 0; i < f.length; i++) {
1756         put.add(f[i], null, value != null ? value : row);
1757       }
1758       t.put(put);
1759       rowCount++;
1760     }
1761     t.flushCommits();
1762     return rowCount;
1763   }
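
  // Editor's note: a hedged sketch added for this write-up, not part of the original
  // class. It pairs loadTable with countRows to verify that all 26^3 = 17576
  // generated rows ('aaa' through 'zzz') are readable back.
  public void exampleLoadAndCount(final HTable table, final byte[] family)
      throws IOException {
    int loaded = loadTable(table, family);
    int counted = countRows(table);
    assertTrue("Loaded " + loaded + " rows but counted " + counted,
        loaded == counted);
  }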
1764 
1765   /** A tracker for tracking and validating table rows
1766    * generated with {@link HBaseTestingUtility#loadTable(HTable, byte[])}
1767    */
1768   public static class SeenRowTracker {
1769     int dim = 'z' - 'a' + 1;
1770     int[][][] seenRows = new int[dim][dim][dim]; //count of how many times the row is seen
1771     byte[] startRow;
1772     byte[] stopRow;
1773 
1774     public SeenRowTracker(byte[] startRow, byte[] stopRow) {
1775       this.startRow = startRow;
1776       this.stopRow = stopRow;
1777     }
1778 
1779     void reset() {
1780       for (byte[] row : ROWS) {
1781         seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
1782       }
1783     }
1784 
1785     int i(byte b) {
1786       return b - 'a';
1787     }
1788 
1789     public void addRow(byte[] row) {
1790       seenRows[i(row[0])][i(row[1])][i(row[2])]++;
1791     }
1792 
1793     /** Validate that all the rows between startRow and stopRow are seen exactly once, and
1794      * that all other rows are not seen at all.
1795      */
1796     public void validate() {
1797       for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1798         for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1799           for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1800             int count = seenRows[i(b1)][i(b2)][i(b3)];
1801             int expectedCount = 0;
1802             if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
1803                 && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
1804               expectedCount = 1;
1805             }
1806             if (count != expectedCount) {
1807               String row = new String(new byte[] {b1,b2,b3});
1808               throw new RuntimeException("Row:" + row + " has a seen count of " + count + " instead of " + expectedCount);
1809             }
1810           }
1811         }
1812       }
1813     }
1814   }
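
  // Editor's note: an illustrative sketch added for this write-up, not part of the
  // original class. It feeds a bounded scan into a SeenRowTracker and validates that
  // every row in [startRow, stopRow) was returned exactly once.
  public void exampleTrackScannedRows(final HTable table, byte[] startRow,
      byte[] stopRow) throws IOException {
    SeenRowTracker tracker = new SeenRowTracker(startRow, stopRow);
    Scan scan = new Scan(startRow, stopRow);
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result result : scanner) {
        tracker.addRow(result.getRow());
      }
    } finally {
      scanner.close();
    }
    tracker.validate(); // throws RuntimeException on any mismatch
  }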
1815 
1816   public int loadRegion(final HRegion r, final byte[] f) throws IOException {
1817     return loadRegion(r, f, false);
1818   }
1819 
1820   /**
1821    * Load region with rows from 'aaa' to 'zzz'.
1822    * @param r Region
1823    * @param f Family
1824    * @param flush flush the cache if true
1825    * @return Count of rows loaded.
1826    * @throws IOException
1827    */
1828   public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
1829   throws IOException {
1830     byte[] k = new byte[3];
1831     int rowCount = 0;
1832     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1833       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1834         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1835           k[0] = b1;
1836           k[1] = b2;
1837           k[2] = b3;
1838           Put put = new Put(k);
1839           put.setDurability(Durability.SKIP_WAL);
1840           put.add(f, null, k);
1842 
1843           int preRowCount = rowCount;
1844           int pause = 10;
1845           int maxPause = 1000;
1846           while (rowCount == preRowCount) {
1847             try {
1848               r.put(put);
1849               rowCount++;
1850             } catch (RegionTooBusyException e) {
1851               pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
1852               Threads.sleep(pause);
1853             }
1854           }
1855         }
1856       }
1857       if (flush) {
1858         r.flushcache();
1859       }
1860     }
1861     return rowCount;
1862   }
1863 
1864   public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow) throws IOException {
1865     for (int i = startRow; i < endRow; i++) {
1866       byte[] data = Bytes.toBytes(String.valueOf(i));
1867       Put put = new Put(data);
1868       put.add(f, null, data);
1869       t.put(put);
1870     }
1871   }
1872 
1873   /**
1874    * Return the number of rows in the given table.
1875    */
1876   public int countRows(final HTable table) throws IOException {
1877     Scan scan = new Scan();
1878     ResultScanner results = table.getScanner(scan);
1879     int count = 0;
1880     for (@SuppressWarnings("unused") Result res : results) {
1881       count++;
1882     }
1883     results.close();
1884     return count;
1885   }
1886 
1887   public int countRows(final HTable table, final byte[]... families) throws IOException {
1888     Scan scan = new Scan();
1889     for (byte[] family: families) {
1890       scan.addFamily(family);
1891     }
1892     ResultScanner results = table.getScanner(scan);
1893     int count = 0;
1894     for (@SuppressWarnings("unused") Result res : results) {
1895       count++;
1896     }
1897     results.close();
1898     return count;
1899   }
1900 
1901   /**
1902    * Return an MD5 digest computed over all the row keys of a table.
1903    */
1904   public String checksumRows(final HTable table) throws Exception {
1905     Scan scan = new Scan();
1906     ResultScanner results = table.getScanner(scan);
1907     MessageDigest digest = MessageDigest.getInstance("MD5");
1908     for (Result res : results) {
1909       digest.update(res.getRow());
1910     }
1911     results.close();
1912     return Bytes.toStringBinary(digest.digest());
1913   }
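
  // Editor's note: a minimal sketch added for this write-up, not part of the original
  // class. It uses checksumRows to compare the row keys of two tables, e.g. a source
  // table and a copy produced by a MapReduce job.
  public void assertTablesHaveSameRows(final HTable expected, final HTable actual)
      throws Exception {
    String expectedDigest = checksumRows(expected);
    String actualDigest = checksumRows(actual);
    assertTrue("Row digests differ: " + expectedDigest + " vs " + actualDigest,
        expectedDigest.equals(actualDigest));
  }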
1914 
1915   /**
1916    * Creates many regions, named "aaa" to "zzz".
1917    *
1918    * @param table  The table to use for the data.
1919    * @param columnFamily  The family to insert the data into.
1920    * @return count of regions created.
1921    * @throws IOException When creating the regions fails.
1922    */
1923   public int createMultiRegions(HTable table, byte[] columnFamily)
1924   throws IOException {
1925     return createMultiRegions(getConfiguration(), table, columnFamily);
1926   }
1927 
1928   /** All the row values for the data loaded by {@link #loadTable(HTable, byte[])} */
1929   public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
1930   static {
1931     int i = 0;
1932     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1933       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1934         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1935           ROWS[i][0] = b1;
1936           ROWS[i][1] = b2;
1937           ROWS[i][2] = b3;
1938           i++;
1939         }
1940       }
1941     }
1942   }
1943 
1944   public static final byte[][] KEYS = {
1945     HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
1946     Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
1947     Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
1948     Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
1949     Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
1950     Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
1951     Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
1952     Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
1953     Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
1954   };
1955 
1956   public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
1957       Bytes.toBytes("bbb"),
1958       Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
1959       Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
1960       Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
1961       Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
1962       Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
1963       Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
1964       Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
1965       Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
1966   };
1967 
1968   /**
1969    * Creates many regions, named "aaa" to "zzz".
1970    * @param c Configuration to use.
1971    * @param table  The table to use for the data.
1972    * @param columnFamily  The family to insert the data into.
1973    * @return count of regions created.
1974    * @throws IOException When creating the regions fails.
1975    */
1976   public int createMultiRegions(final Configuration c, final HTable table,
1977       final byte[] columnFamily)
1978   throws IOException {
1979     return createMultiRegions(c, table, columnFamily, KEYS);
1980   }
1981 
1982   void makeDFSClientNonRetrying() {
1983     if (null == this.dfsCluster) {
1984       LOG.debug("dfsCluster has not started, can't make client non-retrying.");
1985       return;
1986     }
1987     try {
1988       final FileSystem filesystem = this.dfsCluster.getFileSystem();
1989       if (!(filesystem instanceof DistributedFileSystem)) {
1990         LOG.debug("dfsCluster is not backed by a DistributedFileSystem, can't make client non-retrying.");
1991         return;
1992       }
1993       // rely on FileSystem.CACHE to alter how we talk via DFSClient
1994       final DistributedFileSystem fs = (DistributedFileSystem)filesystem;
1995       // retrieve the backing DFSClient instance
1996       final Field dfsField = fs.getClass().getDeclaredField("dfs");
1997       dfsField.setAccessible(true);
1998       final Class<?> dfsClazz = dfsField.getType();
1999       final DFSClient dfs = DFSClient.class.cast(dfsField.get(fs));
2000 
2001       // expose the method for creating direct RPC connections.
2002       final Method createRPCNamenode = dfsClazz.getDeclaredMethod("createRPCNamenode", InetSocketAddress.class, Configuration.class, UserGroupInformation.class);
2003       createRPCNamenode.setAccessible(true);
2004 
2005       // grab the DFSClient instance's backing connection information
2006       final Field nnField = dfsClazz.getDeclaredField("nnAddress");
2007       nnField.setAccessible(true);
2008       final InetSocketAddress nnAddress = InetSocketAddress.class.cast(nnField.get(dfs));
2009       final Field confField = dfsClazz.getDeclaredField("conf");
2010       confField.setAccessible(true);
2011       final Configuration conf = Configuration.class.cast(confField.get(dfs));
2012       final Field ugiField = dfsClazz.getDeclaredField("ugi");
2013       ugiField.setAccessible(true);
2014       final UserGroupInformation ugi = UserGroupInformation.class.cast(ugiField.get(dfs));
2015 
2016       // replace the proxy for the namenode rpc with a direct instance
2017       final Field namenodeField = dfsClazz.getDeclaredField("namenode");
2018       namenodeField.setAccessible(true);
2019       namenodeField.set(dfs, createRPCNamenode.invoke(null, nnAddress, conf, ugi));
2020       LOG.debug("Set DSFClient namenode to bare RPC");
2021     } catch (Exception exception) {
2022       LOG.info("Could not alter DFSClient to be non-retrying.", exception);
2023     }
2024   }
2025 
2026   /**
2027    * Creates the specified number of regions in the specified table.
2028    * @param c
2029    * @param table
2030    * @param family
2031    * @param numRegions
2032    * @return count of regions created
2033    * @throws IOException
2034    */
2035   public int createMultiRegions(final Configuration c, final HTable table,
2036       final byte [] family, int numRegions)
2037   throws IOException {
2038     if (numRegions < 3) throw new IOException("Must create at least 3 regions");
2039     byte [] startKey = Bytes.toBytes("aaaaa");
2040     byte [] endKey = Bytes.toBytes("zzzzz");
2041     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
2042     byte [][] regionStartKeys = new byte[splitKeys.length+1][];
2043     System.arraycopy(splitKeys, 0, regionStartKeys, 1, splitKeys.length);
2044     regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
2045     return createMultiRegions(c, table, family, regionStartKeys);
2046   }
2047 
2048   @SuppressWarnings("deprecation")
2049   public int createMultiRegions(final Configuration c, final HTable table,
2050       final byte[] columnFamily, byte [][] startKeys)
2051   throws IOException {
2052     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2053     HTable meta = new HTable(c, TableName.META_TABLE_NAME);
2054     HTableDescriptor htd = table.getTableDescriptor();
2055     if(!htd.hasFamily(columnFamily)) {
2056       HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
2057       htd.addFamily(hcd);
2058     }
2059     // remove empty region - this is tricky as the mini cluster during the test
2060     // setup already has the "<tablename>,,123456789" row with an empty start
2061     // and end key. Adding the custom regions below adds those blindly,
2062     // including the new start region from empty to "bbb". lg
2063     List<byte[]> rows = getMetaTableRows(htd.getTableName());
2064     String regionToDeleteInFS = table
2065         .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
2066         .getRegionInfo().getEncodedName();
2067     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
2068     // add custom ones
2069     int count = 0;
2070     for (int i = 0; i < startKeys.length; i++) {
2071       int j = (i + 1) % startKeys.length;
2072       HRegionInfo hri = new HRegionInfo(table.getName(),
2073         startKeys[i], startKeys[j]);
2074       MetaEditor.addRegionToMeta(meta, hri);
2075       newRegions.add(hri);
2076       count++;
2077     }
2078     // see comment above, remove "old" (or previous) single region
2079     for (byte[] row : rows) {
2080       LOG.info("createMultiRegions: deleting meta row -> " +
2081         Bytes.toStringBinary(row));
2082       meta.delete(new Delete(row));
2083     }
2084     // remove the "old" region from FS
2085     Path tableDir = new Path(getDefaultRootDirPath().toString()
2086         + System.getProperty("file.separator") + htd.getTableName()
2087         + System.getProperty("file.separator") + regionToDeleteInFS);
2088     FileSystem.get(c).delete(tableDir);
2089     // flush cache of regions
2090     HConnection conn = table.getConnection();
2091     conn.clearRegionCache();
2092     // assign all the new regions IF table is enabled.
2093     HBaseAdmin admin = getHBaseAdmin();
2094     if (admin.isTableEnabled(table.getTableName())) {
2095       for(HRegionInfo hri : newRegions) {
2096         admin.assign(hri.getRegionName());
2097       }
2098     }
2099 
2100     meta.close();
2101 
2102     return count;
2103   }
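
  // Editor's note: a hedged usage sketch added for this write-up, not part of the
  // original class. It assumes a started mini cluster; the table name is
  // hypothetical. KEYS holds 25 start keys (the empty key plus "bbb".."yyy"),
  // so 25 regions are written to hbase:meta.
  public void exampleCreateMultiRegions() throws IOException {
    byte[] family = Bytes.toBytes("cf");
    HTable table = createTable(TableName.valueOf("exampleMulti"), family, 1);
    try {
      int count = createMultiRegions(getConfiguration(), table, family);
      assertTrue("Expected 25 regions, got " + count, count == 25);
    } finally {
      table.close();
    }
  }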
2104 
2105   /**
2106    * Create rows in hbase:meta for regions of the specified table with the specified
2107    * start keys.  The first startKey should be a 0 length byte array if you
2108    * want to form a proper range of regions.
2109    * @param conf
2110    * @param htd
2111    * @param startKeys
2112    * @return list of region info for regions added to meta
2113    * @throws IOException
2114    */
2115   public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
2116       final HTableDescriptor htd, byte [][] startKeys)
2117   throws IOException {
2118     HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
2119     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2120     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
2121     // add custom ones
2122     for (int i = 0; i < startKeys.length; i++) {
2123       int j = (i + 1) % startKeys.length;
2124       HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
2125           startKeys[j]);
2126       MetaEditor.addRegionToMeta(meta, hri);
2127       newRegions.add(hri);
2128     }
2129 
2130     meta.close();
2131     return newRegions;
2132   }
2133 
2134   /**
2135    * Returns all rows from the hbase:meta table.
2136    *
2137    * @throws IOException When reading the rows fails.
2138    */
2139   public List<byte[]> getMetaTableRows() throws IOException {
2140     // TODO: Redo using MetaReader class
2141     HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2142     List<byte[]> rows = new ArrayList<byte[]>();
2143     ResultScanner s = t.getScanner(new Scan());
2144     for (Result result : s) {
2145       LOG.info("getMetaTableRows: row -> " +
2146         Bytes.toStringBinary(result.getRow()));
2147       rows.add(result.getRow());
2148     }
2149     s.close();
2150     t.close();
2151     return rows;
2152   }
2153 
2154   /**
2155    * Returns all rows from the hbase:meta table for a given user table
2156    *
2157    * @throws IOException When reading the rows fails.
2158    */
2159   public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2160     // TODO: Redo using MetaReader.
2161     HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2162     List<byte[]> rows = new ArrayList<byte[]>();
2163     ResultScanner s = t.getScanner(new Scan());
2164     for (Result result : s) {
2165       HRegionInfo info = HRegionInfo.getHRegionInfo(result);
2166       if (info == null) {
2167         LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2168         // TODO figure out what to do for this new hosed case.
2169         continue;
2170       }
2171 
2172       if (info.getTable().equals(tableName)) {
2173         LOG.info("getMetaTableRows: row -> " +
2174             Bytes.toStringBinary(result.getRow()) + info);
2175         rows.add(result.getRow());
2176       }
2177     }
2178     s.close();
2179     t.close();
2180     return rows;
2181   }
2182 
2183   /**
2184    * Tool to get the reference to the region server object that holds the
2185    * region of the specified user table.
2186    * It first searches for the meta rows that contain the region of the
2187    * specified table, then gets the index of that RS, and finally retrieves
2188    * the RS's reference.
2189    * @param tableName user table to lookup in hbase:meta
2190    * @return region server that holds it, null if the row doesn't exist
2191    * @throws IOException
2192    * @throws InterruptedException
2193    */
2194   public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
2195       throws IOException, InterruptedException {
2196     return getRSForFirstRegionInTable(TableName.valueOf(tableName));
2197   }
2198   /**
2199    * Tool to get the reference to the region server object that holds the
2200    * region of the specified user table.
2201    * It first searches for the meta rows that contain the region of the
2202    * specified table, then gets the index of that RS, and finally retrieves
2203    * the RS's reference.
2204    * @param tableName user table to lookup in hbase:meta
2205    * @return region server that holds it, null if the row doesn't exist
2206    * @throws IOException
2207    */
2208   public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2209       throws IOException, InterruptedException {
2210     List<byte[]> metaRows = getMetaTableRows(tableName);
2211     if (metaRows == null || metaRows.isEmpty()) {
2212       return null;
2213     }
2214     LOG.debug("Found " + metaRows.size() + " rows for table " +
2215       tableName);
2216     byte [] firstrow = metaRows.get(0);
2217     LOG.debug("FirstRow=" + Bytes.toString(firstrow));
2218     long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2219       HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2220     int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2221       HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
2222     RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MILLISECONDS);
2223     while(retrier.shouldRetry()) {
2224       int index = getMiniHBaseCluster().getServerWith(firstrow);
2225       if (index != -1) {
2226         return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2227       }
2228       // Came back -1.  Region may not be online yet.  Sleep a while.
2229       retrier.sleepUntilNextRetry();
2230     }
2231     return null;
2232   }
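
  // Editor's note: an illustrative sketch added for this write-up, not part of the
  // original class. It locates the region server carrying the first region of a
  // table, e.g. to target that server for a restart in a failure test.
  public void exampleLogHostingServer(TableName tableName)
      throws IOException, InterruptedException {
    HRegionServer server = getRSForFirstRegionInTable(tableName);
    if (server == null) {
      LOG.info("No region of " + tableName + " is online yet");
    } else {
      LOG.info("First region of " + tableName + " is on " + server.getServerName());
    }
  }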
2233 
2234   /**
2235    * Starts a <code>MiniMRCluster</code> with a default number of
2236    * <code>TaskTracker</code>'s.
2237    *
2238    * @throws IOException When starting the cluster fails.
2239    */
2240   public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2241     startMiniMapReduceCluster(2);
2242     return mrCluster;
2243   }
2244 
2245   /**
2246    * Tasktracker has a bug where changing the hadoop.log.dir system property
2247    * will not change its internal static LOG_DIR variable.
2248    */
2249   private void forceChangeTaskLogDir() {
2250     Field logDirField;
2251     try {
2252       logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2253       logDirField.setAccessible(true);
2254 
2255       Field modifiersField = Field.class.getDeclaredField("modifiers");
2256       modifiersField.setAccessible(true);
2257       modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2258 
2259       logDirField.set(null, new File(hadoopLogDir, "userlogs"));
2260     } catch (SecurityException e) {
2261       throw new RuntimeException(e);
2262     } catch (NoSuchFieldException e) {
2264       throw new RuntimeException(e);
2265     } catch (IllegalArgumentException e) {
2266       throw new RuntimeException(e);
2267     } catch (IllegalAccessException e) {
2268       throw new RuntimeException(e);
2269     }
2270   }
2271 
2272   /**
2273    * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
2274    * filesystem.
2275    * @param servers  The number of <code>TaskTracker</code>'s to start.
2276    * @throws IOException When starting the cluster fails.
2277    */
2278   private void startMiniMapReduceCluster(final int servers) throws IOException {
2279     if (mrCluster != null) {
2280       throw new IllegalStateException("MiniMRCluster is already running");
2281     }
2282     LOG.info("Starting mini mapreduce cluster...");
2283     setupClusterTestDir();
2284     createDirsAndSetProperties();
2285 
2286     forceChangeTaskLogDir();
2287 
2288     //// hadoop2 specific settings
2289     // Tests were failing because this process used 6GB of virtual memory and was getting killed.
2290     // so we raise the allowed virtual memory ratio to keep processes from being killed.
2291     conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2292 
2293     // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
2294     // this avoids the problem by disabling speculative task execution in tests.
2295     conf.setBoolean("mapreduce.map.speculative", false);
2296     conf.setBoolean("mapreduce.reduce.speculative", false);
2297     ////
2298 
2299     // Allow the user to override the FS URI this map-reduce cluster uses.
2300     mrCluster = new MiniMRCluster(servers,
2301       FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
2302       null, null, new JobConf(this.conf));
2303     JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2304     if (jobConf == null) {
2305       jobConf = mrCluster.createJobConf();
2306     }
2307 
2308     jobConf.set("mapred.local.dir",
2309       conf.get("mapred.local.dir")); //Hadoop MiniMR overwrites this while it should not
2310     LOG.info("Mini mapreduce cluster started");
2311 
2312     // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
2313     // Our HBase MR jobs need several of these settings in order to properly run.  So we copy the
2314     // necessary config properties here.  YARN-129 required adding a few properties.
2315     conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
2316     // this is for mrv2 support; mr1 ignores it
2317     conf.set("mapreduce.framework.name", "yarn");
2318     conf.setBoolean("yarn.is.minicluster", true);
2319     String rmAddress = jobConf.get("yarn.resourcemanager.address");
2320     if (rmAddress != null) {
2321       conf.set("yarn.resourcemanager.address", rmAddress);
2322     }
2323     String historyAddress = jobConf.get("mapreduce.jobhistory.address");
2324     if (historyAddress != null) {
2325       conf.set("mapreduce.jobhistory.address", historyAddress);
2326     }
2327     String schedulerAddress =
2328       jobConf.get("yarn.resourcemanager.scheduler.address");
2329     if (schedulerAddress != null) {
2330       conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2331     }
2332   }
2333 
2334   /**
2335    * Stops the previously started <code>MiniMRCluster</code>.
2336    */
2337   public void shutdownMiniMapReduceCluster() {
2338     LOG.info("Stopping mini mapreduce cluster...");
2339     if (mrCluster != null) {
2340       mrCluster.shutdown();
2341       mrCluster = null;
2342     }
2343     // Restore configuration to point to local jobtracker
2344     conf.set("mapred.job.tracker", "local");
2345     LOG.info("Mini mapreduce cluster stopped");
2346   }
2347 
2348   /**
2349    * Create a stubbed out RegionServerService, mainly for getting FS.
2350    */
2351   public RegionServerServices createMockRegionServerService() throws IOException {
2352     return createMockRegionServerService((ServerName)null);
2353   }
2354 
2355   /**
2356    * Create a stubbed out RegionServerService, mainly for getting FS.
2357    * This version is used by TestTokenAuthentication
2358    */
2359   public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
2360     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2361     rss.setFileSystem(getTestFileSystem());
2362     rss.setRpcServer(rpc);
2363     return rss;
2364   }
2365 
2366   /**
2367    * Create a stubbed out RegionServerService, mainly for getting FS.
2368    * This version is used by TestOpenRegionHandler
2369    */
2370   public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2371     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2372     rss.setFileSystem(getTestFileSystem());
2373     return rss;
2374   }
2375 
2376   /**
2377    * Switches the logger for the given class to DEBUG level.
2378    *
2379    * @param clazz  The class for which to switch to debug logging.
2380    */
2381   public void enableDebug(Class<?> clazz) {
2382     Log l = LogFactory.getLog(clazz);
2383     if (l instanceof Log4JLogger) {
2384       ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2385     } else if (l instanceof Jdk14Logger) {
2386       ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2387     }
2388   }
2389 
2390   /**
2391    * Expire the Master's session
2392    * @throws Exception
2393    */
2394   public void expireMasterSession() throws Exception {
2395     HMaster master = getMiniHBaseCluster().getMaster();
2396     expireSession(master.getZooKeeper(), false);
2397   }
2398 
2399   /**
2400    * Expire a region server's session
2401    * @param index which RS
2402    * @throws Exception
2403    */
2404   public void expireRegionServerSession(int index) throws Exception {
2405     HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2406     expireSession(rs.getZooKeeper(), false);
2407     decrementMinRegionServerCount();
2408   }
2409 
2410   private void decrementMinRegionServerCount() {
2411     // decrement the count for this.conf, for newly spawned master
2412     // this.hbaseCluster shares this configuration too
2413     decrementMinRegionServerCount(getConfiguration());
2414 
2415     // each master thread keeps a copy of configuration
2416     for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2417       decrementMinRegionServerCount(master.getMaster().getConfiguration());
2418     }
2419   }
2420 
2421   private void decrementMinRegionServerCount(Configuration conf) {
2422     int currentCount = conf.getInt(
2423         ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2424     if (currentCount != -1) {
2425       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
2426           Math.max(currentCount - 1, 1));
2427     }
2428   }
2429 
2430   public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
2431    expireSession(nodeZK, false);
2432   }
2433 
2434   @Deprecated
2435   public void expireSession(ZooKeeperWatcher nodeZK, Server server)
2436     throws Exception {
2437     expireSession(nodeZK, false);
2438   }
2439 
2440   /**
2441    * Expire a ZooKeeper session as recommended in ZooKeeper documentation
2442    * http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A4
2443    * There are issues when doing this:
2444    * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
2445    * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
2446    *
2447    * @param nodeZK - the ZK watcher to expire
2448    * @param checkStatus - true to check if we can create an HTable with the
2449    *                    current configuration.
2450    */
2451   public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
2452     throws Exception {
2453     Configuration c = new Configuration(this.conf);
2454     String quorumServers = ZKConfig.getZKQuorumServersString(c);
2455     ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
2456     byte[] password = zk.getSessionPasswd();
2457     long sessionID = zk.getSessionId();
2458 
2459     // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
2460     //  so we create a first watcher to be sure that the
2461     //  event was sent. We expect that if our watcher receives the event
2462     //  other watchers on the same machine will get it as well.
2463     // When we ask to close the connection, ZK does not close it before
2464     //  we receive all the events, so we don't have to capture the event; just
2465     //  closing the connection should be enough.
2466     ZooKeeper monitor = new ZooKeeper(quorumServers,
2467       1000, new org.apache.zookeeper.Watcher(){
2468       @Override
2469       public void process(WatchedEvent watchedEvent) {
2470         LOG.info("Monitor ZKW received event="+watchedEvent);
2471       }
2472     } , sessionID, password);
2473 
2474     // Making it expire
2475     ZooKeeper newZK = new ZooKeeper(quorumServers,
2476         1000, EmptyWatcher.instance, sessionID, password);
2477 
2478     // ensure that we have a connection to the server before closing down, otherwise
2479     // the close-session event may be consumed before we reach the CONNECTING state
2480     long start = System.currentTimeMillis();
2481     while (newZK.getState() != States.CONNECTED
2482          && System.currentTimeMillis() - start < 1000) {
2483        Thread.sleep(1);
2484     }
2485     newZK.close();
2486     LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
2487 
2488     // Now closing & waiting to be sure that the clients get it.
2489     monitor.close();
2490 
2491     if (checkStatus) {
2492       new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
2493     }
2494   }
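
  // Editor's note: a hedged sketch added for this write-up, not part of the original
  // class. It expires the active master's ZooKeeper session and then blocks until a
  // master is active again, a common failure-injection step in recovery tests.
  public void exampleExpireMasterAndRecover() throws Exception {
    expireMasterSession();
    // waitForActiveAndReadyMaster blocks until some master reports itself ready.
    getMiniHBaseCluster().waitForActiveAndReadyMaster();
  }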
2495 
2496   /**
2497    * Get the Mini HBase cluster.
2498    *
2499    * @return hbase cluster
2500    * @see #getHBaseClusterInterface()
2501    */
2502   public MiniHBaseCluster getHBaseCluster() {
2503     return getMiniHBaseCluster();
2504   }
2505 
2506   /**
2507    * Returns the HBaseCluster instance.
2508    * <p>Returned object can be any of the subclasses of HBaseCluster, and the
2509    * tests referring to this should not assume that the cluster is a mini cluster or a
2510    * distributed one. If the test only works on a mini cluster, then the specific
2511    * method {@link #getMiniHBaseCluster()} can be used instead without the
2512    * need to type-cast.
2513    */
2514   public HBaseCluster getHBaseClusterInterface() {
2515     //implementation note: we should rename this method as #getHBaseCluster(),
2516     //but this would require refactoring 90+ calls.
2517     return hbaseCluster;
2518   }
2519 
2520   /**
2521    * Returns an HBaseAdmin instance.
2522    * This instance is shared between HBaseTestingUtility instance users.
2523    * Closing it has no effect; it will be closed automatically when the
2524    * cluster shuts down.
2525    *
2526    * @return The HBaseAdmin instance.
2527    * @throws IOException
2528    */
2529   public synchronized HBaseAdmin getHBaseAdmin()
2530   throws IOException {
2531     if (hbaseAdmin == null){
2532       hbaseAdmin = new HBaseAdminForTests(getConfiguration());
2533     }
2534     return hbaseAdmin;
2535   }
2536 
2537   private HBaseAdminForTests hbaseAdmin = null;
2538   private static class HBaseAdminForTests extends HBaseAdmin {
2539     public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
2540         ZooKeeperConnectionException, IOException {
2541       super(c);
2542     }
2543 
2544     @Override
2545     public synchronized void close() throws IOException {
2546       LOG.warn("close() called on HBaseAdmin instance returned from HBaseTestingUtility.getHBaseAdmin()");
2547     }
2548 
2549     private synchronized void close0() throws IOException {
2550       super.close();
2551     }
2552   }
2553 
2554   /**
2555    * Returns a ZooKeeperWatcher instance.
2556    * This instance is shared between HBaseTestingUtility instance users.
2557    * Don't close it; it will be closed automatically when the
2558    * cluster shuts down.
2559    *
2560    * @return The ZooKeeperWatcher instance.
2561    * @throws IOException
2562    */
2563   public synchronized ZooKeeperWatcher getZooKeeperWatcher()
2564     throws IOException {
2565     if (zooKeeperWatcher == null) {
2566       zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
2567         new Abortable() {
2568         @Override public void abort(String why, Throwable e) {
2569           throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
2570         }
2571         @Override public boolean isAborted() {return false;}
2572       });
2573     }
2574     return zooKeeperWatcher;
2575   }
2576   private ZooKeeperWatcher zooKeeperWatcher;
2577 
2578 
2579 
2580   /**
2581    * Closes the named region.
2582    *
2583    * @param regionName  The region to close.
2584    * @throws IOException
2585    */
2586   public void closeRegion(String regionName) throws IOException {
2587     closeRegion(Bytes.toBytes(regionName));
2588   }
2589 
2590   /**
2591    * Closes the named region.
2592    *
2593    * @param regionName  The region to close.
2594    * @throws IOException
2595    */
2596   public void closeRegion(byte[] regionName) throws IOException {
2597     getHBaseAdmin().closeRegion(regionName, null);
2598   }
2599 
2600   /**
2601    * Closes the region containing the given row.
2602    *
2603    * @param row  The row to find the containing region.
2604    * @param table  The table to find the region.
2605    * @throws IOException
2606    */
2607   public void closeRegionByRow(String row, HTable table) throws IOException {
2608     closeRegionByRow(Bytes.toBytes(row), table);
2609   }
2610 
2611   /**
2612    * Closes the region containing the given row.
2613    *
2614    * @param row  The row to find the containing region.
2615    * @param table  The table to find the region.
2616    * @throws IOException
2617    */
2618   public void closeRegionByRow(byte[] row, HTable table) throws IOException {
2619     HRegionLocation hrl = table.getRegionLocation(row);
2620     closeRegion(hrl.getRegionInfo().getRegionName());
2621   }
2622 
2623   /**
2624    * Retrieves a splittable region randomly from tableName
2625    *
2626    * @param tableName name of table
2627    * @param maxAttempts maximum number of attempts, unlimited for value of -1
2628    * @return the HRegion chosen, null if none was found within limit of maxAttempts
2629    */
2630   public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
2631     List<HRegion> regions = getHBaseCluster().getRegions(tableName);
2632     int regCount = regions.size();
2633     Set<Integer> attempted = new HashSet<Integer>();
2634     int idx;
2635     int attempts = 0;
2636     do {
2637       regions = getHBaseCluster().getRegions(tableName);
2638       if (regCount != regions.size()) {
2639         // if there was region movement, clear attempted Set
2640         attempted.clear();
2641       }
2642       regCount = regions.size();
2643       // There are chances that before we get the region for the table from an RS the region may
2644       // be going for CLOSE.  This may be because online schema change is enabled
2645       if (regCount > 0) {
2646         idx = random.nextInt(regCount);
2647         // if we have just tried this region, there is no need to try again
2648         if (attempted.contains(idx))
2649           continue;
2650         try {
2651           regions.get(idx).checkSplit();
2652           return regions.get(idx);
2653         } catch (Exception ex) {
2654           LOG.warn("Caught exception", ex);
2655           attempted.add(idx);
2656         }
2657       }
2658       attempts++;
2659     } while (maxAttempts == -1 || attempts < maxAttempts);
2660     return null;
2661   }
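
  // Editor's note: a minimal sketch added for this write-up, not part of the original
  // class. It picks a splittable region, if any, and asks the master to split it.
  public boolean exampleSplitSomeRegion(TableName tableName)
      throws IOException, InterruptedException {
    HRegion region = getSplittableRegion(tableName, 10);
    if (region == null) {
      return false; // nothing splittable within 10 attempts
    }
    getHBaseAdmin().split(region.getRegionName());
    return true;
  }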
2662 
2663   public MiniZooKeeperCluster getZkCluster() {
2664     return zkCluster;
2665   }
2666 
2667   public void setZkCluster(MiniZooKeeperCluster zkCluster) {
2668     this.passedZkCluster = true;
2669     this.zkCluster = zkCluster;
2670     conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
2671   }
2672 
2673   public MiniDFSCluster getDFSCluster() {
2674     return dfsCluster;
2675   }
2676 
2677   public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
2678     setDFSCluster(cluster, true);
2679   }
2680 
2681   /**
2682    * Set the MiniDFSCluster
2683    * @param cluster cluster to use
2684    * @param requireDown require that the cluster not be "up"
2685    *  (MiniDFSCluster#isClusterUp) before it is set.
2686    * @throws IllegalStateException if the passed cluster is up when it is required to be down
2687    * @throws IOException if the FileSystem could not be set from the passed dfs cluster
2688    */
2689   public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
2690       throws IllegalStateException, IOException {
2691     if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
2692       throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
2693     }
2694     this.dfsCluster = cluster;
2695     this.setFs();
2696   }
2697 
2698   public FileSystem getTestFileSystem() throws IOException {
2699     return HFileSystem.get(conf);
2700   }
2701 
2702   /**
2703    * Wait until all regions in a table have been assigned.  Waits up to the default
2704    * timeout (30 seconds) before giving up.
2705    * @param table Table to wait on.
2706    * @throws InterruptedException
2707    * @throws IOException
2708    */
2709   public void waitTableAvailable(byte[] table)
2710       throws InterruptedException, IOException {
2711     waitTableAvailable(getHBaseAdmin(), table, 30000);
2712   }
2713 
2714   public void waitTableAvailable(HBaseAdmin admin, byte[] table)
2715       throws InterruptedException, IOException {
2716     waitTableAvailable(admin, table, 30000);
2717   }
2718 
2719   /**
2720    * Wait until all regions in a table have been assigned
2721    * @param table Table to wait on.
2722    * @param timeoutMillis Timeout.
2723    * @throws InterruptedException
2724    * @throws IOException
2725    */
2726   public void waitTableAvailable(byte[] table, long timeoutMillis)
2727   throws InterruptedException, IOException {
2728     waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
2729   }
2730 
2731   public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
2732   throws InterruptedException, IOException {
2733     long startWait = System.currentTimeMillis();
2734     while (!admin.isTableAvailable(table)) {
2735       assertTrue("Timed out waiting for table to become available " +
2736         Bytes.toStringBinary(table),
2737         System.currentTimeMillis() - startWait < timeoutMillis);
2738       Thread.sleep(200);
2739     }
2740   }
2741 
2742   /**
2743    * Waits for a table to be 'enabled'.  Enabled means that table is set as 'enabled' and the
2744    * regions have all been assigned.  Will time out after the default period (30 seconds).
2745    * @see #waitTableAvailable(byte[])
2746    * @param table Table to wait on.
2748    * @throws InterruptedException
2749    * @throws IOException
2750    */
2751   public void waitTableEnabled(byte[] table)
2752       throws InterruptedException, IOException {
2753     waitTableEnabled(getHBaseAdmin(), table, 30000);
2754   }
2755 
2756   public void waitTableEnabled(HBaseAdmin admin, byte[] table)
2757       throws InterruptedException, IOException {
2758     waitTableEnabled(admin, table, 30000);
2759   }
2760 
2761   /**
2762    * Waits for a table to be 'enabled'.  Enabled means that table is set as 'enabled' and the
2763    * regions have all been assigned.
2764    * @see #waitTableAvailable(byte[])
2765    * @param table Table to wait on.
2766    * @param timeoutMillis Time to wait on it being marked enabled.
2767    * @throws InterruptedException
2768    * @throws IOException
2769    */
2770   public void waitTableEnabled(byte[] table, long timeoutMillis)
2771   throws InterruptedException, IOException {
2772     waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
2773   }
2774 
2775   public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
2776   throws InterruptedException, IOException {
2777     long startWait = System.currentTimeMillis();
2778     waitTableAvailable(admin, table, timeoutMillis);
2779     while (!admin.isTableEnabled(table)) {
2780       assertTrue("Timed out waiting for table to become available and enabled " +
2781          Bytes.toStringBinary(table),
2782          System.currentTimeMillis() - startWait < timeoutMillis);
2783       Thread.sleep(200);
2784     }
2785     // Finally make sure all regions are fully open and online out on the cluster. Regions may be
2786     // in the hbase:meta table and almost open on all regionservers, but setting the region
2787     // online in the regionserver is the very last thing done and can take a little while to happen.
2788     // Below we do a get.  The get will retry if a NotServingRegionException or a
2789     // RegionOpeningException is thrown.  It is crude, but when it is done all regions will be online.
2790     HConnection connection = HConnectionManager.createConnection(conf);
2791     try {
2792       Canary.sniff(connection, TableName.valueOf(table));
2793     } catch (Exception e) {
2794       throw new IOException(e);
2795     } finally {
2796       connection.close();
2797     }
2798   }
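
  // Illustrative sketch (not in the original source): a disable/enable round
  // trip using the waiters in this class; the table is made up and assumed to
  // already exist.
  private void exampleWaitTableEnabledUsage() throws IOException, InterruptedException {
    byte[] tableName = Bytes.toBytes("exampleTable");
    HBaseAdmin admin = getHBaseAdmin();
    admin.disableTable(tableName);
    waitTableDisabled(tableName, 30000); // wait for the 'disabled' state
    admin.enableTable(tableName);
    waitTableEnabled(tableName, 30000);  // wait for availability, 'enabled' state, and a Canary sniff
  }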
2799 
2800   /**
2801    * Waits for a table to be 'disabled'.  Disabled means that the table is set as 'disabled'.
2802    * Times out after the default period (30 seconds).
2803    * @param table Table to wait on.
2804    * @throws InterruptedException
2805    * @throws IOException
2806    */
2807   public void waitTableDisabled(byte[] table)
2808       throws InterruptedException, IOException {
2809     waitTableDisabled(getHBaseAdmin(), table, 30000);
2810   }
2811 
2812   public void waitTableDisabled(HBaseAdmin admin, byte[] table)
2813       throws InterruptedException, IOException {
2814     waitTableDisabled(admin, table, 30000);
2815   }
2816 
2817   /**
2818    * Waits for a table to be 'disabled'.  Disabled means that the table is set as 'disabled'.
2819    * @param table Table to wait on.
2820    * @param timeoutMillis Time to wait on it being marked disabled.
2821    * @throws InterruptedException
2822    * @throws IOException
2823    */
2824   public void waitTableDisabled(byte[] table, long timeoutMillis)
2825       throws InterruptedException, IOException {
2826     waitTableDisabled(getHBaseAdmin(), table, timeoutMillis);
2827   }
2828 
2829   public void waitTableDisabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
2830       throws InterruptedException, IOException {
2831     TableName tableName = TableName.valueOf(table);
2832     long startWait = System.currentTimeMillis();
2833     while (!admin.isTableDisabled(tableName)) {
2834       assertTrue("Timed out waiting for table to become disabled " +
2835               Bytes.toStringBinary(table),
2836           System.currentTimeMillis() - startWait < timeoutMillis);
2837       Thread.sleep(200);
2838     }
2839   }
2840 
2841   /**
2843    * Make sure that at least the specified number of region servers
2844    * are running.
2845    * @param num minimum number of region servers that should be running
2846    * @return true if we started some servers
2847    * @throws IOException
2848    */
2849   public boolean ensureSomeRegionServersAvailable(final int num)
2850       throws IOException {
2851     boolean startedServer = false;
2852     MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
2853     for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
2854       LOG.info("Started new server=" + hbaseCluster.startRegionServer());
2855       startedServer = true;
2856     }
2857 
2858     return startedServer;
2859   }
2860 
2861 
2862   /**
2863    * Make sure that at least the specified number of region servers
2864    * are running. We don't count the ones that are currently stopping or are
2865    * stopped.
2866    * @param num minimum number of region servers that should be running
2867    * @return true if we started some servers
2868    * @throws IOException
2869    */
2870   public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
2871     throws IOException {
2872     boolean startedServer = ensureSomeRegionServersAvailable(num);
2873 
2874     int nonStoppedServers = 0;
2875     for (JVMClusterUtil.RegionServerThread rst :
2876       getMiniHBaseCluster().getRegionServerThreads()) {
2877 
2878       HRegionServer hrs = rst.getRegionServer();
2879       if (hrs.isStopping() || hrs.isStopped()) {
2880         LOG.info("A region server is stopped or stopping:"+hrs);
2881       } else {
2882         nonStoppedServers++;
2883       }
2884     }
2885     for (int i=nonStoppedServers; i<num; ++i) {
2886       LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
2887       startedServer = true;
2888     }
2889     return startedServer;
2890   }
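
  // Illustrative sketch (not in the original source): before a test that kills
  // regionservers, make sure at least three non-stopped ones are running.
  private void exampleEnsureRegionServersUsage() throws IOException {
    if (ensureSomeNonStoppedRegionServersAvailable(3)) {
      LOG.info("Had to start extra regionservers to reach the required count");
    }
  }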
2891 
2892 
2893   /**
2894    * This method clones the passed <code>c</code> configuration, setting a new
2895    * user into the clone.  Use it when getting new instances of FileSystem.  Only
2896    * works for DistributedFileSystem.
2897    * @param c Initial configuration
2898    * @param differentiatingSuffix Suffix to differentiate this user from others.
2899    * @return A new configuration instance with a different user set into it.
2900    * @throws IOException
2901    */
2902   public static User getDifferentUser(final Configuration c,
2903     final String differentiatingSuffix)
2904   throws IOException {
2905     FileSystem currentfs = FileSystem.get(c);
2906     if (!(currentfs instanceof DistributedFileSystem)) {
2907       return User.getCurrent();
2908     }
2909     // Else it is a distributed filesystem.  Make a new instance per daemon.  The
2910     // code below is taken from the AppendTestUtil over in hdfs.
2911     String username = User.getCurrent().getName() +
2912       differentiatingSuffix;
2913     User user = User.createUserForTesting(c, username,
2914         new String[]{"supergroup"});
2915     return user;
2916   }
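
  // Illustrative sketch (not in the original source): obtain a distinct User per
  // simulated daemon so each gets its own DistributedFileSystem instance; the
  // suffix is made up, and PrivilegedExceptionAction is fully qualified to avoid
  // assuming an import.
  private FileSystem exampleGetDifferentUserUsage() throws Exception {
    User user = getDifferentUser(getConfiguration(), ".daemon1");
    return user.runAs(new java.security.PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws IOException {
        return FileSystem.get(getConfiguration()); // a new instance owned by the new user
      }
    });
  }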
2917 
2918   /**
2919    * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append it is hard-coded to 5 and
2920    * makes tests linger.  Here is the exception you'll see:
2921    * <pre>
2922    * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/hlog.1276627923013 block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683 failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
2923    * </pre>
2924    * @param stream A DFSClient.DFSOutputStream.
2925    * @param max
2926    * @throws NoSuchFieldException
2927    * @throws SecurityException
2928    * @throws IllegalAccessException
2929    * @throws IllegalArgumentException
2930    */
2931   public static void setMaxRecoveryErrorCount(final OutputStream stream,
2932       final int max) {
2933     try {
2934       Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
2935       for (Class<?> clazz: clazzes) {
2936         String className = clazz.getSimpleName();
2937         if (className.equals("DFSOutputStream")) {
2938           if (clazz.isInstance(stream)) {
2939             Field maxRecoveryErrorCountField =
2940               stream.getClass().getDeclaredField("maxRecoveryErrorCount");
2941             maxRecoveryErrorCountField.setAccessible(true);
2942             maxRecoveryErrorCountField.setInt(stream, max);
2943             break;
2944           }
2945         }
2946       }
2947     } catch (Exception e) {
2948       LOG.info("Could not set max recovery field", e);
2949     }
2950   }
2951 
2952   /**
2953    * Wait until all regions for a table in hbase:meta have a non-empty
2954    * info:server, up to 60 seconds. This means all regions have been deployed,
2955    * the master has been informed, and hbase:meta has been updated with each
2956    * region's deployed server.
2957    * @param tableName the table name
2958    * @throws IOException
2959    */
2960   public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
2961     waitUntilAllRegionsAssigned(tableName, 60000);
2962   }
2963 
2964   /**
2965    * Wait until all regions for a table in hbase:meta have a non-empty
2966    * info:server, or until timeout.  This means all regions have been deployed,
2967    * the master has been informed, and hbase:meta has been updated with each
2968    * region's deployed server.
2969    * @param tableName the table name
2970    * @param timeout timeout, in milliseconds
2971    * @throws IOException
2972    */
2973   public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
2974       throws IOException {
2975     final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
2976     try {
2977       waitFor(timeout, 200, true, new Predicate<IOException>() {
2978         @Override
2979         public boolean evaluate() throws IOException {
2980           boolean allRegionsAssigned = true;
2981           Scan scan = new Scan();
2982           scan.addFamily(HConstants.CATALOG_FAMILY);
2983           ResultScanner s = meta.getScanner(scan);
2984           try {
2985             Result r;
2986             while ((r = s.next()) != null) {
2987               byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
2988               HRegionInfo info = HRegionInfo.parseFromOrNull(b);
2989               if (info != null && info.getTable().equals(tableName)) {
2990                 b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
2991                 allRegionsAssigned &= (b != null);
2992               }
2993             }
2994           } finally {
2995             s.close();
2996           }
2997           return allRegionsAssigned;
2998         }
2999       });
3000     } finally {
3001       meta.close();
3002     }
3003     // So, all regions are in the meta table, but make sure the master knows of the assignments
3004     // before returning -- sometimes this can lag.
3005     HMaster master = getHBaseCluster().getMaster();
3006     final RegionStates states = master.getAssignmentManager().getRegionStates();
3007     waitFor(timeout, 200, new Predicate<IOException>() {
3008       @Override
3009       public boolean evaluate() throws IOException {
3010         List<HRegionInfo> hris = states.getRegionsOfTable(tableName);
3011         return hris != null && !hris.isEmpty();
3012       }
3013     });
3014   }
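
  // Illustrative sketch (not in the original source): after creating a table,
  // wait until hbase:meta and the master agree that all its regions are out on
  // the cluster. Names are made up.
  private void exampleWaitUntilAllRegionsAssignedUsage() throws IOException {
    byte[] tableName = Bytes.toBytes("exampleTable");
    createTable(tableName, Bytes.toBytes("f"));
    waitUntilAllRegionsAssigned(TableName.valueOf(tableName)); // default 60 second timeout
  }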
3015 
3016   /**
3017    * Do a small get/scan against one store. This is required because the store
3018    * has no actual methods for querying itself, and relies on StoreScanner.
3019    */
3020   public static List<Cell> getFromStoreFile(HStore store,
3021                                                 Get get) throws IOException {
3022     Scan scan = new Scan(get);
3023     InternalScanner scanner = (InternalScanner) store.getScanner(scan,
3024         scan.getFamilyMap().get(store.getFamily().getName()),
3025         // originally MultiVersionConsistencyControl.resetThreadReadPoint() was called to set
3026         // readpoint 0.
3027         0);
3028 
3029     List<Cell> result = new ArrayList<Cell>();
3030     scanner.next(result);
3031     if (!result.isEmpty()) {
3032       // verify that we are on the row we want:
3033       Cell kv = result.get(0);
3034       if (!CellUtil.matchingRow(kv, get.getRow())) {
3035         result.clear();
3036       }
3037     }
3038     scanner.close();
3039     return result;
3040   }
3041 
3042   /**
3043    * Create region split keys between startKey and endKey
3044    *
3045    * @param startKey the start key of the region range
3046    * @param endKey the end key of the region range
3047    * @param numRegions the number of regions to be created. It has to be greater than 3.
3048    * @return the region start keys, beginning with the empty byte array
3049    */
3050   public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
3051     assertTrue("numRegions must be greater than 3", numRegions > 3);
3052     byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
3053     byte [][] result = new byte[tmpSplitKeys.length+1][];
3054     System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
3055     result[0] = HConstants.EMPTY_BYTE_ARRAY;
3056     return result;
3057   }
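
  // Illustrative worked example (not in the original source): with numRegions = 4,
  // Bytes.split is asked for 4 - 3 = 1 intermediate point and returns
  // {startKey, midpoint, endKey}; prepending the empty byte array yields the four
  // region start keys {EMPTY, startKey, midpoint, endKey}.
  private void exampleRegionSplitStartKeys() {
    byte[][] startKeys = getRegionSplitStartKeys(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 4);
    assertTrue(startKeys.length == 4);
    assertTrue(startKeys[0].length == 0); // the first region starts at the empty key
  }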
3058 
3059   /**
3060    * Do a small get/scan against one store. This is required because the store
3061    * has no actual methods for querying itself, and relies on StoreScanner.
3062    */
3063   public static List<Cell> getFromStoreFile(HStore store,
3064                                                 byte [] row,
3065                                                 NavigableSet<byte[]> columns
3066                                                 ) throws IOException {
3067     Get get = new Get(row);
3068     Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
3069     s.put(store.getFamily().getName(), columns);
3070 
3071     return getFromStoreFile(store,get);
3072   }
3073 
3074   /**
3075    * Gets a ZooKeeperWatcher.
3076    * @param TEST_UTIL the test utility whose configuration will be used
3077    */
3078   public static ZooKeeperWatcher getZooKeeperWatcher(
3079       HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
3080       IOException {
3081     ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
3082         "unittest", new Abortable() {
3083           boolean aborted = false;
3084 
3085           @Override
3086           public void abort(String why, Throwable e) {
3087             aborted = true;
3088             throw new RuntimeException("Fatal ZK error, why=" + why, e);
3089           }
3090 
3091           @Override
3092           public boolean isAborted() {
3093             return aborted;
3094           }
3095         });
3096     return zkw;
3097   }
3098 
3099   /**
3100    * Creates a znode with OPENED state.
3101    * @param TEST_UTIL the test utility to get a ZooKeeperWatcher from
3102    * @param region the region whose znode is created and transitioned
3103    * @param serverName the server the region is assigned to
3104    * @return the ZooKeeperWatcher that was used
3105    * @throws IOException
3106    * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
3107    * @throws KeeperException
3108    * @throws NodeExistsException
3109    */
3110   public static ZooKeeperWatcher createAndForceNodeToOpenedState(
3111       HBaseTestingUtility TEST_UTIL, HRegion region,
3112       ServerName serverName) throws ZooKeeperConnectionException,
3113       IOException, KeeperException, NodeExistsException {
3114     ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
3115     ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
3116     int version = ZKAssign.transitionNodeOpening(zkw, region
3117         .getRegionInfo(), serverName);
3118     ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
3119         version);
3120     return zkw;
3121   }
3122 
3123   public static void assertKVListsEqual(String additionalMsg,
3124       final List<? extends Cell> expected,
3125       final List<? extends Cell> actual) {
3126     final int eLen = expected.size();
3127     final int aLen = actual.size();
3128     final int minLen = Math.min(eLen, aLen);
3129 
3130     int i;
3131     for (i = 0; i < minLen
3132         && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
3133         ++i) {}
3134 
3135     if (additionalMsg == null) {
3136       additionalMsg = "";
3137     }
3138     if (!additionalMsg.isEmpty()) {
3139       additionalMsg = ". " + additionalMsg;
3140     }
3141 
3142     if (eLen != aLen || i != minLen) {
3143       throw new AssertionError(
3144           "Expected and actual KV arrays differ at position " + i + ": " +
3145           safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
3146           safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
3147     }
3148   }
3149 
3150   private static <T> String safeGetAsStr(List<T> lst, int i) {
3151     if (0 <= i && i < lst.size()) {
3152       return lst.get(i).toString();
3153     } else {
3154       return "<out_of_range>";
3155     }
3156   }
3157 
3158   public String getClusterKey() {
3159     return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
3160         + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
3161         + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
3162             HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
3163   }
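
  // Illustrative example (not in the original source): for a quorum "zk1,zk2" on
  // client port 2181 with the default parent znode, getClusterKey() returns
  // "zk1,zk2:2181:/hbase".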
3164 
3165   /** Creates a random table with the given parameters */
3166   public HTable createRandomTable(String tableName,
3167       final Collection<String> families,
3168       final int maxVersions,
3169       final int numColsPerRow,
3170       final int numFlushes,
3171       final int numRegions,
3172       final int numRowsPerFlush)
3173       throws IOException, InterruptedException {
3174 
3175     LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
3176         " regions, " + numFlushes + " storefiles per region, " +
3177         numRowsPerFlush + " rows per flush, maxVersions=" +  maxVersions +
3178         "\n");
3179 
3180     final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
3181     final int numCF = families.size();
3182     final byte[][] cfBytes = new byte[numCF][];
3183     {
3184       int cfIndex = 0;
3185       for (String cf : families) {
3186         cfBytes[cfIndex++] = Bytes.toBytes(cf);
3187       }
3188     }
3189 
3190     final int actualStartKey = 0;
3191     final int actualEndKey = Integer.MAX_VALUE;
3192     final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
3193     final int splitStartKey = actualStartKey + keysPerRegion;
3194     final int splitEndKey = actualEndKey - keysPerRegion;
3195     final String keyFormat = "%08x";
3196     final HTable table = createTable(tableName, cfBytes,
3197         maxVersions,
3198         Bytes.toBytes(String.format(keyFormat, splitStartKey)),
3199         Bytes.toBytes(String.format(keyFormat, splitEndKey)),
3200         numRegions);
3201 
3202     if (hbaseCluster != null) {
3203       getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
3204     }
3205 
3206     for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
3207       for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
3208         final byte[] row = Bytes.toBytes(String.format(keyFormat,
3209             actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
3210 
3211         Put put = new Put(row);
3212         Delete del = new Delete(row);
3213         for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
3214           final byte[] cf = cfBytes[rand.nextInt(numCF)];
3215           final long ts = rand.nextInt();
3216           final byte[] qual = Bytes.toBytes("col" + iCol);
3217           if (rand.nextBoolean()) {
3218             final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
3219                 "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
3220                 ts + "_random_" + rand.nextLong());
3221             put.add(cf, qual, ts, value);
3222           } else if (rand.nextDouble() < 0.8) {
3223             del.deleteColumn(cf, qual, ts);
3224           } else {
3225             del.deleteColumns(cf, qual, ts);
3226           }
3227         }
3228 
3229         if (!put.isEmpty()) {
3230           table.put(put);
3231         }
3232 
3233         if (!del.isEmpty()) {
3234           table.delete(del);
3235         }
3236       }
3237       LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
3238       table.flushCommits();
3239       if (hbaseCluster != null) {
3240         getMiniHBaseCluster().flushcache(table.getName());
3241       }
3242     }
3243 
3244     return table;
3245   }
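
  // Illustrative sketch (not in the original source): a small random table with
  // two families; every parameter value below is made up for the example.
  private HTable exampleCreateRandomTableUsage() throws IOException, InterruptedException {
    return createRandomTable("exampleRandomTable",
        Arrays.asList("cf1", "cf2"), // families
        3,    // maxVersions
        10,   // numColsPerRow
        2,    // numFlushes (storefiles per region)
        5,    // numRegions
        100); // numRowsPerFlush
  }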
3246 
3247   private static final int MIN_RANDOM_PORT = 0xc000;
3248   private static final int MAX_RANDOM_PORT = 0xfffe;
3249   private static Random random = new Random();
3250 
3251   /**
3252    * Returns a random port. These ports cannot be registered with IANA and are
3253    * intended for dynamic allocation (see http://bit.ly/dynports).
3254    */
3255   public static int randomPort() {
3256     return MIN_RANDOM_PORT
3257         + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
3258   }
3259 
3260   /**
3261    * Returns a random free port and marks that port as taken. Not thread-safe. Expected to be
3262    * called from single-threaded test setup code.
3263    */
3264   public static int randomFreePort() {
3265     int port = 0;
3266     do {
3267       port = randomPort();
3268       if (takenRandomPorts.contains(port)) {
3269         continue;
3270       }
3271       takenRandomPorts.add(port);
3272 
3273       try {
3274         ServerSocket sock = new ServerSocket(port);
3275         sock.close();
3276       } catch (IOException ex) {
3277         port = 0;
3278       }
3279     } while (port == 0);
3280     return port;
3281   }
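
  // Illustrative sketch (not in the original source): reserve a free port before
  // starting the mini-cluster; the info-server key is just one example of a port
  // setting a test may want to randomize.
  private void exampleRandomFreePortUsage() {
    getConfiguration().setInt("hbase.regionserver.info.port", randomFreePort());
  }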
3282 
3283 
3284   public static String randomMultiCastAddress() {
3285     return "226.1.1." + random.nextInt(254);
3286   }
3287 
3288 
3289 
3290   public static void waitForHostPort(String host, int port)
3291       throws IOException {
3292     final int maxTimeMs = 10000;
3293     final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3294     IOException savedException = null;
3295     LOG.info("Waiting for server at " + host + ":" + port);
3296     for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3297       try {
3298         Socket sock = new Socket(InetAddress.getByName(host), port);
3299         sock.close();
3300         savedException = null;
3301         LOG.info("Server at " + host + ":" + port + " is available");
3302         break;
3303       } catch (UnknownHostException e) {
3304         throw new IOException("Failed to look up " + host, e);
3305       } catch (IOException e) {
3306         savedException = e;
3307       }
3308       Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3309     }
3310 
3311     if (savedException != null) {
3312       throw savedException;
3313     }
3314   }
3315 
3316   /**
3317    * Creates a pre-split table for load testing. If the table already exists,
3318    * logs a warning and continues.
3319    * @return the number of regions the table was split into
3320    */
3321   public static int createPreSplitLoadTestTable(Configuration conf,
3322       TableName tableName, byte[] columnFamily, Algorithm compression,
3323       DataBlockEncoding dataBlockEncoding) throws IOException {
3324     return createPreSplitLoadTestTable(conf, tableName,
3325       columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER,
3326       Durability.USE_DEFAULT);
3327   }
3328   /**
3329    * Creates a pre-split table for load testing. If the table already exists,
3330    * logs a warning and continues.
3331    * @return the number of regions the table was split into
3332    */
3333   public static int createPreSplitLoadTestTable(Configuration conf,
3334       TableName tableName, byte[] columnFamily, Algorithm compression,
3335       DataBlockEncoding dataBlockEncoding, int numRegionsPerServer,
3336       Durability durability)
3337           throws IOException {
3338     HTableDescriptor desc = new HTableDescriptor(tableName);
3339     desc.setDurability(durability);
3340     HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
3341     hcd.setDataBlockEncoding(dataBlockEncoding);
3342     hcd.setCompressionType(compression);
3343     return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
3344   }
3345 
3346   /**
3347    * Creates a pre-split table for load testing. If the table already exists,
3348    * logs a warning and continues.
3349    * @return the number of regions the table was split into
3350    */
3351   public static int createPreSplitLoadTestTable(Configuration conf,
3352       HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
3353     return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
3354   }
3355 
3356   /**
3357    * Creates a pre-split table for load testing. If the table already exists,
3358    * logs a warning and continues.
3359    * @return the number of regions the table was split into
3360    */
3361   public static int createPreSplitLoadTestTable(Configuration conf,
3362       HTableDescriptor desc, HColumnDescriptor hcd, int numRegionsPerServer) throws IOException {
3363     if (!desc.hasFamily(hcd.getName())) {
3364       desc.addFamily(hcd);
3365     }
3366 
3367     int totalNumberOfRegions = 0;
3368     HBaseAdmin admin = new HBaseAdmin(conf);
3369     try {
3370       // Create a table with pre-split regions.
3371       // The number of splits is set as:
3372       //    (region servers * regions per region server).
3373       int numberOfServers = admin.getClusterStatus().getServers().size();
3374       if (numberOfServers == 0) {
3375         throw new IllegalStateException("No live regionservers");
3376       }
3377 
3378       totalNumberOfRegions = numberOfServers * numRegionsPerServer;
3379       LOG.info("Number of live regionservers: " + numberOfServers + ", " +
3380           "pre-splitting table into " + totalNumberOfRegions + " regions " +
3381           "(regions per server: " + numRegionsPerServer + ")");
3382 
3383       byte[][] splits = new RegionSplitter.HexStringSplit().split(
3384           totalNumberOfRegions);
3385 
3386       admin.createTable(desc, splits);
3387     } catch (MasterNotRunningException e) {
3388       LOG.error("Master not running", e);
3389       throw new IOException(e);
3390     } catch (TableExistsException e) {
3391       LOG.warn("Table " + desc.getTableName() +
3392           " already exists, continuing");
3393     } finally {
3394       admin.close();
3395     }
3396     return totalNumberOfRegions;
3397   }
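
  // Illustrative sketch (not in the original source): pre-split a load-test
  // table using GZ compression and no block encoding; the names are made up.
  private void examplePreSplitLoadTestTableUsage() throws IOException {
    int regions = createPreSplitLoadTestTable(getConfiguration(),
        TableName.valueOf("exampleLoadTable"), Bytes.toBytes("load"),
        Compression.Algorithm.GZ, DataBlockEncoding.NONE);
    LOG.info("Load test table pre-split into " + regions + " regions");
  }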
3398 
3399   public static int getMetaRSPort(Configuration conf) throws IOException {
3400     HTable table = new HTable(conf, TableName.META_TABLE_NAME);
3401     HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
3402     table.close();
3403     return hloc.getPort();
3404   }
3405 
3406   /**
3407    *  Due to an async racing issue, a region may not yet be in
3408    *  the online region list of a region server after
3409    *  the assignment znode is deleted and the new assignment
3410    *  is recorded in the master.
3411    */
3412   public void assertRegionOnServer(
3413       final HRegionInfo hri, final ServerName server,
3414       final long timeout) throws IOException, InterruptedException {
3415     long timeoutTime = System.currentTimeMillis() + timeout;
3416     while (true) {
3417       List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3418       if (regions.contains(hri)) return;
3419       long now = System.currentTimeMillis();
3420       if (now > timeoutTime) break;
3421       Thread.sleep(10);
3422     }
3423     fail("Could not find region " + hri.getRegionNameAsString()
3424       + " on server " + server);
3425   }
3426 
3427   /**
3428    * Check to make sure the region is open on the specified
3429    * region server, but not on any other one.
3430    */
3431   public void assertRegionOnlyOnServer(
3432       final HRegionInfo hri, final ServerName server,
3433       final long timeout) throws IOException, InterruptedException {
3434     long timeoutTime = System.currentTimeMillis() + timeout;
3435     while (true) {
3436       List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3437       if (regions.contains(hri)) {
3438         List<JVMClusterUtil.RegionServerThread> rsThreads =
3439           getHBaseCluster().getLiveRegionServerThreads();
3440         for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
3441           HRegionServer rs = rsThread.getRegionServer();
3442           if (server.equals(rs.getServerName())) {
3443             continue;
3444           }
3445           Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
3446           for (HRegion r: hrs) {
3447             assertTrue("Region should not be double assigned",
3448               r.getRegionId() != hri.getRegionId());
3449           }
3450         }
3451         return; // good, we are happy
3452       }
3453       long now = System.currentTimeMillis();
3454       if (now > timeoutTime) break;
3455       Thread.sleep(10);
3456     }
3457     fail("Could not find region " + hri.getRegionNameAsString()
3458       + " on server " + server);
3459   }
3460 
3461   public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
3462       throws IOException {
3463     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
3464     htd.addFamily(hcd);
3465     HRegionInfo info =
3466         new HRegionInfo(TableName.valueOf(tableName), null, null, false);
3467     HRegion region =
3468         HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
3469     return region;
3470   }
3471 
3472   public void setFileSystemURI(String fsURI) {
3473     FS_URI = fsURI;
3474   }
3475 
3476   /**
3477    * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
3478    */
3479   public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
3480       throws E {
3481     return Waiter.waitFor(this.conf, timeout, predicate);
3482   }
3483 
3484   /**
3485    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
3486    */
3487   public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
3488       throws E {
3489     return Waiter.waitFor(this.conf, timeout, interval, predicate);
3490   }
3491 
3492   /**
3493    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
3494    */
3495   public <E extends Exception> long waitFor(long timeout, long interval,
3496       boolean failIfTimeout, Predicate<E> predicate) throws E {
3497     return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
3498   }
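
  // Illustrative sketch (not in the original source): waitFor with an inline
  // Predicate, polling every 100ms for at most 10 seconds until the cluster
  // reports at least one live regionserver thread.
  private void exampleWaitForUsage() throws Exception {
    waitFor(10000, 100, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return !getMiniHBaseCluster().getLiveRegionServerThreads().isEmpty();
      }
    });
  }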
3499   
3500   /**
3501    * Wait until no regions in transition.
3502    * @param timeout How long to wait.
3503    * @throws Exception
3504    */
3505   public void waitUntilNoRegionsInTransition(
3506       final long timeout) throws Exception {
3507     waitFor(timeout, predicateNoRegionsInTransition());
3508   }
3509 
3510   /**
3511    * Returns a {@link Predicate} for checking that there are no regions in transition in the master.
3512    */
3513   public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
3514     return new Waiter.Predicate<Exception>() {
3515       @Override
3516       public boolean evaluate() throws Exception {
3517         final RegionStates regionStates = getMiniHBaseCluster().getMaster()
3518             .getAssignmentManager().getRegionStates();
3519         return !regionStates.isRegionsInTransition();
3520       }
3521     };
3522   }
3523 
3524   /**
3525    * Returns a {@link Predicate} for checking that a table is enabled.
3526    */
3527   public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
3528     return new Waiter.Predicate<Exception>() {
3529      @Override
3530      public boolean evaluate() throws Exception {
3531        return getHBaseAdmin().isTableEnabled(tableName);
3532       }
3533     };
3534   }
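
  // Illustrative sketch (not in the original source): the canned predicates
  // compose directly with the waitFor wrappers above.
  private void examplePredicateUsage(final TableName tableName) throws Exception {
    waitFor(30000, predicateTableEnabled(tableName));
    waitFor(30000, predicateNoRegionsInTransition());
  }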
3535 
3536   /**
3537    * Wait until the given labels are ready in the VisibilityLabelsCache.
3538    * @param timeoutMillis
3539    * @param labels
3540    */
3541   public void waitLabelAvailable(long timeoutMillis, final String... labels) {
3542     final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
3543     waitFor(timeoutMillis, new Waiter.Predicate<RuntimeException>() {
3544 
3545       @Override
3546       public boolean evaluate() {
3547         for (String label : labels) {
3548           if (labelsCache.getLabelOrdinal(label) == 0) {
3549             return false;
3550           }
3551         }
3552         return true;
3553       }
3554     });
3555   }
3556 
3557   /**
3558    * Create a set of column descriptors with all combinations of the available
3559    * compression, encoding, and bloom filter types.
3560    * @return the list of column descriptors
3561    */
3562   public static List<HColumnDescriptor> generateColumnDescriptors() {
3563     return generateColumnDescriptors("");
3564   }
3565 
3566   /**
3567    * Create a set of column descriptors with all combinations of the available
3568    * compression, encoding, and bloom filter types.
3569    * @param prefix family names prefix
3570    * @return the list of column descriptors
3571    */
3572   public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
3573     List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
3574     long familyId = 0;
3575     for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
3576       for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
3577         for (BloomType bloomType: BloomType.values()) {
3578           String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
3579           HColumnDescriptor htd = new HColumnDescriptor(name);
3580           htd.setCompressionType(compressionType);
3581           htd.setDataBlockEncoding(encodingType);
3582           htd.setBloomFilterType(bloomType);
3583           htds.add(htd);
3584           familyId++;
3585         }
3586       }
3587     }
3588     return htds;
3589   }
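
  // Illustrative sketch (not in the original source): create a table carrying
  // every supported compression/encoding/bloom combination; the table name and
  // family prefix are made up.
  private void exampleGenerateColumnDescriptorsUsage() throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("exampleAllCodecs"));
    for (HColumnDescriptor hcd : generateColumnDescriptors("test")) {
      htd.addFamily(hcd);
    }
    getHBaseAdmin().createTable(htd);
  }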
3590 
3591   /**
3592    * Get supported compression algorithms.
3593    * @return supported compression algorithms.
3594    */
3595   public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
3596     String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
3597     List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
3598     for (String algoName : allAlgos) {
3599       try {
3600         Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
3601         algo.getCompressor();
3602         supportedAlgos.add(algo);
3603       } catch (Throwable t) {
3604         // this algo is not available
3605       }
3606     }
3607     return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
3608   }
3609 }