1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase;
19  
20  import static org.junit.Assert.assertTrue;
21  import static org.junit.Assert.fail;
22  
23  import java.io.File;
24  import java.io.IOException;
25  import java.io.OutputStream;
26  import java.lang.reflect.Field;
27  import java.lang.reflect.Method;
28  import java.lang.reflect.Modifier;
29  import java.net.InetAddress;
30  import java.net.InetSocketAddress;
31  import java.net.ServerSocket;
32  import java.net.Socket;
33  import java.net.UnknownHostException;
34  import java.security.MessageDigest;
35  import java.util.ArrayList;
36  import java.util.Arrays;
37  import java.util.Collection;
38  import java.util.Collections;
39  import java.util.HashSet;
40  import java.util.List;
41  import java.util.Map;
42  import java.util.NavigableSet;
43  import java.util.Random;
44  import java.util.Set;
45  import java.util.UUID;
46  import java.util.concurrent.TimeUnit;
47  
48  import org.apache.commons.logging.Log;
49  import org.apache.commons.logging.LogFactory;
50  import org.apache.commons.logging.impl.Jdk14Logger;
51  import org.apache.commons.logging.impl.Log4JLogger;
52  import org.apache.hadoop.hbase.classification.InterfaceAudience;
53  import org.apache.hadoop.hbase.classification.InterfaceStability;
54  import org.apache.hadoop.conf.Configuration;
55  import org.apache.hadoop.fs.FileSystem;
56  import org.apache.hadoop.fs.Path;
57  import org.apache.hadoop.hbase.Waiter.Predicate;
58  import org.apache.hadoop.hbase.catalog.MetaEditor;
59  import org.apache.hadoop.hbase.client.Delete;
60  import org.apache.hadoop.hbase.client.Durability;
61  import org.apache.hadoop.hbase.client.Get;
62  import org.apache.hadoop.hbase.client.HBaseAdmin;
63  import org.apache.hadoop.hbase.client.HConnection;
64  import org.apache.hadoop.hbase.client.HConnectionManager;
65  import org.apache.hadoop.hbase.client.HTable;
66  import org.apache.hadoop.hbase.client.Put;
67  import org.apache.hadoop.hbase.client.Result;
68  import org.apache.hadoop.hbase.client.ResultScanner;
69  import org.apache.hadoop.hbase.client.Scan;
70  import org.apache.hadoop.hbase.fs.HFileSystem;
71  import org.apache.hadoop.hbase.io.compress.Compression;
72  import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
73  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
74  import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
75  import org.apache.hadoop.hbase.io.hfile.HFile;
76  import org.apache.hadoop.hbase.ipc.RpcServerInterface;
77  import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
78  import org.apache.hadoop.hbase.master.HMaster;
79  import org.apache.hadoop.hbase.master.RegionStates;
80  import org.apache.hadoop.hbase.master.ServerManager;
81  import org.apache.hadoop.hbase.regionserver.BloomType;
82  import org.apache.hadoop.hbase.regionserver.HRegion;
83  import org.apache.hadoop.hbase.regionserver.HRegionServer;
84  import org.apache.hadoop.hbase.regionserver.HStore;
85  import org.apache.hadoop.hbase.regionserver.InternalScanner;
86  import org.apache.hadoop.hbase.regionserver.RegionServerServices;
87  import org.apache.hadoop.hbase.regionserver.wal.HLog;
88  import org.apache.hadoop.hbase.security.User;
89  import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
90  import org.apache.hadoop.hbase.tool.Canary;
91  import org.apache.hadoop.hbase.tool.Canary.RegionTask.TaskType;
92  import org.apache.hadoop.hbase.util.Bytes;
93  import org.apache.hadoop.hbase.util.FSTableDescriptors;
94  import org.apache.hadoop.hbase.util.FSUtils;
95  import org.apache.hadoop.hbase.util.JVMClusterUtil;
96  import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
97  import org.apache.hadoop.hbase.util.RegionSplitter;
98  import org.apache.hadoop.hbase.util.RetryCounter;
99  import org.apache.hadoop.hbase.util.Threads;
100 import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
101 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
102 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
103 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
104 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
105 import org.apache.hadoop.hdfs.DFSClient;
106 import org.apache.hadoop.hdfs.DistributedFileSystem;
107 import org.apache.hadoop.hdfs.MiniDFSCluster;
108 import org.apache.hadoop.mapred.JobConf;
109 import org.apache.hadoop.mapred.MiniMRCluster;
110 import org.apache.hadoop.mapred.TaskLog;
111 import org.apache.hadoop.security.UserGroupInformation;
112 import org.apache.zookeeper.KeeperException;
113 import org.apache.zookeeper.KeeperException.NodeExistsException;
114 import org.apache.zookeeper.WatchedEvent;
115 import org.apache.zookeeper.ZooKeeper;
116 import org.apache.zookeeper.ZooKeeper.States;
117 
118 /**
119  * Facility for testing HBase. Replacement for
120  * old HBaseTestCase and HBaseClusterTestCase functionality.
121  * Create an instance and keep it around for the life of your test run.  This class is
122  * meant to be your one-stop shop for anything you might need when testing HBase.  It manages
123  * at most one cluster at a time.  The managed cluster can be an in-process
124  * {@link MiniHBaseCluster}, or a deployed cluster of type {@link DistributedHBaseCluster}.
125  * Not all methods work with the real cluster.
126  * Depends on log4j being on the classpath and on
127  * hbase-site.xml for logging and test-run configuration.  It does not set
128  * logging levels nor make changes to configuration parameters.
129  * <p>To preserve test data directories, set the system property
130  * "hbase.testing.preserve.testdir" to true.
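 * <p>A minimal JUnit 4 usage sketch follows; the table, family, and method names are
 * illustrative, and JUnit is assumed to be on the classpath:
 * <pre>
 * private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 *
 * &#64;BeforeClass
 * public static void beforeClass() throws Exception {
 *   TEST_UTIL.startMiniCluster();
 * }
 *
 * &#64;Test
 * public void testPutGet() throws Exception {
 *   HTable table = TEST_UTIL.createTable(Bytes.toBytes("testtable"), Bytes.toBytes("f"));
 *   table.put(new Put(Bytes.toBytes("row")).add(Bytes.toBytes("f"), Bytes.toBytes("q"),
 *       Bytes.toBytes("value")));
 *   table.close();
 * }
 *
 * &#64;AfterClass
 * public static void afterClass() throws Exception {
 *   TEST_UTIL.shutdownMiniCluster();
 * }
 * </pre>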
131  */
132 @InterfaceAudience.Public
133 @InterfaceStability.Evolving
134 public class HBaseTestingUtility extends HBaseCommonTestingUtility {
135    private MiniZooKeeperCluster zkCluster = null;
136 
137   public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
138   /**
139    * The default number of regions per regionserver when creating a pre-split
140    * table.
141    */
142   public static final int DEFAULT_REGIONS_PER_SERVER = 5;
143 
144   /**
145    * Set if we were passed a zkCluster.  If so, we won't shut down zk as
146    * part of general shutdown.
147    */
148   private boolean passedZkCluster = false;
149   private MiniDFSCluster dfsCluster = null;
150 
151   private HBaseCluster hbaseCluster = null;
152   private MiniMRCluster mrCluster = null;
153 
154   /** If there is a mini cluster running for this testing utility instance. */
155   private boolean miniClusterRunning;
156 
157   private String hadoopLogDir;
158 
159   /** Directory (a subdirectory of dataTestDir) used by the dfs cluster if any */
160   private File clusterTestDir = null;
161 
162   /** Directory on test filesystem where we put the data for this instance of
163    * HBaseTestingUtility. */
164   private Path dataTestDirOnTestFS = null;
165 
166   /**
167    * System property key to get test directory value.
168    * Name is as it is because mini dfs has hard-codings to put test data here.
169    * It should NOT be used directly in HBase, as it's a property used in
170    *  mini dfs.
171    *  @deprecated can be used only with mini dfs
172    */
173   @Deprecated
174   private static final String TEST_DIRECTORY_KEY = "test.build.data";
175 
176   /** Filesystem URI used for map-reduce mini-cluster setup */
177   private static String FS_URI;
178 
179   /** A set of ports that have been claimed using {@link #randomFreePort()}. */
180   private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();
181 
182   /** Compression algorithms to use in parameterized JUnit 4 tests */
183   public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
184     Arrays.asList(new Object[][] {
185       { Compression.Algorithm.NONE },
186       { Compression.Algorithm.GZ }
187     });
188 
189   /** This is for unit tests parameterized with two booleans. */
190   public static final List<Object[]> BOOLEAN_PARAMETERIZED =
191       Arrays.asList(new Object[][] {
192           { Boolean.FALSE },
193           { Boolean.TRUE }
194       });
195 
196   /** This is for unit tests parameterized with combinations of memstore timestamps and tags. */
197   public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
198   /** Compression algorithms to use in testing */
199   public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
200       Compression.Algorithm.NONE, Compression.Algorithm.GZ
201     };
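  // A sketch of wiring one of these parameter lists into JUnit 4's
  // Parameterized runner; the test class below is hypothetical:
  //
  //   @RunWith(Parameterized.class)
  //   public class TestWithCompression {
  //     @Parameterized.Parameters
  //     public static List<Object[]> parameters() {
  //       return HBaseTestingUtility.COMPRESSION_ALGORITHMS_PARAMETERIZED;
  //     }
  //
  //     private final Compression.Algorithm algo;
  //
  //     public TestWithCompression(Compression.Algorithm algo) {
  //       this.algo = algo;
  //     }
  //
  //     @Test
  //     public void testSomething() {
  //       // exercise code paths with this.algo
  //     }
  //   }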
202 
203   /**
204    * Create all combinations of Bloom filters and compression algorithms for
205    * testing.
206    */
207   private static List<Object[]> bloomAndCompressionCombinations() {
208     List<Object[]> configurations = new ArrayList<Object[]>();
209     for (Compression.Algorithm comprAlgo :
210          HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
211       for (BloomType bloomType : BloomType.values()) {
212         configurations.add(new Object[] { comprAlgo, bloomType });
213       }
214     }
215     return Collections.unmodifiableList(configurations);
216   }
217 
218   /**
219    * Create combination of memstoreTS and tags
220    */
221   private static List<Object[]> memStoreTSAndTagsCombination() {
222     List<Object[]> configurations = new ArrayList<Object[]>();
223     configurations.add(new Object[] { false, false });
224     configurations.add(new Object[] { false, true });
225     configurations.add(new Object[] { true, false });
226     configurations.add(new Object[] { true, true });
227     return Collections.unmodifiableList(configurations);
228   }
229 
230   public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
231       bloomAndCompressionCombinations();
232 
233   public HBaseTestingUtility() {
234     this(HBaseConfiguration.create());
235   }
236 
237   public HBaseTestingUtility(Configuration conf) {
238     super(conf);
239 
240     // An HBase checksum verification failure will cause unit tests to fail.
241     ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
242   }
243 
244   /**
245    * Create an HBaseTestingUtility where all tmp files are written to the local test data dir.
246    * It is needed to properly base {@link FSUtils#getRootDir} so that temp files land in the
247    * proper test dir.  Use this when you aren't using a Mini HDFS cluster.
248    * @return HBaseTestingUtility that uses the local fs for temp files.
249    */
250   public static HBaseTestingUtility createLocalHTU() {
251     Configuration c = HBaseConfiguration.create();
252     return createLocalHTU(c);
253   }
254 
255   /**
256    * Create an HBaseTestingUtility where all tmp files are written to the local test data dir.
257    * It is needed to properly base {@link FSUtils#getRootDir} so that temp files land in the
258    * proper test dir.  Use this when you aren't using a Mini HDFS cluster.
259    * @param c Configuration (will be modified)
260    * @return HBaseTestingUtility that uses the local fs for temp files.
261    */
262   public static HBaseTestingUtility createLocalHTU(Configuration c) {
263     HBaseTestingUtility htu = new HBaseTestingUtility(c);
264     String dataTestDir = htu.getDataTestDir().toString();
265     htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
266     LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
267     return htu;
268   }
269 
270   /**
271    * Controls how many attempts we will make in the face of failures in HDFS.
272    * @deprecated to be removed with Hadoop 1.x support
273    */
274   @Deprecated
275   public void setHDFSClientRetry(final int retries) {
276     this.conf.setInt("hdfs.client.retries.number", retries);
277     if (0 == retries) {
278       makeDFSClientNonRetrying();
279     }
280   }
281 
282   /**
283    * Returns this class's instance of {@link Configuration}.  Be careful how
284    * you use the returned Configuration since {@link HConnection} instances
285    * can be shared.  The Map of HConnections is keyed by the Configuration.  If,
286    * say, a Connection was being used against a cluster that had been shut down,
287    * see {@link #shutdownMiniCluster()}, then the Connection will no longer
288    * be wholesome.  Rather than use the returned Configuration directly, it's
289    * usually best to make a copy and use that.  Do
290    * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
291    * @return Instance of Configuration.
292    */
293   @Override
294   public Configuration getConfiguration() {
295     return super.getConfiguration();
296   }
297 
298   public void setHBaseCluster(HBaseCluster hbaseCluster) {
299     this.hbaseCluster = hbaseCluster;
300   }
301 
302   /**
303    * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
304    * Give it a random name so we can have many concurrent tests running if
305    * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}
306    * System property, as it's what minidfscluster bases
307    * its data dir on.  Modifying a System property is not the way to do concurrent
308    * instances -- another instance could grab the temporary
309    * value unintentionally -- but there is nothing we can do about it at the moment;
310    * single instance only is how the minidfscluster works.
311    *
312    * We also create the underlying directory for
313    *  hadoop.log.dir, mapred.local.dir and hadoop.tmp.dir, and set the values
314    *  in the conf, and as a system property for hadoop.tmp.dir
315    *
316    * @return The calculated data test build directory, if newly-created.
317    */
318   @Override
319   protected Path setupDataTestDir() {
320     Path testPath = super.setupDataTestDir();
321     if (null == testPath) {
322       return null;
323     }
324 
325     createSubDirAndSystemProperty(
326       "hadoop.log.dir",
327       testPath, "hadoop-log-dir");
328 
329     // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
330     //  we want our own value to ensure uniqueness on the same machine
331     createSubDirAndSystemProperty(
332       "hadoop.tmp.dir",
333       testPath, "hadoop-tmp-dir");
334 
335     // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
336     createSubDir(
337       "mapred.local.dir",
338       testPath, "mapred-local-dir");
339 
340     return testPath;
341   }
342 
343   private void createSubDirAndSystemProperty(
344     String propertyName, Path parent, String subDirName){
345 
346     String sysValue = System.getProperty(propertyName);
347 
348     if (sysValue != null) {
349       // There is already a value set. So we do nothing but hope
350       //  that there will be no conflicts
351       LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
352         sysValue + " so I do NOT create it in " + parent);
353       String confValue = conf.get(propertyName);
354       if (confValue != null && !confValue.endsWith(sysValue)){
355         LOG.warn(
356           propertyName + " property value differs in configuration and system: " +
357           "Configuration=" + confValue + " while System=" + sysValue +
358           ". Overriding the configuration value with the system value."
359         );
360       }
361       conf.set(propertyName, sysValue);
362     } else {
363       // Ok, it's not set, so we create it as a subdirectory
364       createSubDir(propertyName, parent, subDirName);
365       System.setProperty(propertyName, conf.get(propertyName));
366     }
367   }
368 
369   /**
370    * @return Where to write test data on the test filesystem; Returns working directory
371    * for the test filesystem by default
372    * @see #setupDataTestDirOnTestFS()
373    * @see #getTestFileSystem()
374    */
375   private Path getBaseTestDirOnTestFS() throws IOException {
376     FileSystem fs = getTestFileSystem();
377     return new Path(fs.getWorkingDirectory(), "test-data");
378   }
379 
380   /**
381    * @return META table descriptor
382    */
383   public HTableDescriptor getMetaTableDescriptor() {
384     try {
385       return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
386     } catch (IOException e) {
387       throw new RuntimeException("Unable to create META table descriptor", e);
388     }
389   }
390 
391   /**
392    * @return Where the DFS cluster will write data on the local filesystem.
393    * Creates it if it does not exist already.  A subdir of {@link #getBaseTestDir()}
394    * @see #getTestFileSystem()
395    */
396   Path getClusterTestDir() {
397     if (clusterTestDir == null){
398       setupClusterTestDir();
399     }
400     return new Path(clusterTestDir.getAbsolutePath());
401   }
402 
403   /**
404    * Creates a directory for the DFS cluster, under the test data directory.
405    */
406   private void setupClusterTestDir() {
407     if (clusterTestDir != null) {
408       return;
409     }
410 
411     // Using randomUUID ensures that multiple clusters can be launched by
412     //  the same test, if it stops and restarts them
413     Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
414     clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
415     // Have it cleaned up on exit
416     boolean b = deleteOnExit();
417     if (b) clusterTestDir.deleteOnExit();
418     conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
419     LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
420   }
421 
422   /**
423    * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
424    * to write temporary test data. Call this method after setting up the mini dfs cluster
425    * if the test relies on it.
426    * @return a unique path in the test filesystem
427    */
428   public Path getDataTestDirOnTestFS() throws IOException {
429     if (dataTestDirOnTestFS == null) {
430       setupDataTestDirOnTestFS();
431     }
432 
433     return dataTestDirOnTestFS;
434   }
435 
436   /**
437    * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
438    * to write temporary test data. Call this method after setting up the mini dfs cluster
439    * if the test relies on it.
440    * @return a unique path in the test filesystem
441    * @param subdirName name of the subdir to create under the base test dir
442    */
443   public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
444     return new Path(getDataTestDirOnTestFS(), subdirName);
445   }
446 
447   /**
448    * Sets up a path in test filesystem to be used by tests
449    */
450   private void setupDataTestDirOnTestFS() throws IOException {
451     if (dataTestDirOnTestFS != null) {
452       LOG.warn("Data test dir on test fs already set up in "
453           + dataTestDirOnTestFS.toString());
454       return;
455     }
456 
457     // The file system can be either local, mini dfs, or, if the configuration
458     // is supplied externally, an external cluster FS. If it is a local
459     // file system, the tests should use getBaseTestDir; otherwise, we can use
460     // the working directory and create a unique sub dir there.
461     FileSystem fs = getTestFileSystem();
462     if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
463       File dataTestDir = new File(getDataTestDir().toString());
464       if (deleteOnExit()) dataTestDir.deleteOnExit();
465       dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
466     } else {
467       Path base = getBaseTestDirOnTestFS();
468       String randomStr = UUID.randomUUID().toString();
469       dataTestDirOnTestFS = new Path(base, randomStr);
470       if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
471     }
472   }
473 
474   /**
475    * Cleans the test data directory on the test filesystem.
476    * @return True if we removed the test dirs
477    * @throws IOException
478    */
479   public boolean cleanupDataTestDirOnTestFS() throws IOException {
480     if (dataTestDirOnTestFS == null) return false;
481     boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
482     if (ret) dataTestDirOnTestFS = null;
483     return ret;
484   }
485 
486   /**
487    * Cleans a subdirectory under the test data directory on the test filesystem.
488    * @return True if we removed the child
489    * @throws IOException
490    */
491   public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
492     Path cpath = getDataTestDirOnTestFS(subdirName);
493     return getTestFileSystem().delete(cpath, true);
494   }
495 
496   /**
497    * Start a minidfscluster.
498    * @param servers How many DNs to start.
499    * @throws Exception
500    * @see #shutdownMiniDFSCluster()
501    * @return The mini dfs cluster created.
502    */
503   public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
504     return startMiniDFSCluster(servers, null);
505   }
506 
507   /**
508    * Start a minidfscluster.
509    * This is useful if you want to run datanodes on distinct hosts for things
510    * like HDFS block location verification.
511    * If you start MiniDFSCluster without host names, all instances of the
512    * datanodes will have the same host name.
513    * @param hosts hostnames the DNs should run on.
514    * @throws Exception
515    * @see #shutdownMiniDFSCluster()
516    * @return The mini dfs cluster created.
517    */
518   public MiniDFSCluster startMiniDFSCluster(final String hosts[])
519   throws Exception {
520     if (hosts != null && hosts.length != 0) {
521       return startMiniDFSCluster(hosts.length, hosts);
522     } else {
523       return startMiniDFSCluster(1, null);
524     }
525   }
526 
527   /**
528    * Start a minidfscluster.
529    * Can only create one.
530    * @param servers How many DNs to start.
531    * @param hosts hostnames the DNs should run on.
532    * @throws Exception
533    * @see #shutdownMiniDFSCluster()
534    * @return The mini dfs cluster created.
535    */
536   public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
537   throws Exception {
538     createDirsAndSetProperties();
539     try {
540       Method m = Class.forName("org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream")
541           .getMethod("setShouldSkipFsyncForTesting", new Class<?> []{ boolean.class });
542       m.invoke(null, new Object[] {true});
543     } catch (Exception e) {
544       LOG.info("Could not skip fsync for testing; EditLogFileOutputStream not available", e);
545     }
546 
547     // Error level to skip some warnings specific to the minicluster. See HBASE-4709
548     org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
549         setLevel(org.apache.log4j.Level.ERROR);
550     org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
551         setLevel(org.apache.log4j.Level.ERROR);
552 
553 
554     this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
555       true, null, null, hosts, null);
556 
557     // Set this just-started cluster as our filesystem.
558     setFs();
559 
560     // Wait for the cluster to be totally up
561     this.dfsCluster.waitClusterUp();
562 
563     //reset the test directory for test file system
564     dataTestDirOnTestFS = null;
565 
566     return this.dfsCluster;
567   }
568 
569   private void setFs() throws IOException {
570     if(this.dfsCluster == null){
571       LOG.info("Skipping setting fs because dfsCluster is null");
572       return;
573     }
574     FileSystem fs = this.dfsCluster.getFileSystem();
575     FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
576   }
577 
578   public MiniDFSCluster startMiniDFSCluster(int servers, final  String racks[], String hosts[])
579       throws Exception {
580     createDirsAndSetProperties();
581     this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
582         true, null, racks, hosts, null);
583 
584     // Set this just-started cluster as our filesystem.
585     FileSystem fs = this.dfsCluster.getFileSystem();
586     FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
587 
588     // Wait for the cluster to be totally up
589     this.dfsCluster.waitClusterUp();
590 
591     //reset the test directory for test file system
592     dataTestDirOnTestFS = null;
593 
594     return this.dfsCluster;
595   }
596 
597   public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
598     createDirsAndSetProperties();
599     dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
600         null, null, null);
601     return dfsCluster;
602   }
603 
604   /** This is used before starting HDFS and map-reduce mini-clusters */
605   private void createDirsAndSetProperties() throws IOException {
606     setupClusterTestDir();
607     System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
608     createDirAndSetProperty("cache_data", "test.cache.data");
609     createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
610     hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
611     createDirAndSetProperty("mapred_local", "mapred.local.dir");
612     createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
613     enableShortCircuit();
614 
615     Path root = getDataTestDirOnTestFS("hadoop");
616     conf.set(MapreduceTestingShim.getMROutputDirProp(),
617       new Path(root, "mapred-output-dir").toString());
618     conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
619     conf.set("mapreduce.jobtracker.staging.root.dir",
620       new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
621     conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
622   }
623 
624 
625   /**
626    *  Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
627    *  This allows specifying this parameter on the command line.
628    *  If not set, the default is false.
629    */
630   public boolean isReadShortCircuitOn(){
631     final String propName = "hbase.tests.use.shortcircuit.reads";
632     String readOnProp = System.getProperty(propName);
633     if (readOnProp != null){
634       return Boolean.parseBoolean(readOnProp);
635     } else {
636       return conf.getBoolean(propName, false);
637     }
638   }
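  // The property can be supplied per test run on the command line; a typical
  // Maven invocation (illustrative) is:
  //
  //   mvn test -Dhbase.tests.use.shortcircuit.reads=true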
639 
640   /** Enables short-circuit reads, unless configured differently.
641    * Sets both HBase and HDFS settings, including skipping the hdfs checksum checks.
642    */
643   private void enableShortCircuit() {
644     if (isReadShortCircuitOn()) {
645       String curUser = System.getProperty("user.name");
646       LOG.info("read short circuit is ON for user " + curUser);
647       // read short circuit, for hdfs
648       conf.set("dfs.block.local-path-access.user", curUser);
649       // read short circuit, for hbase
650       conf.setBoolean("dfs.client.read.shortcircuit", true);
651       // Skip checking checksum, for the hdfs client and the datanode
652       conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
653     } else {
654       LOG.info("read short circuit is OFF");
655     }
656   }
657 
658   private String createDirAndSetProperty(final String relPath, String property) {
659     String path = getDataTestDir(relPath).toString();
660     System.setProperty(property, path);
661     conf.set(property, path);
662     new File(path).mkdirs();
663     LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
664     return path;
665   }
666 
667   /**
668    * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
669    * or does nothing.
670    * @throws IOException
671    */
672   public void shutdownMiniDFSCluster() throws IOException {
673     if (this.dfsCluster != null) {
674       // The below throws an exception per dn, AsynchronousCloseException.
675       this.dfsCluster.shutdown();
676       dfsCluster = null;
677       dataTestDirOnTestFS = null;
678       FSUtils.setFsDefault(this.conf, new Path("file:///"));
679     }
680   }
681 
682   /**
683    * Call this if you only want a zk cluster.
684    * @see #startMiniCluster() if you want zk + dfs + hbase mini cluster.
685    * @throws Exception
686    * @see #shutdownMiniZKCluster()
687    * @return zk cluster started.
688    */
689   public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
690     return startMiniZKCluster(1);
691   }
692 
693   /**
694    * Call this if you only want a zk cluster.
695    * @param zooKeeperServerNum Number of ZooKeeper servers to start.
696    * @see #startMiniCluster() if you want zk + dfs + hbase mini cluster.
697    * @throws Exception
698    * @see #shutdownMiniZKCluster()
699    * @return zk cluster started.
700    */
701   public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
702       throws Exception {
703     setupClusterTestDir();
704     return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
705   }
706 
707   private MiniZooKeeperCluster startMiniZKCluster(final File dir)
708     throws Exception {
709     return startMiniZKCluster(dir, 1);
710   }
711 
712   /**
713    * Start a mini ZK cluster. If the property "test.hbase.zookeeper.property.clientPort" is set,
714    *  the port mentioned is used as the default port for ZooKeeper.
715    */
716   private MiniZooKeeperCluster startMiniZKCluster(final File dir,
717       int zooKeeperServerNum)
718   throws Exception {
719     if (this.zkCluster != null) {
720       throw new IOException("Cluster already running at " + dir);
721     }
722     this.passedZkCluster = false;
723     this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
724     final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
725     if (defPort > 0){
726       // If there is a port in the config file, we use it.
727       this.zkCluster.setDefaultClientPort(defPort);
728     }
729     int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
730     this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
731       Integer.toString(clientPort));
732     return this.zkCluster;
733   }
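  // A sketch of pinning the ZooKeeper client port for a test run; the utility
  // instance and port number are illustrative:
  //
  //   HBaseTestingUtility util = new HBaseTestingUtility();
  //   util.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort", 21818);
  //   util.startMiniZKCluster();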
734 
735   /**
736    * Shuts down zk cluster created by call to {@link #startMiniZKCluster(File)}
737    * or does nothing.
738    * @throws IOException
739    * @see #startMiniZKCluster()
740    */
741   public void shutdownMiniZKCluster() throws IOException {
742     if (this.zkCluster != null) {
743       this.zkCluster.shutdown();
744       this.zkCluster = null;
745     }
746   }
747 
748   /**
749    * Start up a minicluster of hbase, dfs, and zookeeper.
750    * @throws Exception
751    * @return Mini hbase cluster instance created.
752    * @see #shutdownMiniCluster()
753    */
754   public MiniHBaseCluster startMiniCluster() throws Exception {
755     return startMiniCluster(1, 1);
756   }
757 
758   /**
759    * Start up a minicluster of hbase, optionally dfs, and zookeeper.
760    * Modifies Configuration.  Homes the cluster data directory under a random
761    * subdirectory in a directory under System property test.build.data.
762    * Directory is cleaned up on exit.
763    * @param numSlaves Number of slaves to start up.  We'll start this many
764    * datanodes and regionservers.  If numSlaves is > 1, then make sure
765    * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
766    * you will get bind errors.
767    * @throws Exception
768    * @see #shutdownMiniCluster()
769    * @return Mini hbase cluster instance created.
770    */
771   public MiniHBaseCluster startMiniCluster(final int numSlaves)
772   throws Exception {
773     return startMiniCluster(1, numSlaves);
774   }
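  // A sketch of bringing up a multi-regionserver minicluster; disabling the
  // per-regionserver info port avoids the bind errors noted above:
  //
  //   HBaseTestingUtility util = new HBaseTestingUtility();
  //   util.getConfiguration().setInt("hbase.regionserver.info.port", -1);
  //   MiniHBaseCluster cluster = util.startMiniCluster(3);  // 3 regionservers, 3 datanodes
  //   ...
  //   util.shutdownMiniCluster();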
775 
776 
777   /**
778    * Start up a minicluster with the given number of masters and slaves.
779    * @throws Exception
780    * @see #shutdownMiniCluster()
781    * @return Mini hbase cluster instance created.
782    */
783   public MiniHBaseCluster startMiniCluster(final int numMasters,
784     final int numSlaves)
785   throws Exception {
786     return startMiniCluster(numMasters, numSlaves, null);
787   }
788 
789   /**
790    * Start up a minicluster of hbase, optionally dfs, and zookeeper.
791    * Modifies Configuration.  Homes the cluster data directory under a random
792    * subdirectory in a directory under System property test.build.data.
793    * Directory is cleaned up on exit.
794    * @param numMasters Number of masters to start up.  We'll start this many
795    * hbase masters.  If numMasters > 1, you can find the active/primary master
796    * with {@link MiniHBaseCluster#getMaster()}.
797    * @param numSlaves Number of slaves to start up.  We'll start this many
798    * regionservers. If dataNodeHosts == null, this also indicates the number of
799    * datanodes to start. If dataNodeHosts != null, the number of datanodes is
800    * based on dataNodeHosts.length.
801    * If numSlaves is > 1, then make sure
802    * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
803    * you will get bind errors.
804    * @param dataNodeHosts hostnames the DNs should run on.
805    * This is useful if you want to run datanodes on distinct hosts for things
806    * like HDFS block location verification.
807    * If you start MiniDFSCluster without host names,
808    * all instances of the datanodes will have the same host name.
809    * @throws Exception
810    * @see #shutdownMiniCluster()
811    * @return Mini hbase cluster instance created.
812    */
813   public MiniHBaseCluster startMiniCluster(final int numMasters,
814       final int numSlaves, final String[] dataNodeHosts) throws Exception {
815     return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
816   }
817 
818   /**
819    * Same as {@link #startMiniCluster(int, int)}, but with custom number of datanodes.
820    * @param numDataNodes Number of data nodes.
821    */
822   public MiniHBaseCluster startMiniCluster(final int numMasters,
823       final int numSlaves, final int numDataNodes) throws Exception {
824     return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
825   }
826 
827   /**
828    * Start up a minicluster of hbase, optionally dfs, and zookeeper.
829    * Modifies Configuration.  Homes the cluster data directory under a random
830    * subdirectory in a directory under System property test.build.data.
831    * Directory is cleaned up on exit.
832    * @param numMasters Number of masters to start up.  We'll start this many
833    * hbase masters.  If numMasters > 1, you can find the active/primary master
834    * with {@link MiniHBaseCluster#getMaster()}.
835    * @param numSlaves Number of slaves to start up.  We'll start this many
836    * regionservers. If dataNodeHosts == null, this also indicates the number of
837    * datanodes to start. If dataNodeHosts != null, the number of datanodes is
838    * based on dataNodeHosts.length.
839    * If numSlaves is > 1, then make sure
840    * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
841    * you will get bind errors.
842    * @param dataNodeHosts hostnames the DNs should run on.
843    * This is useful if you want to run datanodes on distinct hosts for things
844    * like HDFS block location verification.
845    * If you start MiniDFSCluster without host names,
846    * all instances of the datanodes will have the same host name.
847    * @param masterClass The class to use as HMaster, or null for default
848    * @param regionserverClass The class to use as HRegionServer, or null for
849    * default
850    * @throws Exception
851    * @see #shutdownMiniCluster()
852    * @return Mini hbase cluster instance created.
853    */
854   public MiniHBaseCluster startMiniCluster(final int numMasters,
855       final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
856       Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
857           throws Exception {
858     return startMiniCluster(
859         numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
860   }
861 
862   /**
863    * Same as {@link #startMiniCluster(int, int, String[], Class, Class)}, but with custom
864    * number of datanodes.
865    * @param numDataNodes Number of data nodes.
866    */
867   public MiniHBaseCluster startMiniCluster(final int numMasters,
868     final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
869     Class<? extends HMaster> masterClass,
870     Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
871   throws Exception {
872     if (dataNodeHosts != null && dataNodeHosts.length != 0) {
873       numDataNodes = dataNodeHosts.length;
874     }
875 
876     LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
877         numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");
878 
879     // If we already put up a cluster, fail.
880     if (miniClusterRunning) {
881       throw new IllegalStateException("A mini-cluster is already running");
882     }
883     miniClusterRunning = true;
884 
885     setupClusterTestDir();
886     System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());
887 
888     // Bring up mini dfs cluster. This spews a bunch of warnings about missing
889     // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
890     if(this.dfsCluster == null) {
891       dfsCluster = startMiniDFSCluster(numDataNodes, dataNodeHosts);
892     }
893 
894     // Start up a zk cluster.
895     if (this.zkCluster == null) {
896       startMiniZKCluster(clusterTestDir);
897     }
898 
899     // Start the MiniHBaseCluster
900     return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
901   }
902 
903   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
904       throws IOException, InterruptedException{
905     return startMiniHBaseCluster(numMasters, numSlaves, null, null);
906   }
907 
908   /**
909    * Starts up mini hbase cluster.  Usually used after call to
910    * {@link #startMiniCluster(int, int)} when doing stepped startup of clusters.
911    * Usually you won't want this; you'll usually want {@link #startMiniCluster()}.
912    * @param numMasters Number of masters to start.
913    * @param numSlaves Number of region servers to start.
914    * @return Reference to the hbase mini hbase cluster.
915    * @throws IOException
916    * @throws InterruptedException
917    * @see #startMiniCluster()
918    */
919   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
920         final int numSlaves, Class<? extends HMaster> masterClass,
921         Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
922   throws IOException, InterruptedException {
923     // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
924     createRootDir();
925 
926     // These settings will make the master wait until this exact number of
927     // region servers is connected.
928     if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
929       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
930     }
931     if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
932       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
933     }
934 
935     Configuration c = new Configuration(this.conf);
936     this.hbaseCluster =
937         new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
938     // Don't leave here till we've done a successful scan of the hbase:meta
939     HTable t = new HTable(c, TableName.META_TABLE_NAME);
940     ResultScanner s = t.getScanner(new Scan());
941     while (s.next() != null) {
942       continue;
943     }
944     s.close();
945     t.close();
946 
947     getHBaseAdmin(); // eagerly create the hbaseAdmin
948     LOG.info("Minicluster is up");
949 
950     // Set the hbase.fs.tmp.dir config to make sure that we have some default value. This is
951     // for tests that do not read hbase-defaults.xml
952     setHBaseFsTmpDir();
953 
954     return (MiniHBaseCluster)this.hbaseCluster;
955   }
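  // A sketch of a stepped startup, for tests that need to interpose between
  // the dfs/zk phase and the hbase phase (cluster sizes are illustrative):
  //
  //   HBaseTestingUtility util = new HBaseTestingUtility();
  //   util.startMiniDFSCluster(3);
  //   util.startMiniZKCluster();
  //   // ... pre-seed the filesystem or tweak the Configuration here ...
  //   util.startMiniHBaseCluster(1, 3);  // 1 master, 3 regionservers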
956 
957   /**
958    * Starts the hbase cluster up again after shutting it down previously in a
959    * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
960    * @param servers number of region servers
961    * @throws IOException
962    */
963   public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
964     this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
965     // Don't leave here till we've done a successful scan of the hbase:meta
966     HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
967     ResultScanner s = t.getScanner(new Scan());
968     while (s.next() != null) {
969       // do nothing
970     }
971     LOG.info("HBase has been restarted");
972     s.close();
973     t.close();
974   }
975 
976   /**
977    * @return Current mini hbase cluster. Only has something in it after a call
978    * to {@link #startMiniCluster()}.
979    * @see #startMiniCluster()
980    */
981   public MiniHBaseCluster getMiniHBaseCluster() {
982     if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
983       return (MiniHBaseCluster)this.hbaseCluster;
984     }
985     throw new RuntimeException(hbaseCluster + " not an instance of " +
986                                MiniHBaseCluster.class.getName());
987   }
988 
989   /**
990    * Stops mini hbase, zk, and hdfs clusters.
991    * @throws IOException
992    * @see #startMiniCluster(int)
993    */
994   public void shutdownMiniCluster() throws Exception {
995     LOG.info("Shutting down minicluster");
996     shutdownMiniHBaseCluster();
997     if (!this.passedZkCluster){
998       shutdownMiniZKCluster();
999     }
1000     shutdownMiniDFSCluster();
1001 
1002     cleanupTestDir();
1003     miniClusterRunning = false;
1004     LOG.info("Minicluster is down");
1005   }
1006 
1007   /**
1008    * @return True if we removed the test dirs
1009    * @throws IOException
1010    */
1011   @Override
1012   public boolean cleanupTestDir() throws IOException {
1013     boolean ret = super.cleanupTestDir();
1014     if (deleteDir(this.clusterTestDir)) {
1015       this.clusterTestDir = null;
1016       return ret;
1017     }
1018     return false;
1019   }
1020 
1021   /**
1022    * Shuts down the HBase mini cluster.  Does not shut down zk or dfs if running.
1023    * @throws IOException
1024    */
1025   public void shutdownMiniHBaseCluster() throws IOException {
1026     if (hbaseAdmin != null) {
1027       hbaseAdmin.close0();
1028       hbaseAdmin = null;
1029     }
1030 
1031     // unset the configuration for MIN and MAX RS to start
1032     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
1033     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
1034     if (this.hbaseCluster != null) {
1035       this.hbaseCluster.shutdown();
1036       // Wait till hbase is down before going on to shutdown zk.
1037       this.hbaseCluster.waitUntilShutDown();
1038       this.hbaseCluster = null;
1039     }
1040 
1041     if (zooKeeperWatcher != null) {
1042       zooKeeperWatcher.close();
1043       zooKeeperWatcher = null;
1044     }
1045   }
1046 
1047   /**
1048    * Returns the path to the default root dir the minicluster uses.
1049    * Note: this does not cause the root dir to be created.
1050    * @return Fully qualified path for the default hbase root dir
1051    * @throws IOException
1052    */
1053   public Path getDefaultRootDirPath() throws IOException {
1054     FileSystem fs = FileSystem.get(this.conf);
1055     return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
1056   }
1057 
1058   /**
1059    * Creates an hbase rootdir in user home directory.  Also creates hbase
1060    * version file.  Normally you won't make use of this method.  Root hbasedir
1061    * is created for you as part of mini cluster startup.  You'd only use this
1062    * method if you were doing manual operations.
1063    * @return Fully qualified path to hbase root dir
1064    * @throws IOException
1065    */
1066   public Path createRootDir() throws IOException {
1067     FileSystem fs = FileSystem.get(this.conf);
1068     Path hbaseRootdir = getDefaultRootDirPath();
1069     FSUtils.setRootDir(this.conf, hbaseRootdir);
1070     fs.mkdirs(hbaseRootdir);
1071     FSUtils.setVersion(fs, hbaseRootdir);
1072     return hbaseRootdir;
1073   }
1074 
1075   private void setHBaseFsTmpDir() throws IOException {
1076     String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
1077     if (hbaseFsTmpDirInString == null) {
1078       this.conf.set("hbase.fs.tmp.dir",  getDataTestDirOnTestFS("hbase-staging").toString());
1079       LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
1080     } else {
1081       LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
1082     }
1083   }
1084 
1085   /**
1086    * Flushes all caches in the mini hbase cluster
1087    * @throws IOException
1088    */
1089   public void flush() throws IOException {
1090     getMiniHBaseCluster().flushcache();
1091   }
1092 
1093   /**
1094    * Flushes all regions of the given table in the mini hbase cluster
1095    * @throws IOException
1096    */
1097   public void flush(TableName tableName) throws IOException {
1098     getMiniHBaseCluster().flushcache(tableName);
1099   }
1100 
1101   /**
1102    * Compact all regions in the mini hbase cluster
1103    * @throws IOException
1104    */
1105   public void compact(boolean major) throws IOException {
1106     getMiniHBaseCluster().compact(major);
1107   }
1108 
1109   /**
1110    * Compact all of a table's regions in the mini hbase cluster
1111    * @throws IOException
1112    */
1113   public void compact(TableName tableName, boolean major) throws IOException {
1114     getMiniHBaseCluster().compact(tableName, major);
1115   }
1116 
1117   /**
1118    * Create a table.
1119    * @param tableName
1120    * @param family
1121    * @return An HTable instance for the created table.
1122    * @throws IOException
1123    */
1124   public HTable createTable(String tableName, String family)
1125   throws IOException{
1126     return createTable(TableName.valueOf(tableName), new String[]{family});
1127   }
1128 
1129   /**
1130    * Create a table.
1131    * @param tableName
1132    * @param family
1133    * @return An HTable instance for the created table.
1134    * @throws IOException
1135    */
1136   public HTable createTable(byte[] tableName, byte[] family)
1137   throws IOException{
1138     return createTable(TableName.valueOf(tableName), new byte[][]{family});
1139   }
1140 
1141   /**
1142    * Create a table.
1143    * @param tableName
1144    * @param families
1145    * @return An HTable instance for the created table.
1146    * @throws IOException
1147    */
1148   public HTable createTable(TableName tableName, String[] families)
1149   throws IOException {
1150     List<byte[]> fams = new ArrayList<byte[]>(families.length);
1151     for (String family : families) {
1152       fams.add(Bytes.toBytes(family));
1153     }
1154     return createTable(tableName, fams.toArray(new byte[0][]));
1155   }
1156 
1157   /**
1158    * Create a table.
1159    * @param tableName
1160    * @param family
1161    * @return An HTable instance for the created table.
1162    * @throws IOException
1163    */
1164   public HTable createTable(TableName tableName, byte[] family)
1165   throws IOException{
1166     return createTable(tableName, new byte[][]{family});
1167   }
1168 
1169 
1170   /**
1171    * Create a table.
1172    * @param tableName
1173    * @param families
1174    * @return An HTable instance for the created table.
1175    * @throws IOException
1176    */
1177   public HTable createTable(byte[] tableName, byte[][] families)
1178   throws IOException {
1179     return createTable(tableName, families,
1180         new Configuration(getConfiguration()));
1181   }
1182 
1183   /**
1184    * Create a table.
1185    * @param tableName
1186    * @param families
1187    * @return An HTable instance for the created table.
1188    * @throws IOException
1189    */
1190   public HTable createTable(TableName tableName, byte[][] families)
1191   throws IOException {
1192     return createTable(tableName, families,
1193         new Configuration(getConfiguration()));
1194   }
1195 
1196   public HTable createTable(byte[] tableName, byte[][] families,
1197       int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
1198     return createTable(TableName.valueOf(tableName), families, numVersions,
1199         startKey, endKey, numRegions);
1200   }
1201 
1202   public HTable createTable(String tableName, byte[][] families,
1203       int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
1204     return createTable(TableName.valueOf(tableName), families, numVersions,
1205         startKey, endKey, numRegions);
1206   }
1207 
1208   public HTable createTable(TableName tableName, byte[][] families,
1209       int numVersions, byte[] startKey, byte[] endKey, int numRegions)
1210   throws IOException{
1211     HTableDescriptor desc = new HTableDescriptor(tableName);
1212     for (byte[] family : families) {
1213       HColumnDescriptor hcd = new HColumnDescriptor(family)
1214           .setMaxVersions(numVersions);
1215       desc.addFamily(hcd);
1216     }
1217     getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
1218     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1219     waitUntilAllRegionsAssigned(tableName);
1220     return new HTable(getConfiguration(), tableName);
1221   }
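  // A sketch of creating a pre-split table with the method above; the key range,
  // version count, and region count are illustrative:
  //
  //   byte[][] families = { Bytes.toBytes("f1"), Bytes.toBytes("f2") };
  //   HTable table = util.createTable(TableName.valueOf("presplit"), families,
  //       3, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 16);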
1222 
1223   /**
1224    * Create a table.
1225    * @param htd
1226    * @param families
1227    * @param c Configuration to use
1228    * @return An HTable instance for the created table.
1229    * @throws IOException
1230    */
1231   public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
1232   throws IOException {
1233     for(byte[] family : families) {
1234       HColumnDescriptor hcd = new HColumnDescriptor(family);
1235       // Disable blooms (they are on by default as of 0.95) because tests have
1236       // hard-coded counts of what to expect in block cache, etc., and having
1237       // blooms on interferes with that.
1238       hcd.setBloomFilterType(BloomType.NONE);
1239       htd.addFamily(hcd);
1240     }
1241     getHBaseAdmin().createTable(htd);
1242     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1243     waitUntilAllRegionsAssigned(htd.getTableName());
1244     return new HTable(c, htd.getTableName());
1245   }
1246 
1247   /**
1248    * Create a table.
1249    * @param htd
1250    * @param splitRows
1251    * @return An HTable instance for the created table.
1252    * @throws IOException
1253    */
1254   public HTable createTable(HTableDescriptor htd, byte[][] splitRows)
1255       throws IOException {
1256     getHBaseAdmin().createTable(htd, splitRows);
1257     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1258     waitUntilAllRegionsAssigned(htd.getTableName());
1259     return new HTable(getConfiguration(), htd.getTableName());
1260   }
1261 
1262   /**
1263    * Create a table.
1264    * @param tableName
1265    * @param families
1266    * @param c Configuration to use
1267    * @return An HTable instance for the created table.
1268    * @throws IOException
1269    */
1270   public HTable createTable(TableName tableName, byte[][] families,
1271       final Configuration c)
1272   throws IOException {
1273     return createTable(new HTableDescriptor(tableName), families, c);
1274   }
1275 
1276   /**
1277    * Create a table.
1278    * @param tableName
1279    * @param families
1280    * @param c Configuration to use
1281    * @return An HTable instance for the created table.
1282    * @throws IOException
1283    */
1284   public HTable createTable(byte[] tableName, byte[][] families,
1285       final Configuration c)
1286   throws IOException {
1287     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1288     for(byte[] family : families) {
1289       HColumnDescriptor hcd = new HColumnDescriptor(family);
1290       // Disable blooms (they are on by default as of 0.95) because tests have
1291       // hard-coded counts of what to expect in block cache, etc., and having
1292       // blooms on interferes with that.
1293       hcd.setBloomFilterType(BloomType.NONE);
1294       desc.addFamily(hcd);
1295     }
1296     getHBaseAdmin().createTable(desc);
1297     return new HTable(c, tableName);
1298   }
1299 
1300   /**
1301    * Create a table.
1302    * @param tableName
1303    * @param families
1304    * @param c Configuration to use
1305    * @param numVersions
1306    * @return An HTable instance for the created table.
1307    * @throws IOException
1308    */
1309   public HTable createTable(TableName tableName, byte[][] families,
1310       final Configuration c, int numVersions)
1311   throws IOException {
1312     HTableDescriptor desc = new HTableDescriptor(tableName);
1313     for(byte[] family : families) {
1314       HColumnDescriptor hcd = new HColumnDescriptor(family)
1315           .setMaxVersions(numVersions);
1316       desc.addFamily(hcd);
1317     }
1318     getHBaseAdmin().createTable(desc);
1319     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1320     waitUntilAllRegionsAssigned(tableName);
1321     return new HTable(c, tableName);
1322   }
1323 
1324   /**
1325    * Create a table.
1326    * @param tableName
1327    * @param families
1328    * @param c Configuration to use
1329    * @param numVersions
1330    * @return An HTable instance for the created table.
1331    * @throws IOException
1332    */
1333   public HTable createTable(byte[] tableName, byte[][] families,
1334       final Configuration c, int numVersions)
1335   throws IOException {
1336     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1337     for(byte[] family : families) {
1338       HColumnDescriptor hcd = new HColumnDescriptor(family)
1339           .setMaxVersions(numVersions);
1340       desc.addFamily(hcd);
1341     }
1342     getHBaseAdmin().createTable(desc);
1343     return new HTable(c, tableName);
1344   }
1345 
1346   /**
1347    * Create a table.
1348    * @param tableName
1349    * @param family
1350    * @param numVersions
1351    * @return An HTable instance for the created table.
1352    * @throws IOException
1353    */
1354   public HTable createTable(byte[] tableName, byte[] family, int numVersions)
1355   throws IOException {
1356     return createTable(tableName, new byte[][]{family}, numVersions);
1357   }
1358 
1359   /**
1360    * Create a table.
1361    * @param tableName
1362    * @param family
1363    * @param numVersions
1364    * @return An HTable instance for the created table.
1365    * @throws IOException
1366    */
1367   public HTable createTable(TableName tableName, byte[] family, int numVersions)
1368   throws IOException {
1369     return createTable(tableName, new byte[][]{family}, numVersions);
1370   }
1371 
1372   /**
1373    * Create a table.
1374    * @param tableName
1375    * @param families
1376    * @param numVersions
1377    * @return An HTable instance for the created table.
1378    * @throws IOException
1379    */
1380   public HTable createTable(byte[] tableName, byte[][] families,
1381       int numVersions)
1382   throws IOException {
1383     return createTable(TableName.valueOf(tableName), families, numVersions);
1384   }
1385 
1386   /**
1387    * Create a table.
   * @param tableName Name of the table to create.
   * @param families Column families for the new table.
   * @param numVersions Maximum versions to keep per column family.
1391    * @return An HTable instance for the created table.
1392    * @throws IOException
1393    */
1394   public HTable createTable(TableName tableName, byte[][] families,
1395       int numVersions)
1396   throws IOException {
1397     HTableDescriptor desc = new HTableDescriptor(tableName);
1398     for (byte[] family : families) {
1399       HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1400       desc.addFamily(hcd);
1401     }
1402     getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should also wait until they are assigned.
1404     waitUntilAllRegionsAssigned(tableName);
1405     return new HTable(new Configuration(getConfiguration()), tableName);
1406   }
1407 
1408   /**
1409    * Create a table.
   * @param tableName Name of the table to create.
   * @param families Column families for the new table.
   * @param numVersions Maximum versions to keep per column family.
   * @param blockSize Block size for each family's store files, in bytes.
1413    * @return An HTable instance for the created table.
1414    * @throws IOException
1415    */
1416   public HTable createTable(byte[] tableName, byte[][] families,
1417     int numVersions, int blockSize) throws IOException {
1418     return createTable(TableName.valueOf(tableName),
1419         families, numVersions, blockSize);
1420   }
1421 
1422   /**
1423    * Create a table.
   * @param tableName Name of the table to create.
   * @param families Column families for the new table.
   * @param numVersions Maximum versions to keep per column family.
   * @param blockSize Block size for each family's store files, in bytes.
1427    * @return An HTable instance for the created table.
1428    * @throws IOException
1429    */
1430   public HTable createTable(TableName tableName, byte[][] families,
1431     int numVersions, int blockSize) throws IOException {
1432     HTableDescriptor desc = new HTableDescriptor(tableName);
1433     for (byte[] family : families) {
1434       HColumnDescriptor hcd = new HColumnDescriptor(family)
1435           .setMaxVersions(numVersions)
1436           .setBlocksize(blockSize);
1437       desc.addFamily(hcd);
1438     }
1439     getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should also wait until they are assigned.
1441     waitUntilAllRegionsAssigned(tableName);
1442     return new HTable(new Configuration(getConfiguration()), tableName);
1443   }
1444 
1445   /**
1446    * Create a table.
   * @param tableName Name of the table to create.
   * @param families Column families for the new table.
   * @param numVersions Maximum versions to keep, one entry per family (parallel to families).
1450    * @return An HTable instance for the created table.
1451    * @throws IOException
1452    */
1453   public HTable createTable(byte[] tableName, byte[][] families,
1454       int[] numVersions)
1455   throws IOException {
1456     return createTable(TableName.valueOf(tableName), families, numVersions);
1457   }
1458 
1459   /**
1460    * Create a table.
   * @param tableName Name of the table to create.
   * @param families Column families for the new table.
   * @param numVersions Maximum versions to keep, one entry per family (parallel to families).
1464    * @return An HTable instance for the created table.
1465    * @throws IOException
1466    */
1467   public HTable createTable(TableName tableName, byte[][] families,
1468       int[] numVersions)
1469   throws IOException {
1470     HTableDescriptor desc = new HTableDescriptor(tableName);
1471     int i = 0;
1472     for (byte[] family : families) {
1473       HColumnDescriptor hcd = new HColumnDescriptor(family)
1474           .setMaxVersions(numVersions[i]);
1475       desc.addFamily(hcd);
1476       i++;
1477     }
1478     getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should also wait until they are assigned.
1480     waitUntilAllRegionsAssigned(tableName);
1481     return new HTable(new Configuration(getConfiguration()), tableName);
1482   }
1483 
1484   /**
1485    * Create a table.
   * @param tableName Name of the table to create.
   * @param family Column family for the new table.
   * @param splitRows Split points; the table is pre-split into one region per split row plus one.
1489    * @return An HTable instance for the created table.
1490    * @throws IOException
1491    */
1492   public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
1493     throws IOException{
1494     return createTable(TableName.valueOf(tableName), family, splitRows);
1495   }
1496 
1497   /**
1498    * Create a table.
   * @param tableName Name of the table to create.
   * @param family Column family for the new table.
   * @param splitRows Split points; the table is pre-split into one region per split row plus one.
1502    * @return An HTable instance for the created table.
1503    * @throws IOException
1504    */
1505   public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
1506       throws IOException {
1507     HTableDescriptor desc = new HTableDescriptor(tableName);
1508     HColumnDescriptor hcd = new HColumnDescriptor(family);
1509     desc.addFamily(hcd);
1510     getHBaseAdmin().createTable(desc, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should also wait until they are assigned.
1512     waitUntilAllRegionsAssigned(tableName);
1513     return new HTable(getConfiguration(), tableName);
1514   }
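  /*
   * Usage sketch (illustrative; names are invented): pre-splitting a table into
   * three regions, [ , b), [b, m) and [m, ). The first and last regions get the
   * implicit empty start/end keys.
   *
   *   byte[][] splits = new byte[][] { Bytes.toBytes("b"), Bytes.toBytes("m") };
   *   HTable t = util.createTable(TableName.valueOf("presplit"),
   *       Bytes.toBytes("cf"), splits);
   */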
1515 
1516   /**
1517    * Create a table.
   * @param tableName Name of the table to create.
   * @param families Column families for the new table.
   * @param splitRows Split points; the table is pre-split into one region per split row plus one.
1521    * @return An HTable instance for the created table.
1522    * @throws IOException
1523    */
1524   public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
1525       throws IOException {
1526     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1527     for(byte[] family:families) {
1528       HColumnDescriptor hcd = new HColumnDescriptor(family);
1529       desc.addFamily(hcd);
1530     }
1531     getHBaseAdmin().createTable(desc, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should also wait until they are assigned.
1533     waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
1534     return new HTable(getConfiguration(), tableName);
1535   }
1536 
1537   /**
1538    * Create a table with multiple regions.
   * @param tableName Name of the table to create.
   * @param family Column family for the new table.
   * @param numRegions Number of regions to pre-split the table into; must be at least 3.
1542    * @return An HTable instance for the created table.
1543    * @throws IOException
1544    */
1545   public HTable createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
1546       throws IOException {
1547     if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1548     byte[] startKey = Bytes.toBytes("aaaaa");
1549     byte[] endKey = Bytes.toBytes("zzzzz");
1550     byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1551 
1552     return createTable(tableName, family, splitKeys);
1553   }
1554 
1555   /**
1556    * Drop an existing table
1557    * @param tableName existing table
1558    */
1559   public void deleteTable(String tableName) throws IOException {
1560     deleteTable(TableName.valueOf(tableName));
1561   }
1562 
1563   /**
1564    * Drop an existing table
1565    * @param tableName existing table
1566    */
1567   public void deleteTable(byte[] tableName) throws IOException {
1568     deleteTable(TableName.valueOf(tableName));
1569   }
1570 
1571   /**
1572    * Drop an existing table
1573    * @param tableName existing table
1574    */
1575   public void deleteTable(TableName tableName) throws IOException {
1576     try {
1577       getHBaseAdmin().disableTable(tableName);
1578     } catch (TableNotEnabledException e) {
1579       LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1580     }
1581     getHBaseAdmin().deleteTable(tableName);
1582   }
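  /*
   * Usage sketch (illustrative): tests that reuse a table name typically drop
   * and recreate the table between runs. deleteTable handles the disable step
   * itself, so no explicit disableTable call is needed:
   *
   *   TableName name = TableName.valueOf("reused");
   *   if (util.getHBaseAdmin().tableExists(name)) {
   *     util.deleteTable(name);
   *   }
   *   HTable t = util.createTable(name, Bytes.toBytes("cf"), 1);
   */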
1583 
1584   // ==========================================================================
1585   // Canned table and table descriptor creation
1586   // TODO replace HBaseTestCase
1587 
1588   public final static byte [] fam1 = Bytes.toBytes("colfamily11");
1589   public final static byte [] fam2 = Bytes.toBytes("colfamily21");
1590   public final static byte [] fam3 = Bytes.toBytes("colfamily31");
1591   public static final byte[][] COLUMNS = {fam1, fam2, fam3};
1592   private static final int MAXVERSIONS = 3;
1593 
1594   public static final char FIRST_CHAR = 'a';
1595   public static final char LAST_CHAR = 'z';
1596   public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
1597   public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1598 
  /**
   * Create a table descriptor for a table named <code>name</code> with
   * {@link #COLUMNS} as its families.
   * @param name Name to give the table.
   * @param minVersions Minimum number of versions to keep per column.
   * @param versions How many versions to allow per column.
   * @param ttl Time-to-live for cells, in seconds.
   * @param keepDeleted Whether deleted cells should be retained.
   * @return Table descriptor.
   */
1606   public HTableDescriptor createTableDescriptor(final String name,
1607       final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
1608     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
1609     for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1610       htd.addFamily(new HColumnDescriptor(cfName)
1611           .setMinVersions(minVersions)
1612           .setMaxVersions(versions)
1613           .setKeepDeletedCells(keepDeleted)
1614           .setBlockCacheEnabled(false)
1615           .setTimeToLive(ttl)
1616       );
1617     }
1618     return htd;
1619   }
1620 
  /**
   * Create a table descriptor for a table named <code>name</code> with
   * {@link #COLUMNS} as its families and default version/TTL settings.
   * @param name Name to give the table.
   * @return Table descriptor.
   */
1627   public HTableDescriptor createTableDescriptor(final String name) {
1628     return createTableDescriptor(name,  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1629         MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1630   }
1631 
1632   /**
1633    * Create an HRegion that writes to the local tmp dirs
   * @param desc Table descriptor for the region's table.
   * @param startKey Start key of the region.
   * @param endKey End key of the region.
   * @return The created region.
1638    * @throws IOException
1639    */
1640   public HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey,
1641       byte [] endKey)
1642   throws IOException {
1643     HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
1644     return createLocalHRegion(hri, desc);
1645   }
1646 
1647   /**
1648    * Create an HRegion that writes to the local tmp dirs
   * @param info Region info for the region to create.
   * @param desc Table descriptor for the region's table.
   * @return The created region.
1652    * @throws IOException
1653    */
1654   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
1655     return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
1656   }
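  /*
   * Usage sketch (illustrative): region-level unit tests can build a region
   * under the local test data directory without starting any cluster. The
   * region must be closed when done:
   *
   *   HTableDescriptor htd = util.createTableDescriptor("local");
   *   HRegion region = util.createLocalHRegion(htd,
   *       HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
   *   try {
   *     // ... puts/gets/scans directly against the region ...
   *   } finally {
   *     HRegion.closeHRegion(region);
   *   }
   */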
1657 
1658   /**
1659    * Create an HRegion that writes to the local tmp dirs with specified hlog
1660    * @param info regioninfo
1661    * @param desc table descriptor
1662    * @param hlog hlog for this region.
1663    * @return created hregion
1664    * @throws IOException
1665    */
1666   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog) throws IOException {
1667     return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
1668   }
1669 
1670 
1671   /**
   * @param tableName Name of the table the region belongs to.
   * @param startKey Start key of the region.
   * @param stopKey Stop key of the region.
   * @param callingMethod Name of the calling method.
   * @param conf Configuration to use.
   * @param isReadOnly Whether the table should be marked read-only.
   * @param durability Durability setting for the table.
   * @param hlog WAL for the region to use.
   * @param families Column families for the table.
1679    * @throws IOException
1680    * @return A region on which you must call
1681    *         {@link HRegion#closeHRegion(HRegion)} when done.
1682    */
1683   public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
1684       String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
1685       HLog hlog, byte[]... families) throws IOException {
1686     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
1687     htd.setReadOnly(isReadOnly);
1688     for (byte[] family : families) {
1689       HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Keep all versions so tests can read back everything they write.
      hcd.setMaxVersions(Integer.MAX_VALUE);
1692       htd.addFamily(hcd);
1693     }
1694     htd.setDurability(durability);
1695     HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
1696     return createLocalHRegion(info, htd, hlog);
1697   }
1699   // ==========================================================================
1700 
1701   /**
   * Truncate a table, i.e. delete every row it contains.
   * @param tableName existing table
   * @return HTable for the (now empty) table
1705    * @throws IOException
1706    */
1707   public HTable truncateTable(byte[] tableName) throws IOException {
1708     return truncateTable(TableName.valueOf(tableName));
1709   }
1710 
1711   /**
   * Truncate a table, i.e. delete every row it contains.
   * @param tableName existing table
   * @return HTable for the (now empty) table
1715    * @throws IOException
1716    */
1717   public HTable truncateTable(TableName tableName) throws IOException {
1718     HTable table = new HTable(getConfiguration(), tableName);
1719     Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for(Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    // Close the scanner we iterated; there is no need to open a second one.
    resScan.close();
1727     return table;
1728   }
1729 
1730   /**
1731    * Load table with rows from 'aaa' to 'zzz'.
1732    * @param t Table
1733    * @param f Family
1734    * @return Count of rows loaded.
1735    * @throws IOException
1736    */
1737   public int loadTable(final HTable t, final byte[] f) throws IOException {
1738     return loadTable(t, new byte[][] {f});
1739   }
1740 
1741   /**
1742    * Load table with rows from 'aaa' to 'zzz'.
1743    * @param t Table
   * @param f Family
   * @param writeToWAL true to append edits to the WAL, false to skip the WAL
1745    * @return Count of rows loaded.
1746    * @throws IOException
1747    */
1748   public int loadTable(final HTable t, final byte[] f, boolean writeToWAL) throws IOException {
1749     return loadTable(t, new byte[][] {f}, null, writeToWAL);
1750   }
1751 
1752   /**
1753    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1754    * @param t Table
1755    * @param f Array of Families to load
1756    * @return Count of rows loaded.
1757    * @throws IOException
1758    */
1759   public int loadTable(final HTable t, final byte[][] f) throws IOException {
1760     return loadTable(t, f, null);
1761   }
1762 
1763   /**
1764    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1765    * @param t Table
1766    * @param f Array of Families to load
1767    * @param value the values of the cells. If null is passed, the row key is used as value
1768    * @return Count of rows loaded.
1769    * @throws IOException
1770    */
1771   public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
1772     return loadTable(t, f, value, true);
1773   }
1774 
1775   /**
1776    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1777    * @param t Table
1778    * @param f Array of Families to load
   * @param value the values of the cells. If null is passed, the row key is used as value
   * @param writeToWAL true to append edits to the WAL, false to skip the WAL
1780    * @return Count of rows loaded.
1781    * @throws IOException
1782    */
1783   public int loadTable(final HTable t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException {
1784     t.setAutoFlush(false);
1785     int rowCount = 0;
1786     for (byte[] row : HBaseTestingUtility.ROWS) {
1787       Put put = new Put(row);
1788       put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
1789       for (int i = 0; i < f.length; i++) {
1790         put.add(f[i], null, value != null ? value : row);
1791       }
1792       t.put(put);
1793       rowCount++;
1794     }
1795     t.flushCommits();
1796     return rowCount;
1797   }
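  /*
   * Usage sketch (illustrative): loadTable writes one row per three-letter key
   * from "aaa" to "zzz", i.e. 26^3 = 17576 rows, which countRows can confirm:
   *
   *   HTable t = util.createTable(TableName.valueOf("load"), Bytes.toBytes("cf"), 1);
   *   int loaded = util.loadTable(t, Bytes.toBytes("cf"));   // 17576
   *   assertEquals(loaded, util.countRows(t));
   */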
1798 
1799   /** A tracker for tracking and validating table rows
1800    * generated with {@link HBaseTestingUtility#loadTable(HTable, byte[])}
1801    */
1802   public static class SeenRowTracker {
1803     int dim = 'z' - 'a' + 1;
1804     int[][][] seenRows = new int[dim][dim][dim]; //count of how many times the row is seen
1805     byte[] startRow;
1806     byte[] stopRow;
1807 
1808     public SeenRowTracker(byte[] startRow, byte[] stopRow) {
1809       this.startRow = startRow;
1810       this.stopRow = stopRow;
1811     }
1812 
1813     void reset() {
1814       for (byte[] row : ROWS) {
1815         seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
1816       }
1817     }
1818 
1819     int i(byte b) {
1820       return b - 'a';
1821     }
1822 
1823     public void addRow(byte[] row) {
1824       seenRows[i(row[0])][i(row[1])][i(row[2])]++;
1825     }
1826 
    /** Validate that every row between startRow (inclusive) and stopRow (exclusive)
     * was seen exactly once, and that no other row was seen at all.
1829      */
1830     public void validate() {
1831       for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1832         for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1833           for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1834             int count = seenRows[i(b1)][i(b2)][i(b3)];
1835             int expectedCount = 0;
1836             if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
1837                 && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
1838               expectedCount = 1;
1839             }
1840             if (count != expectedCount) {
1841               String row = new String(new byte[] {b1,b2,b3});
1842               throw new RuntimeException("Row:" + row + " has a seen count of " + count + " instead of " + expectedCount);
1843             }
1844           }
1845         }
1846       }
1847     }
1848   }
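  /*
   * Usage sketch (illustrative): verifying that a scan over [bbb, mmm) returned
   * each in-range row exactly once and no out-of-range rows at all:
   *
   *   SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("bbb"),
   *       Bytes.toBytes("mmm"));
   *   Scan scan = new Scan(Bytes.toBytes("bbb"), Bytes.toBytes("mmm"));
   *   for (Result r : table.getScanner(scan)) {
   *     tracker.addRow(r.getRow());
   *   }
   *   tracker.validate();   // throws RuntimeException on any mismatch
   */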
1849 
1850   public int loadRegion(final HRegion r, final byte[] f) throws IOException {
1851     return loadRegion(r, f, false);
1852   }
1853 
1854   /**
1855    * Load region with rows from 'aaa' to 'zzz'.
1856    * @param r Region
1857    * @param f Family
1858    * @param flush flush the cache if true
1859    * @return Count of rows loaded.
1860    * @throws IOException
1861    */
1862   public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
1863   throws IOException {
1864     byte[] k = new byte[3];
1865     int rowCount = 0;
1866     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1867       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1868         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1869           k[0] = b1;
1870           k[1] = b2;
1871           k[2] = b3;
1872           Put put = new Put(k);
          // Bulk test loads do not need recoverability, so skip the WAL;
          // loading runs much faster without WAL appends.
          put.setDurability(Durability.SKIP_WAL);
          put.add(f, null, k);
1876 
1877           int preRowCount = rowCount;
1878           int pause = 10;
1879           int maxPause = 1000;
1880           while (rowCount == preRowCount) {
1881             try {
1882               r.put(put);
1883               rowCount++;
1884             } catch (RegionTooBusyException e) {
1885               pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
1886               Threads.sleep(pause);
1887             }
1888           }
1889         }
1890       }
1891       if (flush) {
1892         r.flushcache();
1893       }
1894     }
1895     return rowCount;
1896   }
1897 
1898   public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow) throws IOException {
1899     for (int i = startRow; i < endRow; i++) {
1900       byte[] data = Bytes.toBytes(String.valueOf(i));
1901       Put put = new Put(data);
1902       put.add(f, null, data);
1903       t.put(put);
1904     }
1905   }
1906 
1907   /**
1908    * Return the number of rows in the given table.
1909    */
1910   public int countRows(final HTable table) throws IOException {
1911     Scan scan = new Scan();
1912     ResultScanner results = table.getScanner(scan);
1913     int count = 0;
1914     for (@SuppressWarnings("unused") Result res : results) {
1915       count++;
1916     }
1917     results.close();
1918     return count;
1919   }
1920 
1921   public int countRows(final HTable table, final byte[]... families) throws IOException {
1922     Scan scan = new Scan();
1923     for (byte[] family: families) {
1924       scan.addFamily(family);
1925     }
1926     ResultScanner results = table.getScanner(scan);
1927     int count = 0;
1928     for (@SuppressWarnings("unused") Result res : results) {
1929       count++;
1930     }
1931     results.close();
1932     return count;
1933   }
1934 
  /**
   * Return an MD5 digest, as a hex string, computed over all row keys of the
   * table. Useful for cheaply checking whether two tables hold the same row set.
   */
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // MessageDigest.toString() does not render the digest value; hex-encode
    // the actual MD5 bytes instead.
    return new java.math.BigInteger(1, digest.digest()).toString(16);
1947   }
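  /*
   * Usage sketch (illustrative): comparing a table's row set before and after
   * an operation that should leave it unchanged:
   *
   *   String before = util.checksumRows(table);
   *   // ... operation under test ...
   *   assertTrue(before.equals(util.checksumRows(table)));
   */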
1948 
1949   /**
   * Creates many regions, with names ranging from "aaa" to "zzz".
1951    *
1952    * @param table  The table to use for the data.
1953    * @param columnFamily  The family to insert the data into.
1954    * @return count of regions created.
1955    * @throws IOException When creating the regions fails.
1956    */
1957   public int createMultiRegions(HTable table, byte[] columnFamily)
1958   throws IOException {
1959     return createMultiRegions(getConfiguration(), table, columnFamily);
1960   }
1961 
1962   /** All the row values for the data loaded by {@link #loadTable(HTable, byte[])} */
1963   public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
1964   static {
1965     int i = 0;
1966     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1967       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1968         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1969           ROWS[i][0] = b1;
1970           ROWS[i][1] = b2;
1971           ROWS[i][2] = b3;
1972           i++;
1973         }
1974       }
1975     }
1976   }
1977 
1978   public static final byte[][] KEYS = {
1979     HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
1980     Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
1981     Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
1982     Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
1983     Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
1984     Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
1985     Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
1986     Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
1987     Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
1988   };
1989 
1990   public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
1991       Bytes.toBytes("bbb"),
1992       Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
1993       Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
1994       Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
1995       Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
1996       Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
1997       Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
1998       Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
1999       Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
2000   };
2001 
2002   /**
   * Creates many regions, with names ranging from "aaa" to "zzz".
2004    * @param c Configuration to use.
2005    * @param table  The table to use for the data.
2006    * @param columnFamily  The family to insert the data into.
2007    * @return count of regions created.
2008    * @throws IOException When creating the regions fails.
2009    */
2010   public int createMultiRegions(final Configuration c, final HTable table,
2011       final byte[] columnFamily)
2012   throws IOException {
2013     return createMultiRegions(c, table, columnFamily, KEYS);
2014   }
2015 
2016   void makeDFSClientNonRetrying() {
2017     if (null == this.dfsCluster) {
2018       LOG.debug("dfsCluster has not started, can't make client non-retrying.");
2019       return;
2020     }
2021     try {
2022       final FileSystem filesystem = this.dfsCluster.getFileSystem();
2023       if (!(filesystem instanceof DistributedFileSystem)) {
2024         LOG.debug("dfsCluster is not backed by a DistributedFileSystem, can't make client non-retrying.");
2025         return;
2026       }
2027       // rely on FileSystem.CACHE to alter how we talk via DFSClient
2028       final DistributedFileSystem fs = (DistributedFileSystem)filesystem;
2029       // retrieve the backing DFSClient instance
2030       final Field dfsField = fs.getClass().getDeclaredField("dfs");
2031       dfsField.setAccessible(true);
2032       final Class<?> dfsClazz = dfsField.getType();
2033       final DFSClient dfs = DFSClient.class.cast(dfsField.get(fs));
2034 
2035       // expose the method for creating direct RPC connections.
2036       final Method createRPCNamenode = dfsClazz.getDeclaredMethod("createRPCNamenode", InetSocketAddress.class, Configuration.class, UserGroupInformation.class);
2037       createRPCNamenode.setAccessible(true);
2038 
2039       // grab the DFSClient instance's backing connection information
2040       final Field nnField = dfsClazz.getDeclaredField("nnAddress");
2041       nnField.setAccessible(true);
2042       final InetSocketAddress nnAddress = InetSocketAddress.class.cast(nnField.get(dfs));
2043       final Field confField = dfsClazz.getDeclaredField("conf");
2044       confField.setAccessible(true);
2045       final Configuration conf = Configuration.class.cast(confField.get(dfs));
2046       final Field ugiField = dfsClazz.getDeclaredField("ugi");
2047       ugiField.setAccessible(true);
2048       final UserGroupInformation ugi = UserGroupInformation.class.cast(ugiField.get(dfs));
2049 
2050       // replace the proxy for the namenode rpc with a direct instance
2051       final Field namenodeField = dfsClazz.getDeclaredField("namenode");
2052       namenodeField.setAccessible(true);
2053       namenodeField.set(dfs, createRPCNamenode.invoke(null, nnAddress, conf, ugi));
      LOG.debug("Set DFSClient namenode to bare RPC");
2055     } catch (Exception exception) {
2056       LOG.info("Could not alter DFSClient to be non-retrying.", exception);
2057     }
2058   }
2059 
2060   /**
2061    * Creates the specified number of regions in the specified table.
   * @param c Configuration to use.
   * @param table The table to split into multiple regions.
   * @param family The family to insert the data into.
   * @param numRegions Number of regions to create; must be at least 3.
   * @return count of regions created.
2067    * @throws IOException
2068    */
2069   public int createMultiRegions(final Configuration c, final HTable table,
2070       final byte [] family, int numRegions)
2071   throws IOException {
2072     if (numRegions < 3) throw new IOException("Must create at least 3 regions");
2073     byte [] startKey = Bytes.toBytes("aaaaa");
2074     byte [] endKey = Bytes.toBytes("zzzzz");
2075     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
2076     byte [][] regionStartKeys = new byte[splitKeys.length+1][];
2077     System.arraycopy(splitKeys, 0, regionStartKeys, 1, splitKeys.length);
2078     regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
2079     return createMultiRegions(c, table, family, regionStartKeys);
2080   }
2081 
2082   @SuppressWarnings("deprecation")
2083   public int createMultiRegions(final Configuration c, final HTable table,
2084       final byte[] columnFamily, byte [][] startKeys)
2085   throws IOException {
2086     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2087     HTable meta = new HTable(c, TableName.META_TABLE_NAME);
2088     HTableDescriptor htd = table.getTableDescriptor();
2089     if(!htd.hasFamily(columnFamily)) {
2090       HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
2091       htd.addFamily(hcd);
2092     }
2093     // remove empty region - this is tricky as the mini cluster during the test
2094     // setup already has the "<tablename>,,123456789" row with an empty start
2095     // and end key. Adding the custom regions below adds those blindly,
2096     // including the new start region from empty to "bbb". lg
2097     List<byte[]> rows = getMetaTableRows(htd.getTableName());
2098     String regionToDeleteInFS = table
2099         .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
2100         .getRegionInfo().getEncodedName();
2101     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
2102     // add custom ones
2103     int count = 0;
2104     for (int i = 0; i < startKeys.length; i++) {
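      // j wraps around to 0 on the last iteration. Because startKeys[0] is
      // expected to be the empty byte array, the last region's end key becomes
      // "empty", i.e. that region extends to the end of the key space.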
2105       int j = (i + 1) % startKeys.length;
2106       HRegionInfo hri = new HRegionInfo(table.getName(),
2107         startKeys[i], startKeys[j]);
2108       MetaEditor.addRegionToMeta(meta, hri);
2109       newRegions.add(hri);
2110       count++;
2111     }
2112     // see comment above, remove "old" (or previous) single region
2113     for (byte[] row : rows) {
2114       LOG.info("createMultiRegions: deleting meta row -> " +
2115         Bytes.toStringBinary(row));
2116       meta.delete(new Delete(row));
2117     }
2118     // remove the "old" region from FS
2119     Path tableDir = new Path(getDefaultRootDirPath().toString()
2120         + System.getProperty("file.separator") + htd.getTableName()
2121         + System.getProperty("file.separator") + regionToDeleteInFS);
2122     FileSystem.get(c).delete(tableDir);
2123     // flush cache of regions
2124     HConnection conn = table.getConnection();
2125     conn.clearRegionCache();
2126     // assign all the new regions IF table is enabled.
2127     HBaseAdmin admin = getHBaseAdmin();
2128     if (admin.isTableEnabled(table.getTableName())) {
2129       for(HRegionInfo hri : newRegions) {
2130         admin.assign(hri.getRegionName());
2131       }
2132     }
2133 
2134     meta.close();
2135 
2136     return count;
2137   }
2138 
2139   /**
2140    * Create rows in hbase:meta for regions of the specified table with the specified
2141    * start keys.  The first startKey should be a 0 length byte array if you
2142    * want to form a proper range of regions.
   * @param conf Configuration to use.
   * @param htd Descriptor of the table the regions belong to.
   * @param startKeys Start keys, one per region to add.
2146    * @return list of region info for regions added to meta
2147    * @throws IOException
2148    */
2149   public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
2150       final HTableDescriptor htd, byte [][] startKeys)
2151   throws IOException {
2152     HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
2153     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2154     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
2155     // add custom ones
2156     for (int i = 0; i < startKeys.length; i++) {
2157       int j = (i + 1) % startKeys.length;
2158       HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
2159           startKeys[j]);
2160       MetaEditor.addRegionToMeta(meta, hri);
2161       newRegions.add(hri);
2162     }
2163 
2164     meta.close();
2165     return newRegions;
2166   }
2167 
2168   /**
2169    * Returns all rows from the hbase:meta table.
2170    *
2171    * @throws IOException When reading the rows fails.
2172    */
2173   public List<byte[]> getMetaTableRows() throws IOException {
2174     // TODO: Redo using MetaReader class
2175     HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2176     List<byte[]> rows = new ArrayList<byte[]>();
2177     ResultScanner s = t.getScanner(new Scan());
2178     for (Result result : s) {
2179       LOG.info("getMetaTableRows: row -> " +
2180         Bytes.toStringBinary(result.getRow()));
2181       rows.add(result.getRow());
2182     }
2183     s.close();
2184     t.close();
2185     return rows;
2186   }
2187 
2188   /**
   * Returns all rows from the hbase:meta table for a given user table.
   *
   * @param tableName user table whose region rows to return
   * @throws IOException When reading the rows fails.
2192    */
2193   public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2194     // TODO: Redo using MetaReader.
2195     HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2196     List<byte[]> rows = new ArrayList<byte[]>();
2197     ResultScanner s = t.getScanner(new Scan());
2198     for (Result result : s) {
2199       HRegionInfo info = HRegionInfo.getHRegionInfo(result);
2200       if (info == null) {
2201         LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2202         // TODO figure out what to do for this new hosed case.
2203         continue;
2204       }
2205 
2206       if (info.getTable().equals(tableName)) {
2207         LOG.info("getMetaTableRows: row -> " +
2208             Bytes.toStringBinary(result.getRow()) + info);
2209         rows.add(result.getRow());
2210       }
2211     }
2212     s.close();
2213     t.close();
2214     return rows;
2215   }
2216 
2217   /**
   * Tool to get a reference to the region server object that hosts the first
   * region of the specified user table.
   * It first searches hbase:meta for the rows describing the table's regions,
   * then finds the index of the server hosting the first of them, and finally
   * retrieves that region server's reference.
   * @param tableName user table to lookup in hbase:meta
   * @return region server that hosts the first region, null if no row was found
2225    * @throws IOException
2226    * @throws InterruptedException
2227    */
2228   public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
2229       throws IOException, InterruptedException {
2230     return getRSForFirstRegionInTable(TableName.valueOf(tableName));
2231   }
2232   /**
   * Tool to get a reference to the region server object that hosts the first
   * region of the specified user table.
   * It first searches hbase:meta for the rows describing the table's regions,
   * then finds the index of the server hosting the first of them, and finally
   * retrieves that region server's reference.
   * @param tableName user table to lookup in hbase:meta
   * @return region server that hosts the first region, null if no row was found
   * @throws IOException
   * @throws InterruptedException
2241    */
2242   public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2243       throws IOException, InterruptedException {
2244     List<byte[]> metaRows = getMetaTableRows(tableName);
2245     if (metaRows == null || metaRows.isEmpty()) {
2246       return null;
2247     }
2248     LOG.debug("Found " + metaRows.size() + " rows for table " +
2249       tableName);
2250     byte [] firstrow = metaRows.get(0);
2251     LOG.debug("FirstRow=" + Bytes.toString(firstrow));
2252     long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2253       HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2254     int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2255       HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // The configured pause is in milliseconds, so use the matching TimeUnit.
    RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MILLISECONDS);
2257     while(retrier.shouldRetry()) {
2258       int index = getMiniHBaseCluster().getServerWith(firstrow);
2259       if (index != -1) {
2260         return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2261       }
2262       // Came back -1.  Region may not be online yet.  Sleep a while.
2263       retrier.sleepUntilNextRetry();
2264     }
2265     return null;
2266   }
2267 
2268   /**
2269    * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>s.
2271    *
2272    * @throws IOException When starting the cluster fails.
2273    */
2274   public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2275     startMiniMapReduceCluster(2);
2276     return mrCluster;
2277   }
2278 
2279   /**
2280    * Tasktracker has a bug where changing the hadoop.log.dir system property
2281    * will not change its internal static LOG_DIR variable.
2282    */
2283   private void forceChangeTaskLogDir() {
2284     Field logDirField;
2285     try {
2286       logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2287       logDirField.setAccessible(true);
2288 
2289       Field modifiersField = Field.class.getDeclaredField("modifiers");
2290       modifiersField.setAccessible(true);
2291       modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2292 
2293       logDirField.set(null, new File(hadoopLogDir, "userlogs"));
2294     } catch (SecurityException e) {
2295       throw new RuntimeException(e);
2296     } catch (NoSuchFieldException e) {
2298       throw new RuntimeException(e);
2299     } catch (IllegalArgumentException e) {
2300       throw new RuntimeException(e);
2301     } catch (IllegalAccessException e) {
2302       throw new RuntimeException(e);
2303     }
2304   }
2305 
2306   /**
2307    * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
2308    * filesystem.
   * @param servers  The number of <code>TaskTracker</code>s to start.
2310    * @throws IOException When starting the cluster fails.
2311    */
2312   private void startMiniMapReduceCluster(final int servers) throws IOException {
2313     if (mrCluster != null) {
2314       throw new IllegalStateException("MiniMRCluster is already running");
2315     }
2316     LOG.info("Starting mini mapreduce cluster...");
2317     setupClusterTestDir();
2318     createDirsAndSetProperties();
2319 
2320     forceChangeTaskLogDir();
2321 
2322     //// hadoop2 specific settings
2323     // Tests were failing because this process used 6GB of virtual memory and was getting killed.
    // We raise the allowed virtual-memory ratio so that processes don't get killed.
2325     conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2326 
2327     // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
2328     // this avoids the problem by disabling speculative task execution in tests.
2329     conf.setBoolean("mapreduce.map.speculative", false);
2330     conf.setBoolean("mapreduce.reduce.speculative", false);
2331     ////
2332 
2333     // Allow the user to override FS URI for this map-reduce cluster to use.
2334     mrCluster = new MiniMRCluster(servers,
2335       FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
2336       null, null, new JobConf(this.conf));
2337     JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2338     if (jobConf == null) {
2339       jobConf = mrCluster.createJobConf();
2340     }
2341 
2342     jobConf.set("mapred.local.dir",
2343       conf.get("mapred.local.dir")); //Hadoop MiniMR overwrites this while it should not
2344     LOG.info("Mini mapreduce cluster started");
2345 
2346     // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
2347     // Our HBase MR jobs need several of these settings in order to properly run.  So we copy the
2348     // necessary config properties here.  YARN-129 required adding a few properties.
2349     conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
    // this is for MRv2 support; MR1 ignores it
2351     conf.set("mapreduce.framework.name", "yarn");
2352     conf.setBoolean("yarn.is.minicluster", true);
2353     String rmAddress = jobConf.get("yarn.resourcemanager.address");
2354     if (rmAddress != null) {
2355       conf.set("yarn.resourcemanager.address", rmAddress);
2356     }
2357     String historyAddress = jobConf.get("mapreduce.jobhistory.address");
2358     if (historyAddress != null) {
2359       conf.set("mapreduce.jobhistory.address", historyAddress);
2360     }
2361     String schedulerAddress =
2362       jobConf.get("yarn.resourcemanager.scheduler.address");
2363     if (schedulerAddress != null) {
2364       conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2365     }
2366   }
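  /*
   * Usage sketch (illustrative): MapReduce-based tests start the MR cluster
   * alongside the HBase mini cluster and must shut both down afterwards:
   *
   *   util.startMiniCluster();
   *   util.startMiniMapReduceCluster();
   *   try {
   *     // ... configure and submit the job under test ...
   *   } finally {
   *     util.shutdownMiniMapReduceCluster();
   *     util.shutdownMiniCluster();
   *   }
   */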
2367 
2368   /**
2369    * Stops the previously started <code>MiniMRCluster</code>.
2370    */
2371   public void shutdownMiniMapReduceCluster() {
2372     LOG.info("Stopping mini mapreduce cluster...");
2373     if (mrCluster != null) {
2374       mrCluster.shutdown();
2375       mrCluster = null;
2376     }
2377     // Restore configuration to point to local jobtracker
2378     conf.set("mapred.job.tracker", "local");
2379     LOG.info("Mini mapreduce cluster stopped");
2380   }
2381 
2382   /**
   * Create a stubbed-out RegionServerServices, mainly for getting at the FS.
2384    */
2385   public RegionServerServices createMockRegionServerService() throws IOException {
2386     return createMockRegionServerService((ServerName)null);
2387   }
2388 
2389   /**
   * Create a stubbed-out RegionServerServices, mainly for getting at the FS.
2391    * This version is used by TestTokenAuthentication
2392    */
2393   public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
2394     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2395     rss.setFileSystem(getTestFileSystem());
2396     rss.setRpcServer(rpc);
2397     return rss;
2398   }
2399 
2400   /**
   * Create a stubbed-out RegionServerServices, mainly for getting at the FS.
2402    * This version is used by TestOpenRegionHandler
2403    */
2404   public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2405     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2406     rss.setFileSystem(getTestFileSystem());
2407     return rss;
2408   }
2409 
2410   /**
2411    * Switches the logger for the given class to DEBUG level.
2412    *
2413    * @param clazz  The class for which to switch to debug logging.
2414    */
2415   public void enableDebug(Class<?> clazz) {
2416     Log l = LogFactory.getLog(clazz);
2417     if (l instanceof Log4JLogger) {
2418       ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2419     } else if (l instanceof Jdk14Logger) {
2420       ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2421     }
2422   }
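  /*
   * Usage sketch (illustrative): turn on DEBUG logging for a chatty component
   * at the start of a test, e.g.:
   *
   *   util.enableDebug(org.apache.hadoop.hbase.master.HMaster.class);
   */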
2423 
2424   /**
2425    * Expire the Master's session
2426    * @throws Exception
2427    */
2428   public void expireMasterSession() throws Exception {
2429     HMaster master = getMiniHBaseCluster().getMaster();
2430     expireSession(master.getZooKeeper(), false);
2431   }
2432 
2433   /**
2434    * Expire a region server's session
2435    * @param index which RS
2436    * @throws Exception
2437    */
2438   public void expireRegionServerSession(int index) throws Exception {
2439     HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2440     expireSession(rs.getZooKeeper(), false);
2441     decrementMinRegionServerCount();
2442   }
2443 
2444   private void decrementMinRegionServerCount() {
    // decrement the count for this.conf, for a newly spawned master
2446     // this.hbaseCluster shares this configuration too
2447     decrementMinRegionServerCount(getConfiguration());
2448 
2449     // each master thread keeps a copy of configuration
2450     for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2451       decrementMinRegionServerCount(master.getMaster().getConfiguration());
2452     }
2453   }
2454 
2455   private void decrementMinRegionServerCount(Configuration conf) {
2456     int currentCount = conf.getInt(
2457         ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2458     if (currentCount != -1) {
2459       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
2460           Math.max(currentCount - 1, 1));
2461     }
2462   }
2463 
2464   public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
2465    expireSession(nodeZK, false);
2466   }
2467 
2468   @Deprecated
2469   public void expireSession(ZooKeeperWatcher nodeZK, Server server)
2470     throws Exception {
2471     expireSession(nodeZK, false);
2472   }
2473 
2474   /**
2475    * Expire a ZooKeeper session as recommended in ZooKeeper documentation
2476    * http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A4
2477    * There are issues when doing this:
2478    * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
2479    * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
2480    *
2481    * @param nodeZK - the ZK watcher to expire
2482    * @param checkStatus - true to check if we can create an HTable with the
2483    *                    current configuration.
2484    */
2485   public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
2486     throws Exception {
2487     Configuration c = new Configuration(this.conf);
2488     String quorumServers = ZKConfig.getZKQuorumServersString(c);
2489     ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
2490     byte[] password = zk.getSessionPasswd();
2491     long sessionID = zk.getSessionId();
2492 
    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
    //  so we create a first watcher to be sure that the expiration event was
    //  sent. We expect that if our watcher receives the event, other watchers
    //  on the same machine will get it as well.
    // When we ask to close the connection, ZK does not close it before we have
    //  received all pending events, so we don't have to capture the event;
    //  just closing the connection should be enough.
2500     ZooKeeper monitor = new ZooKeeper(quorumServers,
2501       1000, new org.apache.zookeeper.Watcher(){
2502       @Override
2503       public void process(WatchedEvent watchedEvent) {
2504         LOG.info("Monitor ZKW received event="+watchedEvent);
2505       }
2506     } , sessionID, password);
2507 
2508     // Making it expire
2509     ZooKeeper newZK = new ZooKeeper(quorumServers,
2510         1000, EmptyWatcher.instance, sessionID, password);
2511 
    // Ensure that we have a connection to the server before closing down; otherwise
    // the close-session event may be consumed before we ever reach the CONNECTED state.
2514     long start = System.currentTimeMillis();
2515     while (newZK.getState() != States.CONNECTED
2516          && System.currentTimeMillis() - start < 1000) {
2517        Thread.sleep(1);
2518     }
2519     newZK.close();
2520     LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
2521 
2522     // Now closing & waiting to be sure that the clients get it.
2523     monitor.close();
2524 
2525     if (checkStatus) {
2526       new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
2527     }
2528   }
2529 
2530   /**
2531    * Get the Mini HBase cluster.
2532    *
2533    * @return hbase cluster
2534    * @see #getHBaseClusterInterface()
2535    */
2536   public MiniHBaseCluster getHBaseCluster() {
2537     return getMiniHBaseCluster();
2538   }
2539 
2540   /**
2541    * Returns the HBaseCluster instance.
   * <p>The returned object can be any of the subclasses of HBaseCluster, and tests
   * referring to it should not assume that the cluster is a mini cluster or a
   * distributed one. If a test only works on a mini cluster, the more specific
   * method {@link #getMiniHBaseCluster()} can be used instead without the
   * need to type-cast.
2547    */
2548   public HBaseCluster getHBaseClusterInterface() {
2549     //implementation note: we should rename this method as #getHBaseCluster(),
2550     //but this would require refactoring 90+ calls.
2551     return hbaseCluster;
2552   }
2553 
2554   /**
   * Returns an HBaseAdmin instance.
   * This instance is shared between HBaseTestingUtility instance users.
   * Closing it has no effect; it will be closed automatically when the
   * cluster shuts down.
2559    *
2560    * @return The HBaseAdmin instance.
2561    * @throws IOException
2562    */
2563   public synchronized HBaseAdmin getHBaseAdmin()
2564   throws IOException {
2565     if (hbaseAdmin == null){
2566       hbaseAdmin = new HBaseAdminForTests(getConfiguration());
2567     }
2568     return hbaseAdmin;
2569   }
2570 
2571   private HBaseAdminForTests hbaseAdmin = null;
2572   private static class HBaseAdminForTests extends HBaseAdmin {
2573     public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
2574         ZooKeeperConnectionException, IOException {
2575       super(c);
2576     }
2577 
2578     @Override
2579     public synchronized void close() throws IOException {
2580       LOG.warn("close() called on HBaseAdmin instance returned from HBaseTestingUtility.getHBaseAdmin()");
2581     }
2582 
2583     private synchronized void close0() throws IOException {
2584       super.close();
2585     }
2586   }
2587 
2588   /**
2589    * Returns a ZooKeeperWatcher instance.
2590    * This instance is shared between HBaseTestingUtility instance users.
   * Don't close it; it will be closed automatically when the
   * cluster shuts down.
2593    *
2594    * @return The ZooKeeperWatcher instance.
2595    * @throws IOException
2596    */
2597   public synchronized ZooKeeperWatcher getZooKeeperWatcher()
2598     throws IOException {
2599     if (zooKeeperWatcher == null) {
2600       zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
2601         new Abortable() {
2602         @Override public void abort(String why, Throwable e) {
2603           throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
2604         }
2605         @Override public boolean isAborted() {return false;}
2606       });
2607     }
2608     return zooKeeperWatcher;
2609   }
2610   private ZooKeeperWatcher zooKeeperWatcher;
2611 
2612 
2613 
2614   /**
2615    * Closes the named region.
2616    *
2617    * @param regionName  The region to close.
2618    * @throws IOException
2619    */
2620   public void closeRegion(String regionName) throws IOException {
2621     closeRegion(Bytes.toBytes(regionName));
2622   }
2623 
2624   /**
2625    * Closes the named region.
2626    *
2627    * @param regionName  The region to close.
2628    * @throws IOException
2629    */
2630   public void closeRegion(byte[] regionName) throws IOException {
2631     getHBaseAdmin().closeRegion(regionName, null);
2632   }
2633 
2634   /**
2635    * Closes the region containing the given row.
2636    *
2637    * @param row  The row to find the containing region.
2638    * @param table  The table to find the region.
2639    * @throws IOException
2640    */
2641   public void closeRegionByRow(String row, HTable table) throws IOException {
2642     closeRegionByRow(Bytes.toBytes(row), table);
2643   }
2644 
2645   /**
2646    * Closes the region containing the given row.
2647    *
2648    * @param row  The row to find the containing region.
2649    * @param table  The table to find the region.
2650    * @throws IOException
2651    */
2652   public void closeRegionByRow(byte[] row, HTable table) throws IOException {
2653     HRegionLocation hrl = table.getRegionLocation(row);
2654     closeRegion(hrl.getRegionInfo().getRegionName());
2655   }
2656 
  /**
   * Retrieves a splittable region randomly from the given table.
   *
   * @param tableName name of table
   * @param maxAttempts maximum number of attempts; unlimited for a value of -1
   * @return the HRegion chosen, or null if none was found within maxAttempts
   */
2664   public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
2665     List<HRegion> regions = getHBaseCluster().getRegions(tableName);
2666     int regCount = regions.size();
2667     Set<Integer> attempted = new HashSet<Integer>();
2668     int idx;
2669     int attempts = 0;
2670     do {
2671       regions = getHBaseCluster().getRegions(tableName);
2672       if (regCount != regions.size()) {
2673         // if there was region movement, clear attempted Set
2674         attempted.clear();
2675       }
2676       regCount = regions.size();
2677       // There are chances that before we get the region for the table from an RS the region may
2678       // be going for CLOSE.  This may be because online schema change is enabled
2679       if (regCount > 0) {
2680         idx = random.nextInt(regCount);
        // if we have already tried this region, there is no need to try again;
        // count the attempt so a finite maxAttempts still terminates the loop
        if (attempted.contains(idx)) {
          attempts++;
          continue;
        }
2684         try {
2685           regions.get(idx).checkSplit();
2686           return regions.get(idx);
2687         } catch (Exception ex) {
2688           LOG.warn("Caught exception", ex);
2689           attempted.add(idx);
2690         }
2691       }
2692       attempts++;
2693     } while (maxAttempts == -1 || attempts < maxAttempts);
2694     return null;
2695   }
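  /*
   * Usage sketch (illustrative): pick a region that is currently splittable and
   * ask for it to be split:
   *
   *   HRegion region = util.getSplittableRegion(tableName, 10);
   *   if (region != null) {
   *     util.getHBaseAdmin().split(region.getRegionName());
   *   }
   */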
2696 
2697   public MiniZooKeeperCluster getZkCluster() {
2698     return zkCluster;
2699   }
2700 
2701   public void setZkCluster(MiniZooKeeperCluster zkCluster) {
2702     this.passedZkCluster = true;
2703     this.zkCluster = zkCluster;
2704     conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
2705   }
2706 
2707   public MiniDFSCluster getDFSCluster() {
2708     return dfsCluster;
2709   }
2710 
2711   public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
2712     setDFSCluster(cluster, true);
2713   }
2714 
2715   /**
2716    * Set the MiniDFSCluster
2717    * @param cluster cluster to use
   * @param requireDown require that the cluster not be "up"
   *  (MiniDFSCluster#isClusterUp) before it is set.
2720    * @throws IllegalStateException if the passed cluster is up when it is required to be down
2721    * @throws IOException if the FileSystem could not be set from the passed dfs cluster
2722    */
2723   public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
2724       throws IllegalStateException, IOException {
2725     if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
2726       throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
2727     }
2728     this.dfsCluster = cluster;
2729     this.setFs();
2730   }
2731 
2732   public FileSystem getTestFileSystem() throws IOException {
2733     return HFileSystem.get(conf);
2734   }
2735 
2736   /**
   * Wait until all regions in a table have been assigned.  Waits up to the
   * default timeout (30 seconds) before giving up.
2739    * @param table Table to wait on.
2740    * @throws InterruptedException
2741    * @throws IOException
2742    */
2743   public void waitTableAvailable(byte[] table)
2744       throws InterruptedException, IOException {
2745     waitTableAvailable(getHBaseAdmin(), table, 30000);
2746   }
2747 
2748   public void waitTableAvailable(HBaseAdmin admin, byte[] table)
2749       throws InterruptedException, IOException {
2750     waitTableAvailable(admin, table, 30000);
2751   }
2752 
2753   /**
2754    * Wait until all regions in a table have been assigned
2755    * @param table Table to wait on.
2756    * @param timeoutMillis Timeout.
2757    * @throws InterruptedException
2758    * @throws IOException
2759    */
2760   public void waitTableAvailable(byte[] table, long timeoutMillis)
2761   throws InterruptedException, IOException {
2762     waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
2763   }
2764 
2765   public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
2766   throws InterruptedException, IOException {
2767     long startWait = System.currentTimeMillis();
2768     while (!admin.isTableAvailable(table)) {
2769       assertTrue("Timed out waiting for table to become available " +
2770         Bytes.toStringBinary(table),
2771         System.currentTimeMillis() - startWait < timeoutMillis);
2772       Thread.sleep(200);
2773     }
2774   }
2775 
2776   /**
   * Waits for a table to be 'enabled'.  Enabled means the table is marked 'enabled' and
   * all of its regions have been assigned.  Times out after the default period (30 seconds).
   * @see #waitTableAvailable(byte[])
   * @param table Table to wait on.
2782    * @throws InterruptedException
2783    * @throws IOException
2784    */
2785   public void waitTableEnabled(byte[] table)
2786       throws InterruptedException, IOException {
2787     waitTableEnabled(getHBaseAdmin(), table, 30000);
2788   }
2789 
2790   public void waitTableEnabled(HBaseAdmin admin, byte[] table)
2791       throws InterruptedException, IOException {
2792     waitTableEnabled(admin, table, 30000);
2793   }
2794 
2795   /**
   * Waits for a table to be 'enabled'.  Enabled means the table is marked 'enabled' and
   * all of its regions have been assigned.
2798    * @see #waitTableAvailable(byte[])
2799    * @param table Table to wait on.
2800    * @param timeoutMillis Time to wait on it being marked enabled.
2801    * @throws InterruptedException
2802    * @throws IOException
2803    */
2804   public void waitTableEnabled(byte[] table, long timeoutMillis)
2805   throws InterruptedException, IOException {
2806     waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
2807   }
2808 
2809   public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
2810   throws InterruptedException, IOException {
2811     long startWait = System.currentTimeMillis();
2812     waitTableAvailable(admin, table, timeoutMillis);
2813     while (!admin.isTableEnabled(table)) {
2814       assertTrue("Timed out waiting for table to become available and enabled " +
2815          Bytes.toStringBinary(table),
2816          System.currentTimeMillis() - startWait < timeoutMillis);
2817       Thread.sleep(200);
2818     }
    // Finally make sure all regions are fully open and online out on the cluster. Regions may be
    // in the hbase:meta table and almost open on all regionservers, but setting the region
    // online in the regionserver is the very last thing done and can take a little while to happen.
    // Below we do a get. The get will retry if it hits a NotServingRegionException or a
    // RegionOpeningException. It is crude, but when it is done all regions will be online.
2824     HConnection connection = HConnectionManager.createConnection(conf);
2825     try {
2826       Canary.sniff(connection, TableName.valueOf(table), TaskType.READ);
2827     } catch (Exception e) {
2828       throw new IOException(e);
2829     } finally {
2830       connection.close();
2831     }
2832   }
2833 
2834   /**
   * Waits for a table to be 'disabled'. Disabled means that the table is set as 'disabled'.
   * Times out after the default period (30 seconds).
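   * <p>Illustrative sketch (hypothetical test code):
   * <pre>
   * util.getHBaseAdmin().disableTableAsync(table);
   * util.waitTableDisabled(table); // polls every 200ms, up to 30s
   * </pre>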
2837    * @param table Table to wait on.
2838    * @throws InterruptedException
2839    * @throws IOException
2840    */
2841   public void waitTableDisabled(byte[] table)
2842       throws InterruptedException, IOException {
2843     waitTableDisabled(getHBaseAdmin(), table, 30000);
2844   }
2845 
2846   public void waitTableDisabled(HBaseAdmin admin, byte[] table)
2847       throws InterruptedException, IOException {
2848     waitTableDisabled(admin, table, 30000);
2849   }
2850 
2851   /**
   * Waits for a table to be 'disabled'. Disabled means that the table is set as 'disabled'.
2853    * @param table Table to wait on.
2854    * @param timeoutMillis Time to wait on it being marked disabled.
2855    * @throws InterruptedException
2856    * @throws IOException
2857    */
2858   public void waitTableDisabled(byte[] table, long timeoutMillis)
2859       throws InterruptedException, IOException {
2860     waitTableDisabled(getHBaseAdmin(), table, timeoutMillis);
2861   }
2862 
2863   public void waitTableDisabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
2864       throws InterruptedException, IOException {
2865     TableName tableName = TableName.valueOf(table);
2866     long startWait = System.currentTimeMillis();
2867     while (!admin.isTableDisabled(tableName)) {
2868       assertTrue("Timed out waiting for table to become disabled " +
2869               Bytes.toStringBinary(table),
2870           System.currentTimeMillis() - startWait < timeoutMillis);
2871       Thread.sleep(200);
2872     }
2873   }
2874 
2875   /**
   * Make sure that at least the specified number of region servers are running.
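   * <p>Illustrative sketch (hypothetical test code):
   * <pre>
   * // Guarantee at least three region servers before a balancer test.
   * if (util.ensureSomeRegionServersAvailable(3)) {
   *   LOG.info("Started additional region servers");
   * }
   * </pre>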
2879    * @param num minimum number of region servers that should be running
2880    * @return true if we started some servers
2881    * @throws IOException
2882    */
2883   public boolean ensureSomeRegionServersAvailable(final int num)
2884       throws IOException {
2885     boolean startedServer = false;
2886     MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
2887     for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
2888       LOG.info("Started new server=" + hbaseCluster.startRegionServer());
2889       startedServer = true;
2890     }
2891 
2892     return startedServer;
2893   }
2894 
2895 
2896   /**
2897    * Make sure that at least the specified number of region servers
2898    * are running. We don't count the ones that are currently stopping or are
2899    * stopped.
2900    * @param num minimum number of region servers that should be running
2901    * @return true if we started some servers
2902    * @throws IOException
2903    */
2904   public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
2905     throws IOException {
2906     boolean startedServer = ensureSomeRegionServersAvailable(num);
2907 
2908     int nonStoppedServers = 0;
2909     for (JVMClusterUtil.RegionServerThread rst :
2910       getMiniHBaseCluster().getRegionServerThreads()) {
2911 
2912       HRegionServer hrs = rst.getRegionServer();
2913       if (hrs.isStopping() || hrs.isStopped()) {
2914         LOG.info("A region server is stopped or stopping:"+hrs);
2915       } else {
2916         nonStoppedServers++;
2917       }
2918     }
2919     for (int i=nonStoppedServers; i<num; ++i) {
2920       LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
2921       startedServer = true;
2922     }
2923     return startedServer;
2924   }
2925 
2926 
2927   /**
   * This method clones the passed <code>c</code> configuration, setting a new
   * user into the clone.  Use it when getting new instances of FileSystem.  Only
   * works for DistributedFileSystem w/o Kerberos.
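   * <p>Illustrative sketch (hypothetical test code):
   * <pre>
   * User user = HBaseTestingUtility.getDifferentUser(conf, ".suffix");
   * user.runAs(new PrivilegedExceptionAction&lt;Void&gt;() {
   *   public Void run() throws Exception {
   *     FileSystem fs = FileSystem.get(conf); // distinct instance for this user
   *     return null;
   *   }
   * });
   * </pre>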
2931    * @param c Initial configuration
2932    * @param differentiatingSuffix Suffix to differentiate this user from others.
2933    * @return A new configuration instance with a different user set into it.
2934    * @throws IOException
2935    */
2936   public static User getDifferentUser(final Configuration c,
2937     final String differentiatingSuffix)
2938   throws IOException {
2939     FileSystem currentfs = FileSystem.get(c);
2940     if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
2941       return User.getCurrent();
2942     }
2943     // Else distributed filesystem.  Make a new instance per daemon.  Below
2944     // code is taken from the AppendTestUtil over in hdfs.
2945     String username = User.getCurrent().getName() +
2946       differentiatingSuffix;
2947     User user = User.createUserForTesting(c, username,
2948         new String[]{"supergroup"});
2949     return user;
2950   }
2951 
2952   /**
   * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append it is hard-coded to 5 and
   * makes tests linger.  Here is the exception you'll see:
2955    * <pre>
2956    * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/hlog.1276627923013 block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683 failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
2957    * </pre>
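   * <p>Illustrative sketch (hypothetical test code; the stream must be the
   * underlying DFSOutputStream, e.g. obtained via
   * <code>((FSDataOutputStream) out).getWrappedStream()</code>):
   * <pre>
   * HBaseTestingUtility.setMaxRecoveryErrorCount(wrappedStream, 1000);
   * </pre>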
2958    * @param stream A DFSClient.DFSOutputStream.
2959    * @param max
2960    * @throws NoSuchFieldException
2961    * @throws SecurityException
2962    * @throws IllegalAccessException
2963    * @throws IllegalArgumentException
2964    */
2965   public static void setMaxRecoveryErrorCount(final OutputStream stream,
2966       final int max) {
2967     try {
2968       Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
2969       for (Class<?> clazz: clazzes) {
2970         String className = clazz.getSimpleName();
2971         if (className.equals("DFSOutputStream")) {
2972           if (clazz.isInstance(stream)) {
2973             Field maxRecoveryErrorCountField =
2974               stream.getClass().getDeclaredField("maxRecoveryErrorCount");
2975             maxRecoveryErrorCountField.setAccessible(true);
2976             maxRecoveryErrorCountField.setInt(stream, max);
2977             break;
2978           }
2979         }
2980       }
2981     } catch (Exception e) {
2982       LOG.info("Could not set max recovery field", e);
2983     }
2984   }
2985 
2986   /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, up to 60 seconds. This means all regions have been deployed,
   * and the master has been informed and has updated hbase:meta with each
   * region's deployed server.
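   * <p>Illustrative sketch (hypothetical test code):
   * <pre>
   * util.getHBaseAdmin().createTable(desc, splits);
   * util.waitUntilAllRegionsAssigned(desc.getTableName()); // up to 60s
   * </pre>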
2991    * @param tableName the table name
2992    * @throws IOException
2993    */
2994   public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
2995     waitUntilAllRegionsAssigned(tableName, 60000);
2996   }
2997 
2998   /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, or until timeout. This means all regions have been deployed,
   * and the master has been informed and has updated hbase:meta with each
   * region's deployed server.
3003    * @param tableName the table name
3004    * @param timeout timeout, in milliseconds
3005    * @throws IOException
3006    */
3007   public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
3008       throws IOException {
3009     final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
3010     try {
3011       waitFor(timeout, 200, true, new Predicate<IOException>() {
3012         @Override
3013         public boolean evaluate() throws IOException {
3014           boolean allRegionsAssigned = true;
3015           Scan scan = new Scan();
3016           scan.addFamily(HConstants.CATALOG_FAMILY);
3017           ResultScanner s = meta.getScanner(scan);
3018           try {
3019             Result r;
3020             while ((r = s.next()) != null) {
3021               byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
3022               HRegionInfo info = HRegionInfo.parseFromOrNull(b);
3023               if (info != null && info.getTable().equals(tableName)) {
3024                 b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
3025                 allRegionsAssigned &= (b != null);
3026               }
3027             }
3028           } finally {
3029             s.close();
3030           }
3031           return allRegionsAssigned;
3032         }
3033       });
3034     } finally {
3035       meta.close();
3036     }
    // So, all regions are in the meta table but make sure master knows of the assignments before
    // returning -- sometimes this can lag.
3039     HMaster master = getHBaseCluster().getMaster();
3040     final RegionStates states = master.getAssignmentManager().getRegionStates();
3041     waitFor(timeout, 200, new Predicate<IOException>() {
3042       @Override
3043       public boolean evaluate() throws IOException {
3044         List<HRegionInfo> hris = states.getRegionsOfTable(tableName);
3045         return hris != null && !hris.isEmpty();
3046       }
3047     });
3048   }
3049 
3050   /**
   * Do a small get/scan against one store. This is required because the store
   * has no actual methods of querying itself, and relies on StoreScanner.
3053    */
3054   public static List<Cell> getFromStoreFile(HStore store,
3055                                                 Get get) throws IOException {
3056     Scan scan = new Scan(get);
3057     InternalScanner scanner = (InternalScanner) store.getScanner(scan,
3058         scan.getFamilyMap().get(store.getFamily().getName()),
3059         // originally MultiVersionConsistencyControl.resetThreadReadPoint() was called to set
3060         // readpoint 0.
3061         0);
3062 
3063     List<Cell> result = new ArrayList<Cell>();
3064     scanner.next(result);
3065     if (!result.isEmpty()) {
3066       // verify that we are on the row we want:
3067       Cell kv = result.get(0);
3068       if (!CellUtil.matchingRow(kv, get.getRow())) {
3069         result.clear();
3070       }
3071     }
3072     scanner.close();
3073     return result;
3074   }
3075 
3076   /**
   * Create region split keys between startKey and endKey
3078    *
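   * <p>Derived from the implementation below: for <code>numRegions = n</code>, the
   * result holds <code>n</code> region start keys, in order: the empty byte array,
   * <code>startKey</code>, <code>n - 3</code> intermediate keys, and <code>endKey</code>.
   *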
   * @param startKey first key of the split range
   * @param endKey last key of the split range
   * @param numRegions the number of regions to be created. It must be greater than 3.
   * @return the region split start keys; the first key is the empty byte array
3083    */
3084   public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
3085     assertTrue(numRegions>3);
3086     byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
3087     byte [][] result = new byte[tmpSplitKeys.length+1][];
3088     System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
3089     result[0] = HConstants.EMPTY_BYTE_ARRAY;
3090     return result;
3091   }
3092 
3093   /**
   * Do a small get/scan against one store. This is required because the store
   * has no actual methods of querying itself, and relies on StoreScanner.
3096    */
3097   public static List<Cell> getFromStoreFile(HStore store,
3098                                                 byte [] row,
3099                                                 NavigableSet<byte[]> columns
3100                                                 ) throws IOException {
3101     Get get = new Get(row);
3102     Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
3103     s.put(store.getFamily().getName(), columns);
3104 
3105     return getFromStoreFile(store,get);
3106   }
3107 
3108   /**
3109    * Gets a ZooKeeperWatcher.
   * @param TEST_UTIL the testing utility whose configuration to use
3111    */
3112   public static ZooKeeperWatcher getZooKeeperWatcher(
3113       HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
3114       IOException {
3115     ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
3116         "unittest", new Abortable() {
3117           boolean aborted = false;
3118 
3119           @Override
3120           public void abort(String why, Throwable e) {
3121             aborted = true;
3122             throw new RuntimeException("Fatal ZK error, why=" + why, e);
3123           }
3124 
3125           @Override
3126           public boolean isAborted() {
3127             return aborted;
3128           }
3129         });
3130     return zkw;
3131   }
3132 
3133   /**
3134    * Creates a znode with OPENED state.
   * @param TEST_UTIL the testing utility whose ZooKeeper quorum to use
   * @param region the region whose znode to create and transition
   * @param serverName the server the region is nominally opened on
   * @return the ZooKeeperWatcher used to create and transition the znode
3139    * @throws IOException
3140    * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
3141    * @throws KeeperException
3142    * @throws NodeExistsException
3143    */
3144   public static ZooKeeperWatcher createAndForceNodeToOpenedState(
3145       HBaseTestingUtility TEST_UTIL, HRegion region,
3146       ServerName serverName) throws ZooKeeperConnectionException,
3147       IOException, KeeperException, NodeExistsException {
3148     ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
3149     ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
3150     int version = ZKAssign.transitionNodeOpening(zkw, region
3151         .getRegionInfo(), serverName);
3152     ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
3153         version);
3154     return zkw;
3155   }
3156 
3157   public static void assertKVListsEqual(String additionalMsg,
3158       final List<? extends Cell> expected,
3159       final List<? extends Cell> actual) {
3160     final int eLen = expected.size();
3161     final int aLen = actual.size();
3162     final int minLen = Math.min(eLen, aLen);
3163 
3164     int i;
3165     for (i = 0; i < minLen
3166         && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
3167         ++i) {}
3168 
3169     if (additionalMsg == null) {
3170       additionalMsg = "";
3171     }
3172     if (!additionalMsg.isEmpty()) {
3173       additionalMsg = ". " + additionalMsg;
3174     }
3175 
3176     if (eLen != aLen || i != minLen) {
3177       throw new AssertionError(
3178           "Expected and actual KV arrays differ at position " + i + ": " +
3179           safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
3180           safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
3181     }
3182   }
3183 
3184   private static <T> String safeGetAsStr(List<T> lst, int i) {
3185     if (0 <= i && i < lst.size()) {
3186       return lst.get(i).toString();
3187     } else {
3188       return "<out_of_range>";
3189     }
3190   }
3191 
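  /**
   * Returns the cluster key, of the form
   * <code>zkQuorum:zkClientPort:znodeParent</code> (for example
   * <code>localhost:2181:/hbase</code>), as assembled by the method below.
   */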
3192   public String getClusterKey() {
3193     return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
3194         + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
3195         + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
3196             HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
3197   }
3198 
  /**
   * Creates a random table with the given parameters.
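   * <p>Illustrative sketch (hypothetical test code):
   * <pre>
   * HTable t = util.createRandomTable("randomTable",
   *     Arrays.asList("cf1", "cf2"), 3, 10, 2, 4, 100);
   * </pre>
   */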
3200   public HTable createRandomTable(String tableName,
3201       final Collection<String> families,
3202       final int maxVersions,
3203       final int numColsPerRow,
3204       final int numFlushes,
3205       final int numRegions,
3206       final int numRowsPerFlush)
3207       throws IOException, InterruptedException {
3208 
3209     LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
3210         " regions, " + numFlushes + " storefiles per region, " +
3211         numRowsPerFlush + " rows per flush, maxVersions=" +  maxVersions +
3212         "\n");
3213 
3214     final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
3215     final int numCF = families.size();
3216     final byte[][] cfBytes = new byte[numCF][];
3217     {
3218       int cfIndex = 0;
3219       for (String cf : families) {
3220         cfBytes[cfIndex++] = Bytes.toBytes(cf);
3221       }
3222     }
3223 
3224     final int actualStartKey = 0;
3225     final int actualEndKey = Integer.MAX_VALUE;
3226     final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
3227     final int splitStartKey = actualStartKey + keysPerRegion;
3228     final int splitEndKey = actualEndKey - keysPerRegion;
3229     final String keyFormat = "%08x";
3230     final HTable table = createTable(tableName, cfBytes,
3231         maxVersions,
3232         Bytes.toBytes(String.format(keyFormat, splitStartKey)),
3233         Bytes.toBytes(String.format(keyFormat, splitEndKey)),
3234         numRegions);
3235 
3236     if (hbaseCluster != null) {
3237       getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
3238     }
3239 
3240     for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
3241       for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
3242         final byte[] row = Bytes.toBytes(String.format(keyFormat,
3243             actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
3244 
3245         Put put = new Put(row);
3246         Delete del = new Delete(row);
3247         for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
3248           final byte[] cf = cfBytes[rand.nextInt(numCF)];
3249           final long ts = rand.nextInt();
3250           final byte[] qual = Bytes.toBytes("col" + iCol);
3251           if (rand.nextBoolean()) {
3252             final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
3253                 "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
3254                 ts + "_random_" + rand.nextLong());
3255             put.add(cf, qual, ts, value);
3256           } else if (rand.nextDouble() < 0.8) {
3257             del.deleteColumn(cf, qual, ts);
3258           } else {
3259             del.deleteColumns(cf, qual, ts);
3260           }
3261         }
3262 
3263         if (!put.isEmpty()) {
3264           table.put(put);
3265         }
3266 
3267         if (!del.isEmpty()) {
3268           table.delete(del);
3269         }
3270       }
3271       LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
3272       table.flushCommits();
3273       if (hbaseCluster != null) {
3274         getMiniHBaseCluster().flushcache(table.getName());
3275       }
3276     }
3277 
3278     return table;
3279   }
3280 
3281   private static final int MIN_RANDOM_PORT = 0xc000;
3282   private static final int MAX_RANDOM_PORT = 0xfffe;
3283   private static Random random = new Random();
3284 
3285   /**
3286    * Returns a random port. These ports cannot be registered with IANA and are
3287    * intended for dynamic allocation (see http://bit.ly/dynports).
3288    */
3289   public static int randomPort() {
3290     return MIN_RANDOM_PORT
3291         + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
3292   }
3293 
3294   /**
   * Returns a random free port and marks that port as taken. Not thread-safe. Expected to be
   * called from single-threaded test setup code.
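   * <p>Illustrative sketch (hypothetical test code):
   * <pre>
   * conf.setInt(HConstants.MASTER_INFO_PORT, HBaseTestingUtility.randomFreePort());
   * </pre>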
3297    */
3298   public static int randomFreePort() {
3299     int port = 0;
3300     do {
3301       port = randomPort();
3302       if (takenRandomPorts.contains(port)) {
3303         continue;
3304       }
3305       takenRandomPorts.add(port);
3306 
3307       try {
3308         ServerSocket sock = new ServerSocket(port);
3309         sock.close();
3310       } catch (IOException ex) {
3311         port = 0;
3312       }
3313     } while (port == 0);
3314     return port;
3315   }
3316 
3317 
3318   public static String randomMultiCastAddress() {
3319     return "226.1.1." + random.nextInt(254);
3320   }
3321 
3322 
3323 
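  /**
   * Blocks until a server socket accepts connections at the given host and port,
   * retrying for up to 10 seconds and rethrowing the last connection failure on
   * timeout. (Summary of the implementation below.)
   */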
3324   public static void waitForHostPort(String host, int port)
3325       throws IOException {
3326     final int maxTimeMs = 10000;
3327     final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3328     IOException savedException = null;
3329     LOG.info("Waiting for server at " + host + ":" + port);
3330     for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3331       try {
3332         Socket sock = new Socket(InetAddress.getByName(host), port);
3333         sock.close();
3334         savedException = null;
3335         LOG.info("Server at " + host + ":" + port + " is available");
3336         break;
3337       } catch (UnknownHostException e) {
3338         throw new IOException("Failed to look up " + host, e);
3339       } catch (IOException e) {
3340         savedException = e;
3341       }
3342       Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3343     }
3344 
3345     if (savedException != null) {
3346       throw savedException;
3347     }
3348   }
3349 
3350   /**
3351    * Creates a pre-split table for load testing. If the table already exists,
3352    * logs a warning and continues.
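   * <p>Illustrative sketch (hypothetical test code):
   * <pre>
   * int numRegions = HBaseTestingUtility.createPreSplitLoadTestTable(conf,
   *     TableName.valueOf("loadtest"), Bytes.toBytes("cf"),
   *     Compression.Algorithm.NONE, DataBlockEncoding.NONE);
   * </pre>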
3353    * @return the number of regions the table was split into
3354    */
3355   public static int createPreSplitLoadTestTable(Configuration conf,
3356       TableName tableName, byte[] columnFamily, Algorithm compression,
3357       DataBlockEncoding dataBlockEncoding) throws IOException {
3358     return createPreSplitLoadTestTable(conf, tableName,
3359       columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER,
3360       Durability.USE_DEFAULT);
  }

3362   /**
3363    * Creates a pre-split table for load testing. If the table already exists,
3364    * logs a warning and continues.
3365    * @return the number of regions the table was split into
3366    */
3367   public static int createPreSplitLoadTestTable(Configuration conf,
3368       TableName tableName, byte[] columnFamily, Algorithm compression,
3369       DataBlockEncoding dataBlockEncoding, int numRegionsPerServer,
3370       Durability durability)
3371           throws IOException {
3372     HTableDescriptor desc = new HTableDescriptor(tableName);
3373     desc.setDurability(durability);
3374     HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
3375     hcd.setDataBlockEncoding(dataBlockEncoding);
3376     hcd.setCompressionType(compression);
3377     return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
3378   }
3379 
3380   /**
3381    * Creates a pre-split table for load testing. If the table already exists,
3382    * logs a warning and continues.
3383    * @return the number of regions the table was split into
3384    */
3385   public static int createPreSplitLoadTestTable(Configuration conf,
3386       HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
3387     return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
3388   }
3389 
3390   /**
3391    * Creates a pre-split table for load testing. If the table already exists,
3392    * logs a warning and continues.
3393    * @return the number of regions the table was split into
3394    */
3395   public static int createPreSplitLoadTestTable(Configuration conf,
3396       HTableDescriptor desc, HColumnDescriptor hcd, int numRegionsPerServer) throws IOException {
3397     if (!desc.hasFamily(hcd.getName())) {
3398       desc.addFamily(hcd);
3399     }
3400 
3401     int totalNumberOfRegions = 0;
3402     HBaseAdmin admin = new HBaseAdmin(conf);
3403     try {
      // Create a table with pre-split regions. The number of splits is:
      //    (number of region servers) * (regions per region server).
3407       int numberOfServers = admin.getClusterStatus().getServers().size();
3408       if (numberOfServers == 0) {
3409         throw new IllegalStateException("No live regionservers");
3410       }
3411 
3412       totalNumberOfRegions = numberOfServers * numRegionsPerServer;
3413       LOG.info("Number of live regionservers: " + numberOfServers + ", " +
3414           "pre-splitting table into " + totalNumberOfRegions + " regions " +
3415           "(regions per server: " + numRegionsPerServer + ")");
3416 
3417       byte[][] splits = new RegionSplitter.HexStringSplit().split(
3418           totalNumberOfRegions);
3419 
3420       admin.createTable(desc, splits);
3421     } catch (MasterNotRunningException e) {
3422       LOG.error("Master not running", e);
3423       throw new IOException(e);
3424     } catch (TableExistsException e) {
3425       LOG.warn("Table " + desc.getTableName() +
3426           " already exists, continuing");
3427     } finally {
3428       admin.close();
3429     }
3430     return totalNumberOfRegions;
3431   }
3432 
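  /**
   * Returns the port of the region server currently carrying hbase:meta, looked
   * up through a throwaway HTable against the meta table. (Summary of the
   * implementation below.)
   */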
3433   public static int getMetaRSPort(Configuration conf) throws IOException {
3434     HTable table = new HTable(conf, TableName.META_TABLE_NAME);
3435     HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
3436     table.close();
3437     return hloc.getPort();
3438   }
3439 
3440   /**
   *  Due to an async race condition, a region may not be in
   *  the online region list of a region server yet, even after
   *  the assignment znode is deleted and the new assignment
   *  is recorded in the master. This method asserts that the region
   *  eventually shows up on the given server within the timeout.
3445    */
3446   public void assertRegionOnServer(
3447       final HRegionInfo hri, final ServerName server,
3448       final long timeout) throws IOException, InterruptedException {
3449     long timeoutTime = System.currentTimeMillis() + timeout;
3450     while (true) {
3451       List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3452       if (regions.contains(hri)) return;
3453       long now = System.currentTimeMillis();
3454       if (now > timeoutTime) break;
3455       Thread.sleep(10);
3456     }
3457     fail("Could not find region " + hri.getRegionNameAsString()
3458       + " on server " + server);
3459   }
3460 
3461   /**
3462    * Check to make sure the region is open on the specified
3463    * region server, but not on any other one.
3464    */
3465   public void assertRegionOnlyOnServer(
3466       final HRegionInfo hri, final ServerName server,
3467       final long timeout) throws IOException, InterruptedException {
3468     long timeoutTime = System.currentTimeMillis() + timeout;
3469     while (true) {
3470       List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3471       if (regions.contains(hri)) {
3472         List<JVMClusterUtil.RegionServerThread> rsThreads =
3473           getHBaseCluster().getLiveRegionServerThreads();
3474         for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
3475           HRegionServer rs = rsThread.getRegionServer();
3476           if (server.equals(rs.getServerName())) {
3477             continue;
3478           }
3479           Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
3480           for (HRegion r: hrs) {
3481             assertTrue("Region should not be double assigned",
3482               r.getRegionId() != hri.getRegionId());
3483           }
3484         }
3485         return; // good, we are happy
3486       }
3487       long now = System.currentTimeMillis();
3488       if (now > timeoutTime) break;
3489       Thread.sleep(10);
3490     }
3491     fail("Could not find region " + hri.getRegionNameAsString()
3492       + " on server " + server);
3493   }
3494 
3495   public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
3496       throws IOException {
3497     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
3498     htd.addFamily(hcd);
3499     HRegionInfo info =
3500         new HRegionInfo(TableName.valueOf(tableName), null, null, false);
3501     HRegion region =
3502         HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
3503     return region;
3504   }
3505 
3506   public void setFileSystemURI(String fsURI) {
3507     FS_URI = fsURI;
3508   }
3509 
3510   /**
3511    * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
3512    */
3513   public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
3514       throws E {
3515     return Waiter.waitFor(this.conf, timeout, predicate);
3516   }
3517 
3518   /**
3519    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
3520    */
3521   public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
3522       throws E {
3523     return Waiter.waitFor(this.conf, timeout, interval, predicate);
3524   }
3525 
3526   /**
3527    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
3528    */
3529   public <E extends Exception> long waitFor(long timeout, long interval,
3530       boolean failIfTimeout, Predicate<E> predicate) throws E {
3531     return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
3532   }
3533   
3534   /**
3535    * Wait until no regions in transition.
3536    * @param timeout How long to wait.
3537    * @throws Exception
3538    */
3539   public void waitUntilNoRegionsInTransition(
3540       final long timeout) throws Exception {
3541     waitFor(timeout, predicateNoRegionsInTransition());
3542   }
3543 
3544   /**
3545    * Returns a {@link Predicate} for checking that there are no regions in transition in master
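   * <p>Illustrative sketch (hypothetical test code):
   * <pre>
   * util.waitFor(60000, util.predicateNoRegionsInTransition());
   * </pre>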
3546    */
3547   public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
3548     return new Waiter.Predicate<Exception>() {
3549       @Override
3550       public boolean evaluate() throws Exception {
3551         final RegionStates regionStates = getMiniHBaseCluster().getMaster()
3552             .getAssignmentManager().getRegionStates();
3553         return !regionStates.isRegionsInTransition();
3554       }
3555     };
3556   }
3557 
3558   /**
3559    * Returns a {@link Predicate} for checking that table is enabled
3560    */
3561   public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
3562     return new Waiter.Predicate<Exception>() {
3563      @Override
3564      public boolean evaluate() throws Exception {
3565        return getHBaseAdmin().isTableEnabled(tableName);
3566       }
3567     };
3568   }
3569 
3570   /**
   * Wait until the given labels are available in the VisibilityLabelsCache.
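   * <p>Illustrative sketch (hypothetical test code):
   * <pre>
   * util.waitLabelAvailable(10000, "secret", "confidential");
   * </pre>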
3572    * @param timeoutMillis
3573    * @param labels
3574    */
3575   public void waitLabelAvailable(long timeoutMillis, final String... labels) {
3576     final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
3577     waitFor(timeoutMillis, new Waiter.Predicate<RuntimeException>() {
3578 
3579       @Override
3580       public boolean evaluate() {
3581         for (String label : labels) {
3582           if (labelsCache.getLabelOrdinal(label) == 0) {
3583             return false;
3584           }
3585         }
3586         return true;
3587       }
3588     });
3589   }
3590 
3591   /**
3592    * Create a set of column descriptors with the combination of compression,
3593    * encoding, bloom codecs available.
3594    * @return the list of column descriptors
3595    */
3596   public static List<HColumnDescriptor> generateColumnDescriptors() {
3597     return generateColumnDescriptors("");
3598   }
3599 
3600   /**
3601    * Create a set of column descriptors with the combination of compression,
3602    * encoding, bloom codecs available.
3603    * @param prefix family names prefix
3604    * @return the list of column descriptors
3605    */
3606   public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
3607     List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
3608     long familyId = 0;
3609     for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
3610       for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
3611         for (BloomType bloomType: BloomType.values()) {
3612           String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
3613           HColumnDescriptor htd = new HColumnDescriptor(name);
3614           htd.setCompressionType(compressionType);
3615           htd.setDataBlockEncoding(encodingType);
3616           htd.setBloomFilterType(bloomType);
3617           htds.add(htd);
3618           familyId++;
3619         }
3620       }
3621     }
3622     return htds;
3623   }
3624 
3625   /**
3626    * Get supported compression algorithms.
3627    * @return supported compression algorithms.
3628    */
3629   public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
3630     String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
3631     List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
3632     for (String algoName : allAlgos) {
3633       try {
3634         Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
3635         algo.getCompressor();
3636         supportedAlgos.add(algo);
3637       } catch (Throwable t) {
3638         // this algo is not available
3639       }
3640     }
3641     return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
3642   }
3643 }