1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase;
19  
20  import static org.junit.Assert.assertTrue;
21  import static org.junit.Assert.fail;
22  
23  import java.io.File;
24  import java.io.IOException;
25  import java.io.OutputStream;
26  import java.lang.reflect.Field;
27  import java.lang.reflect.Modifier;
28  import java.net.InetAddress;
29  import java.net.ServerSocket;
30  import java.net.Socket;
31  import java.net.UnknownHostException;
32  import java.security.MessageDigest;
33  import java.util.ArrayList;
34  import java.util.Arrays;
35  import java.util.Collection;
36  import java.util.Collections;
37  import java.util.HashSet;
38  import java.util.List;
39  import java.util.Map;
40  import java.util.NavigableSet;
41  import java.util.Random;
42  import java.util.Set;
43  import java.util.TreeSet;
44  import java.util.UUID;
45  import java.util.concurrent.TimeUnit;
46  
47  import org.apache.commons.logging.Log;
48  import org.apache.commons.logging.LogFactory;
49  import org.apache.commons.logging.impl.Jdk14Logger;
50  import org.apache.commons.logging.impl.Log4JLogger;
51  import org.apache.hadoop.conf.Configuration;
52  import org.apache.hadoop.fs.FileSystem;
53  import org.apache.hadoop.fs.Path;
54  import org.apache.hadoop.hbase.Waiter.Predicate;
55  import org.apache.hadoop.hbase.classification.InterfaceAudience;
56  import org.apache.hadoop.hbase.classification.InterfaceStability;
57  import org.apache.hadoop.hbase.client.Admin;
58  import org.apache.hadoop.hbase.client.Connection;
59  import org.apache.hadoop.hbase.client.ConnectionFactory;
60  import org.apache.hadoop.hbase.client.Delete;
61  import org.apache.hadoop.hbase.client.Durability;
62  import org.apache.hadoop.hbase.client.Get;
63  import org.apache.hadoop.hbase.client.HBaseAdmin;
64  import org.apache.hadoop.hbase.client.HConnection;
65  import org.apache.hadoop.hbase.client.HTable;
66  import org.apache.hadoop.hbase.client.Put;
67  import org.apache.hadoop.hbase.client.RegionLocator;
68  import org.apache.hadoop.hbase.client.Result;
69  import org.apache.hadoop.hbase.client.ResultScanner;
70  import org.apache.hadoop.hbase.client.Scan;
71  import org.apache.hadoop.hbase.client.Table;
72  import org.apache.hadoop.hbase.fs.HFileSystem;
73  import org.apache.hadoop.hbase.io.compress.Compression;
74  import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
75  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
76  import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
77  import org.apache.hadoop.hbase.io.hfile.HFile;
78  import org.apache.hadoop.hbase.ipc.RpcServerInterface;
79  import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
80  import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
81  import org.apache.hadoop.hbase.master.HMaster;
82  import org.apache.hadoop.hbase.master.RegionStates;
83  import org.apache.hadoop.hbase.master.ServerManager;
84  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
85  import org.apache.hadoop.hbase.regionserver.BloomType;
86  import org.apache.hadoop.hbase.regionserver.HRegion;
87  import org.apache.hadoop.hbase.regionserver.HRegionServer;
88  import org.apache.hadoop.hbase.regionserver.HStore;
89  import org.apache.hadoop.hbase.regionserver.InternalScanner;
90  import org.apache.hadoop.hbase.regionserver.RegionServerServices;
91  import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
92  import org.apache.hadoop.hbase.wal.WAL;
93  import org.apache.hadoop.hbase.security.User;
94  import org.apache.hadoop.hbase.tool.Canary;
95  import org.apache.hadoop.hbase.util.Bytes;
96  import org.apache.hadoop.hbase.util.FSTableDescriptors;
97  import org.apache.hadoop.hbase.util.FSUtils;
98  import org.apache.hadoop.hbase.util.JVMClusterUtil;
99  import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
100 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
101 import org.apache.hadoop.hbase.util.Pair;
102 import org.apache.hadoop.hbase.util.RegionSplitter;
103 import org.apache.hadoop.hbase.util.RetryCounter;
104 import org.apache.hadoop.hbase.util.Threads;
105 import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
106 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
107 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
108 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
109 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
110 import org.apache.hadoop.hdfs.DFSClient;
111 import org.apache.hadoop.hdfs.DistributedFileSystem;
112 import org.apache.hadoop.hdfs.MiniDFSCluster;
113 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
114 import org.apache.hadoop.mapred.JobConf;
115 import org.apache.hadoop.mapred.MiniMRCluster;
116 import org.apache.hadoop.mapred.TaskLog;
117 import org.apache.zookeeper.KeeperException;
118 import org.apache.zookeeper.KeeperException.NodeExistsException;
119 import org.apache.zookeeper.WatchedEvent;
120 import org.apache.zookeeper.ZooKeeper;
121 import org.apache.zookeeper.ZooKeeper.States;
122 
123 /**
124  * Facility for testing HBase. Replacement for the
125  * old HBaseTestCase and HBaseClusterTestCase functionality.
126  * Create an instance and keep it around for the duration of your HBase testing.  This class
127  * is meant to be your one-stop shop for anything you might need while testing.  It manages
128  * one cluster at a time only.  The managed cluster can be an in-process
129  * {@link MiniHBaseCluster}, or a deployed cluster of type {@link DistributedHBaseCluster};
130  * not all methods work with the real cluster.
131  * Depends on log4j being on the classpath and on
132  * hbase-site.xml for logging and test-run configuration.  It does not set
133  * logging levels nor make changes to configuration parameters.
134  * <p>To preserve test data directories, set the system property
135  * "hbase.testing.preserve.testdir" to true.
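     * <p>A rough usage sketch (illustrative only):
     * <pre>{@code
     * HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
     * TEST_UTIL.startMiniCluster();     // typically from a @BeforeClass method
     * // ... run tests against the mini cluster ...
     * TEST_UTIL.shutdownMiniCluster();  // typically from an @AfterClass method
     * }</pre>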
136  */
137 @InterfaceAudience.Public
138 @InterfaceStability.Evolving
139 @SuppressWarnings("deprecation")
140 public class HBaseTestingUtility extends HBaseCommonTestingUtility {
141   private MiniZooKeeperCluster zkCluster = null;
142 
143   public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
144   /**
145    * The default number of regions per regionserver when creating a pre-split
146    * table.
147    */
148   public static final int DEFAULT_REGIONS_PER_SERVER = 5;
149 
150   /**
151    * Set if we were passed a zkCluster.  If so, we won't shutdown zk as
152    * part of general shutdown.
153    */
154   private boolean passedZkCluster = false;
155   private MiniDFSCluster dfsCluster = null;
156 
157   private volatile HBaseCluster hbaseCluster = null;
158   private MiniMRCluster mrCluster = null;
159 
160   /** Whether a mini cluster is running for this testing utility instance. */
161   private volatile boolean miniClusterRunning;
162 
163   private String hadoopLogDir;
164 
165   /** Directory (a subdirectory of dataTestDir) used by the dfs cluster if any */
166   private File clusterTestDir = null;
167 
168   /** Directory on test filesystem where we put the data for this instance of
169     * HBaseTestingUtility*/
170   private Path dataTestDirOnTestFS = null;
171 
172   /**
173    * Shared cluster connection.
174    */
175   private volatile Connection connection;
176 
177   /**
178    * System property key to get test directory value.
179    * Name is as it is because mini dfs has hard-codings to put test data here.
180    * It should NOT be used directly in HBase, as it's a property used in
181    *  mini dfs.
182    *  @deprecated can be used only with mini dfs
183    */
184   @Deprecated
185   private static final String TEST_DIRECTORY_KEY = "test.build.data";
186 
187   /** Filesystem URI used for map-reduce mini-cluster setup */
188   private static String FS_URI;
189 
190   /** A set of ports that have been claimed using {@link #randomFreePort()}. */
191   private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();
192 
193   /** Compression algorithms to use in parameterized JUnit 4 tests */
194   public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
195     Arrays.asList(new Object[][] {
196       { Compression.Algorithm.NONE },
197       { Compression.Algorithm.GZ }
198     });
199 
200   /** This is for unit tests parameterized with a single boolean. */
201   public static final List<Object[]> BOOLEAN_PARAMETERIZED =
202       Arrays.asList(new Object[][] {
203           { Boolean.FALSE },
204           { Boolean.TRUE }
205       });
206 
207   /** This is for unit tests parameterized with two booleans (memstoreTS and tags). */
208   public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
209   /** Compression algorithms to use in testing */
210   public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
211       Compression.Algorithm.NONE, Compression.Algorithm.GZ
212     };
213 
214   /**
215    * Create all combinations of Bloom filters and compression algorithms for
216    * testing.
217    */
218   private static List<Object[]> bloomAndCompressionCombinations() {
219     List<Object[]> configurations = new ArrayList<Object[]>();
220     for (Compression.Algorithm comprAlgo :
221          HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
222       for (BloomType bloomType : BloomType.values()) {
223         configurations.add(new Object[] { comprAlgo, bloomType });
224       }
225     }
226     return Collections.unmodifiableList(configurations);
227   }
228 
229   /**
230    * Create all combinations of memstoreTS and tags.
231    */
232   private static List<Object[]> memStoreTSAndTagsCombination() {
233     List<Object[]> configurations = new ArrayList<Object[]>();
234     configurations.add(new Object[] { false, false });
235     configurations.add(new Object[] { false, true });
236     configurations.add(new Object[] { true, false });
237     configurations.add(new Object[] { true, true });
238     return Collections.unmodifiableList(configurations);
239   }
240 
241   public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
242       bloomAndCompressionCombinations();
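      // A rough sketch (illustrative only) of how these combination lists are meant
      // to be consumed, via JUnit 4's parameterized runner:
      //
      //   @Parameterized.Parameters
      //   public static Collection<Object[]> parameters() {
      //     return HBaseTestingUtility.BLOOM_AND_COMPRESSION_COMBINATIONS;
      //   }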
243 
244   public HBaseTestingUtility() {
245     this(HBaseConfiguration.create());
246   }
247 
248   public HBaseTestingUtility(Configuration conf) {
249     super(conf);
250 
251     // an HBase checksum verification failure will cause unit tests to fail
252     ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
253   }
254 
255   /**
256    * Create an HBaseTestingUtility where all tmp files are written to the local test data dir.
257    * It is needed to properly base FSUtils.getRootDir so that temp files land in the proper
258    * test dir.  Use this when you aren't using a mini HDFS cluster.
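       * <p>Rough usage sketch (illustrative only):
       * <pre>{@code
       * HBaseTestingUtility htu = HBaseTestingUtility.createLocalHTU();
       * Path testDir = htu.getDataTestDir("myTest");  // temp files land under the local test dir
       * }</pre>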
259    * @return HBaseTestingUtility that uses the local fs for temp files.
260    */
261   public static HBaseTestingUtility createLocalHTU() {
262     Configuration c = HBaseConfiguration.create();
263     return createLocalHTU(c);
264   }
265 
266   /**
267    * Create an HBaseTestingUtility where all tmp files are written to the local test data dir.
268    * It is needed to properly base FSUtils.getRootDir so that temp files land in the proper
269    * test dir.  Use this when you aren't using a mini HDFS cluster.
270    * @param c Configuration (will be modified)
271    * @return HBaseTestingUtility that uses the local fs for temp files.
272    */
273   public static HBaseTestingUtility createLocalHTU(Configuration c) {
274     HBaseTestingUtility htu = new HBaseTestingUtility(c);
275     String dataTestDir = htu.getDataTestDir().toString();
276     htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
277     LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
278     return htu;
279   }
280 
281   /**
282    * Returns this class's instance of {@link Configuration}.  Be careful how
283    * you use the returned Configuration, since {@link HConnection} instances
284    * can be shared.  The Map of HConnections is keyed by the Configuration.  If,
285    * say, a Connection was being used against a cluster that had been shut down,
286    * see {@link #shutdownMiniCluster()}, then the Connection will no longer
287    * be wholesome.  Rather than using the returned Configuration directly, it's
288    * usually best to make a copy and use that.  Do
289    * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
290    * @return Instance of Configuration.
291    */
292   @Override
293   public Configuration getConfiguration() {
294     return super.getConfiguration();
295   }
296 
297   public void setHBaseCluster(HBaseCluster hbaseCluster) {
298     this.hbaseCluster = hbaseCluster;
299   }
300 
301   /**
302    * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
303    * Give it a random name so we can have many concurrent tests running if
304    * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}
305    * System property, as that is what minidfscluster bases
306    * its data dir on.  Modifying a System property is not the way to do concurrent
307    * instances -- another instance could grab the temporary
308    * value unintentionally -- but nothing can be done about it at the moment;
309    * single-instance only is how the minidfscluster works.
310    *
311    * We also create the underlying directories for
312    * hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, set the values
313    * in the conf, and set hadoop.tmp.dir as a system property.
314    *
315    * @return The calculated data test build directory, if newly-created.
316    */
317   @Override
318   protected Path setupDataTestDir() {
319     Path testPath = super.setupDataTestDir();
320     if (null == testPath) {
321       return null;
322     }
323 
324     createSubDirAndSystemProperty(
325       "hadoop.log.dir",
326       testPath, "hadoop-log-dir");
327 
328     // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
329     //  we want our own value to ensure uniqueness on the same machine
330     createSubDirAndSystemProperty(
331       "hadoop.tmp.dir",
332       testPath, "hadoop-tmp-dir");
333 
334     // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
335     createSubDir(
336       "mapreduce.cluster.local.dir",
337       testPath, "mapred-local-dir");
338 
339     return testPath;
340   }
341 
342   private void createSubDirAndSystemProperty(
343     String propertyName, Path parent, String subDirName){
344 
345     String sysValue = System.getProperty(propertyName);
346 
347     if (sysValue != null) {
348       // There is already a value set. So we do nothing but hope
349       //  that there will be no conflicts
350       LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
351         sysValue + " so I do NOT create it in " + parent);
352       String confValue = conf.get(propertyName);
353       if (confValue != null && !confValue.endsWith(sysValue)) {
354         LOG.warn(
355           propertyName + " property value differs in configuration and system: " +
356           "Configuration=" + confValue + " while System=" + sysValue +
357           ". Overriding the configuration value with the system value."
358         );
359       }
360       conf.set(propertyName, sysValue);
361     } else {
362       // Ok, it's not set, so we create it as a subdirectory
363       createSubDir(propertyName, parent, subDirName);
364       System.setProperty(propertyName, conf.get(propertyName));
365     }
366   }
367 
368   /**
369    * @return Where to write test data on the test filesystem; Returns working directory
370    * for the test filesystem by default
371    * @see #setupDataTestDirOnTestFS()
372    * @see #getTestFileSystem()
373    */
374   private Path getBaseTestDirOnTestFS() throws IOException {
375     FileSystem fs = getTestFileSystem();
376     return new Path(fs.getWorkingDirectory(), "test-data");
377   }
378 
379   /**
380    * @return META table descriptor
381    */
382   public HTableDescriptor getMetaTableDescriptor() {
383     try {
384       return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
385     } catch (IOException e) {
386       throw new RuntimeException("Unable to create META table descriptor", e);
387     }
388   }
389 
390   /**
391    * @return Where the DFS cluster will write data on the local filesystem.
392    * Creates it if it does not exist already.  A subdir of {@link #getBaseTestDir()}
393    * @see #getTestFileSystem()
394    */
395   Path getClusterTestDir() {
396     if (clusterTestDir == null){
397       setupClusterTestDir();
398     }
399     return new Path(clusterTestDir.getAbsolutePath());
400   }
401 
402   /**
403    * Creates a directory for the DFS cluster, under the test data
404    */
405   private void setupClusterTestDir() {
406     if (clusterTestDir != null) {
407       return;
408     }
409 
410     // Using randomUUID ensures that multiple clusters can be launched by
411     //  the same test, if it stops & starts them
412     Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
413     clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
414     // Have it cleaned up on exit
415     boolean b = deleteOnExit();
416     if (b) clusterTestDir.deleteOnExit();
417     conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
418     LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
419   }
420 
421   /**
422    * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
423    * to write temporary test data. Call this method after setting up the mini dfs cluster
424    * if the test relies on it.
425    * @return a unique path in the test filesystem
426    */
427   public Path getDataTestDirOnTestFS() throws IOException {
428     if (dataTestDirOnTestFS == null) {
429       setupDataTestDirOnTestFS();
430     }
431 
432     return dataTestDirOnTestFS;
433   }
434 
435   /**
436    * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
437    * to write temporary test data. Call this method after setting up the mini dfs cluster
438    * if the test relies on it.
439    * @return a unique path in the test filesystem
440    * @param subdirName name of the subdir to create under the base test dir
441    */
442   public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
443     return new Path(getDataTestDirOnTestFS(), subdirName);
444   }
445 
446   /**
447    * Sets up a path in test filesystem to be used by tests.
448    * Creates a new directory if not already set up.
449    */
450   private void setupDataTestDirOnTestFS() throws IOException {
451     if (dataTestDirOnTestFS != null) {
452       LOG.warn("Data test on test fs dir already setup in "
453           + dataTestDirOnTestFS.toString());
454       return;
455     }
456     dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
457   }
458 
459   /**
460    * Sets up a new path in test filesystem to be used by tests.
461    */
462   private Path getNewDataTestDirOnTestFS() throws IOException {
463     //The file system can be either local, mini dfs, or if the configuration
464     //is supplied externally, it can be an external cluster FS. If it is a local
465     //file system, the tests should use getBaseTestDir, otherwise, we can use
466     //the working directory, and create a unique sub dir there
467     FileSystem fs = getTestFileSystem();
468     Path newDataTestDir = null;
469     if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
470       File dataTestDir = new File(getDataTestDir().toString());
471       if (deleteOnExit()) dataTestDir.deleteOnExit();
472       newDataTestDir = new Path(dataTestDir.getAbsolutePath());
473     } else {
474       Path base = getBaseTestDirOnTestFS();
475       String randomStr = UUID.randomUUID().toString();
476       newDataTestDir = new Path(base, randomStr);
477       if (deleteOnExit()) fs.deleteOnExit(newDataTestDir);
478     }
479     return newDataTestDir;
480   }
481 
482   /**
483    * Cleans the test data directory on the test filesystem.
484    * @return True if we removed the test dirs
485    * @throws IOException
486    */
487   public boolean cleanupDataTestDirOnTestFS() throws IOException {
488     boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
489     if (ret)
490       dataTestDirOnTestFS = null;
491     return ret;
492   }
493 
494   /**
495    * Cleans a subdirectory under the test data directory on the test filesystem.
496    * @return True if we removed child
497    * @throws IOException
498    */
499   public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
500     Path cpath = getDataTestDirOnTestFS(subdirName);
501     return getTestFileSystem().delete(cpath, true);
502   }
503 
504   /**
505    * Start a minidfscluster.
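       * <p>Rough usage sketch (illustrative only):
       * <pre>{@code
       * MiniDFSCluster dfs = TEST_UTIL.startMiniDFSCluster(3);
       * // ... exercise HDFS-dependent code ...
       * TEST_UTIL.shutdownMiniDFSCluster();
       * }</pre>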
506    * @param servers How many DNs to start.
507    * @throws Exception
508    * @see #shutdownMiniDFSCluster()
509    * @return The mini dfs cluster created.
510    */
511   public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
512     return startMiniDFSCluster(servers, null);
513   }
514 
515   /**
516    * Start a minidfscluster.
517    * This is useful if you want to run datanodes on distinct hosts for things
518    * like HDFS block location verification.
519    * If you start MiniDFSCluster without host names, all instances of the
520    * datanodes will have the same host name.
521    * @param hosts hostnames DNs to run on.
522    * @throws Exception
523    * @see #shutdownMiniDFSCluster()
524    * @return The mini dfs cluster created.
525    */
526   public MiniDFSCluster startMiniDFSCluster(final String hosts[])
527   throws Exception {
528     if (hosts != null && hosts.length != 0) {
529       return startMiniDFSCluster(hosts.length, hosts);
530     } else {
531       return startMiniDFSCluster(1, null);
532     }
533   }
534 
535   /**
536    * Start a minidfscluster.
537    * Can only create one.
538    * @param servers How many DNs to start.
539    * @param hosts hostnames DNs to run on.
540    * @throws Exception
541    * @see #shutdownMiniDFSCluster()
542    * @return The mini dfs cluster created.
543    */
544   public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
545   throws Exception {
546     createDirsAndSetProperties();
547     EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
548 
549     // Error level to skip some warnings specific to the minicluster. See HBASE-4709
550     org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
551         setLevel(org.apache.log4j.Level.ERROR);
552     org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
553         setLevel(org.apache.log4j.Level.ERROR);
554 
555 
556     this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
557       true, null, null, hosts, null);
558 
559     // Set this just-started cluster as our filesystem.
560     FileSystem fs = this.dfsCluster.getFileSystem();
561     FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
562 
563     // Wait for the cluster to be totally up
564     this.dfsCluster.waitClusterUp();
565 
566     //reset the test directory for test file system
567     dataTestDirOnTestFS = null;
568 
569     return this.dfsCluster;
570   }
571 
572 
573   public MiniDFSCluster startMiniDFSCluster(int servers, final  String racks[], String hosts[])
574       throws Exception {
575     createDirsAndSetProperties();
576     this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
577         true, null, racks, hosts, null);
578 
579     // Set this just-started cluster as our filesystem.
580     FileSystem fs = this.dfsCluster.getFileSystem();
581     FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
582 
583     // Wait for the cluster to be totally up
584     this.dfsCluster.waitClusterUp();
585 
586     //reset the test directory for test file system
587     dataTestDirOnTestFS = null;
588 
589     return this.dfsCluster;
590   }
591 
592   public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {
593     createDirsAndSetProperties();
594     dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
595         null, null, null);
596     return dfsCluster;
597   }
598 
599   /** This is used before starting HDFS and map-reduce mini-clusters */
600   private void createDirsAndSetProperties() throws IOException {
601     setupClusterTestDir();
602     System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
603     createDirAndSetProperty("cache_data", "test.cache.data");
604     createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
605     hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
606     createDirAndSetProperty("mapred_local", "mapreduce.cluster.local.dir");
607     createDirAndSetProperty("mapred_temp", "mapreduce.cluster.temp.dir");
608     enableShortCircuit();
609 
610     Path root = getDataTestDirOnTestFS("hadoop");
611     conf.set(MapreduceTestingShim.getMROutputDirProp(),
612       new Path(root, "mapred-output-dir").toString());
613     conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
614     conf.set("mapreduce.jobtracker.staging.root.dir",
615       new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
616     conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
617   }
618 
619 
620   /**
621    * Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
622    * This allows specifying this parameter on the command line
623    * (e.g. -Dhbase.tests.use.shortcircuit.reads=true).  If not set, the default is false.
624    */
625   public boolean isReadShortCircuitOn(){
626     final String propName = "hbase.tests.use.shortcircuit.reads";
627     String readOnProp = System.getProperty(propName);
628     if (readOnProp != null){
629       return Boolean.parseBoolean(readOnProp);
630     } else {
631       return conf.getBoolean(propName, false);
632     }
633   }
634 
635   /** Enable short-circuit reads, unless configured differently.
636    * Set both HBase and HDFS settings, including skipping the hdfs checksum checks.
637    */
638   private void enableShortCircuit() {
639     if (isReadShortCircuitOn()) {
640       String curUser = System.getProperty("user.name");
641       LOG.info("read short circuit is ON for user " + curUser);
642       // read short circuit, for hdfs
643       conf.set("dfs.block.local-path-access.user", curUser);
644       // read short circuit, for hbase
645       conf.setBoolean("dfs.client.read.shortcircuit", true);
646       // Skip checking checksum, for the hdfs client and the datanode
647       conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
648     } else {
649       LOG.info("read short circuit is OFF");
650     }
651   }
652 
653   private String createDirAndSetProperty(final String relPath, String property) {
654     String path = getDataTestDir(relPath).toString();
655     System.setProperty(property, path);
656     conf.set(property, path);
657     new File(path).mkdirs();
658     LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
659     return path;
660   }
661 
662   /**
663    * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
664    * or does nothing.
665    * @throws IOException
666    */
667   public void shutdownMiniDFSCluster() throws IOException {
668     if (this.dfsCluster != null) {
669       // The below throws an exception per dn, AsynchronousCloseException.
670       this.dfsCluster.shutdown();
671       dfsCluster = null;
672       dataTestDirOnTestFS = null;
673       FSUtils.setFsDefault(this.conf, new Path("file:///"));
674     }
675   }
676 
677   /**
678    * Call this if you only want a zk cluster.
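       * <p>Rough usage sketch (illustrative only):
       * <pre>{@code
       * MiniZooKeeperCluster zk = TEST_UTIL.startMiniZKCluster();
       * // ... tests that need only ZooKeeper ...
       * TEST_UTIL.shutdownMiniZKCluster();
       * }</pre>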
679    * @see #startMiniCluster() if you want zk + dfs + hbase mini cluster.
680    * @throws Exception
681    * @see #shutdownMiniZKCluster()
682    * @return zk cluster started.
683    */
684   public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
685     return startMiniZKCluster(1);
686   }
687 
688   /**
689    * Call this if you only want a zk cluster.
690    * @param zooKeeperServerNum
691    * @see #startMiniCluster() if you want zk + dfs + hbase mini cluster.
692    * @throws Exception
693    * @see #shutdownMiniZKCluster()
694    * @return zk cluster started.
695    */
696   public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
697       throws Exception {
698     setupClusterTestDir();
699     return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
700   }
701 
702   private MiniZooKeeperCluster startMiniZKCluster(final File dir)
703     throws Exception {
704     return startMiniZKCluster(dir,1);
705   }
706 
707   /**
708    * Start a mini ZK cluster. If the property "test.hbase.zookeeper.property.clientPort" is set
709    * the mentioned port is used as the default port for ZooKeeper.
710    */
711   private MiniZooKeeperCluster startMiniZKCluster(final File dir,
712       int zooKeeperServerNum)
713   throws Exception {
714     if (this.zkCluster != null) {
715       throw new IOException("Cluster already running at " + dir);
716     }
717     this.passedZkCluster = false;
718     this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
719     final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
720     if (defPort > 0){
721       // If there is a port in the config file, we use it.
722       this.zkCluster.setDefaultClientPort(defPort);
723     }
724     int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
725     this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
726       Integer.toString(clientPort));
727     return this.zkCluster;
728   }
729 
730   /**
731    * Shuts down zk cluster created by call to {@link #startMiniZKCluster(File)}
732    * or does nothing.
733    * @throws IOException
734    * @see #startMiniZKCluster()
735    */
736   public void shutdownMiniZKCluster() throws IOException {
737     if (this.zkCluster != null) {
738       this.zkCluster.shutdown();
739       this.zkCluster = null;
740     }
741   }
742 
743   /**
744    * Start up a minicluster of hbase, dfs, and zookeeper.
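       * <p>Rough usage sketch (illustrative only):
       * <pre>{@code
       * MiniHBaseCluster cluster = TEST_UTIL.startMiniCluster();
       * try {
       *   Admin admin = TEST_UTIL.getHBaseAdmin();
       *   // ... run tests ...
       * } finally {
       *   TEST_UTIL.shutdownMiniCluster();
       * }
       * }</pre>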
745    * @throws Exception
746    * @return Mini hbase cluster instance created.
747    * @see #shutdownMiniCluster()
748    */
749   public MiniHBaseCluster startMiniCluster() throws Exception {
750     return startMiniCluster(1, 1);
751   }
752 
753   /**
754    * Start up a minicluster of hbase, dfs, and zookeeper.
755    * Set the <code>create</code> flag to choose whether to create the root or data directory
756    * path anew (an existing dir will be overwritten).
757    * @throws Exception
758    * @return Mini hbase cluster instance created.
759    * @see #shutdownMiniCluster()
760    */
761   public MiniHBaseCluster startMiniCluster(final int numSlaves, boolean create)
762   throws Exception {
763     return startMiniCluster(1, numSlaves, create);
764   }
765 
766   /**
767    * Start up a minicluster of hbase, optionally dfs, and zookeeper.
768    * Modifies Configuration.  Homes the cluster data directory under a random
769    * subdirectory in a directory under System property test.build.data.
770    * Directory is cleaned up on exit.
771    * @param numSlaves Number of slaves to start up.  We'll start this many
772    * datanodes and regionservers.  If numSlaves is > 1, then make sure
773    * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
774    * you will get bind errors.
775    * @throws Exception
776    * @see #shutdownMiniCluster()
777    * @return Mini hbase cluster instance created.
778    */
779   public MiniHBaseCluster startMiniCluster(final int numSlaves)
780   throws Exception {
781     return startMiniCluster(1, numSlaves, false);
782   }
783 
784   /**
785    * Start minicluster. Whether to create a new root or data dir path, even if such a path
786    * has been created earlier, is decided by the <code>create</code> flag.
787    * @throws Exception
788    * @see #shutdownMiniCluster()
789    * @return Mini hbase cluster instance created.
790    */
791   public MiniHBaseCluster startMiniCluster(final int numMasters,
792       final int numSlaves, boolean create)
793     throws Exception {
794       return startMiniCluster(numMasters, numSlaves, null, create);
795   }
796 
797   /**
798    * Start minicluster.
799    * @throws Exception
800    * @see #shutdownMiniCluster()
801    * @return Mini hbase cluster instance created.
802    */
803   public MiniHBaseCluster startMiniCluster(final int numMasters,
804     final int numSlaves)
805   throws Exception {
806     return startMiniCluster(numMasters, numSlaves, null, false);
807   }
808 
809   public MiniHBaseCluster startMiniCluster(final int numMasters,
810       final int numSlaves, final String[] dataNodeHosts, boolean create)
811       throws Exception {
812     return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts,
813         null, null, create);
814   }
815 
816   /**
817    * Start up a minicluster of hbase, optionally dfs, and zookeeper.
818    * Modifies Configuration.  Homes the cluster data directory under a random
819    * subdirectory in a directory under System property test.build.data.
820    * Directory is cleaned up on exit.
821    * @param numMasters Number of masters to start up.  We'll start this many
822    * hbase masters.  If numMasters > 1, you can find the active/primary master
823    * with {@link MiniHBaseCluster#getMaster()}.
824    * @param numSlaves Number of slaves to start up.  We'll start this many
825    * regionservers. If dataNodeHosts == null, this also indicates the number of
826    * datanodes to start. If dataNodeHosts != null, the number of datanodes is
827    * based on dataNodeHosts.length.
828    * If numSlaves is > 1, then make sure
829    * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
830    * you will get bind errors.
831    * @param dataNodeHosts hostnames of DNs to run on.
832    * This is useful if you want to run datanodes on distinct hosts for things
833    * like HDFS block location verification.
834    * If you start MiniDFSCluster without host names,
835    * all instances of the datanodes will have the same host name.
836    * @throws Exception
837    * @see #shutdownMiniCluster()
838    * @return Mini hbase cluster instance created.
839    */
840   public MiniHBaseCluster startMiniCluster(final int numMasters,
841       final int numSlaves, final String[] dataNodeHosts) throws Exception {
842     return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts,
843         null, null);
844   }
845 
846   /**
847    * Same as {@link #startMiniCluster(int, int)}, but with custom number of datanodes.
848    * @param numDataNodes Number of data nodes.
849    */
850   public MiniHBaseCluster startMiniCluster(final int numMasters,
851       final int numSlaves, final int numDataNodes) throws Exception {
852     return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
853   }
854 
855   /**
856    * Start up a minicluster of hbase, optionally dfs, and zookeeper.
857    * Modifies Configuration.  Homes the cluster data directory under a random
858    * subdirectory in a directory under System property test.build.data.
859    * Directory is cleaned up on exit.
860    * @param numMasters Number of masters to start up.  We'll start this many
861    * hbase masters.  If numMasters > 1, you can find the active/primary master
862    * with {@link MiniHBaseCluster#getMaster()}.
863    * @param numSlaves Number of slaves to start up.  We'll start this many
864    * regionservers. If dataNodeHosts == null, this also indicates the number of
865    * datanodes to start. If dataNodeHosts != null, the number of datanodes is
866    * based on dataNodeHosts.length.
867    * If numSlaves is > 1, then make sure
868    * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver), otherwise
869    * you will get bind errors.
870    * @param dataNodeHosts hostnames of DNs to run on.
871    * This is useful if you want to run datanodes on distinct hosts for things
872    * like HDFS block location verification.
873    * If you start MiniDFSCluster without host names,
874    * all instances of the datanodes will have the same host name.
875    * @param masterClass The class to use as HMaster, or null for default
876    * @param regionserverClass The class to use as HRegionServer, or null for
877    * default
878    * @throws Exception
879    * @see #shutdownMiniCluster()
880    * @return Mini hbase cluster instance created.
881    */
882   public MiniHBaseCluster startMiniCluster(final int numMasters,
883       final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
884       Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
885           throws Exception {
886     return startMiniCluster(
887         numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
888   }
889 
890   public MiniHBaseCluster startMiniCluster(final int numMasters,
891       final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
892       Class<? extends HMaster> masterClass,
893       Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
894     throws Exception {
895     return startMiniCluster(numMasters, numSlaves, numDataNodes, dataNodeHosts,
896         masterClass, regionserverClass, false);
897   }
898 
899   /**
900    * Same as {@link #startMiniCluster(int, int, String[], Class, Class)}, but with custom
901    * number of datanodes.
902    * @param numDataNodes Number of data nodes.
903    * @param create Whether to create a new root or data directory path
904    * (will overwrite if it exists already).
905    */
906   public MiniHBaseCluster startMiniCluster(final int numMasters,
907     final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
908     Class<? extends HMaster> masterClass,
909     Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
910     boolean create)
911   throws Exception {
912     if (dataNodeHosts != null && dataNodeHosts.length != 0) {
913       numDataNodes = dataNodeHosts.length;
914     }
915 
916     LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
917         numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");
918 
919     // If we already put up a cluster, fail.
920     if (miniClusterRunning) {
921       throw new IllegalStateException("A mini-cluster is already running");
922     }
923     miniClusterRunning = true;
924 
925     setupClusterTestDir();
926     System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());
927 
928     // Bring up mini dfs cluster. This spews a bunch of warnings about missing
929     // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
930     startMiniDFSCluster(numDataNodes, dataNodeHosts);
931 
932     // Start up a zk cluster.
933     if (this.zkCluster == null) {
934       startMiniZKCluster(clusterTestDir);
935     }
936 
937     // Start the MiniHBaseCluster
938     return startMiniHBaseCluster(numMasters, numSlaves, masterClass,
939       regionserverClass, create);
940   }
941 
942   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
943       throws IOException, InterruptedException{
944     return startMiniHBaseCluster(numMasters, numSlaves, null, null, false);
945   }
946 
947   /**
948    * Starts up mini hbase cluster.  Usually used after call to
949    * {@link #startMiniCluster(int, int)} when doing stepped startup of clusters.
950    * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
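       * <p>Stepped startup, as a rough sketch (illustrative only):
       * <pre>{@code
       * TEST_UTIL.startMiniDFSCluster(3);
       * TEST_UTIL.startMiniZKCluster();
       * TEST_UTIL.startMiniHBaseCluster(1, 1);
       * }</pre>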
951    * @param numMasters
952    * @param numSlaves
953    * @param create Whether to create a
954    * root or data directory path or not; will overwrite if exists already.
955    * @return Reference to the mini hbase cluster.
956    * @throws IOException
957    * @throws InterruptedException
958    * @see #startMiniCluster()
959    */
960   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
961         final int numSlaves, Class<? extends HMaster> masterClass,
962         Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
963         boolean create)
964   throws IOException, InterruptedException {
965     // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
966     createRootDir(create);
967 
968     // These settings will make the master wait until exactly this number of
969     // region servers have connected.
970     if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
971       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
972     }
973     if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
974       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
975     }
976 
977     Configuration c = new Configuration(this.conf);
978     this.hbaseCluster =
979         new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
980     // Don't leave here till we've done a successful scan of the hbase:meta
981     Table t = new HTable(c, TableName.META_TABLE_NAME);
982     ResultScanner s = t.getScanner(new Scan());
983     while (s.next() != null) {
984       continue;
985     }
986     s.close();
987     t.close();
988 
989     getHBaseAdmin(); // create immediately the hbaseAdmin
990     LOG.info("Minicluster is up");
991     return (MiniHBaseCluster)this.hbaseCluster;
992   }
993 
994   /**
995    * Starts the hbase cluster up again after shutting it down previously in a
996    * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
997    * @param servers number of region servers
998    * @throws IOException
999    */
1000   public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
1001     this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
1002     // Don't leave here till we've done a successful scan of the hbase:meta
1003     Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
1004     ResultScanner s = t.getScanner(new Scan());
1005     while (s.next() != null) {
1006       // do nothing
1007     }
1008     LOG.info("HBase has been restarted");
1009     s.close();
1010     t.close();
1011   }
1012 
1013   /**
1014    * @return Current mini hbase cluster. Only has something in it after a call
1015    * to {@link #startMiniCluster()}.
1016    * @see #startMiniCluster()
1017    */
1018   public MiniHBaseCluster getMiniHBaseCluster() {
1019     if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
1020       return (MiniHBaseCluster)this.hbaseCluster;
1021     }
1022     throw new RuntimeException(hbaseCluster + " not an instance of " +
1023                                MiniHBaseCluster.class.getName());
1024   }
1025 
1026   /**
1027    * Stops mini hbase, zk, and hdfs clusters.
1028    * @throws IOException
1029    * @see #startMiniCluster(int)
1030    */
1031   public void shutdownMiniCluster() throws Exception {
1032     LOG.info("Shutting down minicluster");
1033     if (this.connection != null && !this.connection.isClosed()) {
1034       this.connection.close();
1035       this.connection = null;
1036     }
1037     shutdownMiniHBaseCluster();
1038     if (!this.passedZkCluster){
1039       shutdownMiniZKCluster();
1040     }
1041     shutdownMiniDFSCluster();
1042 
1043     cleanupTestDir();
1044     miniClusterRunning = false;
1045     LOG.info("Minicluster is down");
1046   }
1047 
1048   /**
1049    * @return True if we removed the test dirs
1050    * @throws IOException
1051    */
1052   @Override
1053   public boolean cleanupTestDir() throws IOException {
1054     boolean ret = super.cleanupTestDir();
1055     if (deleteDir(this.clusterTestDir)) {
1056       this.clusterTestDir = null;
1057       return ret;
1058     }
1059     return false;
1060   }
1061 
1062   /**
1063    * Shutdown HBase mini cluster.  Does not shut down zk or dfs if running.
1064    * @throws IOException
1065    */
1066   public void shutdownMiniHBaseCluster() throws IOException {
1067     if (hbaseAdmin != null) {
1068       hbaseAdmin.close0();
1069       hbaseAdmin = null;
1070     }
1071 
1072     // unset the configuration for MIN and MAX RS to start
1073     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
1074     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
1075     if (this.hbaseCluster != null) {
1076       this.hbaseCluster.shutdown();
1077       // Wait till hbase is down before going on to shutdown zk.
1078       this.hbaseCluster.waitUntilShutDown();
1079       this.hbaseCluster = null;
1080     }
1081 
1082     if (zooKeeperWatcher != null) {
1083       zooKeeperWatcher.close();
1084       zooKeeperWatcher = null;
1085     }
1086   }
1087 
1088   /**
1089    * Returns the path to the default root dir the minicluster uses. If <code>create</code>
1090    * is true, a new root directory path is fetched irrespective of whether it has been fetched
1091    * before or not. If false, previous path is used.
1092    * Note: this does not cause the root dir to be created.
1093    * @return Fully qualified path for the default hbase root dir
1094    * @throws IOException
1095    */
1096   public Path getDefaultRootDirPath(boolean create) throws IOException {
1097     if (!create) {
1098       return getDataTestDirOnTestFS();
1099     } else {
1100       return getNewDataTestDirOnTestFS();
1101     }
1102   }
1103 
1104   /**
1105    * Same as {@link HBaseTestingUtility#getDefaultRootDirPath(boolean)}
1106    * except that the <code>create</code> flag is false.
1107    * Note: this does not cause the root dir to be created.
1108    * @return Fully qualified path for the default hbase root dir
1109    * @throws IOException
1110    */
1111   public Path getDefaultRootDirPath() throws IOException {
1112     return getDefaultRootDirPath(false);
1113   }
1114 
1115   /**
1116    * Creates an hbase rootdir in the user home directory.  Also creates the hbase
1117    * version file.  Normally you won't make use of this method; the root hbasedir
1118    * is created for you as part of mini cluster startup.  You'd only use this
1119    * method if you were doing a manual operation.
1120    * @param create This flag decides whether to get a new
1121    * root or data directory path or not, if it has been fetched already.
1122    * Note: the directory will be made irrespective of whether the path has been fetched or not.
1123    * If the directory already exists, it will be overwritten.
1124    * @return Fully qualified path to hbase root dir
1125    * @throws IOException
1126    */
1127   public Path createRootDir(boolean create) throws IOException {
1128     FileSystem fs = FileSystem.get(this.conf);
1129     Path hbaseRootdir = getDefaultRootDirPath(create);
1130     FSUtils.setRootDir(this.conf, hbaseRootdir);
1131     fs.mkdirs(hbaseRootdir);
1132     FSUtils.setVersion(fs, hbaseRootdir);
1133     return hbaseRootdir;
1134   }
1135 
1136   /**
1137    * Same as {@link HBaseTestingUtility#createRootDir(boolean)}
1138    * except that the <code>create</code> flag is false.
1139    * @return Fully qualified path to hbase root dir
1140    * @throws IOException
1141    */
1142   public Path createRootDir() throws IOException {
1143     return createRootDir(false);
1144   }
1145 
1146   /**
1147    * Flushes all caches in the mini hbase cluster
1148    * @throws IOException
1149    */
1150   public void flush() throws IOException {
1151     getMiniHBaseCluster().flushcache();
1152   }
1153 
1154   /**
1155    * Flushes all caches in the mini hbase cluster
1156    * @throws IOException
1157    */
1158   public void flush(TableName tableName) throws IOException {
1159     getMiniHBaseCluster().flushcache(tableName);
1160   }
1161 
1162   /**
1163    * Compact all regions in the mini hbase cluster
1164    * @throws IOException
1165    */
1166   public void compact(boolean major) throws IOException {
1167     getMiniHBaseCluster().compact(major);
1168   }
1169 
1170   /**
1171    * Compact all of a table's regions in the mini hbase cluster
1172    * @throws IOException
1173    */
1174   public void compact(TableName tableName, boolean major) throws IOException {
1175     getMiniHBaseCluster().compact(tableName, major);
1176   }
1177 
1178   /**
1179    * Create a table.
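        * <p>Rough usage sketch (illustrative only; assumes a running mini cluster):
        * <pre>{@code
        * Table t = TEST_UTIL.createTable(TableName.valueOf("TestTable"), "info");
        * t.put(new Put(Bytes.toBytes("row1"))
        *     .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v1")));
        * }</pre>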
1180    * @param tableName
1181    * @param family
1182    * @return An HTable instance for the created table.
1183    * @throws IOException
1184    */
1185   public Table createTable(TableName tableName, String family)
1186   throws IOException{
1187     return createTable(tableName, new String[]{family});
1188   }
1189 
1190   /**
1191    * Create a table.
1192    * @param tableName
1193    * @param family
1194    * @return An HTable instance for the created table.
1195    * @throws IOException
1196    */
1197   public HTable createTable(byte[] tableName, byte[] family)
1198   throws IOException{
1199     return createTable(TableName.valueOf(tableName), new byte[][]{family});
1200   }
1201 
1202   /**
1203    * Create a table.
1204    * @param tableName
1205    * @param families
1206    * @return An HTable instance for the created table.
1207    * @throws IOException
1208    */
1209   public Table createTable(TableName tableName, String[] families)
1210   throws IOException {
1211     List<byte[]> fams = new ArrayList<byte[]>(families.length);
1212     for (String family : families) {
1213       fams.add(Bytes.toBytes(family));
1214     }
1215     return createTable(tableName, fams.toArray(new byte[0][]));
1216   }
1217 
1218   /**
1219    * Create a table.
1220    * @param tableName
1221    * @param family
1222    * @return An HTable instance for the created table.
1223    * @throws IOException
1224    */
1225   public HTable createTable(TableName tableName, byte[] family)
1226   throws IOException{
1227     return createTable(tableName, new byte[][]{family});
1228   }
1229 
1230 
1231   /**
1232    * Create a table.
1233    * @param tableName
1234    * @param families
1235    * @return An HTable instance for the created table.
1236    * @throws IOException
1237    */
1238   public HTable createTable(byte[] tableName, byte[][] families)
1239   throws IOException {
1240     return createTable(tableName, families,
1241         new Configuration(getConfiguration()));
1242   }
1243 
1244   /**
1245    * Create a table.
1246    * @param tableName
1247    * @param families
1248    * @return An HTable instance for the created table.
1250    * @throws IOException
1251    */
1252   public HTable createTable(TableName tableName, byte[][] families)
1253   throws IOException {
1254     return createTable(tableName, families, new Configuration(getConfiguration()));
1255   }
1256 
1257   public HTable createTable(byte[] tableName, byte[][] families,
1258       int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
1259     return createTable(TableName.valueOf(tableName), families, numVersions,
1260         startKey, endKey, numRegions);
1261   }
1262 
1263   public HTable createTable(String tableName, byte[][] families,
1264       int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
1265     return createTable(TableName.valueOf(tableName), families, numVersions,
1266         startKey, endKey, numRegions);
1267   }
1268 
1269   public HTable createTable(TableName tableName, byte[][] families,
1270       int numVersions, byte[] startKey, byte[] endKey, int numRegions)
1271   throws IOException{
1272     HTableDescriptor desc = new HTableDescriptor(tableName);
1273     for (byte[] family : families) {
1274       HColumnDescriptor hcd = new HColumnDescriptor(family)
1275           .setMaxVersions(numVersions);
1276       desc.addFamily(hcd);
1277     }
1278     getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
1279     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1280     waitUntilAllRegionsAssigned(tableName);
1281     return new HTable(getConfiguration(), tableName);
1282   }
1283 
1284   /**
1285    * Create a table.
1286    * @param htd
1287    * @param families
1288    * @param c Configuration to use
1289    * @return An HTable instance for the created table.
1290    * @throws IOException
1291    */
1292   public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
1293   throws IOException {
1294     for(byte[] family : families) {
1295       HColumnDescriptor hcd = new HColumnDescriptor(family);
1296       // Disable blooms (they are on by default as of 0.95) but we disable them here because
1297       // tests have hard coded counts of what to expect in block cache, etc., and blooms being
1298       // on is interfering.
1299       hcd.setBloomFilterType(BloomType.NONE);
1300       htd.addFamily(hcd);
1301     }
1302     getHBaseAdmin().createTable(htd);
1303     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1304     waitUntilAllRegionsAssigned(htd.getTableName());
1305     return (HTable)getConnection().getTable(htd.getTableName());
1306   }
1307 
1308   /**
1309    * Create a table.
1310    * @param htd
1311    * @param splitRows
1312    * @return An HTable instance for the created table.
1313    * @throws IOException
1314    */
1315   public HTable createTable(HTableDescriptor htd, byte[][] splitRows)
1316       throws IOException {
1317     getHBaseAdmin().createTable(htd, splitRows);
1318     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
1319     waitUntilAllRegionsAssigned(htd.getTableName());
1320     return new HTable(getConfiguration(), htd.getTableName());
1321   }
1322 
1323   /**
1324    * Create a table.
1325    * @param tableName
1326    * @param families
1327    * @param c Configuration to use
1328    * @return An HTable instance for the created table.
1329    * @throws IOException
1330    */
1331   public HTable createTable(TableName tableName, byte[][] families,
1332       final Configuration c)
1333   throws IOException {
1334     return createTable(new HTableDescriptor(tableName), families, c);
1335   }
1336 
1337   /**
1338    * Create a table.
1339    * @param tableName
1340    * @param families
1341    * @param c Configuration to use
1342    * @return An HTable instance for the created table.
1343    * @throws IOException
1344    */
1345   public HTable createTable(byte[] tableName, byte[][] families,
1346       final Configuration c)
1347   throws IOException {
1348     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1349     for(byte[] family : families) {
1350       HColumnDescriptor hcd = new HColumnDescriptor(family);
1351       // Disable blooms (they are on by default as of 0.95) but we disable them here because
1352       // tests have hard coded counts of what to expect in block cache, etc., and blooms being
1353       // on is interfering.
1354       hcd.setBloomFilterType(BloomType.NONE);
1355       desc.addFamily(hcd);
1356     }
1357     getHBaseAdmin().createTable(desc);
1358     return new HTable(c, desc.getTableName());
1359   }
1360 
1361   /**
1362    * Create a table.
1363    * @param tableName name of the table to create
1364    * @param families column families to create in the table
1365    * @param c Configuration to use
1366    * @param numVersions maximum number of versions to keep per family
1367    * @return An HTable instance for the created table.
1368    * @throws IOException
1369    */
1370   public HTable createTable(TableName tableName, byte[][] families,
1371       final Configuration c, int numVersions)
1372   throws IOException {
1373     HTableDescriptor desc = new HTableDescriptor(tableName);
1374     for(byte[] family : families) {
1375       HColumnDescriptor hcd = new HColumnDescriptor(family)
1376           .setMaxVersions(numVersions);
1377       desc.addFamily(hcd);
1378     }
1379     getHBaseAdmin().createTable(desc);
1380     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1381     waitUntilAllRegionsAssigned(tableName);
1382     return new HTable(c, tableName);
1383   }
1384 
1385   /**
1386    * Create a table.
1387    * @param tableName name of the table to create
1388    * @param families column families to create in the table
1389    * @param c Configuration to use
1390    * @param numVersions maximum number of versions to keep per family
1391    * @return An HTable instance for the created table.
1392    * @throws IOException
1393    */
1394   public HTable createTable(byte[] tableName, byte[][] families,
1395       final Configuration c, int numVersions)
1396   throws IOException {
1397     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1398     for(byte[] family : families) {
1399       HColumnDescriptor hcd = new HColumnDescriptor(family)
1400           .setMaxVersions(numVersions);
1401       desc.addFamily(hcd);
1402     }
1403     getHBaseAdmin().createTable(desc);
1404     return new HTable(c, desc.getTableName());
1405   }
1406 
1407   /**
1408    * Create a table.
1409    * @param tableName name of the table to create
1410    * @param family the single column family to create
1411    * @param numVersions maximum number of versions to keep
1412    * @return An HTable instance for the created table.
1413    * @throws IOException
1414    */
1415   public HTable createTable(byte[] tableName, byte[] family, int numVersions)
1416   throws IOException {
1417     return createTable(tableName, new byte[][]{family}, numVersions);
1418   }
1419 
1420   /**
1421    * Create a table.
1422    * @param tableName name of the table to create
1423    * @param family the single column family to create
1424    * @param numVersions maximum number of versions to keep
1425    * @return An HTable instance for the created table.
1426    * @throws IOException
1427    */
1428   public HTable createTable(TableName tableName, byte[] family, int numVersions)
1429   throws IOException {
1430     return createTable(tableName, new byte[][]{family}, numVersions);
1431   }
1432 
1433   /**
1434    * Create a table.
1435    * @param tableName name of the table to create
1436    * @param families column families to create in the table
1437    * @param numVersions maximum number of versions to keep per family
1438    * @return An HTable instance for the created table.
1439    * @throws IOException
1440    */
1441   public HTable createTable(byte[] tableName, byte[][] families,
1442       int numVersions)
1443   throws IOException {
1444     return createTable(TableName.valueOf(tableName), families, numVersions);
1445   }
1446 
1447   /**
1448    * Create a table.
1449    * @param tableName name of the table to create
1450    * @param families column families to create in the table
1451    * @param numVersions maximum number of versions to keep per family
1452    * @return An HTable instance for the created table.
1453    * @throws IOException
1454    */
1455   public HTable createTable(TableName tableName, byte[][] families,
1456       int numVersions)
1457   throws IOException {
1458     HTableDescriptor desc = new HTableDescriptor(tableName);
1459     for (byte[] family : families) {
1460       HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1461       desc.addFamily(hcd);
1462     }
1463     getHBaseAdmin().createTable(desc);
1464     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1465     waitUntilAllRegionsAssigned(tableName);
1466     return new HTable(new Configuration(getConfiguration()), tableName);
1467   }
1468 
1469   /**
1470    * Create a table.
1471    * @param tableName name of the table to create
1472    * @param families column families to create in the table
1473    * @param numVersions maximum number of versions to keep per family
        * @param blockSize block size to use for the families, in bytes
1474    * @return An HTable instance for the created table.
1475    * @throws IOException
1476    */
1477   public HTable createTable(byte[] tableName, byte[][] families,
1478     int numVersions, int blockSize) throws IOException {
1479     return createTable(TableName.valueOf(tableName),
1480         families, numVersions, blockSize);
1481   }
1482 
1483   /**
1484    * Create a table.
1485    * @param tableName name of the table to create
1486    * @param families column families to create in the table
1487    * @param numVersions maximum number of versions to keep per family
        * @param blockSize block size to use for the families, in bytes
1488    * @return An HTable instance for the created table.
1489    * @throws IOException
1490    */
1491   public HTable createTable(TableName tableName, byte[][] families,
1492     int numVersions, int blockSize) throws IOException {
1493     HTableDescriptor desc = new HTableDescriptor(tableName);
1494     for (byte[] family : families) {
1495       HColumnDescriptor hcd = new HColumnDescriptor(family)
1496           .setMaxVersions(numVersions)
1497           .setBlocksize(blockSize);
1498       desc.addFamily(hcd);
1499     }
1500     getHBaseAdmin().createTable(desc);
1501     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1502     waitUntilAllRegionsAssigned(tableName);
1503     return new HTable(new Configuration(getConfiguration()), tableName);
1504   }
1505 
1506   /**
1507    * Create a table.
1508    * @param tableName name of the table to create
1509    * @param families column families to create in the table
1510    * @param numVersions maximum versions to keep, one entry per family
1511    * @return An HTable instance for the created table.
1512    * @throws IOException
1513    */
1514   public HTable createTable(byte[] tableName, byte[][] families,
1515       int[] numVersions)
1516   throws IOException {
1517     return createTable(TableName.valueOf(tableName), families, numVersions);
1518   }
1519 
1520   /**
1521    * Create a table.
1522    * @param tableName name of the table to create
1523    * @param families column families to create in the table
1524    * @param numVersions maximum versions to keep, one entry per family
1525    * @return An HTable instance for the created table.
1526    * @throws IOException
1527    */
1528   public HTable createTable(TableName tableName, byte[][] families,
1529       int[] numVersions)
1530   throws IOException {
1531     HTableDescriptor desc = new HTableDescriptor(tableName);
1532     int i = 0;
1533     for (byte[] family : families) {
1534       HColumnDescriptor hcd = new HColumnDescriptor(family)
1535           .setMaxVersions(numVersions[i]);
1536       desc.addFamily(hcd);
1537       i++;
1538     }
1539     getHBaseAdmin().createTable(desc);
1540     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1541     waitUntilAllRegionsAssigned(tableName);
1542     return new HTable(new Configuration(getConfiguration()), tableName);
1543   }
1544 
1545   /**
1546    * Create a table.
1547    * @param tableName name of the table to create
1548    * @param family the single column family to create
1549    * @param splitRows keys to pre-split the table on
1550    * @return An HTable instance for the created table.
1551    * @throws IOException
1552    */
1553   public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
1554     throws IOException{
1555     return createTable(TableName.valueOf(tableName), family, splitRows);
1556   }
1557 
1558   /**
1559    * Create a table.
1560    * @param tableName name of the table to create
1561    * @param family the single column family to create
1562    * @param splitRows keys to pre-split the table on
1563    * @return An HTable instance for the created table.
1564    * @throws IOException
1565    */
1566   public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
1567       throws IOException {
1568     HTableDescriptor desc = new HTableDescriptor(tableName);
1569     HColumnDescriptor hcd = new HColumnDescriptor(family);
1570     desc.addFamily(hcd);
1571     getHBaseAdmin().createTable(desc, splitRows);
1572     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1573     waitUntilAllRegionsAssigned(tableName);
1574     return new HTable(getConfiguration(), tableName);
1575   }
1576 
1577   /**
1578    * Create a table.
1579    * @param tableName name of the table to create
1580    * @param families column families to create in the table
1581    * @param splitRows keys to pre-split the table on
1582    * @return An HTable instance for the created table.
1583    * @throws IOException
1584    */
1585   public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
1586       throws IOException {
1587     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1588     for(byte[] family:families) {
1589       HColumnDescriptor hcd = new HColumnDescriptor(family);
1590       desc.addFamily(hcd);
1591     }
1592     getHBaseAdmin().createTable(desc, splitRows);
1593     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1594     waitUntilAllRegionsAssigned(desc.getTableName());
1595     return new HTable(getConfiguration(), desc.getTableName());
1596   }
1597 
1598   /**
1599    * Modify a table synchronously. The waiting logic is similar to that of {@code admin.rb#alter_status}.
1600    */
1601   @SuppressWarnings("serial")
1602   public static void modifyTableSync(Admin admin, HTableDescriptor desc)
1603       throws IOException, InterruptedException {
1604     admin.modifyTable(desc.getTableName(), desc);
1605     Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
1606       setFirst(0);
1607       setSecond(0);
1608     }};
         // Poll the alter status until the update completes, waiting up to 500 seconds.
         // Note: the original for loop was guarded by "status.getFirst() != 0" and so
         // never ran, because status starts as (0, 0); a do/while fetches the status
         // at least once.
         int i = 0;
1609     do {
1610       status = admin.getAlterStatus(desc.getTableName());
1611       if (status.getSecond() != 0) {
1612         LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
1613           + " regions updated.");
1614         Thread.sleep(1 * 1000l);
1615       } else {
1616         LOG.debug("All regions updated.");
1617         break;
1618       }
1619     } while (status.getFirst() != 0 && i++ < 500);
1620     if (status.getFirst() != 0) {
1621       throw new IOException("Failed to update all regions after 500 seconds.");
1622     }
1623   }
1624 
1625   /**
1626    * Set the number of Region replicas.
1627    */
1628   public static void setReplicas(Admin admin, TableName table, int replicaCount)
1629       throws IOException, InterruptedException {
1630     admin.disableTable(table);
1631     HTableDescriptor desc = admin.getTableDescriptor(table);
1632     desc.setRegionReplication(replicaCount);
1633     modifyTableSync(admin, desc);
1634     admin.enableTable(table);
1635   }
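
       /**
        * A minimal usage sketch (editor's addition, not part of the original utility):
        * bump a table to two region replicas and back. The table name is hypothetical
        * and the table is assumed to already exist and be enabled.
        */
       private void exampleSetReplicas() throws IOException, InterruptedException {
         TableName tn = TableName.valueOf("exampleTable"); // hypothetical table name
         setReplicas(getHBaseAdmin(), tn, 2); // disable, alter replica count, re-enable
         setReplicas(getHBaseAdmin(), tn, 1); // restore the single-replica default
       }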
1636 
1637   /**
1638    * Drop an existing table
1639    * @param tableName existing table
1640    */
1641   public void deleteTable(String tableName) throws IOException {
1642     deleteTable(TableName.valueOf(tableName));
1643   }
1644 
1645   /**
1646    * Drop an existing table
1647    * @param tableName existing table
1648    */
1649   public void deleteTable(byte[] tableName) throws IOException {
1650     deleteTable(TableName.valueOf(tableName));
1651   }
1652 
1653   /**
1654    * Drop an existing table
1655    * @param tableName existing table
1656    */
1657   public void deleteTable(TableName tableName) throws IOException {
1658     try {
1659       getHBaseAdmin().disableTable(tableName);
1660     } catch (TableNotEnabledException e) {
1661       LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1662     }
1663     getHBaseAdmin().deleteTable(tableName);
1664   }
1665 
1666   // ==========================================================================
1667   // Canned table and table descriptor creation
1668   // TODO replace HBaseTestCase
1669 
1670   public final static byte [] fam1 = Bytes.toBytes("colfamily11");
1671   public final static byte [] fam2 = Bytes.toBytes("colfamily21");
1672   public final static byte [] fam3 = Bytes.toBytes("colfamily31");
1673   public static final byte[][] COLUMNS = {fam1, fam2, fam3};
1674   private static final int MAXVERSIONS = 3;
1675 
1676   public static final char FIRST_CHAR = 'a';
1677   public static final char LAST_CHAR = 'z';
1678   public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
1679   public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1680 
1681   /**
1682    * Create a table descriptor for a table named <code>name</code>, with
1683    * {@link #COLUMNS} as its families.
1684    * @param name Name to give the table.
        * @param minVersions Minimum number of versions to keep per column.
1685    * @param versions Maximum number of versions to allow per column.
        * @param ttl Time-to-live for cells, in seconds.
        * @param keepDeleted Whether to keep deleted cells.
1686    * @return A table descriptor with the given settings.
1687    */
1688   public HTableDescriptor createTableDescriptor(final String name,
1689       final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
1690     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
1691     for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1692       htd.addFamily(new HColumnDescriptor(cfName)
1693           .setMinVersions(minVersions)
1694           .setMaxVersions(versions)
1695           .setKeepDeletedCells(keepDeleted)
1696           .setBlockCacheEnabled(false)
1697           .setTimeToLive(ttl)
1698       );
1699     }
1700     return htd;
1701   }
1702 
1703   /**
1704    * Create a table descriptor for a table named <code>name</code>, with
1705    * {@link #COLUMNS} as its families and default versioning and TTL settings.
1706    * @param name Name to give the table.
1707    * @return A table descriptor with the given settings.
1708    */
1709   public HTableDescriptor createTableDescriptor(final String name) {
1710     return createTableDescriptor(name,  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1711         MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1712   }
1713 
1714   /**
1715    * Create an HRegion that writes to the local tmp dirs
1716    * @param desc table descriptor
1717    * @param startKey start key of the region
1718    * @param endKey end key of the region
1719    * @return an HRegion writing under the local test data directory
1720    * @throws IOException
1721    */
1722   public HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey,
1723       byte [] endKey)
1724   throws IOException {
1725     HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
1726     return createLocalHRegion(hri, desc);
1727   }
1728 
1729   /**
1730    * Create an HRegion that writes to the local tmp dirs
1731    * @param info region info
1732    * @param desc table descriptor
1733    * @return an HRegion writing under the local test data directory
1734    * @throws IOException
1735    */
1736   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
1737     return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
1738   }
1739 
1740   /**
1741    * Create an HRegion that writes to the local tmp dirs with specified wal
1742    * @param info regioninfo
1743    * @param desc table descriptor
1744    * @param wal wal for this region.
1745    * @return created hregion
1746    * @throws IOException
1747    */
1748   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, WAL wal)
1749       throws IOException {
1750     return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, wal);
1751   }
1752 
1753   /**
1754    * @param tableName name of the table backing the region
1755    * @param startKey start key of the region
1756    * @param stopKey stop key of the region
1757    * @param callingMethod name of the calling method
1758    * @param conf Configuration to use
1759    * @param isReadOnly true to mark the table read-only
        * @param durability durability setting for the region's writes
        * @param wal WAL for the region to use
1760    * @param families column families to create
1761    * @throws IOException
1762    * @return A region on which you must call
1763    *         {@link HRegion#closeHRegion(HRegion)} when done.
1764    */
1765   public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
1766       String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
1767       WAL wal, byte[]... families) throws IOException {
1768     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
1769     htd.setReadOnly(isReadOnly);
1770     for (byte[] family : families) {
1771       HColumnDescriptor hcd = new HColumnDescriptor(family);
1772       // Set default to be three versions.
1773       hcd.setMaxVersions(Integer.MAX_VALUE);
1774       htd.addFamily(hcd);
1775     }
1776     htd.setDurability(durability);
1777     HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
1778     return createLocalHRegion(info, htd, wal);
1779   }
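
       /**
        * A minimal usage sketch (editor's addition): create a local region, write one
        * row, and close it again. The family and table names are hypothetical.
        */
       private void exampleLocalHRegion() throws IOException {
         byte[] family = Bytes.toBytes("exampleFamily");
         HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("exampleLocalTable"));
         htd.addFamily(new HColumnDescriptor(family));
         // empty start/end keys give the region the whole key range
         HRegion region = createLocalHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
         try {
           Put put = new Put(Bytes.toBytes("row1"));
           put.add(family, null, Bytes.toBytes("value1"));
           region.put(put);
         } finally {
           HRegion.closeHRegion(region); // flush and release local resources
         }
       }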
1780   //
1781   // ==========================================================================
1782 
1783   /**
1784    * Truncate an existing table by deleting all of its rows.
1785    * @param tableName existing table
1786    * @return HTable for the truncated table
1787    * @throws IOException
1788    */
1789   public HTable truncateTable(byte[] tableName) throws IOException {
1790     return truncateTable(TableName.valueOf(tableName));
1791   }
1792 
1793   /**
1794    * Truncate an existing table by deleting all of its rows.
1795    * @param tableName existing table
1796    * @return HTable for the truncated table
1797    * @throws IOException
1798    */
1799   public HTable truncateTable(TableName tableName) throws IOException {
1800     HTable table = new HTable(getConfiguration(), tableName);
1801     Scan scan = new Scan();
1802     ResultScanner resScan = table.getScanner(scan);
1803     for (Result res : resScan) {
1804       Delete del = new Delete(res.getRow());
1805       table.delete(del);
1806     }
1807     // Close the scanner; the original code leaked it and instead opened a
1808     // second, unused scanner.
         resScan.close();
1809     return table;
1810   }
1811 
1812   /**
1813    * Load table with rows from 'aaa' to 'zzz'.
1814    * @param t Table
1815    * @param f Family
1816    * @return Count of rows loaded.
1817    * @throws IOException
1818    */
1819   public int loadTable(final Table t, final byte[] f) throws IOException {
1820     return loadTable(t, new byte[][] {f});
1821   }
1822 
1823   /**
1824    * Load table with rows from 'aaa' to 'zzz'.
1825    * @param t Table
1826    * @param f Family
1827    * @return Count of rows loaded.
1828    * @throws IOException
1829    */
1830   public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
1831     return loadTable(t, new byte[][] {f}, null, writeToWAL);
1832   }
1833 
1834   /**
1835    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1836    * @param t Table
1837    * @param f Array of Families to load
1838    * @return Count of rows loaded.
1839    * @throws IOException
1840    */
1841   public int loadTable(final Table t, final byte[][] f) throws IOException {
1842     return loadTable(t, f, null);
1843   }
1844 
1845   /**
1846    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1847    * @param t Table
1848    * @param f Array of Families to load
1849    * @param value the values of the cells. If null is passed, the row key is used as value
1850    * @return Count of rows loaded.
1851    * @throws IOException
1852    */
1853   public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
1854     return loadTable(t, f, value, true);
1855   }
1856 
1857   /**
1858    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1859    * @param t Table
1860    * @param f Array of Families to load
1861    * @param value the values of the cells. If null is passed, the row key is used as value
1862    * @return Count of rows loaded.
1863    * @throws IOException
1864    */
1865   public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException {
1866     List<Put> puts = new ArrayList<>();
1867     for (byte[] row : HBaseTestingUtility.ROWS) {
1868       Put put = new Put(row);
1869       put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
1870       for (int i = 0; i < f.length; i++) {
1871         put.add(f[i], null, value != null ? value : row);
1872       }
1873       puts.add(put);
1874     }
1875     t.put(puts);
1876     return puts.size();
1877   }
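
       /**
        * A minimal usage sketch (editor's addition): load the standard 'aaa'..'zzz'
        * rows into one family and cross-check the total with {@link #countRows(Table)}.
        * Assumes the table already exists with the given family.
        */
       private void exampleLoadAndCount(Table table, byte[] family) throws IOException {
         int loaded = loadTable(table, family); // 26^3 = 17576 rows
         int counted = countRows(table);
         if (loaded != counted) {
           throw new AssertionError("Loaded " + loaded + " rows but counted " + counted);
         }
       }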
1878 
1879   /** Tracks and validates the table rows
1880    * generated with {@link HBaseTestingUtility#loadTable(Table, byte[])}
1881    */
1882   public static class SeenRowTracker {
1883     int dim = 'z' - 'a' + 1;
1884     int[][][] seenRows = new int[dim][dim][dim]; //count of how many times the row is seen
1885     byte[] startRow;
1886     byte[] stopRow;
1887 
1888     public SeenRowTracker(byte[] startRow, byte[] stopRow) {
1889       this.startRow = startRow;
1890       this.stopRow = stopRow;
1891     }
1892 
1893     void reset() {
1894       for (byte[] row : ROWS) {
1895         seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
1896       }
1897     }
1898 
1899     int i(byte b) {
1900       return b - 'a';
1901     }
1902 
1903     public void addRow(byte[] row) {
1904       seenRows[i(row[0])][i(row[1])][i(row[2])]++;
1905     }
1906 
1907     /** Validate that all the rows between startRow and stopRow are seen exactly once,
1908      * and that no other rows are seen at all.
1909      */
1910     public void validate() {
1911       for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1912         for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1913           for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1914             int count = seenRows[i(b1)][i(b2)][i(b3)];
1915             int expectedCount = 0;
1916             if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
1917                 && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
1918               expectedCount = 1;
1919             }
1920             if (count != expectedCount) {
1921               String row = new String(new byte[] {b1,b2,b3});
1922               throw new RuntimeException("Row:" + row + " has a seen count of " + count + " instead of " + expectedCount);
1923             }
1924           }
1925         }
1926       }
1927     }
1928   }
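
       /**
        * A minimal usage sketch (editor's addition): check that a bounded scan over data
        * written by {@link #loadTable(Table, byte[])} sees exactly the rows in
        * ['bbb', 'yyy'). The bounds are illustrative only.
        */
       private void exampleSeenRowTracker(Table table, byte[] family) throws IOException {
         loadTable(table, family);
         SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("bbb"), Bytes.toBytes("yyy"));
         ResultScanner scanner = table.getScanner(new Scan(Bytes.toBytes("bbb"), Bytes.toBytes("yyy")));
         for (Result result : scanner) {
           tracker.addRow(result.getRow());
         }
         scanner.close();
         tracker.validate(); // throws RuntimeException on any missed or duplicated row
       }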
1929 
1930   public int loadRegion(final HRegion r, final byte[] f) throws IOException {
1931     return loadRegion(r, f, false);
1932   }
1933 
1934   /**
1935    * Load region with rows from 'aaa' to 'zzz'.
1936    * @param r Region
1937    * @param f Family
1938    * @param flush flush the cache if true
1939    * @return Count of rows loaded.
1940    * @throws IOException
1941    */
1942   public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
1943   throws IOException {
1944     byte[] k = new byte[3];
1945     int rowCount = 0;
1946     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1947       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1948         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1949           k[0] = b1;
1950           k[1] = b2;
1951           k[2] = b3;
1952           Put put = new Put(k);
1953           put.setDurability(Durability.SKIP_WAL);
1954           put.add(f, null, k);
1956 
1957           int preRowCount = rowCount;
1958           int pause = 10;
1959           int maxPause = 1000;
1960           while (rowCount == preRowCount) {
1961             try {
1962               r.put(put);
1963               rowCount++;
1964             } catch (RegionTooBusyException e) {
1965               pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
1966               Threads.sleep(pause);
1967             }
1968           }
1969         }
1970       }
1971       if (flush) {
1972         r.flushcache();
1973       }
1974     }
1975     return rowCount;
1976   }
1977 
1978   public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow) throws IOException {
1979     for (int i = startRow; i < endRow; i++) {
1980       byte[] data = Bytes.toBytes(String.valueOf(i));
1981       Put put = new Put(data);
1982       put.add(f, null, data);
1983       t.put(put);
1984     }
1985   }
1986 
1987   public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow) throws IOException {
1988     for (int i = startRow; i < endRow; i++) {
1989       byte[] data = Bytes.toBytes(String.valueOf(i));
1990       Delete delete = new Delete(data);
1991       delete.deleteFamily(f);
1992       t.delete(delete);
1993     }
1994   }
1995 
1996   /**
1997    * Return the number of rows in the given table.
1998    */
1999   public int countRows(final Table table) throws IOException {
2000     Scan scan = new Scan();
2001     ResultScanner results = table.getScanner(scan);
2002     int count = 0;
2003     for (@SuppressWarnings("unused") Result res : results) {
2004       count++;
2005     }
2006     results.close();
2007     return count;
2008   }
2009 
2010   public int countRows(final Table table, final byte[]... families) throws IOException {
2011     Scan scan = new Scan();
2012     for (byte[] family: families) {
2013       scan.addFamily(family);
2014     }
2015     ResultScanner results = table.getScanner(scan);
2016     int count = 0;
2017     for (@SuppressWarnings("unused") Result res : results) {
2018       count++;
2019     }
2020     results.close();
2021     return count;
2022   }
2023 
2024   /**
2025    * Return an MD5 digest computed over all of the row keys of a table.
2026    */
2027   public String checksumRows(final Table table) throws Exception {
2028     Scan scan = new Scan();
2029     ResultScanner results = table.getScanner(scan);
2030     MessageDigest digest = MessageDigest.getInstance("MD5");
2031     for (Result res : results) {
2032       digest.update(res.getRow());
2033     }
2034     results.close();
2035     // Convert the digest to a hex string; MessageDigest#toString() describes the
         // digest object rather than returning the digest value.
         StringBuilder sb = new StringBuilder();
         for (byte b : digest.digest()) {
           sb.append(String.format("%02x", b));
         }
         return sb.toString();
2036   }
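
       /**
        * A minimal usage sketch (editor's addition): verify that an operation that should
        * not change row keys, here a flush, leaves the row-key digest unchanged.
        */
       private void exampleChecksumRows(Table table) throws Exception {
         String before = checksumRows(table);
         getHBaseAdmin().flush(table.getName()); // any key-preserving operation
         String after = checksumRows(table);
         if (!before.equals(after)) {
           throw new AssertionError("Row keys changed: " + before + " -> " + after);
         }
       }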
2037 
2038   /**
2039    * Creates many regions, named "aaa" to "zzz".
2040    *
2041    * @param table  The table to use for the data.
2042    * @param columnFamily  The family to insert the data into.
2043    * @return count of regions created.
2044    * @throws IOException When creating the regions fails.
2045    */
2046   public int createMultiRegions(HTable table, byte[] columnFamily)
2047   throws IOException {
2048     return createMultiRegions(getConfiguration(), table, columnFamily);
2049   }
2050 
2051   /** All the row values for the data loaded by {@link #loadTable(Table, byte[])} */
2052   public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
2053   static {
2054     int i = 0;
2055     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2056       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2057         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2058           ROWS[i][0] = b1;
2059           ROWS[i][1] = b2;
2060           ROWS[i][2] = b3;
2061           i++;
2062         }
2063       }
2064     }
2065   }
2066 
2067   public static final byte[][] KEYS = {
2068     HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
2069     Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
2070     Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
2071     Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
2072     Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
2073     Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
2074     Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
2075     Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
2076     Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
2077   };
2078 
2079   public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
2080       Bytes.toBytes("bbb"),
2081       Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
2082       Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
2083       Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
2084       Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
2085       Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
2086       Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
2087       Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
2088       Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
2089   };
2090 
2091   /**
2092    * Creates many regions, named "aaa" to "zzz".
2093    * @param c Configuration to use.
2094    * @param table  The table to use for the data.
2095    * @param columnFamily  The family to insert the data into.
2096    * @return count of regions created.
2097    * @throws IOException When creating the regions fails.
2098    */
2099   public int createMultiRegions(final Configuration c, final HTable table,
2100       final byte[] columnFamily)
2101   throws IOException {
2102     return createMultiRegions(c, table, columnFamily, KEYS);
2103   }
2104 
2105   /**
2106    * Creates the specified number of regions in the specified table.
2107    * @param c Configuration to use
2108    * @param table the table to create regions in
2109    * @param family the family to ensure exists in the table
2110    * @param numRegions the number of regions to create; must be at least 3
2111    * @return count of regions created
2112    * @throws IOException
2113    */
2114   public int createMultiRegions(final Configuration c, final HTable table,
2115       final byte [] family, int numRegions)
2116   throws IOException {
2117     if (numRegions < 3) throw new IOException("Must create at least 3 regions");
2118     byte [] startKey = Bytes.toBytes("aaaaa");
2119     byte [] endKey = Bytes.toBytes("zzzzz");
2120     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
2121     byte [][] regionStartKeys = new byte[splitKeys.length+1][];
2122     System.arraycopy(splitKeys, 0, regionStartKeys, 1, splitKeys.length);
2123     regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
2124     return createMultiRegions(c, table, family, regionStartKeys);
2125   }
2126 
2127   public int createMultiRegions(final Configuration c, final HTable table,
2128       final byte[] columnFamily, byte [][] startKeys)
2129   throws IOException {
2130     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2131     Table meta = new HTable(c, TableName.META_TABLE_NAME);
2132     HTableDescriptor htd = table.getTableDescriptor();
2133     if(!htd.hasFamily(columnFamily)) {
2134       HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
2135       htd.addFamily(hcd);
2136     }
2137     // remove empty region - this is tricky as the mini cluster during the test
2138     // setup already has the "<tablename>,,123456789" row with an empty start
2139     // and end key. Adding the custom regions below adds those blindly,
2140     // including the new start region from empty to "bbb". lg
2141     List<byte[]> rows = getMetaTableRows(htd.getTableName());
2142     String regionToDeleteInFS = table
2143         .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
2144         .getRegionInfo().getEncodedName();
2145     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
2146     // add custom ones
2147     int count = 0;
2148     for (int i = 0; i < startKeys.length; i++) {
2149       int j = (i + 1) % startKeys.length;
2150       HRegionInfo hri = new HRegionInfo(table.getName(),
2151         startKeys[i], startKeys[j]);
2152       MetaTableAccessor.addRegionToMeta(meta, hri);
2153       newRegions.add(hri);
2154       count++;
2155     }
2156     // see comment above, remove "old" (or previous) single region
2157     for (byte[] row : rows) {
2158       LOG.info("createMultiRegions: deleting meta row -> " +
2159         Bytes.toStringBinary(row));
2160       meta.delete(new Delete(row));
2161     }
2162     // remove the "old" region from FS
2163     Path tableDir = new Path(getDefaultRootDirPath().toString()
2164         + System.getProperty("file.separator") + htd.getTableName()
2165         + System.getProperty("file.separator") + regionToDeleteInFS);
2166     FileSystem.get(c).delete(tableDir, true);
2167     // flush cache of regions
2168     HConnection conn = table.getConnection();
2169     conn.clearRegionCache();
2170     // assign all the new regions IF table is enabled.
2171     Admin admin = getHBaseAdmin();
2172     if (admin.isTableEnabled(table.getName())) {
2173       for(HRegionInfo hri : newRegions) {
2174         admin.assign(hri.getRegionName());
2175       }
2176     }
2177 
2178     meta.close();
2179 
2180     return count;
2181   }
2182 
2183   /**
2184    * Create rows in hbase:meta for regions of the specified table with the specified
2185    * start keys.  The first startKey should be a 0 length byte array if you
2186    * want to form a proper range of regions.
2187    * @param conf Configuration to use
2188    * @param htd descriptor of the table to add regions for
2189    * @param startKeys region start keys; sorted before use
2190    * @return list of region info for regions added to meta
2191    * @throws IOException
2192    */
2193   public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
2194       final HTableDescriptor htd, byte [][] startKeys)
2195   throws IOException {
2196     Table meta = new HTable(conf, TableName.META_TABLE_NAME);
2197     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2198     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
2199     // add custom ones
2200     for (int i = 0; i < startKeys.length; i++) {
2201       int j = (i + 1) % startKeys.length;
2202       HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
2203           startKeys[j]);
2204       MetaTableAccessor.addRegionToMeta(meta, hri);
2205       newRegions.add(hri);
2206     }
2207 
2208     meta.close();
2209     return newRegions;
2210   }
2211 
2212   /**
2213    * Returns all rows from the hbase:meta table.
2214    *
2215    * @throws IOException When reading the rows fails.
2216    */
2217   public List<byte[]> getMetaTableRows() throws IOException {
2218     // TODO: Redo using MetaTableAccessor class
2219     Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2220     List<byte[]> rows = new ArrayList<byte[]>();
2221     ResultScanner s = t.getScanner(new Scan());
2222     for (Result result : s) {
2223       LOG.info("getMetaTableRows: row -> " +
2224         Bytes.toStringBinary(result.getRow()));
2225       rows.add(result.getRow());
2226     }
2227     s.close();
2228     t.close();
2229     return rows;
2230   }
2231 
2232   /**
2233    * Returns all rows from the hbase:meta table for a given user table
2234    *
2235    * @throws IOException When reading the rows fails.
2236    */
2237   public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2238     // TODO: Redo using MetaTableAccessor.
2239     Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2240     List<byte[]> rows = new ArrayList<byte[]>();
2241     ResultScanner s = t.getScanner(new Scan());
2242     for (Result result : s) {
2243       HRegionInfo info = HRegionInfo.getHRegionInfo(result);
2244       if (info == null) {
2245         LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2246         // TODO figure out what to do for this new hosed case.
2247         continue;
2248       }
2249 
2250       if (info.getTable().equals(tableName)) {
2251         LOG.info("getMetaTableRows: row -> " +
2252             Bytes.toStringBinary(result.getRow()) + info);
2253         rows.add(result.getRow());
2254       }
2255     }
2256     s.close();
2257     t.close();
2258     return rows;
2259   }
2260 
2261   /**
2262    * Tool to get the reference to the region server object that holds the
2263    * region of the specified user table.
2264    * It first searches for the meta rows that contain the region of the
2265    * specified table, then gets the index of that RS, and finally retrieves
2266    * the RS's reference.
2267    * @param tableName user table to lookup in hbase:meta
2268    * @return region server that holds it, null if the row doesn't exist
2269    * @throws IOException
2270    * @throws InterruptedException
2271    */
2272   public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2273       throws IOException, InterruptedException {
2274     List<byte[]> metaRows = getMetaTableRows(tableName);
2275     if (metaRows == null || metaRows.isEmpty()) {
2276       return null;
2277     }
2278     LOG.debug("Found " + metaRows.size() + " rows for table " +
2279       tableName);
2280     byte [] firstrow = metaRows.get(0);
2281     LOG.debug("FirstRow=" + Bytes.toString(firstrow));
2282     long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2283       HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2284     int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2285       HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
2286     // HBASE_CLIENT_PAUSE is in milliseconds, so the retry interval must be
         // MILLISECONDS; the original code passed MICROSECONDS, making the sleeps
         // between retries far too short.
         RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MILLISECONDS);
2287     while(retrier.shouldRetry()) {
2288       int index = getMiniHBaseCluster().getServerWith(firstrow);
2289       if (index != -1) {
2290         return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2291       }
2292       // Came back -1.  Region may not be online yet.  Sleep a while.
2293       retrier.sleepUntilNextRetry();
2294     }
2295     return null;
2296   }
2297 
2298   /**
2299    * Starts a <code>MiniMRCluster</code> with a default number of
2300    * <code>TaskTracker</code>s.
2301    *
2302    * @throws IOException When starting the cluster fails.
2303    */
2304   public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2305     startMiniMapReduceCluster(2);
2306     return mrCluster;
2307   }
2308 
2309   /**
2310    * The TaskTracker has a bug: changing the hadoop.log.dir system property
2311    * does not change its internal static LOG_DIR variable.
2312    */
2313   private void forceChangeTaskLogDir() {
2314     Field logDirField;
2315     try {
2316       logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2317       logDirField.setAccessible(true);
2318 
2319       Field modifiersField = Field.class.getDeclaredField("modifiers");
2320       modifiersField.setAccessible(true);
2321       modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2322 
2323       logDirField.set(null, new File(hadoopLogDir, "userlogs"));
2324     } catch (SecurityException e) {
2325       throw new RuntimeException(e);
2326     } catch (NoSuchFieldException e) {
2328       throw new RuntimeException(e);
2329     } catch (IllegalArgumentException e) {
2330       throw new RuntimeException(e);
2331     } catch (IllegalAccessException e) {
2332       throw new RuntimeException(e);
2333     }
2334   }
2335 
2336   /**
2337    * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
2338    * filesystem.
2339    * @param servers  The number of <code>TaskTracker</code>s to start.
2340    * @throws IOException When starting the cluster fails.
2341    */
2342   private void startMiniMapReduceCluster(final int servers) throws IOException {
2343     if (mrCluster != null) {
2344       throw new IllegalStateException("MiniMRCluster is already running");
2345     }
2346     LOG.info("Starting mini mapreduce cluster...");
2347     setupClusterTestDir();
2348     createDirsAndSetProperties();
2349 
2350     forceChangeTaskLogDir();
2351 
2352     //// hadoop2 specific settings
2353     // Tests were failing because this process used 6GB of virtual memory and was getting killed.
2354     // We raise the allowed virtual-memory ratio so that processes don't get killed.
2355     conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2356 
2357     // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
2358     // this avoids the problem by disabling speculative task execution in tests.
2359     conf.setBoolean("mapreduce.map.speculative", false);
2360     conf.setBoolean("mapreduce.reduce.speculative", false);
2361     ////
2362 
2363     // Allow the user to override FS URI for this map-reduce cluster to use.
2364     mrCluster = new MiniMRCluster(servers,
2365       FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
2366       null, null, new JobConf(this.conf));
2367     JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2368     if (jobConf == null) {
2369       jobConf = mrCluster.createJobConf();
2370     }
2371 
2372     jobConf.set("mapreduce.cluster.local.dir",
2373       conf.get("mapreduce.cluster.local.dir")); // Hadoop MiniMR overwrites this, though it should not
2374     LOG.info("Mini mapreduce cluster started");
2375 
2376     // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
2377     // Our HBase MR jobs need several of these settings in order to properly run.  So we copy the
2378     // necessary config properties here.  YARN-129 required adding a few properties.
2379     conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
2380     // this for mrv2 support; mr1 ignores this
2381     conf.set("mapreduce.framework.name", "yarn");
2382     conf.setBoolean("yarn.is.minicluster", true);
2383     String rmAddress = jobConf.get("yarn.resourcemanager.address");
2384     if (rmAddress != null) {
2385       conf.set("yarn.resourcemanager.address", rmAddress);
2386     }
2387     String historyAddress = jobConf.get("mapreduce.jobhistory.address");
2388     if (historyAddress != null) {
2389       conf.set("mapreduce.jobhistory.address", historyAddress);
2390     }
2391     String schedulerAddress =
2392       jobConf.get("yarn.resourcemanager.scheduler.address");
2393     if (schedulerAddress != null) {
2394       conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2395     }
2396   }
2397 
2398   /**
2399    * Stops the previously started <code>MiniMRCluster</code>.
2400    */
2401   public void shutdownMiniMapReduceCluster() {
2402     if (mrCluster != null) {
2403       LOG.info("Stopping mini mapreduce cluster...");
2404       mrCluster.shutdown();
2405       mrCluster = null;
2406       LOG.info("Mini mapreduce cluster stopped");
2407     }
2408     // Restore configuration to point to local jobtracker
2409     conf.set("mapreduce.jobtracker.address", "local");
2410   }
2411 
2412   /**
2413    * Create a stubbed-out RegionServerServices, mainly for getting at the FS.
2414    */
2415   public RegionServerServices createMockRegionServerService() throws IOException {
2416     return createMockRegionServerService((ServerName)null);
2417   }
2418 
2419   /**
2420    * Create a stubbed-out RegionServerServices, mainly for getting at the FS.
2421    * This version is used by TestTokenAuthentication.
2422    */
2423   public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
2424     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2425     rss.setFileSystem(getTestFileSystem());
2426     rss.setRpcServer(rpc);
2427     return rss;
2428   }
2429 
2430   /**
2431    * Create a stubbed-out RegionServerServices, mainly for getting at the FS.
2432    * This version is used by TestOpenRegionHandler.
2433    */
2434   public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2435     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2436     rss.setFileSystem(getTestFileSystem());
2437     return rss;
2438   }
2439 
2440   /**
2441    * Switches the logger for the given class to DEBUG level.
2442    *
2443    * @param clazz  The class for which to switch to debug logging.
2444    */
2445   public void enableDebug(Class<?> clazz) {
2446     Log l = LogFactory.getLog(clazz);
2447     if (l instanceof Log4JLogger) {
2448       ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2449     } else if (l instanceof Jdk14Logger) {
2450       ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2451     }
2452   }
2453 
2454   /**
2455    * Expire the Master's session
2456    * @throws Exception
2457    */
2458   public void expireMasterSession() throws Exception {
2459     HMaster master = getMiniHBaseCluster().getMaster();
2460     expireSession(master.getZooKeeper(), false);
2461   }
2462 
2463   /**
2464    * Expire a region server's session
2465    * @param index which RS
2466    * @throws Exception
2467    */
2468   public void expireRegionServerSession(int index) throws Exception {
2469     HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2470     expireSession(rs.getZooKeeper(), false);
2471     decrementMinRegionServerCount();
2472   }
2473 
2474   private void decrementMinRegionServerCount() {
2475     // decrement the count for this.conf, for a newly spawned master
2476     // this.hbaseCluster shares this configuration too
2477     decrementMinRegionServerCount(getConfiguration());
2478 
2479     // each master thread keeps a copy of configuration
2480     for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2481       decrementMinRegionServerCount(master.getMaster().getConfiguration());
2482     }
2483   }
2484 
2485   private void decrementMinRegionServerCount(Configuration conf) {
2486     int currentCount = conf.getInt(
2487         ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2488     if (currentCount != -1) {
2489       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
2490           Math.max(currentCount - 1, 1));
2491     }
2492   }
2493 
2494   public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
2495     expireSession(nodeZK, false);
2496   }
2497 
2498   @Deprecated
2499   public void expireSession(ZooKeeperWatcher nodeZK, Server server)
2500     throws Exception {
2501     expireSession(nodeZK, false);
2502   }
2503 
2504   /**
2505    * Expire a ZooKeeper session as recommended in ZooKeeper documentation
2506    * http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A4
2507    * There are issues when doing this:
2508    * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
2509    * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
2510    *
2511    * @param nodeZK - the ZK watcher to expire
2512    * @param checkStatus - true to check if we can create an HTable with the
2513    *                    current configuration.
2514    */
2515   public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
2516     throws Exception {
2517     Configuration c = new Configuration(this.conf);
2518     String quorumServers = ZKConfig.getZKQuorumServersString(c);
2519     ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
2520     byte[] password = zk.getSessionPasswd();
2521     long sessionID = zk.getSessionId();
2522 
2523     // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
2524     //  so we create a first watcher to be sure that the
2525     //  event was sent. We expect that if our watcher receives the event
2526     //  other watchers on the same machine will get it as well.
2527     // When we ask to close the connection, ZK does not close it before
2528     //  we receive all the events, so we don't have to capture the event;
2529     //  just closing the connection should be enough.
2530     ZooKeeper monitor = new ZooKeeper(quorumServers,
2531       1000, new org.apache.zookeeper.Watcher(){
2532       @Override
2533       public void process(WatchedEvent watchedEvent) {
2534         LOG.info("Monitor ZKW received event="+watchedEvent);
2535       }
2536     } , sessionID, password);
2537 
2538     // Making it expire
2539     ZooKeeper newZK = new ZooKeeper(quorumServers,
2540         1000, EmptyWatcher.instance, sessionID, password);
2541 
2542     // Ensure that we have a connection to the server before closing down; otherwise
2543     // the close-session event will be swallowed before we reach the CONNECTING state.
2544     long start = System.currentTimeMillis();
2545     while (newZK.getState() != States.CONNECTED
2546          && System.currentTimeMillis() - start < 1000) {
2547        Thread.sleep(1);
2548     }
2549     newZK.close();
2550     LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
2551 
2552     // Now closing & waiting to be sure that the clients get it.
2553     monitor.close();
2554 
2555     if (checkStatus) {
2556       new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
2557     }
2558   }
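
       /**
        * A minimal usage sketch (editor's addition): force a master session expiry and
        * wait for the cluster to recover. The 60-second window is illustrative only.
        */
       private void exampleExpireMasterSession() throws Exception {
         expireMasterSession();
         // Wait for a (backup or restarted) master to become active and ready.
         getMiniHBaseCluster().waitForActiveAndReadyMaster(60000);
       }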
2559 
2560   /**
2561    * Get the Mini HBase cluster.
2562    *
2563    * @return hbase cluster
2564    * @see #getHBaseClusterInterface()
2565    */
2566   public MiniHBaseCluster getHBaseCluster() {
2567     return getMiniHBaseCluster();
2568   }
2569 
2570   /**
2571    * Returns the HBaseCluster instance.
2572    * <p>The returned object can be any of the subclasses of HBaseCluster, and tests
2573    * referring to it should not assume that the cluster is a mini cluster or a
2574    * distributed one. If a test only works on a mini cluster, the specific
2575    * method {@link #getMiniHBaseCluster()} can be used instead, without the
2576    * need to type-cast.
2577    */
2578   public HBaseCluster getHBaseClusterInterface() {
2579     //implementation note: we should rename this method as #getHBaseCluster(),
2580     //but this would require refactoring 90+ calls.
2581     return hbaseCluster;
2582   }
2583 
2584   /**
2585    * Get a Connection to the cluster.
2586    * Not thread-safe (This class needs a lot of work to make it thread-safe).
2587    * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
2588    * @throws IOException
2589    */
2590   public Connection getConnection() throws IOException {
2591     if (this.connection == null) {
2592       this.connection = ConnectionFactory.createConnection(this.conf);
2593     }
2594     return this.connection;
2595   }
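
       /**
        * A minimal usage sketch (editor's addition): obtain a Table from the shared
        * Connection rather than constructing an HTable directly. Close the Table,
        * but not the shared Connection. The table and row names are hypothetical.
        */
       private void exampleSharedConnection() throws IOException {
         Table table = getConnection().getTable(TableName.valueOf("exampleTable"));
         try {
           table.get(new Get(Bytes.toBytes("row1")));
         } finally {
           table.close(); // the Connection itself stays open for other users
         }
       }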
2596 
2597   /**
2598    * Returns an Admin instance.
2599    * This instance is shared between HBaseTestingUtility instance users.
2600    * Closing it has no effect; it will be closed automatically when the
2601    * cluster shuts down.
2602    *
2603    * @return An Admin instance.
2604    * @throws IOException
2605    */
2606   public synchronized HBaseAdmin getHBaseAdmin()
2607   throws IOException {
2608     if (hbaseAdmin == null){
2609       this.hbaseAdmin = new HBaseAdminForTests(getConnection());
2610     }
2611     return hbaseAdmin;
2612   }
2613 
2614   private HBaseAdminForTests hbaseAdmin = null;
2615   private static class HBaseAdminForTests extends HBaseAdmin {
2616     public HBaseAdminForTests(Connection connection) throws MasterNotRunningException,
2617         ZooKeeperConnectionException, IOException {
2618       super(connection);
2619     }
2620 
2621     @Override
2622     public synchronized void close() throws IOException {
2623       LOG.warn("close() called on HBaseAdmin instance returned from " +
2624         "HBaseTestingUtility.getHBaseAdmin()");
2625     }
2626 
2627     private synchronized void close0() throws IOException {
2628       super.close();
2629     }
2630   }
2631 
2632   /**
2633    * Returns a ZooKeeperWatcher instance.
2634    * This instance is shared between HBaseTestingUtility instance users.
2635    * Don't close it; it will be closed automatically when the
2636    * cluster shuts down.
2637    *
2638    * @return The ZooKeeperWatcher instance.
2639    * @throws IOException
2640    */
2641   public synchronized ZooKeeperWatcher getZooKeeperWatcher()
2642     throws IOException {
2643     if (zooKeeperWatcher == null) {
2644       zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
2645         new Abortable() {
2646         @Override public void abort(String why, Throwable e) {
2647           throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
2648         }
2649         @Override public boolean isAborted() {return false;}
2650       });
2651     }
2652     return zooKeeperWatcher;
2653   }
2654   private ZooKeeperWatcher zooKeeperWatcher;
2655 
2656 
2657 
2658   /**
2659    * Closes the named region.
2660    *
2661    * @param regionName  The region to close.
2662    * @throws IOException
2663    */
2664   public void closeRegion(String regionName) throws IOException {
2665     closeRegion(Bytes.toBytes(regionName));
2666   }
2667 
2668   /**
2669    * Closes the named region.
2670    *
2671    * @param regionName  The region to close.
2672    * @throws IOException
2673    */
2674   public void closeRegion(byte[] regionName) throws IOException {
2675     getHBaseAdmin().closeRegion(regionName, null);
2676   }
2677 
2678   /**
2679    * Closes the region containing the given row.
2680    *
2681    * @param row  The row to find the containing region.
2682    * @param table  The table to find the region.
2683    * @throws IOException
2684    */
2685   public void closeRegionByRow(String row, RegionLocator table) throws IOException {
2686     closeRegionByRow(Bytes.toBytes(row), table);
2687   }
2688 
2689   /**
2690    * Closes the region containing the given row.
2691    *
2692    * @param row  The row to find the containing region.
2693    * @param table  The table to find the region.
2694    * @throws IOException
2695    */
2696   public void closeRegionByRow(byte[] row, RegionLocator table) throws IOException {
2697     HRegionLocation hrl = table.getRegionLocation(row);
2698     closeRegion(hrl.getRegionInfo().getRegionName());
2699   }
2700 
2701   /**
2702    * Retrieves a splittable region randomly from tableName
2703    *
2704    * @param tableName name of table
2705    * @param maxAttempts maximum number of attempts, unlimited for value of -1
2706    * @return the HRegion chosen, null if none was found within limit of maxAttempts
2707    */
2708   public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
2709     List<HRegion> regions = getHBaseCluster().getRegions(tableName);
2710     int regCount = regions.size();
2711     Set<Integer> attempted = new HashSet<Integer>();
2712     int idx;
2713     int attempts = 0;
2714     do {
2715       regions = getHBaseCluster().getRegions(tableName);
2716       if (regCount != regions.size()) {
2717         // if there was region movement, clear attempted Set
2718         attempted.clear();
2719       }
2720       regCount = regions.size();
2721       // There are chances that before we get the region for the table from an RS the region may
2722       // be going for CLOSE.  This may be because online schema change is enabled
2723       if (regCount > 0) {
2724         idx = random.nextInt(regCount);
2725         // if we have just tried this region, there is no need to try again
2726         if (attempted.contains(idx))
2727           continue;
2728         try {
2729           regions.get(idx).checkSplit();
2730           return regions.get(idx);
2731         } catch (Exception ex) {
2732           LOG.warn("Caught exception", ex);
2733           attempted.add(idx);
2734         }
2735       }
2736       attempts++;
2737     } while (maxAttempts == -1 || attempts < maxAttempts);
2738     return null;
2739   }
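
       /**
        * A minimal usage sketch (editor's addition): pick a splittable region at random
        * and ask the master to split it. The attempt limit is illustrative only.
        */
       private void exampleSplitRandomRegion(TableName tableName) throws IOException {
         HRegion region = getSplittableRegion(tableName, 10); // up to 10 attempts
         if (region != null) {
           getHBaseAdmin().splitRegion(region.getRegionInfo().getRegionName());
         }
       }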
2740 
2741   public MiniZooKeeperCluster getZkCluster() {
2742     return zkCluster;
2743   }
2744 
2745   public void setZkCluster(MiniZooKeeperCluster zkCluster) {
2746     this.passedZkCluster = true;
2747     this.zkCluster = zkCluster;
2748     conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
2749   }
2750 
2751   public MiniDFSCluster getDFSCluster() {
2752     return dfsCluster;
2753   }
2754 
2755   public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
2756     if (dfsCluster != null && dfsCluster.isClusterUp()) {
2757       throw new IOException("DFSCluster is already running! Shut it down first.");
2758     }
2759     this.dfsCluster = cluster;
2760   }
2761 
2762   public FileSystem getTestFileSystem() throws IOException {
2763     return HFileSystem.get(conf);
2764   }
2765 
2766   /**
2767    * Wait until all regions in a table have been assigned.  Waits up to the default
2768    * timeout (30 seconds) before giving up.
2769    * @param table Table to wait on.
2770    * @throws InterruptedException
2771    * @throws IOException
2772    */
2773   public void waitTableAvailable(TableName table)
2774       throws InterruptedException, IOException {
2775     waitTableAvailable(getHBaseAdmin(), table.getName(), 30000);
2776   }
2777 
2778   public void waitTableAvailable(Admin admin, byte[] table)
2779       throws InterruptedException, IOException {
2780     waitTableAvailable(admin, table, 30000);
2781   }
2782 
2783   /**
2784    * Wait until all regions in a table have been assigned
2785    * @param table Table to wait on.
2786    * @param timeoutMillis Timeout.
2787    * @throws InterruptedException
2788    * @throws IOException
2789    */
2790   public void waitTableAvailable(byte[] table, long timeoutMillis)
2791   throws InterruptedException, IOException {
2792     waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
2793   }
2794 
2795   public void waitTableAvailable(Admin admin, byte[] table, long timeoutMillis)
2796   throws InterruptedException, IOException {
2797     long startWait = System.currentTimeMillis();
2798     while (!admin.isTableAvailable(TableName.valueOf(table))) {
2799       assertTrue("Timed out waiting for table to become available " +
2800         Bytes.toStringBinary(table),
2801         System.currentTimeMillis() - startWait < timeoutMillis);
2802       Thread.sleep(200);
2803     }
2804   }
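
       // Usage sketch: a test typically creates a table and then blocks until all
       // of its regions are assigned.  The table and family names are hypothetical,
       // and createTable(TableName, byte[]) is assumed from earlier in this class.
       private void exampleWaitTableAvailable() throws Exception {
         TableName tn = TableName.valueOf("exampleTable"); // hypothetical table
         createTable(tn, Bytes.toBytes("cf"));             // hypothetical family
         waitTableAvailable(tn);                           // polls every 200ms, up to 30s
       }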
2805 
2806   /**
2807    * Waits for a table to be 'enabled'.  Enabled means the table is marked as 'enabled' and
2808    * all of its regions have been assigned.  Will time out after the default period (30 seconds).
2809    * @see #waitTableAvailable(byte[])
2810    * @param table Table to wait on.
2812    * @throws InterruptedException
2813    * @throws IOException
2814    */
2815   public void waitTableEnabled(TableName table)
2816       throws InterruptedException, IOException {
2817     waitTableEnabled(getHBaseAdmin(), table.getName(), 30000);
2818   }
2819 
2820   public void waitTableEnabled(Admin admin, byte[] table)
2821       throws InterruptedException, IOException {
2822     waitTableEnabled(admin, table, 30000);
2823   }
2824 
2825   /**
2826    * Waits for a table to be 'enabled'.  Enabled means the table is marked as 'enabled' and
2827    * all of its regions have been assigned.
2828    * @see #waitTableAvailable(byte[])
2829    * @param table Table to wait on.
2830    * @param timeoutMillis Time to wait on it being marked enabled.
2831    * @throws InterruptedException
2832    * @throws IOException
2833    */
2834   public void waitTableEnabled(byte[] table, long timeoutMillis)
2835   throws InterruptedException, IOException {
2836     waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
2837   }
2838 
2839   public void waitTableEnabled(Admin admin, byte[] table, long timeoutMillis)
2840   throws InterruptedException, IOException {
2841     TableName tableName = TableName.valueOf(table);
2842     long startWait = System.currentTimeMillis();
2843     waitTableAvailable(admin, table, timeoutMillis);
2844     while (!admin.isTableEnabled(tableName)) {
2845       assertTrue("Timed out waiting for table to become available and enabled " +
2846          Bytes.toStringBinary(table),
2847          System.currentTimeMillis() - startWait < timeoutMillis);
2848       Thread.sleep(200);
2849     }
2850     // Finally make sure all regions are fully open and online out on the cluster. Regions may be
2851     // in the hbase:meta table and almost open on all regionservers, but marking the region
2852     // online in the regionserver is the very last thing done and can take a little while to happen.
2853     // Below we do a get.  The get will retry on a NotServingRegionException or a
2854     // RegionOpeningException.  It is crude, but when it completes all regions will be online.
2855     try {
2856       Canary.sniff(admin, tableName);
2857     } catch (Exception e) {
2858       throw new IOException(e);
2859     }
2860   }
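
       // Usage sketch: after enabling a table through the admin, block until the
       // enabled state is visible and, via the Canary.sniff() get described above,
       // all regions are actually online.  The table name is hypothetical.
       private void exampleWaitTableEnabled() throws Exception {
         TableName tn = TableName.valueOf("exampleTable");
         getHBaseAdmin().enableTable(tn); // assumes the table exists and is disabled
         waitTableEnabled(tn);            // waits for available + enabled + online
       }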
2861 
2862   /**
2863    * Waits for a table to be 'disabled'.  Disabled means the table is marked as 'disabled'.
2864    * Will time out after the default period (30 seconds).
2865    * @param table Table to wait on.
2866    * @throws InterruptedException
2867    * @throws IOException
2868    */
2869   public void waitTableDisabled(byte[] table)
2870       throws InterruptedException, IOException {
2871     waitTableDisabled(getHBaseAdmin(), table, 30000);
2872   }
2873 
2874   public void waitTableDisabled(Admin admin, byte[] table)
2875       throws InterruptedException, IOException {
2876     waitTableDisabled(admin, table, 30000);
2877   }
2878 
2879   /**
2880    * Waits for a table to be 'disabled'.  Disabled means the table is marked as 'disabled'.
2881    * @param table Table to wait on.
2882    * @param timeoutMillis Time to wait on it being marked disabled.
2883    * @throws InterruptedException
2884    * @throws IOException
2885    */
2886   public void waitTableDisabled(byte[] table, long timeoutMillis)
2887       throws InterruptedException, IOException {
2888     waitTableDisabled(getHBaseAdmin(), table, timeoutMillis);
2889   }
2890 
2891   public void waitTableDisabled(Admin admin, byte[] table, long timeoutMillis)
2892       throws InterruptedException, IOException {
2893     TableName tableName = TableName.valueOf(table);
2894     long startWait = System.currentTimeMillis();
2895     while (!admin.isTableDisabled(tableName)) {
2896       assertTrue("Timed out waiting for table to become disabled " +
2897               Bytes.toStringBinary(table),
2898           System.currentTimeMillis() - startWait < timeoutMillis);
2899       Thread.sleep(200);
2900     }
2901   }
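
       // Usage sketch mirroring the enable case: disable through the admin, then
       // block until the disabled state is visible.  The table name is hypothetical.
       private void exampleWaitTableDisabled() throws Exception {
         TableName tn = TableName.valueOf("exampleTable");
         getHBaseAdmin().disableTable(tn);       // assumes the table exists and is enabled
         waitTableDisabled(tn.getName(), 30000); // byte[] overload, explicit timeout
       }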
2902 
2903   /**
2904    * Make sure that at least the specified number of region servers
2905    * are running
2906    * @param num minimum number of region servers that should be running
2907    * @return true if we started some servers
2908    * @throws IOException
2909    */
2910   public boolean ensureSomeRegionServersAvailable(final int num)
2911       throws IOException {
2912     boolean startedServer = false;
2913     MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
2914     for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
2915       LOG.info("Started new server=" + hbaseCluster.startRegionServer());
2916       startedServer = true;
2917     }
2918 
2919     return startedServer;
2920   }
2921 
2922 
2923   /**
2924    * Make sure that at least the specified number of region servers
2925    * are running. We don't count the ones that are currently stopping or are
2926    * stopped.
2927    * @param num minimum number of region servers that should be running
2928    * @return true if we started some servers
2929    * @throws IOException
2930    */
2931   public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
2932     throws IOException {
2933     boolean startedServer = ensureSomeRegionServersAvailable(num);
2934 
2935     int nonStoppedServers = 0;
2936     for (JVMClusterUtil.RegionServerThread rst :
2937       getMiniHBaseCluster().getRegionServerThreads()) {
2938 
2939       HRegionServer hrs = rst.getRegionServer();
2940       if (hrs.isStopping() || hrs.isStopped()) {
2941         LOG.info("A region server is stopped or stopping: " + hrs);
2942       } else {
2943         nonStoppedServers++;
2944       }
2945     }
2946     for (int i=nonStoppedServers; i<num; ++i) {
2947       LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
2948       startedServer = true;
2949     }
2950     return startedServer;
2951   }
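
       // Usage sketch: guarantee a minimum of live, non-stopping regionservers
       // before a balancer or assignment test; extra servers are started on demand
       // and the return value says whether any were.
       private void exampleEnsureRegionServers() throws IOException {
         if (ensureSomeNonStoppedRegionServersAvailable(3)) {
           LOG.info("Had to start additional regionservers to reach three");
         }
       }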
2952 
2953 
2954   /**
2955    * This method clones the passed <code>c</code> configuration, setting a new
2956    * user into the clone.  Use it when getting new instances of FileSystem.  Only
2957    * works for DistributedFileSystem.
2958    * @param c Initial configuration
2959    * @param differentiatingSuffix Suffix to differentiate this user from others.
2960    * @return A new configuration instance with a different user set into it.
2961    * @throws IOException
2962    */
2963   public static User getDifferentUser(final Configuration c,
2964     final String differentiatingSuffix)
2965   throws IOException {
2966     FileSystem currentfs = FileSystem.get(c);
2967     if (!(currentfs instanceof DistributedFileSystem)) {
2968       return User.getCurrent();
2969     }
2970     // Else distributed filesystem.  Make a new instance per daemon.  Below
2971     // code is taken from the AppendTestUtil over in hdfs.
2972     String username = User.getCurrent().getName() +
2973       differentiatingSuffix;
2974     User user = User.createUserForTesting(c, username,
2975         new String[]{"supergroup"});
2976     return user;
2977   }
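
       // Usage sketch: give each simulated daemon its own User so that, on
       // DistributedFileSystem, each gets its own DFSClient.  The suffix is
       // arbitrary and the action body is a stand-in.
       private void exampleGetDifferentUser() throws Exception {
         User user = getDifferentUser(getConfiguration(), ".hypothetical-daemon");
         user.runAs(new java.security.PrivilegedExceptionAction<FileSystem>() {
           @Override
           public FileSystem run() throws Exception {
             return FileSystem.get(getConfiguration()); // per-user instance
           }
         });
       }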
2978 
2979   public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster)
2980       throws IOException {
2981     NavigableSet<String> online = new TreeSet<String>();
2982     for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
2983       try {
2984         for (HRegionInfo region :
2985             ProtobufUtil.getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
2986           online.add(region.getRegionNameAsString());
2987         }
2988       } catch (RegionServerStoppedException e) {
2989         // That's fine.
2990       }
2991     }
2992     for (MasterThread mt : cluster.getLiveMasterThreads()) {
2993       try {
2994         for (HRegionInfo region :
2995             ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) {
2996           online.add(region.getRegionNameAsString());
2997         }
2998       } catch (RegionServerStoppedException e) {
2999         // That's fine.
3000       } catch (ServerNotRunningYetException e) {
3001         // That's fine.
3002       }
3003     }
3004     return online;
3005   }
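
       // Usage sketch: snapshot the online region names across all live
       // regionservers and masters, e.g. to assert nothing moved during a test.
       private void exampleGetAllOnlineRegions() throws IOException {
         NavigableSet<String> before = getAllOnlineRegions(getMiniHBaseCluster());
         // ... perform the operation under test, then take and compare a second snapshot
         LOG.info("Online regions: " + before.size());
       }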
3006 
3007   /**
3008    * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append it is hard-coded to 5 and
3009    * makes tests linger.  Here is the exception you'll see:
3010    * <pre>
3011    * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/wal.1276627923013 block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683 failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
3012    * </pre>
3013    * @param stream A DFSClient.DFSOutputStream.
3014    * @param max
3015    * @throws NoSuchFieldException
3016    * @throws SecurityException
3017    * @throws IllegalAccessException
3018    * @throws IllegalArgumentException
3019    */
3020   public static void setMaxRecoveryErrorCount(final OutputStream stream,
3021       final int max) {
3022     try {
3023       Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
3024       for (Class<?> clazz: clazzes) {
3025         String className = clazz.getSimpleName();
3026         if (className.equals("DFSOutputStream")) {
3027           if (clazz.isInstance(stream)) {
3028             Field maxRecoveryErrorCountField =
3029               stream.getClass().getDeclaredField("maxRecoveryErrorCount");
3030             maxRecoveryErrorCountField.setAccessible(true);
3031             maxRecoveryErrorCountField.setInt(stream, max);
3032             break;
3033           }
3034         }
3035       }
3036     } catch (Exception e) {
3037       LOG.info("Could not set max recovery field", e);
3038     }
3039   }
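
       // Usage sketch: loosen the recovery limit on a WAL output stream in a test
       // that kills datanodes, so pipeline recovery does not give up early.  The
       // call is a silent no-op for streams that are not a DFSOutputStream.
       private static void exampleSetMaxRecoveryErrorCount(OutputStream walStream) {
         setMaxRecoveryErrorCount(walStream, 1000);
       }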
3040 
3041   /**
3042    * Wait until all regions for a table in hbase:meta have a non-empty
3043    * info:server, up to 60 seconds. This means all regions have been deployed,
3044    * the master has been informed, and hbase:meta has been updated with each
3045    * region's deployed server.
3046    * @param tableName the table name
3047    * @throws IOException
3048    */
3049   public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
3050     waitUntilAllRegionsAssigned(tableName, 60000);
3051   }
3052 
3053   /**
3054    * Wait until all regions for a table in hbase:meta have a non-empty
3055    * info:server, or until timeout.  This means all regions have been deployed,
3056    * the master has been informed, and hbase:meta has been updated with each
3057    * region's deployed server.
3058    * @param tableName the table name
3059    * @param timeout timeout, in milliseconds
3060    * @throws IOException
3061    */
3062   public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
3063       throws IOException {
3064     final Table meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
3065     try {
3066       waitFor(timeout, 200, true, new Predicate<IOException>() {
3067         @Override
3068         public boolean evaluate() throws IOException {
3069           boolean allRegionsAssigned = true;
3070           Scan scan = new Scan();
3071           scan.addFamily(HConstants.CATALOG_FAMILY);
3072           ResultScanner s = meta.getScanner(scan);
3073           try {
3074             Result r;
3075             while ((r = s.next()) != null) {
3076               byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
3077               HRegionInfo info = HRegionInfo.parseFromOrNull(b);
3078               if (info != null && info.getTable().equals(tableName)) {
3079                 b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
3080                 allRegionsAssigned &= (b != null);
3081               }
3082             }
3083           } finally {
3084             s.close();
3085           }
3086           return allRegionsAssigned;
3087         }
3088       });
3089     } finally {
3090       meta.close();
3091     }
3092 
3093     // check from the master state if we are using a mini cluster
3094     if (!getHBaseClusterInterface().isDistributedCluster()) {
3095       // So, all regions are in the meta table but make sure master knows of the assignments before
3096       // returning -- sometimes this can lag.
3097       HMaster master = getHBaseCluster().getMaster();
3098       final RegionStates states = master.getAssignmentManager().getRegionStates();
3099       waitFor(timeout, 200, new Predicate<IOException>() {
3100         @Override
3101         public boolean evaluate() throws IOException {
3102           List<HRegionInfo> hris = states.getRegionsOfTable(tableName);
3103           return hris != null && !hris.isEmpty();
3104         }
3105       });
3106     }
3107   }
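
       // Usage sketch: block until hbase:meta shows a server for every region of
       // the (hypothetical) table and, on a mini cluster, until the master's
       // in-memory region states agree.
       private void exampleWaitUntilAssigned() throws IOException {
         waitUntilAllRegionsAssigned(TableName.valueOf("exampleTable")); // 60s default
       }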
3108 
3109   /**
3110    * Do a small get/scan against one store. This is required because a store
3111    * has no methods of its own for querying itself, and relies on StoreScanner.
3112    */
3113   public static List<Cell> getFromStoreFile(HStore store,
3114                                                 Get get) throws IOException {
3115     Scan scan = new Scan(get);
3116     InternalScanner scanner = (InternalScanner) store.getScanner(scan,
3117         scan.getFamilyMap().get(store.getFamily().getName()),
3118         // originally MultiVersionConsistencyControl.resetThreadReadPoint() was called to set
3119         // readpoint 0.
3120         0);
3121 
3122     List<Cell> result = new ArrayList<Cell>();
3123     scanner.next(result);
3124     if (!result.isEmpty()) {
3125       // verify that we are on the row we want:
3126       Cell kv = result.get(0);
3127       if (!CellUtil.matchingRow(kv, get.getRow())) {
3128         result.clear();
3129       }
3130     }
3131     scanner.close();
3132     return result;
3133   }
3134 
3135   /**
3136    * Create region split keys between startKey and endKey
3137    *
3138    * @param startKey the first key of the key range
3139    * @param endKey the last key of the key range
3140    * @param numRegions the number of regions to be created. It has to be greater than 3.
3141    * @return the region start keys; the first entry is the empty byte array
3142    */
3143   public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
3144     assertTrue("numRegions must be greater than 3", numRegions > 3);
3145     byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
3146     byte [][] result = new byte[tmpSplitKeys.length+1][];
3147     System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
3148     result[0] = HConstants.EMPTY_BYTE_ARRAY;
3149     return result;
3150   }
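
       // Worked example: for numRegions = 5, Bytes.split(startKey, endKey, 2)
       // yields 4 keys (both endpoints plus 2 intermediates), and prepending the
       // empty byte array gives 5 region start keys.  The keys are arbitrary.
       private void exampleRegionSplitStartKeys() {
         byte[][] startKeys =
             getRegionSplitStartKeys(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 5);
         assertTrue(startKeys.length == 5);
         assertTrue(startKeys[0].length == 0); // first region starts at the empty key
       }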
3151 
3152   /**
3153    * Do a small get/scan against one store. This is required because a store
3154    * has no methods of its own for querying itself, and relies on StoreScanner.
3155    */
3156   public static List<Cell> getFromStoreFile(HStore store,
3157                                                 byte [] row,
3158                                                 NavigableSet<byte[]> columns
3159                                                 ) throws IOException {
3160     Get get = new Get(row);
3161     Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
3162     s.put(store.getFamily().getName(), columns);
3163 
3164     return getFromStoreFile(store,get);
3165   }
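
       // Usage sketch: query one column of one store directly.  The row, family
       // and qualifier values are hypothetical; the store is assumed to come from
       // a test region created elsewhere.
       private static List<Cell> exampleGetFromStoreFile(HStore store) throws IOException {
         NavigableSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
         columns.add(Bytes.toBytes("qual"));
         return getFromStoreFile(store, Bytes.toBytes("row1"), columns);
       }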
3166 
3167   /**
3168    * Gets a ZooKeeperWatcher.
3169    * @param TEST_UTIL the test util whose configuration is used for the connection
3170    */
3171   public static ZooKeeperWatcher getZooKeeperWatcher(
3172       HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
3173       IOException {
3174     ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
3175         "unittest", new Abortable() {
3176           boolean aborted = false;
3177 
3178           @Override
3179           public void abort(String why, Throwable e) {
3180             aborted = true;
3181             throw new RuntimeException("Fatal ZK error, why=" + why, e);
3182           }
3183 
3184           @Override
3185           public boolean isAborted() {
3186             return aborted;
3187           }
3188         });
3189     return zkw;
3190   }
3191 
3192   /**
3193    * Creates a znode with OPENED state.
3194    * @param TEST_UTIL the test util providing the ZooKeeper connection
3195    * @param region the region whose znode is transitioned to OPENED
3196    * @param serverName the server the region is assigned to
3197    * @return the ZooKeeperWatcher used to create and transition the znode
3198    * @throws IOException
3199    * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
3200    * @throws KeeperException
3201    * @throws NodeExistsException
3202    */
3203   public static ZooKeeperWatcher createAndForceNodeToOpenedState(
3204       HBaseTestingUtility TEST_UTIL, HRegion region,
3205       ServerName serverName) throws ZooKeeperConnectionException,
3206       IOException, KeeperException, NodeExistsException {
3207     ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
3208     ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
3209     int version = ZKAssign.transitionNodeOpening(zkw, region
3210         .getRegionInfo(), serverName);
3211     ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
3212         version);
3213     return zkw;
3214   }
3215 
3216   public static void assertKVListsEqual(String additionalMsg,
3217       final List<? extends Cell> expected,
3218       final List<? extends Cell> actual) {
3219     final int eLen = expected.size();
3220     final int aLen = actual.size();
3221     final int minLen = Math.min(eLen, aLen);
3222 
3223     int i;
3224     for (i = 0; i < minLen
3225         && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
3226         ++i) {}
3227 
3228     if (additionalMsg == null) {
3229       additionalMsg = "";
3230     }
3231     if (!additionalMsg.isEmpty()) {
3232       additionalMsg = ". " + additionalMsg;
3233     }
3234 
3235     if (eLen != aLen || i != minLen) {
3236       throw new AssertionError(
3237           "Expected and actual KV arrays differ at position " + i + ": " +
3238           safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
3239           safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
3240     }
3241   }
3242 
3243   public static <T> String safeGetAsStr(List<T> lst, int i) {
3244     if (0 <= i && i < lst.size()) {
3245       return lst.get(i).toString();
3246     } else {
3247       return "<out_of_range>";
3248     }
3249   }
3250 
3251   public String getClusterKey() {
3252     return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
3253         + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
3254         + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
3255             HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
3256   }
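
       // Example of the returned format: for a quorum of "host1,host2", client
       // port 2181 and the default parent znode, the key is "host1,host2:2181:/hbase",
       // the host:port:znode form accepted by cluster-key parsers.
       private void exampleClusterKey() {
         LOG.info("cluster key=" + getClusterKey());
       }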
3257 
3258   /** Creates a random table with the given parameters */
3259   public HTable createRandomTable(String tableName,
3260       final Collection<String> families,
3261       final int maxVersions,
3262       final int numColsPerRow,
3263       final int numFlushes,
3264       final int numRegions,
3265       final int numRowsPerFlush)
3266       throws IOException, InterruptedException {
3267 
3268     LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
3269         " regions, " + numFlushes + " storefiles per region, " +
3270         numRowsPerFlush + " rows per flush, maxVersions=" +  maxVersions +
3271         "\n");
3272 
3273     final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
3274     final int numCF = families.size();
3275     final byte[][] cfBytes = new byte[numCF][];
3276     {
3277       int cfIndex = 0;
3278       for (String cf : families) {
3279         cfBytes[cfIndex++] = Bytes.toBytes(cf);
3280       }
3281     }
3282 
3283     final int actualStartKey = 0;
3284     final int actualEndKey = Integer.MAX_VALUE;
3285     final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
3286     final int splitStartKey = actualStartKey + keysPerRegion;
3287     final int splitEndKey = actualEndKey - keysPerRegion;
3288     final String keyFormat = "%08x";
3289     final HTable table = createTable(tableName, cfBytes,
3290         maxVersions,
3291         Bytes.toBytes(String.format(keyFormat, splitStartKey)),
3292         Bytes.toBytes(String.format(keyFormat, splitEndKey)),
3293         numRegions);
3294 
3295     if (hbaseCluster != null) {
3296       getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
3297     }
3298 
3299     for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
3300       for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
3301         final byte[] row = Bytes.toBytes(String.format(keyFormat,
3302             actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
3303 
3304         Put put = new Put(row);
3305         Delete del = new Delete(row);
3306         for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
3307           final byte[] cf = cfBytes[rand.nextInt(numCF)];
3308           final long ts = rand.nextInt();
3309           final byte[] qual = Bytes.toBytes("col" + iCol);
3310           if (rand.nextBoolean()) {
3311             final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
3312                 "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
3313                 ts + "_random_" + rand.nextLong());
3314             put.add(cf, qual, ts, value);
3315           } else if (rand.nextDouble() < 0.8) {
3316             del.deleteColumn(cf, qual, ts);
3317           } else {
3318             del.deleteColumns(cf, qual, ts);
3319           }
3320         }
3321 
3322         if (!put.isEmpty()) {
3323           table.put(put);
3324         }
3325 
3326         if (!del.isEmpty()) {
3327           table.delete(del);
3328         }
3329       }
3330       LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
3331       table.flushCommits();
3332       if (hbaseCluster != null) {
3333         getMiniHBaseCluster().flushcache(table.getName());
3334       }
3335     }
3336 
3337     return table;
3338   }
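
       // Usage sketch: build a randomly populated table for compaction or scanner
       // tests.  All names and sizes below are hypothetical.
       private void exampleCreateRandomTable() throws IOException, InterruptedException {
         HTable t = createRandomTable("randomTable", Arrays.asList("cf1", "cf2"),
             3,    // maxVersions
             10,   // numColsPerRow
             3,    // numFlushes (storefiles per region)
             5,    // numRegions
             100); // numRowsPerFlush
         t.close();
       }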
3339 
3340   private static final int MIN_RANDOM_PORT = 0xc000;
3341   private static final int MAX_RANDOM_PORT = 0xfffe;
3342   private static Random random = new Random();
3343 
3344   /**
3345    * Returns a random port. These ports cannot be registered with IANA and are
3346    * intended for dynamic allocation (see http://bit.ly/dynports).
3347    */
3348   public static int randomPort() {
3349     return MIN_RANDOM_PORT
3350         + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
3351   }
3352 
3353   /**
3354    * Returns a random free port and marks that port as taken. Not thread-safe. Expected to be
3355    * called from single-threaded test setup code.
3356    */
3357   public static int randomFreePort() {
3358     int port = 0;
3359     do {
3360       port = randomPort();
3361       if (takenRandomPorts.contains(port)) {
3362         continue;
3363       }
3364       takenRandomPorts.add(port);
3365 
3366       try {
3367         ServerSocket sock = new ServerSocket(port);
3368         sock.close();
3369       } catch (IOException ex) {
3370         port = 0;
3371       }
3372     } while (port == 0);
3373     return port;
3374   }
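
       // Usage sketch: reserve distinct ports (say, RPC and info ports) before
       // starting daemons; randomFreePort() remembers what it already handed out.
       private static void examplePickPorts() {
         int rpcPort = randomFreePort();
         int infoPort = randomFreePort();
         assertTrue(rpcPort != infoPort);
       }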
3375 
3376 
3377   public static String randomMultiCastAddress() {
3378     return "226.1.1." + random.nextInt(254);
3379   }
3380 
3381 
3382 
3383   public static void waitForHostPort(String host, int port)
3384       throws IOException {
3385     final int maxTimeMs = 10000;
3386     final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3387     IOException savedException = null;
3388     LOG.info("Waiting for server at " + host + ":" + port);
3389     for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3390       try {
3391         Socket sock = new Socket(InetAddress.getByName(host), port);
3392         sock.close();
3393         savedException = null;
3394         LOG.info("Server at " + host + ":" + port + " is available");
3395         break;
3396       } catch (UnknownHostException e) {
3397         throw new IOException("Failed to look up " + host, e);
3398       } catch (IOException e) {
3399         savedException = e;
3400       }
3401       Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3402     }
3403 
3404     if (savedException != null) {
3405       throw savedException;
3406     }
3407   }
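
       // Usage sketch: block until a freshly started daemon accepts TCP
       // connections, up to 10 seconds, retrying every SOCKET_RETRY_WAIT_MS.
       // The host and port are hypothetical.
       private static void exampleWaitForHostPort() throws IOException {
         waitForHostPort("127.0.0.1", 2181);
       }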
3408 
3409   /**
3410    * Creates a pre-split table for load testing. If the table already exists,
3411    * logs a warning and continues.
3412    * @return the number of regions the table was split into
3413    */
3414   public static int createPreSplitLoadTestTable(Configuration conf,
3415       TableName tableName, byte[] columnFamily, Algorithm compression,
3416       DataBlockEncoding dataBlockEncoding) throws IOException {
3417     return createPreSplitLoadTestTable(conf, tableName,
3418       columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1,
3419       Durability.USE_DEFAULT);
3420   }
3421   /**
3422    * Creates a pre-split table for load testing. If the table already exists,
3423    * logs a warning and continues.
3424    * @return the number of regions the table was split into
3425    */
3426   public static int createPreSplitLoadTestTable(Configuration conf,
3427       TableName tableName, byte[] columnFamily, Algorithm compression,
3428       DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
3429       Durability durability)
3430           throws IOException {
3431     HTableDescriptor desc = new HTableDescriptor(tableName);
3432     desc.setDurability(durability);
3433     desc.setRegionReplication(regionReplication);
3434     HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
3435     hcd.setDataBlockEncoding(dataBlockEncoding);
3436     hcd.setCompressionType(compression);
3437     return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
3438   }
3439 
3440   /**
3441    * Creates a pre-split table for load testing. If the table already exists,
3442    * logs a warning and continues.
3443    * @return the number of regions the table was split into
3444    */
3445   public static int createPreSplitLoadTestTable(Configuration conf,
3446       HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
3447     return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
3448   }
3449 
3450   /**
3451    * Creates a pre-split table for load testing. If the table already exists,
3452    * logs a warning and continues.
3453    * @return the number of regions the table was split into
3454    */
3455   public static int createPreSplitLoadTestTable(Configuration conf,
3456       HTableDescriptor desc, HColumnDescriptor hcd, int numRegionsPerServer) throws IOException {
3457     if (!desc.hasFamily(hcd.getName())) {
3458       desc.addFamily(hcd);
3459     }
3460 
3461     int totalNumberOfRegions = 0;
3462     Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
3463     Admin admin = unmanagedConnection.getAdmin();
3464 
3465     try {
3466       // Create a table with pre-split regions.
3467       // The number of splits is set as:
3468       //    (region servers * regions per region server).
3469       int numberOfServers = admin.getClusterStatus().getServers().size();
3470       if (numberOfServers == 0) {
3471         throw new IllegalStateException("No live regionservers");
3472       }
3473 
3474       totalNumberOfRegions = numberOfServers * numRegionsPerServer;
3475       LOG.info("Number of live regionservers: " + numberOfServers + ", " +
3476           "pre-splitting table into " + totalNumberOfRegions + " regions " +
3477           "(regions per server: " + numRegionsPerServer + ")");
3478 
3479       byte[][] splits = new RegionSplitter.HexStringSplit().split(
3480           totalNumberOfRegions);
3481 
3482       admin.createTable(desc, splits);
3483     } catch (MasterNotRunningException e) {
3484       LOG.error("Master not running", e);
3485       throw new IOException(e);
3486     } catch (TableExistsException e) {
3487       LOG.warn("Table " + desc.getTableName() +
3488           " already exists, continuing");
3489     } finally {
3490       admin.close();
3491       unmanagedConnection.close();
3492     }
3493     return totalNumberOfRegions;
3494   }
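
       // Usage sketch: pre-split a (hypothetical) load-test table with plain
       // storage settings; the return value is the number of regions created.
       private static void exampleCreatePreSplitTable(Configuration conf) throws IOException {
         int regions = createPreSplitLoadTestTable(conf,
             TableName.valueOf("loadTestTable"), Bytes.toBytes("cf"),
             Compression.Algorithm.NONE, DataBlockEncoding.NONE);
         LOG.info("Pre-split into " + regions + " regions");
       }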
3495 
3496   public static int getMetaRSPort(Configuration conf) throws IOException {
3497     RegionLocator table = new HTable(conf, TableName.META_TABLE_NAME);
3498     HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
3499     table.close();
3500     return hloc.getPort();
3501   }
3502 
3503   /**
3504    *  Due to an async racing issue, a region may not yet be in
3505    *  a region server's online region list even after
3506    *  the assignment znode is deleted and the new assignment
3507    *  is recorded in the master, so poll until the region shows up or we time out.
3508    */
3509   public void assertRegionOnServer(
3510       final HRegionInfo hri, final ServerName server,
3511       final long timeout) throws IOException, InterruptedException {
3512     long timeoutTime = System.currentTimeMillis() + timeout;
3513     while (true) {
3514       List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3515       if (regions.contains(hri)) return;
3516       long now = System.currentTimeMillis();
3517       if (now > timeoutTime) break;
3518       Thread.sleep(10);
3519     }
3520     fail("Could not find region " + hri.getRegionNameAsString()
3521       + " on server " + server);
3522   }
3523 
3524   /**
3525    * Check to make sure the region is open on the specified
3526    * region server, but not on any other one.
3527    */
3528   public void assertRegionOnlyOnServer(
3529       final HRegionInfo hri, final ServerName server,
3530       final long timeout) throws IOException, InterruptedException {
3531     long timeoutTime = System.currentTimeMillis() + timeout;
3532     while (true) {
3533       List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3534       if (regions.contains(hri)) {
3535         List<JVMClusterUtil.RegionServerThread> rsThreads =
3536           getHBaseCluster().getLiveRegionServerThreads();
3537         for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
3538           HRegionServer rs = rsThread.getRegionServer();
3539           if (server.equals(rs.getServerName())) {
3540             continue;
3541           }
3542           Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
3543           for (HRegion r: hrs) {
3544             assertTrue("Region should not be double assigned",
3545               r.getRegionId() != hri.getRegionId());
3546           }
3547         }
3548         return; // good, we are happy
3549       }
3550       long now = System.currentTimeMillis();
3551       if (now > timeoutTime) break;
3552       Thread.sleep(10);
3553     }
3554     fail("Could not find region " + hri.getRegionNameAsString()
3555       + " on server " + server);
3556   }
3557 
3558   public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
3559       throws IOException {
3560     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
3561     htd.addFamily(hcd);
3562     HRegionInfo info =
3563         new HRegionInfo(TableName.valueOf(tableName), null, null, false);
3564     HRegion region =
3565         HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
3566     return region;
3567   }
3568 
3569   public void setFileSystemURI(String fsURI) {
3570     FS_URI = fsURI;
3571   }
3572 
3573   /**
3574    * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
3575    */
3576   public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
3577       throws E {
3578     return Waiter.waitFor(this.conf, timeout, predicate);
3579   }
3580 
3581   /**
3582    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
3583    */
3584   public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
3585       throws E {
3586     return Waiter.waitFor(this.conf, timeout, interval, predicate);
3587   }
3588 
3589   /**
3590    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
3591    */
3592   public <E extends Exception> long waitFor(long timeout, long interval,
3593       boolean failIfTimeout, Predicate<E> predicate) throws E {
3594     return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
3595   }
3596 
3597   /**
3598    * Returns a {@link Predicate} for checking that there are no regions in transition in master
3599    * Returns a {@link Predicate} for checking that there are no regions in transition in the master
3600   public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
3601     return new Waiter.Predicate<Exception>() {
3602       @Override
3603       public boolean evaluate() throws Exception {
3604         final RegionStates regionStates = getMiniHBaseCluster().getMaster()
3605             .getAssignmentManager().getRegionStates();
3606         return !regionStates.isRegionsInTransition();
3607       }
3608     };
3609   }
3610 
3611   /**
3612    * Returns a {@link Predicate} for checking that a table is enabled
3613    */
3614   public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
3615     return new Waiter.Predicate<Exception>() {
3616       @Override
3617       public boolean evaluate() throws Exception {
3618         return getHBaseAdmin().isTableEnabled(tableName);
3619       }
3620     };
3621   }
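
       // Usage sketch: combine the canned predicates with the waitFor wrappers
       // above.  The timeout and interval values are arbitrary.
       private void examplePredicateWait() throws Exception {
         waitFor(60000, predicateTableEnabled(TableName.valueOf("exampleTable")));
         waitFor(60000, 500, predicateNoRegionsInTransition());
       }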
3622 
3623   /**
3624    * Create a set of column descriptors with every combination of compression,
3625    * encoding, and bloom codecs available.
3626    * @return the list of column descriptors
3627    */
3628   public static List<HColumnDescriptor> generateColumnDescriptors() {
3629     return generateColumnDescriptors("");
3630   }
3631 
3632   /**
3633    * Create a set of column descriptors with every combination of compression,
3634    * encoding, and bloom codecs available.
3635    * @param prefix family names prefix
3636    * @return the list of column descriptors
3637    */
3638   public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
3639     List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
3640     long familyId = 0;
3641     for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
3642       for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
3643         for (BloomType bloomType: BloomType.values()) {
3644           String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
3645           HColumnDescriptor htd = new HColumnDescriptor(name);
3646           htd.setCompressionType(compressionType);
3647           htd.setDataBlockEncoding(encodingType);
3648           htd.setBloomFilterType(bloomType);
3649           htds.add(htd);
3650           familyId++;
3651         }
3652       }
3653     }
3654     return htds;
3655   }
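
       // Usage sketch: attach every compression/encoding/bloom combination to one
       // (hypothetical) table descriptor, e.g. for an HFile round-trip test.
       private static HTableDescriptor exampleAllCodecsTable() {
         HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("allCodecsTable"));
         for (HColumnDescriptor hcd : generateColumnDescriptors("f")) {
           htd.addFamily(hcd);
         }
         return htd;
       }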
3656 
3657   /**
3658    * Get supported compression algorithms.
3659    * @return supported compression algorithms.
3660    */
3661   public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
3662     String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
3663     List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
3664     for (String algoName : allAlgos) {
3665       try {
3666         Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
3667         algo.getCompressor();
3668         supportedAlgos.add(algo);
3669       } catch (Throwable t) {
3670         // this algo is not available
3671       }
3672     }
3673     return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
3674   }
3675 
3676   /**
3677    * Wait until no regions in transition.
3678    * @param timeout How long to wait.
3679    * @throws Exception
3680    */
3681   public void waitUntilNoRegionsInTransition(final long timeout) throws Exception {
3682     waitFor(timeout, predicateNoRegionsInTransition());
3683   }
3684 }