/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.tool.Canary;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;

/**
 * Facility for testing HBase. Replacement for
 * old HBaseTestCase and HBaseClusterTestCase functionality.
 * Create an instance and keep it around for the duration of your HBase
 * testing.  This class is meant to be your one-stop shop for anything you
 * might need while testing.  Manages one cluster at a time only. The managed
 * cluster can be an in-process {@link MiniHBaseCluster}, or a deployed
 * cluster of type {@link DistributedHBaseCluster}. Not all methods work with
 * the real cluster.
 * Depends on log4j being on the classpath and
 * hbase-site.xml for logging and test-run configuration.  It does not set
 * logging levels nor make changes to configuration parameters.
 * <p>To preserve test data directories, set the system property
 * "hbase.testing.preserve.testdir" to true.
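 * <p>For illustration, a minimal test might pair the start and shutdown calls
 * like the sketch below (the table name "test" and family "cf" are made up,
 * not part of this class):
 * <pre>{@code
 * HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 * TEST_UTIL.startMiniCluster();
 * try {
 *   Table t = TEST_UTIL.createTable(TableName.valueOf("test"), "cf");
 *   // ... exercise the cluster ...
 * } finally {
 *   TEST_UTIL.shutdownMiniCluster();
 * }
 * }</pre>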
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
@SuppressWarnings("deprecation")
public class HBaseTestingUtility extends HBaseCommonTestingUtility {
  private MiniZooKeeperCluster zkCluster = null;

  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
  /**
   * The default number of regions per regionserver when creating a pre-split
   * table.
   */
  public static final int DEFAULT_REGIONS_PER_SERVER = 5;

  /**
   * Set if we were passed a zkCluster.  If so, we won't shutdown zk as
   * part of general shutdown.
   */
  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private volatile HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** If there is a mini cluster running for this testing utility instance. */
  private volatile boolean miniClusterRunning;

  private String hadoopLogDir;

  /** Directory (a subdirectory of dataTestDir) used by the dfs cluster if any */
  private File clusterTestDir = null;

  /** Directory on test filesystem where we put the data for this instance of
   * HBaseTestingUtility */
  private Path dataTestDirOnTestFS = null;

  /**
   * Shared cluster connection.
   */
  private volatile Connection connection;

  /**
   * System property key to get test directory value.
   * Name is as it is because mini dfs has hard-codings to put test data here.
   * It should NOT be used directly in HBase, as it's a property used in
   * mini dfs.
   * @deprecated can be used only with mini dfs
   */
  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  /** Filesystem URI used for map-reduce mini-cluster setup */
  private static String FS_URI;

  /** A set of ports that have been claimed using {@link #randomFreePort()}. */
  private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();

  /** Compression algorithms to use in parameterized JUnit 4 tests */
  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  /** This is for unit tests parameterized with a single boolean. */
  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
      Arrays.asList(new Object[][] {
          { Boolean.FALSE },
          { Boolean.TRUE }
      });

  /** This is for unit tests parameterized with the two booleans memstoreTS and tags. */
  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();

  /** Compression algorithms to use in testing */
  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
      Compression.Algorithm.NONE, Compression.Algorithm.GZ
    };

  /**
   * Create all combinations of Bloom filters and compression algorithms for
   * testing.
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  /**
   * Create combination of memstoreTS and tags
   */
  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

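  // For illustration only: these parameter lists are designed to feed JUnit 4's
  // Parameterized runner. A hypothetical consumer (not part of this class)
  // might look like:
  //
  //   @RunWith(Parameterized.class)
  //   public class CompressionDependentTest {
  //     @Parameterized.Parameters
  //     public static Collection<Object[]> data() {
  //       return HBaseTestingUtility.BLOOM_AND_COMPRESSION_COMBINATIONS;
  //     }
  //
  //     public CompressionDependentTest(Compression.Algorithm algo, BloomType bloom) {
  //       // remember the parameters for use in @Test methods
  //     }
  //   }
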
  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    super(conf);

    // a hbase checksum verification failure will cause unit tests to fail
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }

  /**
   * Create an HBaseTestingUtility where all tmp files are written to the local test data dir.
   * It is needed to properly base FSUtil.getRootDirs so that they drop temp files in the proper
   * test dir.  Use this when you aren't using a Mini HDFS cluster.
   * @return HBaseTestingUtility that uses local fs for temp files.
   */
  public static HBaseTestingUtility createLocalHTU() {
    Configuration c = HBaseConfiguration.create();
    return createLocalHTU(c);
  }

  /**
   * Create an HBaseTestingUtility where all tmp files are written to the local test data dir.
   * It is needed to properly base FSUtil.getRootDirs so that they drop temp files in the proper
   * test dir.  Use this when you aren't using a Mini HDFS cluster.
   * @param c Configuration (will be modified)
   * @return HBaseTestingUtility that uses local fs for temp files.
   */
  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    HBaseTestingUtility htu = new HBaseTestingUtility(c);
    String dataTestDir = htu.getDataTestDir().toString();
    htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
    return htu;
  }
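
  // Example (a sketch, no mini cluster involved): tests that only need the
  // local filesystem can do
  //   HBaseTestingUtility htu = HBaseTestingUtility.createLocalHTU();
  //   Path rootDir = FSUtils.getRootDir(htu.getConfiguration());
  // and rootDir then resolves under the local test data dir.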

  /**
   * Returns this class's instance of {@link Configuration}.  Be careful how
   * you use the returned Configuration since {@link HConnection} instances
   * can be shared.  The Map of HConnections is keyed by the Configuration.  If
   * say, a Connection was being used against a cluster that had been shutdown,
   * see {@link #shutdownMiniCluster()}, then the Connection will no longer
   * be wholesome.  Rather than using the returned Configuration directly, it's
   * usually best to make a copy and use that.  Do
   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
   * @return Instance of Configuration.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  /**
   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
   * Give it a random name so we can have many concurrent tests running if
   * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}
   * System property, as that's what minidfscluster bases
   * its data dir on.  Modifying a System property is not the way to do concurrent
   * instances -- another instance could grab the temporary
   * value unintentionally -- but nothing can be done about it at the moment;
   * the minidfscluster only works as a single instance anyway.
   *
   * We also create the underlying directory for
   * hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values
   * in the conf, and as a system property for hadoop.tmp.dir
   *
   * @return The calculated data test build directory, if newly-created.
   */
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
    //  we want our own value to ensure uniqueness on the same machine
    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
    createSubDir(
      "mapreduce.cluster.local.dir",
      testPath, "mapred-local-dir");

    return testPath;
  }

  private void createSubDirAndSystemProperty(
    String propertyName, Path parent, String subDirName){

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // There is already a value set. So we do nothing but hope
      //  that there will be no conflicts
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)){
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          " Overriding the configuration value with the system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      // Ok, it's not set, so we create it as a subdirectory
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  /**
   * @return Where to write test data on the test filesystem; Returns working directory
   * for the test filesystem by default
   * @see #setupDataTestDirOnTestFS()
   * @see #getTestFileSystem()
   */
  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

  /**
   * @return META table descriptor
   */
  public HTableDescriptor getMetaTableDescriptor() {
    try {
      return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
    } catch (IOException e) {
      throw new RuntimeException("Unable to create META table descriptor", e);
    }
  }

  /**
   * @return Where the DFS cluster will write data on the local subsystem.
   * Creates it if it does not exist already.  A subdir of {@link #getBaseTestDir()}
   * @see #getTestFileSystem()
   */
  Path getClusterTestDir() {
    if (clusterTestDir == null){
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  /**
   * Creates a directory for the DFS cluster, under the test data directory.
   */
  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      return;
    }

    // Using randomUUID ensures that multiple clusters can be launched by
    //  the same test, if it stops and starts them
    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
    // Have it cleaned up on exit
    boolean b = deleteOnExit();
    if (b) clusterTestDir.deleteOnExit();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()},
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()},
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   * @param subdirName name of the subdir to create under the base test dir
   */
  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

  /**
   * Sets up a path in the test filesystem to be used by tests.
   * Creates a new directory if one is not already set up.
   */
  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test dir on test fs already set up: "
          + dataTestDirOnTestFS.toString());
      return;
    }
    dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
  }

  /**
   * Sets up a new path in test filesystem to be used by tests.
   */
  private Path getNewDataTestDirOnTestFS() throws IOException {
    //The file system can be either local, mini dfs, or if the configuration
    //is supplied externally, it can be an external cluster FS. If it is a local
    //file system, the tests should use getBaseTestDir, otherwise, we can use
    //the working directory, and create a unique sub dir there
    FileSystem fs = getTestFileSystem();
    Path newDataTestDir = null;
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      File dataTestDir = new File(getDataTestDir().toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
      newDataTestDir = new Path(dataTestDir.getAbsolutePath());
    } else {
      Path base = getBaseTestDirOnTestFS();
      String randomStr = UUID.randomUUID().toString();
      newDataTestDir = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(newDataTestDir);
    }
    return newDataTestDir;
  }

  /**
   * Cleans the test data directory on the test filesystem.
   * @return True if we removed the test dirs
   * @throws IOException
   */
  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      // Nothing was ever set up on the test fs, so there is nothing to remove.
      return false;
    }
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret) {
      dataTestDirOnTestFS = null;
    }
    return ret;
  }

  /**
   * Cleans a subdirectory under the test data directory on the test filesystem.
   * @return True if we removed child
   * @throws IOException
   */
  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }

  /**
   * Start a minidfscluster.
   * @param servers How many DNs to start.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  /**
   * Start a minidfscluster.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names, all instances of the
   * datanodes will have the same host name.
   * @param hosts hostnames of the DNs to run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
  throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }
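
  // Example (a sketch): start datanodes that report distinct, made-up
  // hostnames, e.g. for block-location tests:
  //   MiniDFSCluster dfs = TEST_UTIL.startMiniDFSCluster(
  //       new String[] { "host1.example.org", "host2.example.org", "host3.example.org" });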

  /**
   * Start a minidfscluster.
   * Can only create one.
   * @param servers How many DNs to start.
   * @param hosts hostnames of the DNs to run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
  throws Exception {
    createDirsAndSetProperties();
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
        setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
        setLevel(org.apache.log4j.Level.ERROR);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    //reset the test directory for test file system
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, racks, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    //reset the test directory for test file system
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }

  /** This is used before starting HDFS and map-reduce mini-clusters */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapreduce.cluster.local.dir");
    createDirAndSetProperty("mapred_temp", "mapreduce.cluster.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
  }

  /**
   * Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
   * This allows specifying this parameter on the command line.
   * If not set, it defaults to false.
   */
  public boolean isReadShortCircuitOn(){
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null){
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }

  /** Enable the short circuit read, unless configured differently.
   * Set both HBase and HDFS settings, including skipping the hdfs checksum checks.
   */
  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      // read short circuit, for hdfs
      conf.set("dfs.block.local-path-access.user", curUser);
      // read short circuit, for hbase
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      // Skip checking checksum, for the hdfs client and the datanode
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }
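
  // For illustration: since isReadShortCircuitOn() consults a system property
  // first, short-circuit reads can be requested per test run from the command
  // line (assuming the property is forwarded to the test JVM), e.g.
  //   mvn test -Dhbase.tests.use.shortcircuit.reads=true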

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

  /**
   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
   * or does nothing.
   * @throws IOException
   */
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }

  /**
   * Call this if you only want a zk cluster.
   * @see #startMiniCluster() if you want zk + dfs + hbase mini cluster.
   * @throws Exception
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  /**
   * Call this if you only want a zk cluster.
   * @param zooKeeperServerNum
   * @see #startMiniCluster() if you want zk + dfs + hbase mini cluster.
   * @throws Exception
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
    throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  /**
   * Start a mini ZK cluster. If the property "test.hbase.zookeeper.property.clientPort" is set,
   * the port mentioned is used as the default port for ZooKeeper.
   */
  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
  throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0){
      // If there is a port in the config file, we use it.
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  /**
   * Shuts down zk cluster created by call to {@link #startMiniZKCluster(File)}
   * or does nothing.
   * @throws IOException
   * @see #startMiniZKCluster()
   */
  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }

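  // Example (a sketch): a test that needs only ZooKeeper can pair these calls:
  //   MiniZooKeeperCluster zk = TEST_UTIL.startMiniZKCluster();
  //   try {
  //     // ... talk to zk at "localhost:" + zk.getClientPort() ...
  //   } finally {
  //     TEST_UTIL.shutdownMiniZKCluster();
  //   }
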
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @throws Exception
   * @return Mini hbase cluster instance created.
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * Set the <code>create</code> flag to control whether the root or data
   * directory path is created (it will be overwritten if the dir already exists).
   * @throws Exception
   * @return Mini hbase cluster instance created.
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves, boolean create)
  throws Exception {
    return startMiniCluster(1, numSlaves, create);
  }

  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under System property test.build.data.
   * Directory is cleaned up on exit.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * datanodes and regionservers.  If numSlaves is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
   * you will get bind errors.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
  throws Exception {
    return startMiniCluster(1, numSlaves, false);
  }

  /**
   * Start minicluster. Whether to create a new root or data dir path, even if such a path
   * has been created earlier, is decided based on the flag <code>create</code>.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, boolean create)
    throws Exception {
      return startMiniCluster(numMasters, numSlaves, null, create);
  }

  /**
   * Start minicluster.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves)
  throws Exception {
    return startMiniCluster(numMasters, numSlaves, null, false);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, boolean create)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts,
        null, null, create);
  }

  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under System property test.build.data.
   * Directory is cleaned up on exit.
   * @param numMasters Number of masters to start up.  We'll start this many
   * hbase masters.  If numMasters > 1, you can find the active/primary master
   * with {@link MiniHBaseCluster#getMaster()}.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * regionservers. If dataNodeHosts == null, this also indicates the number of
   * datanodes to start. If dataNodeHosts != null, the number of datanodes is
   * based on dataNodeHosts.length.
   * If numSlaves is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
   * you will get bind errors.
   * @param dataNodeHosts hostnames of the DNs to run on.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names,
   * all instances of the datanodes will have the same host name.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts,
        null, null);
  }

  /**
   * Same as {@link #startMiniCluster(int, int)}, but with custom number of datanodes.
   * @param numDataNodes Number of data nodes.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }

  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under System property test.build.data.
   * Directory is cleaned up on exit.
   * @param numMasters Number of masters to start up.  We'll start this many
   * hbase masters.  If numMasters > 1, you can find the active/primary master
   * with {@link MiniHBaseCluster#getMaster()}.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * regionservers. If dataNodeHosts == null, this also indicates the number of
   * datanodes to start. If dataNodeHosts != null, the number of datanodes is
   * based on dataNodeHosts.length.
   * If numSlaves is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
   * you will get bind errors.
   * @param dataNodeHosts hostnames of the DNs to run on.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names,
   * all instances of the datanodes will have the same host name.
   * @param masterClass The class to use as HMaster, or null for default
   * @param regionserverClass The class to use as HRegionServer, or null for
   * default
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
          throws Exception {
    return startMiniCluster(
        numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
    throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, dataNodeHosts,
        masterClass, regionserverClass, false);
  }

  /**
   * Same as {@link #startMiniCluster(int, int, String[], Class, Class)}, but with custom
   * number of datanodes.
   * @param numDataNodes Number of data nodes.
   * @param create Set this flag to create a new
   * root or data directory path or not (will overwrite if exists already).
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
    Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
    boolean create)
  throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // If we already put up a cluster, fail.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    // Start up a zk cluster.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Start the MiniHBaseCluster
    return startMiniHBaseCluster(numMasters, numSlaves, masterClass,
      regionserverClass, create);
  }

  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException {
    return startMiniHBaseCluster(numMasters, numSlaves, null, null, false);
  }

  /**
   * Starts up mini hbase cluster.  Usually used after call to
   * {@link #startMiniCluster(int, int)} when doing stepped startup of clusters.
   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
   * @param numMasters
   * @param numSlaves
   * @param create Whether to create a
   * root or data directory path or not; will overwrite if exists already.
   * @return Reference to the hbase mini hbase cluster.
   * @throws IOException
   * @throws InterruptedException
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
        final int numSlaves, Class<? extends HMaster> masterClass,
        Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
        boolean create)
  throws IOException, InterruptedException {
    // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
    createRootDir(create);

    // These settings will make the master wait until this exact number of
    // region servers is connected.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster =
        new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
    // Don't leave here till we've done a successful scan of the hbase:meta
    Table t = new HTable(c, TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin(); // create the hbaseAdmin immediately
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster) this.hbaseCluster;
  }
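
  // Example of stepped startup (a sketch): bring up dfs and zk yourself, then
  // start just the hbase portion on top of them.
  //   TEST_UTIL.startMiniDFSCluster(3);
  //   TEST_UTIL.startMiniZKCluster();
  //   TEST_UTIL.startMiniHBaseCluster(1, 3);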

  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
   * @param servers number of region servers
   * @throws IOException
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the hbase:meta
    Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }
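
  // Example (a sketch): stop just HBase and bring it back while dfs and zk
  // stay up:
  //   TEST_UTIL.shutdownMiniHBaseCluster();
  //   TEST_UTIL.restartHBaseCluster(2);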

  /**
   * @return Current mini hbase cluster. Only has something in it after a call
   * to {@link #startMiniCluster()}.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster) this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
                               MiniHBaseCluster.class.getName());
  }

  /**
   * Stops mini hbase, zk, and hdfs clusters.
   * @throws IOException
   * @see #startMiniCluster(int)
   */
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    if (this.connection != null && !this.connection.isClosed()) {
      this.connection.close();
      this.connection = null;
    }
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster){
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }

  /**
   * @return True if we removed the test dirs
   * @throws IOException
   */
  @Override
  public boolean cleanupTestDir() throws IOException {
    boolean ret = super.cleanupTestDir();
    if (deleteDir(this.clusterTestDir)) {
      this.clusterTestDir = null;
      return ret;
    }
    return false;
  }

  /**
   * Shutdown HBase mini cluster.  Does not shutdown zk or dfs if running.
   * @throws IOException
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    // unset the configuration for MIN and MAX RS to start
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shutdown zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }

  /**
   * Returns the path to the default root dir the minicluster uses. If <code>create</code>
   * is true, a new root directory path is fetched irrespective of whether it has been fetched
   * before or not. If false, the previous path is used.
   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath(boolean create) throws IOException {
    if (!create) {
      return getDataTestDirOnTestFS();
    } else {
      return getNewDataTestDirOnTestFS();
    }
  }

  /**
   * Same as {@link #getDefaultRootDirPath(boolean)}
   * except that the <code>create</code> flag is false.
   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath() throws IOException {
    return getDefaultRootDirPath(false);
  }

  /**
   * Creates an hbase rootdir in user home directory.  Also creates hbase
   * version file.  Normally you won't make use of this method.  Root hbasedir
   * is created for you as part of mini cluster startup.  You'd only use this
   * method if you were doing manual operations.
   * @param create This flag decides whether to get a new
   * root or data directory path or not, if it has been fetched already.
   * Note: Directory will be made irrespective of whether path has been fetched or not.
   * If directory already exists, it will be overwritten.
   * @return Fully qualified path to hbase root dir
   * @throws IOException
   */
  public Path createRootDir(boolean create) throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath(create);
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  /**
   * Same as {@link #createRootDir(boolean)}
   * except that the <code>create</code> flag is false.
   * @return Fully qualified path to hbase root dir
   * @throws IOException
   */
  public Path createRootDir() throws IOException {
    return createRootDir(false);
  }

  /**
   * Flushes all caches in the mini hbase cluster
   * @throws IOException
   */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /**
   * Flushes all caches in the mini hbase cluster
   * @throws IOException
   */
  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /**
   * Compact all regions in the mini hbase cluster
   * @throws IOException
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /**
   * Compact all of a table's regions in the mini hbase cluster
   * @throws IOException
   */
  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, String family)
  throws IOException {
    return createTable(tableName, new String[]{family});
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException {
    return createTable(TableName.valueOf(tableName), new byte[][]{family});
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, String[] families)
  throws IOException {
    List<byte[]> fams = new ArrayList<byte[]>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[] family)
  throws IOException {
    return createTable(tableName, new byte[][]{family});
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families, new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  public HTable createTable(String tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }
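
  // Example (a sketch; names are made up): create a table pre-split into 10
  // regions spanning the key range ["aaa", "zzz"), keeping 3 versions per cell:
  //   HTable t = TEST_UTIL.createTable(TableName.valueOf("testtable"),
  //       new byte[][] { Bytes.toBytes("cf") }, 3,
  //       Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 10);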

  /**
   * Create a table.
   * @param htd
   * @param families
   * @param c Configuration to use
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
  throws IOException {
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms (they are on by default as of 0.95) but we disable them here because
      // tests have hard coded counts of what to expect in block cache, etc., and blooms being
      // on is interfering.
      hcd.setBloomFilterType(BloomType.NONE);
      htd.addFamily(hcd);
    }
    getHBaseAdmin().createTable(htd);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned
    waitUntilAllRegionsAssigned(htd.getTableName());
    return (HTable) getConnection().getTable(htd.getTableName());
  }

  /**
   * Create a table.
   * @param htd
   * @param splitRows
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(HTableDescriptor htd, byte[][] splitRows)
      throws IOException {
    getHBaseAdmin().createTable(htd, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(getConfiguration(), htd.getTableName());
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param c Configuration to use
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c)
  throws IOException {
    return createTable(new HTableDescriptor(tableName), families, c);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param c Configuration to use
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms (they are on by default as of 0.95) but we disable them here because
      // tests have hard coded counts of what to expect in block cache, etc., and blooms being
      // on is interfering.
      hcd.setBloomFilterType(BloomType.NONE);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, desc.getTableName());
  }
1360 
1361   /**
1362    * Create a table.
1363    * @param tableName name of the table
1364    * @param families column families
1365    * @param c Configuration to use
1366    * @param numVersions max versions to keep per family
1367    * @return An HTable instance for the created table.
1368    * @throws IOException
1369    */
1370   public HTable createTable(TableName tableName, byte[][] families,
1371       final Configuration c, int numVersions)
1372   throws IOException {
1373     HTableDescriptor desc = new HTableDescriptor(tableName);
1374     for(byte[] family : families) {
1375       HColumnDescriptor hcd = new HColumnDescriptor(family)
1376           .setMaxVersions(numVersions);
1377       desc.addFamily(hcd);
1378     }
1379     getHBaseAdmin().createTable(desc);
1380     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1381     waitUntilAllRegionsAssigned(tableName);
1382     return new HTable(c, tableName);
1383   }
1384 
1385   /**
1386    * Create a table.
1387    * @param tableName name of the table
1388    * @param families column families
1389    * @param c Configuration to use
1390    * @param numVersions max versions to keep per family
1391    * @return An HTable instance for the created table.
1392    * @throws IOException
1393    */
1394   public HTable createTable(byte[] tableName, byte[][] families,
1395       final Configuration c, int numVersions)
1396   throws IOException {
1397     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1398     for(byte[] family : families) {
1399       HColumnDescriptor hcd = new HColumnDescriptor(family)
1400           .setMaxVersions(numVersions);
1401       desc.addFamily(hcd);
1402     }
1403     getHBaseAdmin().createTable(desc);
1404     return new HTable(c, desc.getTableName());
1405   }
1406 
1407   /**
1408    * Create a table.
1409    * @param tableName name of the table
1410    * @param family column family
1411    * @param numVersions max versions to keep
1412    * @return An HTable instance for the created table.
1413    * @throws IOException
1414    */
1415   public HTable createTable(byte[] tableName, byte[] family, int numVersions)
1416   throws IOException {
1417     return createTable(tableName, new byte[][]{family}, numVersions);
1418   }
1419 
1420   /**
1421    * Create a table.
1422    * @param tableName name of the table
1423    * @param family column family
1424    * @param numVersions max versions to keep
1425    * @return An HTable instance for the created table.
1426    * @throws IOException
1427    */
1428   public HTable createTable(TableName tableName, byte[] family, int numVersions)
1429   throws IOException {
1430     return createTable(tableName, new byte[][]{family}, numVersions);
1431   }
1432 
1433   /**
1434    * Create a table.
1435    * @param tableName name of the table
1436    * @param families column families
1437    * @param numVersions max versions to keep per family
1438    * @return An HTable instance for the created table.
1439    * @throws IOException
1440    */
1441   public HTable createTable(byte[] tableName, byte[][] families,
1442       int numVersions)
1443   throws IOException {
1444     return createTable(TableName.valueOf(tableName), families, numVersions);
1445   }
1446 
1447   /**
1448    * Create a table.
1449    * @param tableName name of the table
1450    * @param families column families
1451    * @param numVersions max versions to keep per family
1452    * @return An HTable instance for the created table.
1453    * @throws IOException
1454    */
1455   public HTable createTable(TableName tableName, byte[][] families,
1456       int numVersions)
1457   throws IOException {
1458     HTableDescriptor desc = new HTableDescriptor(tableName);
1459     for (byte[] family : families) {
1460       HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1461       desc.addFamily(hcd);
1462     }
1463     getHBaseAdmin().createTable(desc);
1464     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1465     waitUntilAllRegionsAssigned(tableName);
1466     return new HTable(new Configuration(getConfiguration()), tableName);
1467   }
1468 
1469   /**
1470    * Create a table.
1471    * @param tableName name of the table
1472    * @param families column families
1473    * @param numVersions max versions to keep per family
1474    * @return An HTable instance for the created table.
1475    * @throws IOException
1476    */
1477   public HTable createTable(byte[] tableName, byte[][] families,
1478     int numVersions, int blockSize) throws IOException {
1479     return createTable(TableName.valueOf(tableName),
1480         families, numVersions, blockSize);
1481   }
1482 
1483   /**
1484    * Create a table.
1485    * @param tableName name of the table
1486    * @param families column families
1487    * @param numVersions max versions to keep per family
1488    * @return An HTable instance for the created table.
1489    * @throws IOException
1490    */
1491   public HTable createTable(TableName tableName, byte[][] families,
1492     int numVersions, int blockSize) throws IOException {
1493     HTableDescriptor desc = new HTableDescriptor(tableName);
1494     for (byte[] family : families) {
1495       HColumnDescriptor hcd = new HColumnDescriptor(family)
1496           .setMaxVersions(numVersions)
1497           .setBlocksize(blockSize);
1498       desc.addFamily(hcd);
1499     }
1500     getHBaseAdmin().createTable(desc);
1501     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1502     waitUntilAllRegionsAssigned(tableName);
1503     return new HTable(new Configuration(getConfiguration()), tableName);
1504   }
1505 
1506   /**
1507    * Create a table.
1508    * @param tableName name of the table
1509    * @param families column families
1510    * @param numVersions max versions to keep, one entry per family
1511    * @return An HTable instance for the created table.
1512    * @throws IOException
1513    */
1514   public HTable createTable(byte[] tableName, byte[][] families,
1515       int[] numVersions)
1516   throws IOException {
1517     return createTable(TableName.valueOf(tableName), families, numVersions);
1518   }
1519 
1520   /**
1521    * Create a table.
1522    * @param tableName name of the table
1523    * @param families column families
1524    * @param numVersions max versions to keep, one entry per family
1525    * @return An HTable instance for the created table.
1526    * @throws IOException
1527    */
1528   public HTable createTable(TableName tableName, byte[][] families,
1529       int[] numVersions)
1530   throws IOException {
1531     HTableDescriptor desc = new HTableDescriptor(tableName);
1532     int i = 0;
1533     for (byte[] family : families) {
1534       HColumnDescriptor hcd = new HColumnDescriptor(family)
1535           .setMaxVersions(numVersions[i]);
1536       desc.addFamily(hcd);
1537       i++;
1538     }
1539     getHBaseAdmin().createTable(desc);
1540     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1541     waitUntilAllRegionsAssigned(tableName);
1542     return new HTable(new Configuration(getConfiguration()), tableName);
1543   }
1544 
1545   /**
1546    * Create a table.
1547    * @param tableName name of the table
1548    * @param family column family
1549    * @param splitRows split keys for pre-splitting the table
1550    * @return An HTable instance for the created table.
1551    * @throws IOException
1552    */
1553   public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
1554     throws IOException{
1555     return createTable(TableName.valueOf(tableName), family, splitRows);
1556   }
1557 
1558   /**
1559    * Create a table.
1560    * @param tableName name of the table
1561    * @param family column family
1562    * @param splitRows split keys for pre-splitting the table
1563    * @return An HTable instance for the created table.
1564    * @throws IOException
1565    */
1566   public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
1567       throws IOException {
1568     HTableDescriptor desc = new HTableDescriptor(tableName);
1569     HColumnDescriptor hcd = new HColumnDescriptor(family);
1570     desc.addFamily(hcd);
1571     getHBaseAdmin().createTable(desc, splitRows);
1572     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1573     waitUntilAllRegionsAssigned(tableName);
1574     return new HTable(getConfiguration(), tableName);
1575   }
1576 
1577   /**
1578    * Create a table.
1579    * @param tableName name of the table
1580    * @param families column families
1581    * @param splitRows split keys for pre-splitting the table
1582    * @return An HTable instance for the created table.
1583    * @throws IOException
1584    */
1585   public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
1586       throws IOException {
1587     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1588     for(byte[] family:families) {
1589       HColumnDescriptor hcd = new HColumnDescriptor(family);
1590       desc.addFamily(hcd);
1591     }
1592     getHBaseAdmin().createTable(desc, splitRows);
1593     // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned.
1594     waitUntilAllRegionsAssigned(desc.getTableName());
1595     return new HTable(getConfiguration(), desc.getTableName());
1596   }
1597 
1598   /**
1599    * Modify a table, synchronous. Waiting logic similar to that of {@code admin.rb#alter_status}.
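        * <p>A minimal usage sketch; <code>TEST_UTIL</code> is an illustrative
        * HBaseTestingUtility and table <code>t1</code> is assumed to exist:
        * <pre>
        * Admin admin = TEST_UTIL.getHBaseAdmin();
        * HTableDescriptor htd = admin.getTableDescriptor(TableName.valueOf("t1"));
        * htd.setMaxFileSize(64L * 1024 * 1024);           // any descriptor change works here
        * HBaseTestingUtility.modifyTableSync(admin, htd); // blocks until all regions report updated
        * </pre>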
1600    */
1601   @SuppressWarnings("serial")
1602   public static void modifyTableSync(Admin admin, HTableDescriptor desc)
1603       throws IOException, InterruptedException {
1604     admin.modifyTable(desc.getTableName(), desc);
1605     Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
1606       setFirst(0);
1607       setSecond(0);
1608     }};
1609     int i = 0;
1610     do {
1611       status = admin.getAlterStatus(desc.getTableName());
1612       if (status.getSecond() != 0) {
1613         LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
1614           + " regions updated.");
1615         Thread.sleep(1000L);
1616       } else {
1617         LOG.debug("All regions updated.");
1618         break;
1619       }
1620     } while (status.getFirst() != 0 && i++ < 500);
1621     if (status.getFirst() != 0) {
1622       throw new IOException("Failed to update all regions even after 500 seconds.");
1623     }
1624   }
1625 
1626   /**
1627    * Set the number of Region replicas.
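        * <p>Illustrative call, assuming table <code>t1</code> exists; note that the
        * table is disabled and re-enabled as a side effect:
        * <pre>
        * HBaseTestingUtility.setReplicas(TEST_UTIL.getHBaseAdmin(), TableName.valueOf("t1"), 3);
        * </pre>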
1628    */
1629   public static void setReplicas(Admin admin, TableName table, int replicaCount)
1630       throws IOException, InterruptedException {
1631     admin.disableTable(table);
1632     HTableDescriptor desc = admin.getTableDescriptor(table);
1633     desc.setRegionReplication(replicaCount);
1634     admin.modifyTable(desc.getTableName(), desc);
1635     admin.enableTable(table);
1636   }
1637 
1638   /**
1639    * Drop an existing table
1640    * @param tableName existing table
1641    */
1642   public void deleteTable(String tableName) throws IOException {
1643     deleteTable(TableName.valueOf(tableName));
1644   }
1645 
1646   /**
1647    * Drop an existing table
1648    * @param tableName existing table
1649    */
1650   public void deleteTable(byte[] tableName) throws IOException {
1651     deleteTable(TableName.valueOf(tableName));
1652   }
1653 
1654   /**
1655    * Drop an existing table
1656    * @param tableName existing table
1657    */
1658   public void deleteTable(TableName tableName) throws IOException {
1659     try {
1660       getHBaseAdmin().disableTable(tableName);
1661     } catch (TableNotEnabledException e) {
1662       LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1663     }
1664     getHBaseAdmin().deleteTable(tableName);
1665   }
1666 
1667   // ==========================================================================
1668   // Canned table and table descriptor creation
1669   // TODO replace HBaseTestCase
1670 
1671   public final static byte [] fam1 = Bytes.toBytes("colfamily11");
1672   public final static byte [] fam2 = Bytes.toBytes("colfamily21");
1673   public final static byte [] fam3 = Bytes.toBytes("colfamily31");
1674   public static final byte[][] COLUMNS = {fam1, fam2, fam3};
1675   private static final int MAXVERSIONS = 3;
1676 
1677   public static final char FIRST_CHAR = 'a';
1678   public static final char LAST_CHAR = 'z';
1679   public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
1680   public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1681 
1682   /**
1683    * Create a table descriptor for a table of name <code>name</code>, with
1684    * {@link #COLUMNS} for families.
1685    * @param name Name to give table.
1686    * @param versions How many versions to allow per column.
1687    * @return Table descriptor.
1688    */
1689   public HTableDescriptor createTableDescriptor(final String name,
1690       final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
1691     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
1692     for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1693       htd.addFamily(new HColumnDescriptor(cfName)
1694           .setMinVersions(minVersions)
1695           .setMaxVersions(versions)
1696           .setKeepDeletedCells(keepDeleted)
1697           .setBlockCacheEnabled(false)
1698           .setTimeToLive(ttl)
1699       );
1700     }
1701     return htd;
1702   }
1703 
1704   /**
1705    * Create a table descriptor for a table of name <code>name</code>, with
1706    * {@link #COLUMNS} for families.
1707    * @param name Name to give table.
1708    * @return Table descriptor.
1709    */
1710   public HTableDescriptor createTableDescriptor(final String name) {
1711     return createTableDescriptor(name,  HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1712         MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1713   }
1714 
1715   /**
1716    * Create an HRegion that writes to the local tmp dirs
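        * <p>A sketch of typical use (names are illustrative); close the region when done:
        * <pre>
        * HTableDescriptor htd = TEST_UTIL.createTableDescriptor("testtable");
        * HRegion region = TEST_UTIL.createLocalHRegion(htd, null, null); // whole key range
        * try {
        *   // exercise the region directly, no cluster required
        * } finally {
        *   HRegion.closeHRegion(region);
        * }
        * </pre>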
1717    * @param desc table descriptor
1718    * @param startKey region start key
1719    * @param endKey region end key
1720    * @return created HRegion
1721    * @throws IOException
1722    */
1723   public HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey,
1724       byte [] endKey)
1725   throws IOException {
1726     HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
1727     return createLocalHRegion(hri, desc);
1728   }
1729 
1730   /**
1731    * Create an HRegion that writes to the local tmp dirs
1732    * @param info region info
1733    * @param desc table descriptor
1734    * @return created HRegion
1735    * @throws IOException
1736    */
1737   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
1738     return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
1739   }
1740 
1741   /**
1742    * Create an HRegion that writes to the local tmp dirs with specified wal
1743    * @param info regioninfo
1744    * @param desc table descriptor
1745    * @param wal wal for this region.
1746    * @return created hregion
1747    * @throws IOException
1748    */
1749   public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, WAL wal)
1750       throws IOException {
1751     return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, wal);
1752   }
1753 
1754   /**
1755    * @param tableName name of the table
1756    * @param startKey region start key
1757    * @param stopKey region stop key
1758    * @param callingMethod name of the calling method (currently unused)
1759    * @param conf Configuration to use
1760    * @param isReadOnly whether the region should be read-only
1761    * @param families column families
1762    * @throws IOException
1763    * @return A region on which you must call
1764    *         {@link HRegion#closeHRegion(HRegion)} when done.
1765    */
1766   public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
1767       String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
1768       WAL wal, byte[]... families) throws IOException {
1769     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
1770     htd.setReadOnly(isReadOnly);
1771     for (byte[] family : families) {
1772       HColumnDescriptor hcd = new HColumnDescriptor(family);
1773       // Keep all versions.
1774       hcd.setMaxVersions(Integer.MAX_VALUE);
1775       htd.addFamily(hcd);
1776     }
1777     htd.setDurability(durability);
1778     HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
1779     return createLocalHRegion(info, htd, wal);
1780   }
1781   //
1782   // ==========================================================================
1783 
1784   /**
1785    * Provide an existing table name to truncate
1786    * @param tableName existing table
1787    * @return HTable to that new table
1788    * @throws IOException
1789    */
1790   public HTable truncateTable(byte[] tableName) throws IOException {
1791     return truncateTable(TableName.valueOf(tableName));
1792   }
1793 
1794   /**
1795    * Provide an existing table name to truncate
1796    * @param tableName existing table
1797    * @return HTable to that new table
1798    * @throws IOException
1799    */
1800   public HTable truncateTable(TableName tableName) throws IOException {
1801     HTable table = new HTable(getConfiguration(), tableName);
1802     Scan scan = new Scan();
1803     ResultScanner resScan = table.getScanner(scan);
1804     for(Result res : resScan) {
1805       Delete del = new Delete(res.getRow());
1806       table.delete(del);
1807     }
1808     // Close the scanner now that the deletes are done.
1809     resScan.close();
1810     return table;
1811   }
1812 
1813   /**
1814    * Load table with rows from 'aaa' to 'zzz'.
1815    * @param t Table
1816    * @param f Family
1817    * @return Count of rows loaded.
1818    * @throws IOException
1819    */
1820   public int loadTable(final Table t, final byte[] f) throws IOException {
1821     return loadTable(t, new byte[][] {f});
1822   }
1823 
1824   /**
1825    * Load table with rows from 'aaa' to 'zzz'.
1826    * @param t Table
1827    * @param f Family
1828    * @return Count of rows loaded.
1829    * @throws IOException
1830    */
1831   public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
1832     return loadTable(t, new byte[][] {f}, null, writeToWAL);
1833   }
1834 
1835   /**
1836    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1837    * @param t Table
1838    * @param f Array of Families to load
1839    * @return Count of rows loaded.
1840    * @throws IOException
1841    */
1842   public int loadTable(final Table t, final byte[][] f) throws IOException {
1843     return loadTable(t, f, null);
1844   }
1845 
1846   /**
1847    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
1848    * @param t Table
1849    * @param f Array of Families to load
1850    * @param value the values of the cells. If null is passed, the row key is used as value
1851    * @return Count of rows loaded.
1852    * @throws IOException
1853    */
1854   public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
1855     return loadTable(t, f, value, true);
1856   }
1857 
1858   /**
1859    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
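        * <p>A hedged example; <code>table</code> is an assumed open Table with family <code>f</code>:
        * <pre>
        * byte[] f = Bytes.toBytes("family");
        * int rowCount = TEST_UTIL.loadTable(table, new byte[][] { f }, null, false); // 26^3 rows, WAL skipped
        * </pre>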
1860    * @param t Table
1861    * @param f Array of Families to load
1862    * @param value the values of the cells. If null is passed, the row key is used as value
1863    * @return Count of rows loaded.
1864    * @throws IOException
1865    */
1866   public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException {
1867     List<Put> puts = new ArrayList<>();
1868     for (byte[] row : HBaseTestingUtility.ROWS) {
1869       Put put = new Put(row);
1870       put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
1871       for (int i = 0; i < f.length; i++) {
1872         put.add(f[i], null, value != null ? value : row);
1873       }
1874       puts.add(put);
1875     }
1876     t.put(puts);
1877     return puts.size();
1878   }
1879 
1880   /** Tracks and validates table rows
1881    * generated with {@link HBaseTestingUtility#loadTable(Table, byte[])}
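        * <p>Usage sketch; <code>scanner</code> is an assumed, already-open ResultScanner:
        * <pre>
        * SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("aaa"), Bytes.toBytes("ggg"));
        * for (Result r : scanner) {
        *   tracker.addRow(r.getRow());
        * }
        * tracker.validate(); // throws RuntimeException if any count is off
        * </pre>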
1882    */
1883   public static class SeenRowTracker {
1884     int dim = 'z' - 'a' + 1;
1885     int[][][] seenRows = new int[dim][dim][dim]; //count of how many times the row is seen
1886     byte[] startRow;
1887     byte[] stopRow;
1888 
1889     public SeenRowTracker(byte[] startRow, byte[] stopRow) {
1890       this.startRow = startRow;
1891       this.stopRow = stopRow;
1892     }
1893 
1894     void reset() {
1895       for (byte[] row : ROWS) {
1896         seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
1897       }
1898     }
1899 
1900     int i(byte b) {
1901       return b - 'a';
1902     }
1903 
1904     public void addRow(byte[] row) {
1905       seenRows[i(row[0])][i(row[1])][i(row[2])]++;
1906     }
1907 
1908     /** Validate that all the rows between startRow and stopRow are seen exactly once, and
1909      * that all other rows are not seen at all
1910      */
1911     public void validate() {
1912       for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1913         for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1914           for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1915             int count = seenRows[i(b1)][i(b2)][i(b3)];
1916             int expectedCount = 0;
1917             if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
1918                 && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
1919               expectedCount = 1;
1920             }
1921             if (count != expectedCount) {
1922               String row = new String(new byte[] {b1,b2,b3});
1923               throw new RuntimeException("Row:" + row + " has a seen count of " + count + " instead of " + expectedCount);
1924             }
1925           }
1926         }
1927       }
1928     }
1929   }
1930 
1931   public int loadRegion(final HRegion r, final byte[] f) throws IOException {
1932     return loadRegion(r, f, false);
1933   }
1934 
1935   /**
1936    * Load region with rows from 'aaa' to 'zzz'.
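        * <p>For example, against a region from {@link #createLocalHRegion(HRegionInfo, HTableDescriptor)}
        * (the flush flag forces a flush after each first-byte pass):
        * <pre>
        * int rows = TEST_UTIL.loadRegion(region, Bytes.toBytes("family"), true);
        * </pre>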
1937    * @param r Region
1938    * @param f Family
1939    * @param flush flush the cache if true
1940    * @return Count of rows loaded.
1941    * @throws IOException
1942    */
1943   public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
1944   throws IOException {
1945     byte[] k = new byte[3];
1946     int rowCount = 0;
1947     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1948       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1949         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1950           k[0] = b1;
1951           k[1] = b2;
1952           k[2] = b3;
1953           Put put = new Put(k);
1954           // Skip the WAL for speed; these test loads do not need durability.
1955           put.setDurability(Durability.SKIP_WAL);
1956           put.add(f, null, k);
1957 
1958           int preRowCount = rowCount;
1959           int pause = 10;
1960           int maxPause = 1000;
1961           while (rowCount == preRowCount) {
1962             try {
1963               r.put(put);
1964               rowCount++;
1965             } catch (RegionTooBusyException e) {
1966               pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
1967               Threads.sleep(pause);
1968             }
1969           }
1970         }
1971       }
1972       if (flush) {
1973         r.flushcache();
1974       }
1975     }
1976     return rowCount;
1977   }
1978 
1979   public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow) throws IOException {
1980     for (int i = startRow; i < endRow; i++) {
1981       byte[] data = Bytes.toBytes(String.valueOf(i));
1982       Put put = new Put(data);
1983       put.add(f, null, data);
1984       t.put(put);
1985     }
1986   }
1987 
1988   public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow) throws IOException {
1989     for (int i = startRow; i < endRow; i++) {
1990       byte[] data = Bytes.toBytes(String.valueOf(i));
1991       Delete delete = new Delete(data);
1992       delete.deleteFamily(f);
1993       t.delete(delete);
1994     }
1995   }
1996 
1997   /**
1998    * Return the number of rows in the given table.
1999    */
2000   public int countRows(final Table table) throws IOException {
2001     Scan scan = new Scan();
2002     ResultScanner results = table.getScanner(scan);
2003     int count = 0;
2004     for (@SuppressWarnings("unused") Result res : results) {
2005       count++;
2006     }
2007     results.close();
2008     return count;
2009   }
2010 
2011   public int countRows(final Table table, final byte[]... families) throws IOException {
2012     Scan scan = new Scan();
2013     for (byte[] family: families) {
2014       scan.addFamily(family);
2015     }
2016     ResultScanner results = table.getScanner(scan);
2017     int count = 0;
2018     for (@SuppressWarnings("unused") Result res : results) {
2019       count++;
2020     }
2021     results.close();
2022     return count;
2023   }
2024 
2025   /**
2026    * Return an md5 digest of the entire contents of a table.
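        * <p>Mainly useful for comparing the contents of a table before and after an
        * operation; a sketch (assertEquals is JUnit's):
        * <pre>
        * String before = TEST_UTIL.checksumRows(table);
        * // ... an operation that should leave the contents untouched ...
        * assertEquals(before, TEST_UTIL.checksumRows(table));
        * </pre>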
2027    */
2028   public String checksumRows(final Table table) throws Exception {
2029     Scan scan = new Scan();
2030     ResultScanner results = table.getScanner(scan);
2031     MessageDigest digest = MessageDigest.getInstance("MD5");
2032     for (Result res : results) {
2033       digest.update(res.getRow());
2034     }
2035     results.close();
2036     return Bytes.toStringBinary(digest.digest()); // render the digest bytes as a string
2037   }
2038 
2039   /**
2040    * Creates many regions, with names from "aaa" to "zzz".
2041    *
2042    * @param table  The table to use for the data.
2043    * @param columnFamily  The family to insert the data into.
2044    * @return count of regions created.
2045    * @throws IOException When creating the regions fails.
2046    */
2047   public int createMultiRegions(HTable table, byte[] columnFamily)
2048   throws IOException {
2049     return createMultiRegions(getConfiguration(), table, columnFamily);
2050   }
2051 
2052   /** All the row values for the data loaded by {@link #loadTable(Table, byte[])} */
2053   public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
2054   static {
2055     int i = 0;
2056     for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2057       for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2058         for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2059           ROWS[i][0] = b1;
2060           ROWS[i][1] = b2;
2061           ROWS[i][2] = b3;
2062           i++;
2063         }
2064       }
2065     }
2066   }
2067 
2068   public static final byte[][] KEYS = {
2069     HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
2070     Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
2071     Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
2072     Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
2073     Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
2074     Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
2075     Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
2076     Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
2077     Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
2078   };
2079 
2080   public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
2081       Bytes.toBytes("bbb"),
2082       Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
2083       Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
2084       Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
2085       Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
2086       Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
2087       Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
2088       Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
2089       Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
2090   };
2091 
2092   /**
2093    * Creates many regions, with names from "aaa" to "zzz".
2094    * @param c Configuration to use.
2095    * @param table  The table to use for the data.
2096    * @param columnFamily  The family to insert the data into.
2097    * @return count of regions created.
2098    * @throws IOException When creating the regions fails.
2099    */
2100   public int createMultiRegions(final Configuration c, final HTable table,
2101       final byte[] columnFamily)
2102   throws IOException {
2103     return createMultiRegions(c, table, columnFamily, KEYS);
2104   }
2105 
2106   /**
2107    * Creates the specified number of regions in the specified table.
2108    * @param c Configuration to use
2109    * @param table the table to create regions in
2110    * @param family column family
2111    * @param numRegions number of regions to create; must be at least 3
2112    * @return count of regions created
2113    * @throws IOException
2114    */
2115   public int createMultiRegions(final Configuration c, final HTable table,
2116       final byte [] family, int numRegions)
2117   throws IOException {
2118     if (numRegions < 3) throw new IOException("Must create at least 3 regions");
2119     byte [] startKey = Bytes.toBytes("aaaaa");
2120     byte [] endKey = Bytes.toBytes("zzzzz");
2121     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
2122     byte [][] regionStartKeys = new byte[splitKeys.length+1][];
2123     System.arraycopy(splitKeys, 0, regionStartKeys, 1, splitKeys.length);
2124     regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
2125     return createMultiRegions(c, table, family, regionStartKeys);
2126   }
2127 
2128   public int createMultiRegions(final Configuration c, final HTable table,
2129       final byte[] columnFamily, byte [][] startKeys)
2130   throws IOException {
2131     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2132     Table meta = new HTable(c, TableName.META_TABLE_NAME);
2133     HTableDescriptor htd = table.getTableDescriptor();
2134     if(!htd.hasFamily(columnFamily)) {
2135       HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
2136       htd.addFamily(hcd);
2137     }
2138     // remove empty region - this is tricky as the mini cluster during the test
2139     // setup already has the "<tablename>,,123456789" row with an empty start
2140     // and end key. Adding the custom regions below adds those blindly,
2141     // including the new start region from empty to "bbb". lg
2142     List<byte[]> rows = getMetaTableRows(htd.getTableName());
2143     String regionToDeleteInFS = table
2144         .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
2145         .getRegionInfo().getEncodedName();
2146     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
2147     // add custom ones
2148     int count = 0;
2149     for (int i = 0; i < startKeys.length; i++) {
2150       int j = (i + 1) % startKeys.length;
2151       HRegionInfo hri = new HRegionInfo(table.getName(),
2152         startKeys[i], startKeys[j]);
2153       MetaTableAccessor.addRegionToMeta(meta, hri);
2154       newRegions.add(hri);
2155       count++;
2156     }
2157     // see comment above, remove "old" (or previous) single region
2158     for (byte[] row : rows) {
2159       LOG.info("createMultiRegions: deleting meta row -> " +
2160         Bytes.toStringBinary(row));
2161       meta.delete(new Delete(row));
2162     }
2163     // remove the "old" region from FS
2164     Path tableDir = new Path(getDefaultRootDirPath().toString()
2165         + System.getProperty("file.separator") + htd.getTableName()
2166         + System.getProperty("file.separator") + regionToDeleteInFS);
2167     FileSystem.get(c).delete(tableDir, true);
2168     // flush cache of regions
2169     HConnection conn = table.getConnection();
2170     conn.clearRegionCache();
2171     // assign all the new regions IF table is enabled.
2172     Admin admin = getHBaseAdmin();
2173     if (admin.isTableEnabled(table.getName())) {
2174       for(HRegionInfo hri : newRegions) {
2175         admin.assign(hri.getRegionName());
2176       }
2177     }
2178 
2179     meta.close();
2180 
2181     return count;
2182   }
2183 
2184   /**
2185    * Create rows in hbase:meta for regions of the specified table with the specified
2186    * start keys.  The first startKey should be a 0 length byte array if you
2187    * want to form a proper range of regions.
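        * <p>For instance, with the canned {@link #KEYS} split points; <code>htd</code> is an
        * assumed table descriptor (KEYS starts with the required empty key):
        * <pre>
        * List&lt;HRegionInfo&gt; regions =
        *     TEST_UTIL.createMultiRegionsInMeta(TEST_UTIL.getConfiguration(), htd, HBaseTestingUtility.KEYS);
        * </pre>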
2188    * @param conf Configuration to use
2189    * @param htd table descriptor
2190    * @param startKeys region start keys
2191    * @return list of region info for regions added to meta
2192    * @throws IOException
2193    */
2194   public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
2195       final HTableDescriptor htd, byte [][] startKeys)
2196   throws IOException {
2197     Table meta = new HTable(conf, TableName.META_TABLE_NAME);
2198     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2199     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
2200     // add custom ones
2201     for (int i = 0; i < startKeys.length; i++) {
2202       int j = (i + 1) % startKeys.length;
2203       HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
2204           startKeys[j]);
2205       MetaTableAccessor.addRegionToMeta(meta, hri);
2206       newRegions.add(hri);
2207     }
2208 
2209     meta.close();
2210     return newRegions;
2211   }
2212 
2213   /**
2214    * Returns all rows from the hbase:meta table.
2215    *
2216    * @throws IOException When reading the rows fails.
2217    */
2218   public List<byte[]> getMetaTableRows() throws IOException {
2219     // TODO: Redo using MetaTableAccessor class
2220     Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2221     List<byte[]> rows = new ArrayList<byte[]>();
2222     ResultScanner s = t.getScanner(new Scan());
2223     for (Result result : s) {
2224       LOG.info("getMetaTableRows: row -> " +
2225         Bytes.toStringBinary(result.getRow()));
2226       rows.add(result.getRow());
2227     }
2228     s.close();
2229     t.close();
2230     return rows;
2231   }
2232 
2233   /**
2234    * Returns all rows from the hbase:meta table for a given user table
2235    *
2236    * @throws IOException When reading the rows fails.
2237    */
2238   public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2239     // TODO: Redo using MetaTableAccessor.
2240     Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2241     List<byte[]> rows = new ArrayList<byte[]>();
2242     ResultScanner s = t.getScanner(new Scan());
2243     for (Result result : s) {
2244       HRegionInfo info = HRegionInfo.getHRegionInfo(result);
2245       if (info == null) {
2246         LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2247         // TODO figure out what to do for this new hosed case.
2248         continue;
2249       }
2250 
2251       if (info.getTable().equals(tableName)) {
2252         LOG.info("getMetaTableRows: row -> " +
2253             Bytes.toStringBinary(result.getRow()) + info);
2254         rows.add(result.getRow());
2255       }
2256     }
2257     s.close();
2258     t.close();
2259     return rows;
2260   }
2261 
2262   /**
2263    * Tool to get the reference to the region server object that holds the
2264    * region of the specified user table.
2265    * It first searches for the meta rows that contain the region of the
2266    * specified table, then gets the index of that RS, and finally retrieves
2267    * the RS's reference.
2268    * @param tableName user table to lookup in hbase:meta
2269    * @return region server that holds it, null if the row doesn't exist
2270    * @throws IOException
2271    * @throws InterruptedException
2272    */
2273   public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2274       throws IOException, InterruptedException {
2275     List<byte[]> metaRows = getMetaTableRows(tableName);
2276     if (metaRows == null || metaRows.isEmpty()) {
2277       return null;
2278     }
2279     LOG.debug("Found " + metaRows.size() + " rows for table " +
2280       tableName);
2281     byte [] firstrow = metaRows.get(0);
2282     LOG.debug("FirstRow=" + Bytes.toString(firstrow));
2283     long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2284       HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2285     int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2286       HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
2287     RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MILLISECONDS); // pause is in milliseconds
2288     while(retrier.shouldRetry()) {
2289       int index = getMiniHBaseCluster().getServerWith(firstrow);
2290       if (index != -1) {
2291         return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2292       }
2293       // Came back -1.  Region may not be online yet.  Sleep a while.
2294       retrier.sleepUntilNextRetry();
2295     }
2296     return null;
2297   }
2298 
2299   /**
2300    * Starts a <code>MiniMRCluster</code> with a default number of
2301    * <code>TaskTracker</code>s.
2302    *
2303    * @throws IOException When starting the cluster fails.
2304    */
2305   public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2306     startMiniMapReduceCluster(2);
2307     return mrCluster;
2308   }
2309 
2310   /**
2311    * TaskTracker has a bug where changing the hadoop.log.dir system property
2312    * will not change its internal static LOG_DIR variable.
2313    */
2314   private void forceChangeTaskLogDir() {
2315     Field logDirField;
2316     try {
2317       logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2318       logDirField.setAccessible(true);
2319 
2320       Field modifiersField = Field.class.getDeclaredField("modifiers");
2321       modifiersField.setAccessible(true);
2322       modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2323 
2324       logDirField.set(null, new File(hadoopLogDir, "userlogs"));
2325     } catch (SecurityException e) {
2326       throw new RuntimeException(e);
2327     } catch (NoSuchFieldException e) {
2329       throw new RuntimeException(e);
2330     } catch (IllegalArgumentException e) {
2331       throw new RuntimeException(e);
2332     } catch (IllegalAccessException e) {
2333       throw new RuntimeException(e);
2334     }
2335   }
2336 
2337   /**
2338    * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
2339    * filesystem.
2340    * @param servers  The number of <code>TaskTracker</code>s to start.
2341    * @throws IOException When starting the cluster fails.
2342    */
2343   private void startMiniMapReduceCluster(final int servers) throws IOException {
2344     if (mrCluster != null) {
2345       throw new IllegalStateException("MiniMRCluster is already running");
2346     }
2347     LOG.info("Starting mini mapreduce cluster...");
2348     setupClusterTestDir();
2349     createDirsAndSetProperties();
2350 
2351     forceChangeTaskLogDir();
2352 
2353     //// hadoop2 specific settings
2354     // Tests were failing because this process used 6GB of virtual memory and was getting killed.
2355     // We up the usable VM so that processes don't get killed.
2356     conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2357 
2358     // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
2359     // this avoids the problem by disabling speculative task execution in tests.
2360     conf.setBoolean("mapreduce.map.speculative", false);
2361     conf.setBoolean("mapreduce.reduce.speculative", false);
2362     ////
2363 
2364     // Allow the user to override FS URI for this map-reduce cluster to use.
2365     mrCluster = new MiniMRCluster(servers,
2366       FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
2367       null, null, new JobConf(this.conf));
2368     JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2369     if (jobConf == null) {
2370       jobConf = mrCluster.createJobConf();
2371     }
2372 
2373     jobConf.set("mapreduce.cluster.local.dir",
2374       conf.get("mapreduce.cluster.local.dir")); //Hadoop MiniMR overwrites this while it should not
2375     LOG.info("Mini mapreduce cluster started");
2376 
2377     // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
2378     // Our HBase MR jobs need several of these settings in order to properly run.  So we copy the
2379     // necessary config properties here.  YARN-129 required adding a few properties.
2380     conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
2381     // This is for MRv2 support; MR1 ignores it.
2382     conf.set("mapreduce.framework.name", "yarn");
2383     conf.setBoolean("yarn.is.minicluster", true);
2384     String rmAddress = jobConf.get("yarn.resourcemanager.address");
2385     if (rmAddress != null) {
2386       conf.set("yarn.resourcemanager.address", rmAddress);
2387     }
2388     String historyAddress = jobConf.get("mapreduce.jobhistory.address");
2389     if (historyAddress != null) {
2390       conf.set("mapreduce.jobhistory.address", historyAddress);
2391     }
2392     String schedulerAddress =
2393       jobConf.get("yarn.resourcemanager.scheduler.address");
2394     if (schedulerAddress != null) {
2395       conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2396     }
2397   }
2398 
2399   /**
2400    * Stops the previously started <code>MiniMRCluster</code>.
2401    */
2402   public void shutdownMiniMapReduceCluster() {
2403     if (mrCluster != null) {
2404       LOG.info("Stopping mini mapreduce cluster...");
2405       mrCluster.shutdown();
2406       mrCluster = null;
2407       LOG.info("Mini mapreduce cluster stopped");
2408     }
2409     // Restore configuration to point to local jobtracker
2410     conf.set("mapreduce.jobtracker.address", "local");
2411   }
2412 
2413   /**
2414    * Create a stubbed out RegionServerServices, mainly for getting FS.
2415    */
2416   public RegionServerServices createMockRegionServerService() throws IOException {
2417     return createMockRegionServerService((ServerName)null);
2418   }
2419 
2420   /**
2421    * Create a stubbed out RegionServerServices, mainly for getting FS.
2422    * This version is used by TestTokenAuthentication
2423    */
2424   public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
2425     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2426     rss.setFileSystem(getTestFileSystem());
2427     rss.setRpcServer(rpc);
2428     return rss;
2429   }
2430 
2431   /**
2432    * Create a stubbed out RegionServerServices, mainly for getting FS.
2433    * This version is used by TestOpenRegionHandler
2434    */
2435   public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2436     final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2437     rss.setFileSystem(getTestFileSystem());
2438     return rss;
2439   }
2440 
2441   /**
2442    * Switches the logger for the given class to DEBUG level.
2443    *
2444    * @param clazz  The class for which to switch to debug logging.
2445    */
2446   public void enableDebug(Class<?> clazz) {
2447     Log l = LogFactory.getLog(clazz);
2448     if (l instanceof Log4JLogger) {
2449       ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2450     } else if (l instanceof Jdk14Logger) {
2451       ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2452     }
2453   }
2454 
2455   /**
2456    * Expire the Master's session
2457    * @throws Exception
2458    */
2459   public void expireMasterSession() throws Exception {
2460     HMaster master = getMiniHBaseCluster().getMaster();
2461     expireSession(master.getZooKeeper(), false);
2462   }
2463 
2464   /**
2465    * Expire a region server's session
2466    * @param index which RS
2467    * @throws Exception
2468    */
2469   public void expireRegionServerSession(int index) throws Exception {
2470     HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2471     expireSession(rs.getZooKeeper(), false);
2472     decrementMinRegionServerCount();
2473   }
2474 
2475   private void decrementMinRegionServerCount() {
2476     // decrement the count for this.conf, for a newly spawned master
2477     // this.hbaseCluster shares this configuration too
2478     decrementMinRegionServerCount(getConfiguration());
2479 
2480     // each master thread keeps a copy of configuration
2481     for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2482       decrementMinRegionServerCount(master.getMaster().getConfiguration());
2483     }
2484   }
2485 
2486   private void decrementMinRegionServerCount(Configuration conf) {
2487     int currentCount = conf.getInt(
2488         ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2489     if (currentCount != -1) {
2490       conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
2491           Math.max(currentCount - 1, 1));
2492     }
2493   }
2494 
2495   public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
2496     expireSession(nodeZK, false);
2497   }
2498 
2499   @Deprecated
2500   public void expireSession(ZooKeeperWatcher nodeZK, Server server)
2501     throws Exception {
2502     expireSession(nodeZK, false);
2503   }
2504 
2505   /**
2506    * Expire a ZooKeeper session as recommended in ZooKeeper documentation
2507    * http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A4
2508    * There are issues when doing this:
2509    * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
2510    * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
2511    *
2512    * @param nodeZK - the ZK watcher to expire
2513    * @param checkStatus - true to check if we can create an HTable with the
2514    *                    current configuration.
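        * <p>Typical use, expiring one region server's session (a sketch):
        * <pre>
        * HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
        * TEST_UTIL.expireSession(rs.getZooKeeper(), false);
        * </pre>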
2515    */
2516   public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
2517     throws Exception {
2518     Configuration c = new Configuration(this.conf);
2519     String quorumServers = ZKConfig.getZKQuorumServersString(c);
2520     ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
2521     byte[] password = zk.getSessionPasswd();
2522     long sessionID = zk.getSessionId();
2523 
2524     // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
2525     //  so we create a first watcher to be sure that the
2526     //  event was sent. We expect that if our watcher receives the event,
2527     //  other watchers on the same machine will get it as well.
2528     // When we ask to close the connection, ZK does not close it before
2529     //  we receive all the events, so we don't have to capture the event;
2530     //  just closing the connection should be enough.
2531     ZooKeeper monitor = new ZooKeeper(quorumServers,
2532       1000, new org.apache.zookeeper.Watcher(){
2533       @Override
2534       public void process(WatchedEvent watchedEvent) {
2535         LOG.info("Monitor ZKW received event="+watchedEvent);
2536       }
2537     } , sessionID, password);
2538 
2539     // Making it expire
2540     ZooKeeper newZK = new ZooKeeper(quorumServers,
2541         1000, EmptyWatcher.instance, sessionID, password);
2542 
2543     // Ensure that we have a connection to the server before closing down; otherwise
2544     // the close-session event will be eaten before we ever reach the CONNECTING state.
2545     long start = System.currentTimeMillis();
2546     while (newZK.getState() != States.CONNECTED
2547          && System.currentTimeMillis() - start < 1000) {
2548        Thread.sleep(1);
2549     }
2550     newZK.close();
2551     LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
2552 
2553     // Now closing & waiting to be sure that the clients get it.
2554     monitor.close();
2555 
2556     if (checkStatus) {
2557       new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
2558     }
2559   }
2560 
2561   /**
2562    * Get the Mini HBase cluster.
2563    *
2564    * @return hbase cluster
2565    * @see #getHBaseClusterInterface()
2566    */
2567   public MiniHBaseCluster getHBaseCluster() {
2568     return getMiniHBaseCluster();
2569   }
2570 
2571   /**
2572    * Returns the HBaseCluster instance.
2573    * <p>The returned object can be any of the subclasses of HBaseCluster, and tests
2574    * referring to it should not assume that the cluster is a mini cluster or a
2575    * distributed one. If a test only works on a mini cluster, the specific
2576    * method {@link #getMiniHBaseCluster()} can be used instead without the
2577    * need to type-cast.
2578    */
2579   public HBaseCluster getHBaseClusterInterface() {
2580     //implementation note: we should rename this method as #getHBaseCluster(),
2581     //but this would require refactoring 90+ calls.
2582     return hbaseCluster;
2583   }
2584 
2585   /**
2586    * Get a Connection to the cluster.
2587    * Not thread-safe (This class needs a lot of work to make it thread-safe).
2588    * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
2589    * @throws IOException
2590    */
2591   public Connection getConnection() throws IOException {
2592     if (this.connection == null) {
2593       this.connection = ConnectionFactory.createConnection(this.conf);
2594     }
2595     return this.connection;
2596   }
2597 
2598   /**
2599    * Returns an Admin instance.
2600    * This instance is shared between HBaseTestingUtility instance users.
2601    * Closing it has no effect; it will be closed automatically when the
2602    * cluster shuts down.
2603    *
2604    * @return An Admin instance.
2605    * @throws IOException
2606    */
2607   public synchronized HBaseAdmin getHBaseAdmin()
2608   throws IOException {
2609     if (hbaseAdmin == null){
2610       this.hbaseAdmin = new HBaseAdminForTests(getConnection());
2611     }
2612     return hbaseAdmin;
2613   }
2614 
2615   private HBaseAdminForTests hbaseAdmin = null;
2616   private static class HBaseAdminForTests extends HBaseAdmin {
2617     public HBaseAdminForTests(Connection connection) throws MasterNotRunningException,
2618         ZooKeeperConnectionException, IOException {
2619       super(connection);
2620     }
2621 
2622     @Override
2623     public synchronized void close() throws IOException {
2624       LOG.warn("close() called on HBaseAdmin instance returned from " +
2625         "HBaseTestingUtility.getHBaseAdmin()");
2626     }
2627 
2628     private synchronized void close0() throws IOException {
2629       super.close();
2630     }
2631   }
2632 
2633   /**
2634    * Returns a ZooKeeperWatcher instance.
2635    * This instance is shared between HBaseTestingUtility instance users.
2636    * Don't close it; it will be closed automatically when the
2637    * cluster shuts down.
2638    *
2639    * @return The ZooKeeperWatcher instance.
2640    * @throws IOException
2641    */
2642   public synchronized ZooKeeperWatcher getZooKeeperWatcher()
2643     throws IOException {
2644     if (zooKeeperWatcher == null) {
2645       zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
2646         new Abortable() {
2647         @Override public void abort(String why, Throwable e) {
2648           throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
2649         }
2650         @Override public boolean isAborted() {return false;}
2651       });
2652     }
2653     return zooKeeperWatcher;
2654   }
2655   private ZooKeeperWatcher zooKeeperWatcher;
2656 
2657 
2658 
2659   /**
2660    * Closes the named region.
2661    *
2662    * @param regionName  The region to close.
2663    * @throws IOException
2664    */
2665   public void closeRegion(String regionName) throws IOException {
2666     closeRegion(Bytes.toBytes(regionName));
2667   }
2668 
2669   /**
2670    * Closes the named region.
2671    *
2672    * @param regionName  The region to close.
2673    * @throws IOException
2674    */
2675   public void closeRegion(byte[] regionName) throws IOException {
2676     getHBaseAdmin().closeRegion(regionName, null);
2677   }
2678 
2679   /**
2680    * Closes the region containing the given row.
2681    *
2682    * @param row  The row to find the containing region.
2683    * @param table  The table to find the region.
2684    * @throws IOException
2685    */
2686   public void closeRegionByRow(String row, RegionLocator table) throws IOException {
2687     closeRegionByRow(Bytes.toBytes(row), table);
2688   }
2689 
2690   /**
2691    * Closes the region containing the given row.
2692    *
2693    * @param row  The row to find the containing region.
2694    * @param table  The table to find the region.
2695    * @throws IOException
2696    */
2697   public void closeRegionByRow(byte[] row, RegionLocator table) throws IOException {
2698     HRegionLocation hrl = table.getRegionLocation(row);
2699     closeRegion(hrl.getRegionInfo().getRegionName());
2700   }
2701 
2702   /**
2703    * Retrieves a splittable region randomly from tableName
2704    *
2705    * @param tableName name of table
2706    * @param maxAttempts maximum number of attempts, unlimited for value of -1
2707    * @return the HRegion chosen, null if none was found within limit of maxAttempts
2708    */
2709   public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
2710     List<HRegion> regions = getHBaseCluster().getRegions(tableName);
2711     int regCount = regions.size();
2712     Set<Integer> attempted = new HashSet<Integer>();
2713     int idx;
2714     int attempts = 0;
2715     do {
2716       regions = getHBaseCluster().getRegions(tableName);
2717       if (regCount != regions.size()) {
2718         // if there was region movement, clear attempted Set
2719         attempted.clear();
2720       }
2721       regCount = regions.size();
2722       // There is a chance that, before we get the region for the table from an RS, the region may
2723       // be going for CLOSE.  This may be because online schema change is enabled.
2724       if (regCount > 0) {
2725         idx = random.nextInt(regCount);
2726         // if we have just tried this region, there is no need to try again
2727         if (attempted.contains(idx))
2728           continue;
2729         try {
2730           regions.get(idx).checkSplit();
2731           return regions.get(idx);
2732         } catch (Exception ex) {
2733           LOG.warn("Caught exception", ex);
2734           attempted.add(idx);
2735         }
2736       }
2737       attempts++;
2738     } while (maxAttempts == -1 || attempts < maxAttempts);
2739     return null;
2740   }
2741 
2742   public MiniZooKeeperCluster getZkCluster() {
2743     return zkCluster;
2744   }
2745 
2746   public void setZkCluster(MiniZooKeeperCluster zkCluster) {
2747     this.passedZkCluster = true;
2748     this.zkCluster = zkCluster;
2749     conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
2750   }
2751 
2752   public MiniDFSCluster getDFSCluster() {
2753     return dfsCluster;
2754   }
2755 
2756   public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
2757     if (dfsCluster != null && dfsCluster.isClusterUp()) {
2758       throw new IOException("DFSCluster is already running! Shut it down first.");
2759     }
2760     this.dfsCluster = cluster;
2761   }
2762 
2763   public FileSystem getTestFileSystem() throws IOException {
2764     return HFileSystem.get(conf);
2765   }
2766 
2767   /**
2768    * Wait until all regions in a table have been assigned.  Waits the default timeout
2769    * (30 seconds) before giving up.
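        * <p>For example, right after creating a table (a sketch):
        * <pre>
        * TEST_UTIL.waitTableAvailable(TableName.valueOf("testtable"));
        * </pre>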
2770    * @param table Table to wait on.
2771    * @throws InterruptedException
2772    * @throws IOException
2773    */
2774   public void waitTableAvailable(TableName table)
2775       throws InterruptedException, IOException {
2776     waitTableAvailable(getHBaseAdmin(), table.getName(), 30000);
2777   }
2778 
2779   public void waitTableAvailable(Admin admin, byte[] table)
2780       throws InterruptedException, IOException {
2781     waitTableAvailable(admin, table, 30000);
2782   }
2783 
2784   /**
2785    * Wait until all regions in a table have been assigned
2786    * @param table Table to wait on.
2787    * @param timeoutMillis Timeout.
2788    * @throws InterruptedException
2789    * @throws IOException
2790    */
2791   public void waitTableAvailable(byte[] table, long timeoutMillis)
2792   throws InterruptedException, IOException {
2793     waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
2794   }
2795 
2796   public void waitTableAvailable(Admin admin, byte[] table, long timeoutMillis)
2797   throws InterruptedException, IOException {
2798     long startWait = System.currentTimeMillis();
2799     while (!admin.isTableAvailable(TableName.valueOf(table))) {
2800       assertTrue("Timed out waiting for table to become available " +
2801         Bytes.toStringBinary(table),
2802         System.currentTimeMillis() - startWait < timeoutMillis);
2803       Thread.sleep(200);
2804     }
2805   }
2806 
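  // Illustrative usage sketch (not from the original source; the table name and
  // family below are hypothetical): a test typically creates a table and then
  // blocks until every region is assigned before issuing reads or writes.
  //
  //   HBaseTestingUtility util = new HBaseTestingUtility();
  //   util.startMiniCluster();
  //   util.createTable(TableName.valueOf("exampleTable"), Bytes.toBytes("f"));
  //   util.waitTableAvailable(TableName.valueOf("exampleTable"));
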
2807   /**
2808    * Waits for a table to be 'enabled'.  Enabled means that the table is set as 'enabled' and
2809    * all of its regions have been assigned.  Will time out after the default period (30 seconds).
2810    * @see #waitTableAvailable(byte[])
2811    * @param table Table to wait on.
2813    * @throws InterruptedException
2814    * @throws IOException
2815    */
2816   public void waitTableEnabled(TableName table)
2817       throws InterruptedException, IOException {
2818     waitTableEnabled(getHBaseAdmin(), table.getName(), 30000);
2819   }
2820 
2821   public void waitTableEnabled(Admin admin, byte[] table)
2822       throws InterruptedException, IOException {
2823     waitTableEnabled(admin, table, 30000);
2824   }
2825 
2826   /**
2827    * Waits for a table to be 'enabled'.  Enabled means that the table is set as 'enabled' and
2828    * all of its regions have been assigned.
2829    * @see #waitTableAvailable(byte[])
2830    * @param table Table to wait on.
2831    * @param timeoutMillis Time to wait on it being marked enabled.
2832    * @throws InterruptedException
2833    * @throws IOException
2834    */
2835   public void waitTableEnabled(byte[] table, long timeoutMillis)
2836   throws InterruptedException, IOException {
2837     waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
2838   }
2839 
2840   public void waitTableEnabled(Admin admin, byte[] table, long timeoutMillis)
2841   throws InterruptedException, IOException {
2842     TableName tableName = TableName.valueOf(table);
2843     long startWait = System.currentTimeMillis();
2844     waitTableAvailable(admin, table, timeoutMillis);
2845     while (!admin.isTableEnabled(tableName)) {
2846       assertTrue("Timed out waiting for table to become available and enabled " +
2847          Bytes.toStringBinary(table),
2848          System.currentTimeMillis() - startWait < timeoutMillis);
2849       Thread.sleep(200);
2850     }
2851     // Finally make sure all regions are fully open and online out on the cluster. Regions may be
2852     // in the hbase:meta table and almost open on all regionservers, but setting the region
2853     // online in the regionserver is the very last thing done and can take a little while to happen.
2854     // Below we do a get.  The get will retry if a NotServingRegionException or a
2855     // RegionOpeningException is thrown.  It is crude, but when it completes all regions will be online.
2856     try {
2857       Canary.sniff(admin, tableName);
2858     } catch (Exception e) {
2859       throw new IOException(e);
2860     }
2861   }
2862 
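  // Sketch of the intended disable/alter/enable pattern (the tableName variable
  // is hypothetical and the table is assumed to exist already):
  //
  //   Admin admin = util.getHBaseAdmin();
  //   admin.disableTable(tableName);
  //   // ... alter the table ...
  //   admin.enableTable(tableName);
  //   util.waitTableEnabled(tableName.getName(), 30000);
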
2863   /**
2864    * Waits for a table to be 'disabled'.  Disabled means that the table is set as 'disabled'.
2865    * Will time out after the default period (30 seconds).
2866    * @param table Table to wait on.
2867    * @throws InterruptedException
2868    * @throws IOException
2869    */
2870   public void waitTableDisabled(byte[] table)
2871       throws InterruptedException, IOException {
2872     waitTableDisabled(getHBaseAdmin(), table, 30000);
2873   }
2874 
2875   public void waitTableDisabled(Admin admin, byte[] table)
2876       throws InterruptedException, IOException {
2877     waitTableDisabled(admin, table, 30000);
2878   }
2879 
2880   /**
2881    * Waits for a table to be 'disabled'.  Disabled means that the table is set as 'disabled'.
2882    * @param table Table to wait on.
2883    * @param timeoutMillis Time to wait on it being marked disabled.
2884    * @throws InterruptedException
2885    * @throws IOException
2886    */
2887   public void waitTableDisabled(byte[] table, long timeoutMillis)
2888       throws InterruptedException, IOException {
2889     waitTableDisabled(getHBaseAdmin(), table, timeoutMillis);
2890   }
2891 
2892   public void waitTableDisabled(Admin admin, byte[] table, long timeoutMillis)
2893       throws InterruptedException, IOException {
2894     TableName tableName = TableName.valueOf(table);
2895     long startWait = System.currentTimeMillis();
2896     while (!admin.isTableDisabled(tableName)) {
2897       assertTrue("Timed out waiting for table to become disabled " +
2898               Bytes.toStringBinary(table),
2899           System.currentTimeMillis() - startWait < timeoutMillis);
2900       Thread.sleep(200);
2901     }
2902   }
2903 
2904   /**
2905    * Make sure that at least the specified number of region servers
2906    * are running
2907    * @param num minimum number of region servers that should be running
2908    * @return true if we started some servers
2909    * @throws IOException
2910    */
2911   public boolean ensureSomeRegionServersAvailable(final int num)
2912       throws IOException {
2913     boolean startedServer = false;
2914     MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
2915     for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
2916       LOG.info("Started new server=" + hbaseCluster.startRegionServer());
2917       startedServer = true;
2918     }
2919 
2920     return startedServer;
2921   }
2922 
2923 
2924   /**
2925    * Make sure that at least the specified number of region servers
2926    * are running. We don't count the ones that are currently stopping or are
2927    * stopped.
2928    * @param num minimum number of region servers that should be running
2929    * @return true if we started some servers
2930    * @throws IOException
2931    */
2932   public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
2933     throws IOException {
2934     boolean startedServer = ensureSomeRegionServersAvailable(num);
2935 
2936     int nonStoppedServers = 0;
2937     for (JVMClusterUtil.RegionServerThread rst :
2938       getMiniHBaseCluster().getRegionServerThreads()) {
2939 
2940       HRegionServer hrs = rst.getRegionServer();
2941       if (hrs.isStopping() || hrs.isStopped()) {
2942         LOG.info("A region server is stopped or stopping: " + hrs);
2943       } else {
2944         nonStoppedServers++;
2945       }
2946     }
2947     for (int i=nonStoppedServers; i<num; ++i) {
2948       LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
2949       startedServer = true;
2950     }
2951     return startedServer;
2952   }
2953 
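  // Sketch (hypothetical scenario): a recovery test can demand a minimum number
  // of live, non-stopping servers before deliberately killing one of them.
  //
  //   util.ensureSomeNonStoppedRegionServersAvailable(3);
  //   util.getMiniHBaseCluster().abortRegionServer(0);
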
2954 
2955   /**
2956    * This method clones the passed <code>c</code> configuration, setting a new
2957    * user into the clone.  Use it when getting new instances of FileSystem.  Only
2958    * works for DistributedFileSystem.
2959    * @param c Initial configuration
2960    * @param differentiatingSuffix Suffix to differentiate this user from others.
2961    * @return A new configuration instance with a different user set into it.
2962    * @throws IOException
2963    */
2964   public static User getDifferentUser(final Configuration c,
2965     final String differentiatingSuffix)
2966   throws IOException {
2967     FileSystem currentfs = FileSystem.get(c);
2968     if (!(currentfs instanceof DistributedFileSystem)) {
2969       return User.getCurrent();
2970     }
2971     // Else distributed filesystem.  Make a new instance per daemon.  Below
2972     // code is taken from the AppendTestUtil over in hdfs.
2973     String username = User.getCurrent().getName() +
2974       differentiatingSuffix;
2975     User user = User.createUserForTesting(c, username,
2976         new String[]{"supergroup"});
2977     return user;
2978   }
2979 
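  // Sketch of how a per-daemon FileSystem instance might be obtained with the
  // differentiated user (the ".wal" suffix is hypothetical; uses
  // java.security.PrivilegedExceptionAction):
  //
  //   final Configuration c = util.getConfiguration();
  //   User user = getDifferentUser(c, ".wal");
  //   FileSystem fs = user.runAs(new PrivilegedExceptionAction<FileSystem>() {
  //     @Override
  //     public FileSystem run() throws Exception {
  //       return FileSystem.get(c);
  //     }
  //   });
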
2980   public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster)
2981       throws IOException {
2982     NavigableSet<String> online = new TreeSet<String>();
2983     for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
2984       try {
2985         for (HRegionInfo region :
2986             ProtobufUtil.getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
2987           online.add(region.getRegionNameAsString());
2988         }
2989       } catch (RegionServerStoppedException e) {
2990         // That's fine.
2991       }
2992     }
2993     for (MasterThread mt : cluster.getLiveMasterThreads()) {
2994       try {
2995         for (HRegionInfo region :
2996             ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) {
2997           online.add(region.getRegionNameAsString());
2998         }
2999       } catch (RegionServerStoppedException e) {
3000         // That's fine.
3001       } catch (ServerNotRunningYetException e) {
3002         // That's fine.
3003       }
3004     }
3005     return online;
3006   }
3007 
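  // Sketch: capture the set of online regions before and after a disruptive
  // operation and diff the two sets (the operation itself is left abstract here).
  //
  //   NavigableSet<String> before = getAllOnlineRegions(cluster);
  //   // ... move regions, restart a server, etc. ...
  //   NavigableSet<String> after = getAllOnlineRegions(cluster);
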
3008   /**
3009    * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append it is hard-coded to 5
3010    * and makes tests linger.  Here is the exception you'll see:
3011    * <pre>
3012    * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/wal.1276627923013 block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683 failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
3013    * </pre>
3014    * @param stream A DFSClient.DFSOutputStream.
3015    * @param max
3016    * @throws NoSuchFieldException
3017    * @throws SecurityException
3018    * @throws IllegalAccessException
3019    * @throws IllegalArgumentException
3020    */
3021   public static void setMaxRecoveryErrorCount(final OutputStream stream,
3022       final int max) {
3023     try {
3024       Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
3025       for (Class<?> clazz: clazzes) {
3026         String className = clazz.getSimpleName();
3027         if (className.equals("DFSOutputStream")) {
3028           if (clazz.isInstance(stream)) {
3029             Field maxRecoveryErrorCountField =
3030               stream.getClass().getDeclaredField("maxRecoveryErrorCount");
3031             maxRecoveryErrorCountField.setAccessible(true);
3032             maxRecoveryErrorCountField.setInt(stream, max);
3033             break;
3034           }
3035         }
3036       }
3037     } catch (Exception e) {
3038       LOG.info("Could not set max recovery field", e);
3039     }
3040   }
3041 
3042   /**
3043    * Wait until all regions for a table in hbase:meta have a non-empty
3044    * info:server, up to 60 seconds. This means all regions have been deployed, the
3045    * master has been informed, and hbase:meta has been updated with each region's
3046    * deployed server.
3047    * @param tableName the table name
3048    * @throws IOException
3049    */
3050   public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
3051     waitUntilAllRegionsAssigned(tableName, 60000);
3052   }
3053 
3054   /**
3055    * Wait until all regions for a table in hbase:meta have a non-empty
3056    * info:server, or until timeout.  This means all regions have been deployed, the
3057    * master has been informed, and hbase:meta has been updated with each region's
3058    * deployed server.
3059    * @param tableName the table name
3060    * @param timeout timeout, in milliseconds
3061    * @throws IOException
3062    */
3063   public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
3064       throws IOException {
3065     final Table meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
3066     try {
3067       waitFor(timeout, 200, true, new Predicate<IOException>() {
3068         @Override
3069         public boolean evaluate() throws IOException {
3070           boolean allRegionsAssigned = true;
3071           Scan scan = new Scan();
3072           scan.addFamily(HConstants.CATALOG_FAMILY);
3073           ResultScanner s = meta.getScanner(scan);
3074           try {
3075             Result r;
3076             while ((r = s.next()) != null) {
3077               byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
3078               HRegionInfo info = HRegionInfo.parseFromOrNull(b);
3079               if (info != null && info.getTable().equals(tableName)) {
3080                 b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
3081                 allRegionsAssigned &= (b != null);
3082               }
3083             }
3084           } finally {
3085             s.close();
3086           }
3087           return allRegionsAssigned;
3088         }
3089       });
3090     } finally {
3091       meta.close();
3092     }
3093 
3094     // check from the master state if we are using a mini cluster
3095     if (!getHBaseClusterInterface().isDistributedCluster()) {
3096       // So, all regions are in the meta table but make sure master knows of the assignments before
3097       // returning -- sometimes this can lag.
3098       HMaster master = getHBaseCluster().getMaster();
3099       final RegionStates states = master.getAssignmentManager().getRegionStates();
3100       waitFor(timeout, 200, new Predicate<IOException>() {
3101         @Override
3102         public boolean evaluate() throws IOException {
3103           List<HRegionInfo> hris = states.getRegionsOfTable(tableName);
3104           return hris != null && !hris.isEmpty();
3105         }
3106       });
3107     }
3108   }
3109 
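  // Typical usage sketch (tableName and family are hypothetical):
  //
  //   util.createTable(tableName, family);
  //   util.waitUntilAllRegionsAssigned(tableName, 60000);
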
3110   /**
3111    * Do a small get/scan against one store. This is required because a store
3112    * has no actual methods for querying itself, and relies on StoreScanner.
3113    */
3114   public static List<Cell> getFromStoreFile(HStore store,
3115                                                 Get get) throws IOException {
3116     Scan scan = new Scan(get);
3117     InternalScanner scanner = (InternalScanner) store.getScanner(scan,
3118         scan.getFamilyMap().get(store.getFamily().getName()),
3119         // originally MultiVersionConsistencyControl.resetThreadReadPoint() was called to set
3120         // readpoint 0.
3121         0);
3122 
3123     List<Cell> result = new ArrayList<Cell>();
3124     scanner.next(result);
3125     if (!result.isEmpty()) {
3126       // verify that we are on the row we want:
3127       Cell kv = result.get(0);
3128       if (!CellUtil.matchingRow(kv, get.getRow())) {
3129         result.clear();
3130       }
3131     }
3132     scanner.close();
3133     return result;
3134   }
3135 
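  // Sketch (region, family, put, and row are hypothetical; assumes HRegion still
  // exposes flushcache() and that the family's store is an HStore in this code base):
  //
  //   region.put(put);
  //   region.flushcache();
  //   HStore store = (HStore) region.getStore(family);
  //   List<Cell> cells = getFromStoreFile(store, new Get(row));
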
3136   /**
3137    * Create region split keys between startKey and endKey
3138    *
3139    * @param startKey first key of the key range
3140    * @param endKey last key of the key range
3141    * @param numRegions the number of regions to be created. It has to be greater than 3.
3142    * @return numRegions region start keys; the first entry is the empty byte array
3143    */
3144   public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
3145     assertTrue(numRegions > 3);
3146     byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
3147     byte [][] result = new byte[tmpSplitKeys.length+1][];
3148     System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
3149     result[0] = HConstants.EMPTY_BYTE_ARRAY;
3150     return result;
3151   }
3152 
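  // Worked example: for numRegions = 10 the call below yields 10 start keys,
  // the first of which is HConstants.EMPTY_BYTE_ARRAY; Bytes.split supplies the
  // remaining 9 keys between "aaa" and "zzz" (endpoints included).
  //
  //   byte[][] startKeys =
  //       util.getRegionSplitStartKeys(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 10);
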
3153   /**
3154    * Do a small get/scan against one store. This is required because a store
3155    * has no actual methods for querying itself, and relies on StoreScanner.
3156    */
3157   public static List<Cell> getFromStoreFile(HStore store,
3158                                                 byte [] row,
3159                                                 NavigableSet<byte[]> columns
3160                                                 ) throws IOException {
3161     Get get = new Get(row);
3162     Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
3163     s.put(store.getFamily().getName(), columns);
3164 
3165     return getFromStoreFile(store,get);
3166   }
3167 
3168   /**
3169    * Gets a ZooKeeperWatcher.
3170    * @param TEST_UTIL the testing utility whose configuration is used for the connection
3171    */
3172   public static ZooKeeperWatcher getZooKeeperWatcher(
3173       HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
3174       IOException {
3175     ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
3176         "unittest", new Abortable() {
3177           boolean aborted = false;
3178 
3179           @Override
3180           public void abort(String why, Throwable e) {
3181             aborted = true;
3182             throw new RuntimeException("Fatal ZK error, why=" + why, e);
3183           }
3184 
3185           @Override
3186           public boolean isAborted() {
3187             return aborted;
3188           }
3189         });
3190     return zkw;
3191   }
3192 
3193   /**
3194    * Creates a znode with OPENED state.
3195    * @param TEST_UTIL the testing utility supplying the ZooKeeper configuration
3196    * @param region the region whose znode is transitioned
3197    * @param serverName the server the region is notionally opened on
3198    * @return the ZooKeeperWatcher used to create and transition the znode
3199    * @throws IOException
3200    * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
3201    * @throws KeeperException
3202    * @throws NodeExistsException
3203    */
3204   public static ZooKeeperWatcher createAndForceNodeToOpenedState(
3205       HBaseTestingUtility TEST_UTIL, HRegion region,
3206       ServerName serverName) throws ZooKeeperConnectionException,
3207       IOException, KeeperException, NodeExistsException {
3208     ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
3209     ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
3210     int version = ZKAssign.transitionNodeOpening(zkw, region
3211         .getRegionInfo(), serverName);
3212     ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
3213         version);
3214     return zkw;
3215   }
3216 
3217   public static void assertKVListsEqual(String additionalMsg,
3218       final List<? extends Cell> expected,
3219       final List<? extends Cell> actual) {
3220     final int eLen = expected.size();
3221     final int aLen = actual.size();
3222     final int minLen = Math.min(eLen, aLen);
3223 
3224     int i;
3225     for (i = 0; i < minLen
3226         && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
3227         ++i) {}
3228 
3229     if (additionalMsg == null) {
3230       additionalMsg = "";
3231     }
3232     if (!additionalMsg.isEmpty()) {
3233       additionalMsg = ". " + additionalMsg;
3234     }
3235 
3236     if (eLen != aLen || i != minLen) {
3237       throw new AssertionError(
3238           "Expected and actual KV arrays differ at position " + i + ": " +
3239           safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
3240           safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
3241     }
3242   }
3243 
3244   public static <T> String safeGetAsStr(List<T> lst, int i) {
3245     if (0 <= i && i < lst.size()) {
3246       return lst.get(i).toString();
3247     } else {
3248       return "<out_of_range>";
3249     }
3250   }
3251 
3252   public String getClusterKey() {
3253     return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
3254         + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
3255         + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
3256             HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
3257   }
3258 
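  // The key has the form quorum:clientPort:znodeParent, e.g.
  // "localhost:21818:/hbase" for a mini cluster (the port shown is illustrative).
  // A replication test might hand it to ReplicationAdmin as the peer address
  // (sketch; the peer id and the second utility instance are hypothetical):
  //
  //   new ReplicationAdmin(conf).addPeer("peer1", otherClusterUtil.getClusterKey());
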
3259   /** Creates a random table with the given parameters */
3260   public HTable createRandomTable(String tableName,
3261       final Collection<String> families,
3262       final int maxVersions,
3263       final int numColsPerRow,
3264       final int numFlushes,
3265       final int numRegions,
3266       final int numRowsPerFlush)
3267       throws IOException, InterruptedException {
3268 
3269     LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
3270         " regions, " + numFlushes + " storefiles per region, " +
3271         numRowsPerFlush + " rows per flush, maxVersions=" +  maxVersions +
3272         "\n");
3273 
3274     final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
3275     final int numCF = families.size();
3276     final byte[][] cfBytes = new byte[numCF][];
3277     {
3278       int cfIndex = 0;
3279       for (String cf : families) {
3280         cfBytes[cfIndex++] = Bytes.toBytes(cf);
3281       }
3282     }
3283 
3284     final int actualStartKey = 0;
3285     final int actualEndKey = Integer.MAX_VALUE;
3286     final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
3287     final int splitStartKey = actualStartKey + keysPerRegion;
3288     final int splitEndKey = actualEndKey - keysPerRegion;
3289     final String keyFormat = "%08x";
3290     final HTable table = createTable(tableName, cfBytes,
3291         maxVersions,
3292         Bytes.toBytes(String.format(keyFormat, splitStartKey)),
3293         Bytes.toBytes(String.format(keyFormat, splitEndKey)),
3294         numRegions);
3295 
3296     if (hbaseCluster != null) {
3297       getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
3298     }
3299 
3300     for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
3301       for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
3302         final byte[] row = Bytes.toBytes(String.format(keyFormat,
3303             actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
3304 
3305         Put put = new Put(row);
3306         Delete del = new Delete(row);
3307         for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
3308           final byte[] cf = cfBytes[rand.nextInt(numCF)];
3309           final long ts = rand.nextInt();
3310           final byte[] qual = Bytes.toBytes("col" + iCol);
3311           if (rand.nextBoolean()) {
3312             final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
3313                 "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
3314                 ts + "_random_" + rand.nextLong());
3315             put.add(cf, qual, ts, value);
3316           } else if (rand.nextDouble() < 0.8) {
3317             del.deleteColumn(cf, qual, ts);
3318           } else {
3319             del.deleteColumns(cf, qual, ts);
3320           }
3321         }
3322 
3323         if (!put.isEmpty()) {
3324           table.put(put);
3325         }
3326 
3327         if (!del.isEmpty()) {
3328           table.delete(del);
3329         }
3330       }
3331       LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
3332       table.flushCommits();
3333       if (hbaseCluster != null) {
3334         getMiniHBaseCluster().flushcache(table.getName());
3335       }
3336     }
3337 
3338     return table;
3339   }
3340 
3341   private static final int MIN_RANDOM_PORT = 0xc000;
3342   private static final int MAX_RANDOM_PORT = 0xfffe;
3343   private static Random random = new Random();
3344 
3345   /**
3346    * Returns a random port. These ports cannot be registered with IANA and are
3347    * intended for dynamic allocation (see http://bit.ly/dynports).
3348    */
3349   public static int randomPort() {
3350     return MIN_RANDOM_PORT
3351         + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
3352   }
3353 
3354   /**
3355    * Returns a random free port and marks that port as taken. Not thread-safe. Expected to be
3356    * called from single-threaded test setup code.
3357    */
3358   public static int randomFreePort() {
3359     int port = 0;
3360     do {
3361       port = randomPort();
3362       if (takenRandomPorts.contains(port)) {
3363         continue;
3364       }
3365       takenRandomPorts.add(port);
3366 
3367       try {
3368         ServerSocket sock = new ServerSocket(port);
3369         sock.close();
3370       } catch (IOException ex) {
3371         port = 0;
3372       }
3373     } while (port == 0);
3374     return port;
3375   }
3376 
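  // Sketch: parallel tests often pin each component to its own free port so
  // mini clusters do not collide; here the master info port is used as an example.
  //
  //   conf.setInt(HConstants.MASTER_INFO_PORT, randomFreePort());
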
3377 
3378   public static String randomMultiCastAddress() {
3379     return "226.1.1." + random.nextInt(254);
3380   }
3381 
3382 
3383 
3384   public static void waitForHostPort(String host, int port)
3385       throws IOException {
3386     final int maxTimeMs = 10000;
3387     final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3388     IOException savedException = null;
3389     LOG.info("Waiting for server at " + host + ":" + port);
3390     for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3391       try {
3392         Socket sock = new Socket(InetAddress.getByName(host), port);
3393         sock.close();
3394         savedException = null;
3395         LOG.info("Server at " + host + ":" + port + " is available");
3396         break;
3397       } catch (UnknownHostException e) {
3398         throw new IOException("Failed to look up " + host, e);
3399       } catch (IOException e) {
3400         savedException = e;
3401       }
3402       Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3403     }
3404 
3405     if (savedException != null) {
3406       throw savedException;
3407     }
3408   }
3409 
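  // Usage sketch: block until a freshly started daemon is accepting connections
  // (the infoPort variable is hypothetical).
  //
  //   waitForHostPort("localhost", infoPort);
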
3410   /**
3411    * Creates a pre-split table for load testing. If the table already exists,
3412    * logs a warning and continues.
3413    * @return the number of regions the table was split into
3414    */
3415   public static int createPreSplitLoadTestTable(Configuration conf,
3416       TableName tableName, byte[] columnFamily, Algorithm compression,
3417       DataBlockEncoding dataBlockEncoding) throws IOException {
3418     return createPreSplitLoadTestTable(conf, tableName,
3419       columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1,
3420       Durability.USE_DEFAULT);
3421   }

3422   /**
3423    * Creates a pre-split table for load testing. If the table already exists,
3424    * logs a warning and continues.
3425    * @return the number of regions the table was split into
3426    */
3427   public static int createPreSplitLoadTestTable(Configuration conf,
3428       TableName tableName, byte[] columnFamily, Algorithm compression,
3429       DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
3430       Durability durability)
3431           throws IOException {
3432     HTableDescriptor desc = new HTableDescriptor(tableName);
3433     desc.setDurability(durability);
3434     desc.setRegionReplication(regionReplication);
3435     HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
3436     hcd.setDataBlockEncoding(dataBlockEncoding);
3437     hcd.setCompressionType(compression);
3438     return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
3439   }
3440 
3441   /**
3442    * Creates a pre-split table for load testing. If the table already exists,
3443    * logs a warning and continues.
3444    * @return the number of regions the table was split into
3445    */
3446   public static int createPreSplitLoadTestTable(Configuration conf,
3447       HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
3448     return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
3449   }
3450 
3451   /**
3452    * Creates a pre-split table for load testing. If the table already exists,
3453    * logs a warning and continues.
3454    * @return the number of regions the table was split into
3455    */
3456   public static int createPreSplitLoadTestTable(Configuration conf,
3457       HTableDescriptor desc, HColumnDescriptor hcd, int numRegionsPerServer) throws IOException {
3458     if (!desc.hasFamily(hcd.getName())) {
3459       desc.addFamily(hcd);
3460     }
3461 
3462     int totalNumberOfRegions = 0;
3463     Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
3464     Admin admin = unmanagedConnection.getAdmin();
3465 
3466     try {
3467       // Create a table with pre-split regions.
3468       // The number of splits is set as:
3469       //    (region servers * regions per region server).
3470       int numberOfServers = admin.getClusterStatus().getServers().size();
3471       if (numberOfServers == 0) {
3472         throw new IllegalStateException("No live regionservers");
3473       }
3474 
3475       totalNumberOfRegions = numberOfServers * numRegionsPerServer;
3476       LOG.info("Number of live regionservers: " + numberOfServers + ", " +
3477           "pre-splitting table into " + totalNumberOfRegions + " regions " +
3478           "(regions per server: " + numRegionsPerServer + ")");
3479 
3480       byte[][] splits = new RegionSplitter.HexStringSplit().split(
3481           totalNumberOfRegions);
3482 
3483       admin.createTable(desc, splits);
3484     } catch (MasterNotRunningException e) {
3485       LOG.error("Master not running", e);
3486       throw new IOException(e);
3487     } catch (TableExistsException e) {
3488       LOG.warn("Table " + desc.getTableName() +
3489           " already exists, continuing");
3490     } finally {
3491       admin.close();
3492       unmanagedConnection.close();
3493     }
3494     return totalNumberOfRegions;
3495   }
3496 
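  // Usage sketch grounded in the overload above (table and family names are
  // hypothetical):
  //
  //   int regions = createPreSplitLoadTestTable(conf, TableName.valueOf("loadtest"),
  //       Bytes.toBytes("cf"), Compression.Algorithm.NONE, DataBlockEncoding.NONE);
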
3497   public static int getMetaRSPort(Configuration conf) throws IOException {
3498     RegionLocator table = new HTable(conf, TableName.META_TABLE_NAME);
3499     HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
3500     table.close();
3501     return hloc.getPort();
3502   }
3503 
3504   /**
3505    *  Due to an async racing issue, a region may not be in
3506    *  a region server's online region list yet after
3507    *  the assignment znode is deleted and the new assignment
3508    *  is recorded in the master.
3509    */
3510   public void assertRegionOnServer(
3511       final HRegionInfo hri, final ServerName server,
3512       final long timeout) throws IOException, InterruptedException {
3513     long timeoutTime = System.currentTimeMillis() + timeout;
3514     while (true) {
3515       List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3516       if (regions.contains(hri)) return;
3517       long now = System.currentTimeMillis();
3518       if (now > timeoutTime) break;
3519       Thread.sleep(10);
3520     }
3521     fail("Could not find region " + hri.getRegionNameAsString()
3522       + " on server " + server);
3523   }
3524 
3525   /**
3526    * Check to make sure the region is open on the specified
3527    * region server, but not on any other one.
3528    */
3529   public void assertRegionOnlyOnServer(
3530       final HRegionInfo hri, final ServerName server,
3531       final long timeout) throws IOException, InterruptedException {
3532     long timeoutTime = System.currentTimeMillis() + timeout;
3533     while (true) {
3534       List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3535       if (regions.contains(hri)) {
3536         List<JVMClusterUtil.RegionServerThread> rsThreads =
3537           getHBaseCluster().getLiveRegionServerThreads();
3538         for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
3539           HRegionServer rs = rsThread.getRegionServer();
3540           if (server.equals(rs.getServerName())) {
3541             continue;
3542           }
3543           Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
3544           for (HRegion r: hrs) {
3545             assertTrue("Region should not be double assigned",
3546               r.getRegionId() != hri.getRegionId());
3547           }
3548         }
3549         return; // good, we are happy
3550       }
3551       long now = System.currentTimeMillis();
3552       if (now > timeoutTime) break;
3553       Thread.sleep(10);
3554     }
3555     fail("Could not find region " + hri.getRegionNameAsString()
3556       + " on server " + server);
3557   }
3558 
3559   public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
3560       throws IOException {
3561     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
3562     htd.addFamily(hcd);
3563     HRegionInfo info =
3564         new HRegionInfo(TableName.valueOf(tableName), null, null, false);
3565     HRegion region =
3566         HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
3567     return region;
3568   }
3569 
3570   public void setFileSystemURI(String fsURI) {
3571     FS_URI = fsURI;
3572   }
3573 
3574   /**
3575    * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
3576    */
3577   public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
3578       throws E {
3579     return Waiter.waitFor(this.conf, timeout, predicate);
3580   }
3581 
3582   /**
3583    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
3584    */
3585   public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
3586       throws E {
3587     return Waiter.waitFor(this.conf, timeout, interval, predicate);
3588   }
3589 
3590   /**
3591    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
3592    */
3593   public <E extends Exception> long waitFor(long timeout, long interval,
3594       boolean failIfTimeout, Predicate<E> predicate) throws E {
3595     return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
3596   }
3597 
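  // Sketch of an ad-hoc predicate used with the wrappers above (the condition
  // shown, at least two live servers, is illustrative):
  //
  //   util.waitFor(60000, 1000, new Waiter.Predicate<IOException>() {
  //     @Override
  //     public boolean evaluate() throws IOException {
  //       return util.getHBaseAdmin().getClusterStatus().getServersSize() >= 2;
  //     }
  //   });
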
3598   /**
3599    * Returns a {@link Predicate} for checking that there are no regions in transition in master
3600    */
3601   public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
3602     return new Waiter.Predicate<Exception>() {
3603       @Override
3604       public boolean evaluate() throws Exception {
3605         final RegionStates regionStates = getMiniHBaseCluster().getMaster()
3606             .getAssignmentManager().getRegionStates();
3607         return !regionStates.isRegionsInTransition();
3608       }
3609     };
3610   }
3611 
3612   /**
3613    * Returns a {@link Predicate} for checking that table is enabled
3614    */
3615   public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
3616     return new Waiter.Predicate<Exception>() {
3617      @Override
3618      public boolean evaluate() throws Exception {
3619        return getHBaseAdmin().isTableEnabled(tableName);
3620       }
3621     };
3622   }
3623 
3624   /**
3625    * Create a set of column descriptors with the combination of compression,
3626    * encoding, bloom codecs available.
3627    * @return the list of column descriptors
3628    */
3629   public static List<HColumnDescriptor> generateColumnDescriptors() {
3630     return generateColumnDescriptors("");
3631   }
3632 
3633   /**
3634    * Create a set of column descriptors with the combination of compression,
3635    * encoding, bloom codecs available.
3636    * @param prefix family names prefix
3637    * @return the list of column descriptors
3638    */
3639   public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
3640     List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
3641     long familyId = 0;
3642     for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
3643       for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
3644         for (BloomType bloomType: BloomType.values()) {
3645           String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
3646           HColumnDescriptor htd = new HColumnDescriptor(name);
3647           htd.setCompressionType(compressionType);
3648           htd.setDataBlockEncoding(encodingType);
3649           htd.setBloomFilterType(bloomType);
3650           htds.add(htd);
3651           familyId++;
3652         }
3653       }
3654     }
3655     return htds;
3656   }
3657 
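  // Sketch: smoke-test every compression/encoding/bloom combination by adding
  // all generated families to one descriptor (the table name is hypothetical):
  //
  //   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("codecSmoke"));
  //   for (HColumnDescriptor hcd : generateColumnDescriptors("smoke")) {
  //     htd.addFamily(hcd);
  //   }
  //   util.getHBaseAdmin().createTable(htd);
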
3658   /**
3659    * Get supported compression algorithms.
3660    * @return supported compression algorithms.
3661    */
3662   public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
3663     String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
3664     List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
3665     for (String algoName : allAlgos) {
3666       try {
3667         Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
3668         algo.getCompressor();
3669         supportedAlgos.add(algo);
3670       } catch (Throwable t) {
3671         // this algo is not available
3672       }
3673     }
3674     return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
3675   }
3676 
3677   /**
3678    * Wait until no regions in transition.
3679    * @param timeout How long to wait.
3680    * @throws Exception
3681    */
3682   public void waitUntilNoRegionsInTransition(final long timeout) throws Exception {
3683     waitFor(timeout, predicateNoRegionsInTransition());
3684   }
3685 }