/**
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Set;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ReadWriteConsistencyControl;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.zookeeper.ZooKeeper;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.DistributedFileSystem;

import com.google.common.base.Preconditions;
/**
 * Facility for testing HBase. Added as a tool to abet JUnit4 testing.  Replaces
 * old HBaseTestCase and HBaseClusterTestCase functionality.
 * Create an instance and keep it around while doing HBase testing.  This class
 * is meant to be your one-stop shop for anything you might need for testing.
 * Manages one cluster at a time only.  Depends on log4j being on the classpath
 * and hbase-site.xml for logging and test-run configuration.  It does not set
 * logging levels nor make changes to configuration parameters.
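 *
 * <p>Illustrative usage sketch (not from the original documentation; assumes a
 * JUnit4 test with the usual Bytes helpers available):
 * <pre>
 *   HBaseTestingUtility util = new HBaseTestingUtility();
 *   util.startMiniCluster();
 *   try {
 *     HTable table = util.createTable(Bytes.toBytes("test"), Bytes.toBytes("family"));
 *     util.loadTable(table, Bytes.toBytes("family"));
 *     // ... run assertions against the table ...
 *   } finally {
 *     util.shutdownMiniCluster();
 *   }
 * </pre>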
 */
public class HBaseTestingUtility {
  private final static Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
  private final Configuration conf;
  private MiniZooKeeperCluster zkCluster = null;
  private MiniDFSCluster dfsCluster = null;
  private MiniHBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;
  // If non-null, then already a cluster running.
  private File clusterTestBuildDir = null;
  private HBaseAdmin hbaseAdmin = null;

  /**
   * System property key to get test directory value.
   */
  public static final String TEST_DIRECTORY_KEY = "test.build.data";

  /**
   * Default parent directory for test output.
   */
  public static final String DEFAULT_TEST_DIRECTORY = "target/build/data";

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    this.conf = conf;
  }

  /**
   * @return Instance of Configuration.
   */
  public Configuration getConfiguration() {
    return this.conf;
  }

  /**
   * @return Where to write test data on local filesystem; usually
   * {@link #DEFAULT_TEST_DIRECTORY}
   * @see #setupClusterTestBuildDir()
   */
  public static Path getTestDir() {
    return new Path(System.getProperty(TEST_DIRECTORY_KEY,
      DEFAULT_TEST_DIRECTORY));
  }

  /**
   * @param subdirName
   * @return Path to a subdirectory named <code>subdirName</code> under
   * {@link #getTestDir()}.
   * @see #setupClusterTestBuildDir()
   */
  public static Path getTestDir(final String subdirName) {
    return new Path(getTestDir(), subdirName);
  }

  /**
   * Home our cluster in a dir under target/test.  Give it a random name
   * so we can have many concurrent clusters running if we need to.  We need to
   * amend the test.build.data System property, since that is what
   * minidfscluster bases its data dir on.  Modifying a System property is not
   * the way to do concurrent instances -- another instance could grab the
   * temporary value unintentionally -- but there is nothing we can do about it
   * at the moment; single instance only is how the minidfscluster works.
   * @return The calculated cluster test build directory.
   */
  File setupClusterTestBuildDir() {
    String randomStr = UUID.randomUUID().toString();
    String dirStr = getTestDir(randomStr).toString();
    File dir = new File(dirStr).getAbsoluteFile();
    // Have it cleaned up on exit
    dir.deleteOnExit();
    return dir;
  }

  /**
   * @throws IOException If a cluster -- zk, dfs, or hbase -- is already running.
   */
  void isRunningCluster() throws IOException {
    if (this.clusterTestBuildDir == null) return;
    throw new IOException("Cluster already running at " +
      this.clusterTestBuildDir);
  }

  /**
   * Start a minidfscluster.
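   * <p>Illustrative sketch (not from the original docs; <code>util</code> is
   * an instance of this class):
   * <pre>
   *   MiniDFSCluster dfs = util.startMiniDFSCluster(3);
   *   FileSystem fs = dfs.getFileSystem();
   *   // ... use fs, then call util.shutdownMiniDFSCluster() when done ...
   * </pre>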
   * @param servers How many DNs to start.
   * @throws Exception
   * @see {@link #shutdownMiniDFSCluster()}
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  /**
   * Start a minidfscluster.
   * Can only create one.
   * @param dir Where to home your dfs cluster.
   * @param servers How many DNs to start.
   * @throws Exception
   * @see {@link #shutdownMiniDFSCluster()}
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final File dir)
  throws Exception {
    // This does the following to home the minidfscluster
    //     base_dir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/");
    // Some tests also do this:
    //  System.getProperty("test.cache.data", "build/test/cache");
    if (dir == null) this.clusterTestBuildDir = setupClusterTestBuildDir();
    else this.clusterTestBuildDir = dir;
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.toString());
    System.setProperty("test.cache.data", this.clusterTestBuildDir.toString());
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, null, null);
    return this.dfsCluster;
  }

  /**
   * Shuts down instance created by call to {@link #startMiniDFSCluster(int, File)}
   * or does nothing.
   * @throws Exception
   */
  public void shutdownMiniDFSCluster() throws Exception {
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
    }
  }

  /**
   * Call this if you only want a zk cluster.
   * @see #startMiniCluster() if you want zk + dfs + hbase mini cluster.
   * @throws Exception
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(setupClusterTestBuildDir());
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
  throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.zkCluster = new MiniZooKeeperCluster();
    int clientPort = this.zkCluster.startup(dir);
    this.conf.set("hbase.zookeeper.property.clientPort",
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  /**
   * Shuts down zk cluster created by call to {@link #startMiniZKCluster(File)}
   * or does nothing.
   * @throws IOException
   * @see #startMiniZKCluster()
   */
  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) this.zkCluster.shutdown();
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @throws Exception
   * @return Mini hbase cluster instance created.
   * @see {@link #shutdownMiniCluster()}
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1);
  }

  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under System property test.build.data.
   * Directory is cleaned up on exit.
   * @param servers Number of servers to start up.  We'll start this many
   * datanodes and regionservers.  If servers is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver),
   * otherwise you will get bind errors.
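   * <p>Illustrative sketch of a multi-server start (not from the original
   * docs; <code>util</code> is an instance of this class):
   * <pre>
   *   util.getConfiguration().setInt("hbase.regionserver.info.port", -1);
   *   MiniHBaseCluster cluster = util.startMiniCluster(3);
   * </pre>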
   * @throws Exception
   * @see {@link #shutdownMiniCluster()}
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int servers)
  throws Exception {
    LOG.info("Starting up minicluster");
    // If we already put up a cluster, fail.
    isRunningCluster();
    // Make a new random dir to home everything in.  Set it as system property.
    // minidfs reads home from system property.
    this.clusterTestBuildDir = setupClusterTestBuildDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.getPath());
    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
    startMiniDFSCluster(servers, this.clusterTestBuildDir);

    // Mangle conf so fs parameter points to minidfs we just started up
    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());
    // Do old style too just to be safe.
    this.conf.set("fs.default.name", fs.getUri().toString());
    this.dfsCluster.waitClusterUp();

    // Start up a zk cluster.
    if (this.zkCluster == null) {
      startMiniZKCluster(this.clusterTestBuildDir);
    }

    // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
    Path hbaseRootdir = fs.makeQualified(fs.getHomeDirectory());
    this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the .META.
    HTable t = new HTable(this.conf, HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) continue;
    LOG.info("Minicluster is up");
    return this.hbaseCluster;
  }

  /**
   * @return Current mini hbase cluster. Only has something in it after a call
   * to {@link #startMiniCluster()}.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    return this.hbaseCluster;
  }

  /**
   * @throws IOException
   * @see {@link #startMiniCluster(int)}
   */
  public void shutdownMiniCluster() throws IOException {
    LOG.info("Shutting down minicluster");
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shutdown zk.
      this.hbaseCluster.join();
    }
    shutdownMiniZKCluster();
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
    }
    // Clean up our directory.
    if (this.clusterTestBuildDir != null && this.clusterTestBuildDir.exists()) {
      // Need to use deleteDirectory because File.delete requires that the dir is empty.
      if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
          new Path(this.clusterTestBuildDir.toString()))) {
        LOG.warn("Failed delete of " + this.clusterTestBuildDir.toString());
      }
    }
    LOG.info("Minicluster is down");
  }

  /**
   * Flushes all caches in the mini hbase cluster
   * @throws IOException
   */
  public void flush() throws IOException {
    this.hbaseCluster.flushcache();
  }

  /**
   * Flushes all caches in the mini hbase cluster
   * @throws IOException
   */
  public void flush(byte [] tableName) throws IOException {
    this.hbaseCluster.flushcache(tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException {
    return createTable(tableName, new byte[][]{family});
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      desc.addFamily(new HColumnDescriptor(family));
    }
    (new HBaseAdmin(getConfiguration())).createTable(desc);
    return new HTable(getConfiguration(), tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions,
          HColumnDescriptor.DEFAULT_COMPRESSION,
          HColumnDescriptor.DEFAULT_IN_MEMORY,
          HColumnDescriptor.DEFAULT_BLOCKCACHE,
          Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL,
          HColumnDescriptor.DEFAULT_BLOOMFILTER,
          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
      desc.addFamily(hcd);
    }
    (new HBaseAdmin(getConfiguration())).createTable(desc);
    return new HTable(getConfiguration(), tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions[i],
          HColumnDescriptor.DEFAULT_COMPRESSION,
          HColumnDescriptor.DEFAULT_IN_MEMORY,
          HColumnDescriptor.DEFAULT_BLOCKCACHE,
          Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL,
          HColumnDescriptor.DEFAULT_BLOOMFILTER,
          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
      desc.addFamily(hcd);
      i++;
    }
    (new HBaseAdmin(getConfiguration())).createTable(desc);
    return new HTable(getConfiguration(), tableName);
  }

  /**
   * Truncate an existing table by deleting all of its rows.
   * @param tableName existing table
   * @return HTable for the truncated table
   * @throws IOException
   */
  public HTable truncateTable(byte [] tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    return table;
  }

  /**
   * Load table with rows from 'aaa' to 'zzz'.
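   * <p>Illustrative sketch (not from the original docs; assumes the table was
   * created with a family named FAMILY and that JUnit's assertEquals is in scope):
   * <pre>
   *   int loaded = util.loadTable(table, FAMILY);
   *   util.flush();
   *   assertEquals(loaded, util.countRows(table));
   * </pre>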
   * @param t Table
   * @param f Family
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

  /**
   * Return the number of rows in the given table.
   */
  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  /**
   * Return an md5 digest of the entire contents of a table.
   */
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // MessageDigest#toString() does not render the digest bytes; format the
    // accumulated MD5 value as hex so callers can compare checksums.
    return String.format("%032x", new java.math.BigInteger(1, digest.digest()));
  }

  /**
   * Creates many regions named "aaa" to "zzz".
   *
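   * <p>Illustrative sketch (not from the original docs; FAMILY is a byte[]
   * column family name already present in the table):
   * <pre>
   *   int regions = util.createMultiRegions(table, FAMILY);
   *   util.waitUntilAllRegionsAssigned(regions);
   * </pre>
   *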
   * @param table  The table to use for the data.
   * @param columnFamily  The family to insert the data into.
   * @return count of regions created.
   * @throws IOException When creating the regions fails.
   */
  public int createMultiRegions(HTable table, byte[] columnFamily)
  throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily);
  }

  /**
   * Creates many regions named "aaa" to "zzz".
   * @param c Configuration to use.
   * @param table  The table to use for the data.
   * @param columnFamily  The family to insert the data into.
   * @return count of regions created.
   * @throws IOException When creating the regions fails.
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily)
  throws IOException {
    byte[][] KEYS = {
      HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
      Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
      Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
      Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
      Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
      Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
    };
    return createMultiRegions(c, table, columnFamily, KEYS);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte [][] startKeys)
  throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }
    // remove empty region - this is tricky as the mini cluster during the test
    // setup already has the "<tablename>,,123456789" row with an empty start
    // and end key. Adding the custom regions below adds those blindly,
    // including the new start region from empty to "bbb". lg
    List<byte[]> rows = getMetaTableRows(htd.getName());
    // add custom ones
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getTableDescriptor(),
        startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegions: inserted " + hri.toString());
      count++;
    }
    // see comment above, remove "old" (or previous) single region
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    // flush cache of regions
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    return count;
  }

  /**
   * Returns all rows from the .META. table.
   *
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows() throws IOException {
    HTable t = new HTable(this.conf, HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    return rows;
  }

  /**
   * Returns all rows from the .META. table for a given user table
   *
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
    HTable t = new HTable(this.conf, HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      HRegionInfo info = Writables.getHRegionInfo(
          result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
      HTableDescriptor desc = info.getTableDesc();
      if (Bytes.compareTo(desc.getName(), tableName) == 0) {
        LOG.info("getMetaTableRows: row -> " +
            Bytes.toStringBinary(result.getRow()));
        rows.add(result.getRow());
      }
    }
    s.close();
    return rows;
  }

  /**
   * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>s.
   *
   * @throws IOException When starting the cluster fails.
   */
  public void startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
  }

  /**
   * Starts a <code>MiniMRCluster</code>.
   *
   * @param servers  The number of <code>TaskTracker</code>s to start.
   * @throws IOException When starting the cluster fails.
   */
  public void startMiniMapReduceCluster(final int servers) throws IOException {
    LOG.info("Starting mini mapreduce cluster...");
    // These are needed for the new and improved Map/Reduce framework
    Configuration c = getConfiguration();
    System.setProperty("hadoop.log.dir", c.get("hadoop.log.dir"));
    c.set("mapred.output.dir", c.get("hadoop.tmp.dir"));
    mrCluster = new MiniMRCluster(servers,
      FileSystem.get(c).getUri().toString(), 1);
    LOG.info("Mini mapreduce cluster started");
    c.set("mapred.job.tracker",
        mrCluster.createJobConf().get("mapred.job.tracker"));
  }

  /**
   * Stops the previously started <code>MiniMRCluster</code>.
   */
  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
    // Restore configuration to point to local jobtracker
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }

  /**
   * Switches the logger for the given class to DEBUG level.
   *
   * @param clazz  The class for which to switch to debug logging.
   */
  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }

  /**
   * Expire the Master's session
   * @throws Exception
   */
  public void expireMasterSession() throws Exception {
    HMaster master = hbaseCluster.getMaster();
    expireSession(master.getZooKeeperWrapper());
  }

  /**
   * Expire a region server's session
   * @param index which RS
   * @throws Exception
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = hbaseCluster.getRegionServer(index);
    expireSession(rs.getZooKeeperWrapper());
  }

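  /**
   * Force expiration of the ZooKeeper session held by the given wrapper.
   * Opens a second ZooKeeper handle with the same session id and password and
   * then closes it, which invalidates the original session on the quorum;
   * sleeps well past the session timeout, then touches .META. to force
   * clients to reconnect.
   * @param nodeZK Wrapper whose session should be expired.
   * @throws Exception
   */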
  public void expireSession(ZooKeeperWrapper nodeZK) throws Exception {
    ZooKeeperWrapper zkw =
        ZooKeeperWrapper.createInstance(conf,
            ZooKeeperWrapper.class.getName());
    zkw.registerListener(EmptyWatcher.instance);
    String quorumServers = zkw.getQuorumServers();
    int sessionTimeout = 5 * 1000; // 5 seconds

    byte[] password = nodeZK.getSessionPassword();
    long sessionID = nodeZK.getSessionID();

    ZooKeeper zk = new ZooKeeper(quorumServers,
        sessionTimeout, EmptyWatcher.instance, sessionID, password);
    zk.close();
    final long sleep = sessionTimeout * 5L;
    LOG.info("ZK Closed; sleeping=" + sleep);

    Thread.sleep(sleep);

    new HTable(conf, HConstants.META_TABLE_NAME);
  }

  /**
   * Get the HBase cluster.
   *
   * @return hbase cluster
   */
  public MiniHBaseCluster getHBaseCluster() {
    return hbaseCluster;
  }

  /**
   * Returns an HBaseAdmin instance.
   *
   * @return The HBaseAdmin instance.
   * @throws MasterNotRunningException
   */
  public HBaseAdmin getHBaseAdmin() throws MasterNotRunningException {
    if (hbaseAdmin == null) {
      hbaseAdmin = new HBaseAdmin(getConfiguration());
    }
    return hbaseAdmin;
  }

  /**
   * Closes the named region.
   *
   * @param regionName  The region to close.
   * @throws IOException
   */
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  /**
   * Closes the named region.
   *
   * @param regionName  The region to close.
   * @throws IOException
   */
  public void closeRegion(byte[] regionName) throws IOException {
    HBaseAdmin admin = getHBaseAdmin();
    admin.closeRegion(regionName, (Object[]) null);
  }

  /**
   * Closes the region containing the given row.
   *
   * @param row  The row to find the containing region.
   * @param table  The table to find the region.
   * @throws IOException
   */
  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  /**
   * Closes the region containing the given row.
   *
   * @param row  The row to find the containing region.
   * @param table  The table to find the region.
   * @throws IOException
   */
  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.zkCluster = zkCluster;
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return FileSystem.get(conf);
  }

  public void cleanupTestDir() throws IOException {
    getTestDir().getFileSystem(conf).delete(getTestDir(), true);
  }

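  /**
   * Wait until the given table is reported available, failing the test if
   * <code>timeoutMillis</code> elapses first.
   * @param table Table to wait on.
   * @param timeoutMillis How long to wait before asserting a timeout failure.
   * @throws InterruptedException
   * @throws IOException
   */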
  public void waitTableAvailable(byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    HBaseAdmin admin = new HBaseAdmin(conf);
    long startWait = System.currentTimeMillis();
    while (!admin.isTableAvailable(table)) {
      assertTrue("Timed out waiting for table " + Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(500);
    }
  }

  /**
   * Make sure that at least the specified number of region servers
   * are running
   * @param num minimum number of region servers that should be running
   * @throws IOException
   */
  public void ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    if (this.getHBaseCluster().getLiveRegionServerThreads().size() < num) {
      // Need at least "num" servers.
      LOG.info("Started new server=" +
        this.getHBaseCluster().startRegionServer());
    }
  }

  /**
   * This method clones the passed <code>c</code> configuration setting a new
   * user into the clone.  Use it when getting new instances of FileSystem.
   * Only works for DistributedFileSystem.
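   * <p>Illustrative sketch (not from the original docs; assumes a mini dfs
   * cluster is already running so the default FileSystem is distributed):
   * <pre>
   *   Configuration perDaemonConf = HBaseTestingUtility.setDifferentUser(conf, ".rs1");
   *   FileSystem fs = FileSystem.get(perDaemonConf);
   * </pre>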
   * @param c Initial configuration
   * @param differentiatingSuffix Suffix to differentiate this user from others.
   * @return A new configuration instance with a different user set into it.
   * @throws IOException
   */
  public static Configuration setDifferentUser(final Configuration c,
    final String differentiatingSuffix)
  throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    Preconditions.checkArgument(currentfs instanceof DistributedFileSystem);
    // Else distributed filesystem.  Make a new instance per daemon.  Below
    // code is taken from the AppendTestUtil over in hdfs.
    Configuration c2 = new Configuration(c);
    String username = UserGroupInformation.getCurrentUGI().getUserName() +
      differentiatingSuffix;
    UnixUserGroupInformation.saveToConf(c2,
      UnixUserGroupInformation.UGI_PROPERTY_NAME,
      new UnixUserGroupInformation(username, new String[]{"supergroup"}));
    return c2;
  }

  /**
   * Set soft and hard limits in namenode.
   * You'll get an NPE if you call this before you've started a minidfscluster.
   * @param soft Soft limit
   * @param hard Hard limit
   * @throws NoSuchFieldException
   * @throws SecurityException
   * @throws IllegalAccessException
   * @throws IllegalArgumentException
   */
  public void setNameNodeNameSystemLeasePeriod(final int soft, final int hard)
  throws SecurityException, NoSuchFieldException, IllegalArgumentException, IllegalAccessException {
    // TODO: If 0.20 hadoop do one thing, if 0.21 hadoop do another.
    // Not available in 0.20 hdfs.  Use reflection to make it happen.

    // private NameNode nameNode;
    Field field = this.dfsCluster.getClass().getDeclaredField("nameNode");
    field.setAccessible(true);
    NameNode nn = (NameNode)field.get(this.dfsCluster);
    // Use the caller-supplied limits rather than hard-coded values.
    nn.namesystem.leaseManager.setLeasePeriod(soft, hard);
  }

  /**
   * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append it's hard-coded
   * to 5 and makes tests linger.  Here is the exception you'll see:
   * <pre>
   * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/hlog.1276627923013 block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683 failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
   * </pre>
   * @param stream A DFSClient.DFSOutputStream.
   * @param max
   * @throws NoSuchFieldException
   * @throws SecurityException
   * @throws IllegalAccessException
   * @throws IllegalArgumentException
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz: clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }

  /**
   * Wait until <code>countOfRegions</code> rows in .META. have a non-empty
   * info:server column.  This means all regions have been deployed, and the
   * master has been informed and has updated .META. with each region's
   * deployed server.
   * @param countOfRegions How many regions are expected in .META.
   * @throws IOException
   */
  public void waitUntilAllRegionsAssigned(final int countOfRegions)
  throws IOException {
    HTable meta = new HTable(getConfiguration(), HConstants.META_TABLE_NAME);
    while (true) {
      int rows = 0;
      Scan scan = new Scan();
      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
      ResultScanner s = meta.getScanner(scan);
      for (Result r = null; (r = s.next()) != null;) {
        byte [] b =
          r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
        if (b == null || b.length <= 0) break;
        rows++;
      }
      s.close();
      // If I get to here and all rows have a Server, then all have been assigned.
      if (rows == countOfRegions) break;
      LOG.info("Found=" + rows);
      Threads.sleep(1000);
    }
  }

  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
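   * <p>Illustrative sketch (not from the original docs; assumes the test has a
   * handle on an HRegion plus FAMILY/ROW byte[] constants):
   * <pre>
   *   Store store = region.getStore(FAMILY);
   *   List&lt;KeyValue&gt; kvs = HBaseTestingUtility.getFromStoreFile(store, new Get(ROW));
   * </pre>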
   */
  public static List<KeyValue> getFromStoreFile(Store store,
                                                Get get) throws IOException {
    ReadWriteConsistencyControl.resetThreadReadPoint();
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()));

    List<KeyValue> result = new ArrayList<KeyValue>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // verify that we are on the row we want:
      KeyValue kv = result.get(0);
      if (!Bytes.equals(kv.getRow(), get.getRow())) {
        result.clear();
      }
    }
    return result;
  }

  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
   */
  public static List<KeyValue> getFromStoreFile(Store store,
                                                byte [] row,
                                                NavigableSet<byte[]> columns
                                                ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }
}