/**
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ReadWriteConsistencyControl;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.zookeeper.ZooKeeper;

/**
 * Facility for testing HBase. Replacement for
 * the old HBaseTestCase and HBaseClusterTestCase functionality.
 * Create an instance and keep it around while testing HBase.  This class is
 * meant to be your one-stop shop for anything you might need while testing.
 * Manages one cluster at a time only.  Depends on log4j being on the classpath
 * and hbase-site.xml for logging and test-run configuration.  It does not set
 * logging levels nor make changes to configuration parameters.
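 *
 * <p>A minimal usage sketch (the JUnit wiring around it is illustrative, not
 * prescribed by this class):
 * <pre>
 *   HBaseTestingUtility testUtil = new HBaseTestingUtility();
 *   testUtil.startMiniCluster();
 *   HTable table = testUtil.createTable(Bytes.toBytes("t"), Bytes.toBytes("f"));
 *   // ... exercise the cluster and table ...
 *   testUtil.shutdownMiniCluster();
 * </pre>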
 */
public class HBaseTestingUtility {
  private final static Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
  private Configuration conf;
  private MiniZooKeeperCluster zkCluster = null;
  /**
   * Set if we were passed a zkCluster.  If so, we won't shut down zk as
   * part of general shutdown.
   */
  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;
  private MiniHBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;
  // If non-null, then a cluster is already running.
  private File clusterTestBuildDir = null;

  /**
   * System property key to get test directory value.
   * The name is what it is because mini dfs is hard-coded to put its test
   * data here.
   */
  public static final String TEST_DIRECTORY_KEY = "test.build.data";

  /**
   * Default parent directory for test output.
   */
  public static final String DEFAULT_TEST_DIRECTORY = "target/test-data";

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    this.conf = conf;
  }

  /**
   * Returns this class's instance of {@link Configuration}.  Be careful how
   * you use the returned Configuration since {@link HConnection} instances
   * can be shared.  The Map of HConnections is keyed by the Configuration.  If,
   * say, a Connection was being used against a cluster that had been shut down,
   * see {@link #shutdownMiniCluster()}, then the Connection will no longer
   * be wholesome.  Rather than using the returned Configuration directly, it is
   * usually best to make a copy and use that.  Do
   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
   * @return Instance of Configuration.
   */
  public Configuration getConfiguration() {
    return this.conf;
  }

  /**
   * @return Where to write test data on local filesystem; usually
   * {@link #DEFAULT_TEST_DIRECTORY}
   * @see #setupClusterTestBuildDir()
   * @see #getTestFileSystem()
   */
  public static Path getTestDir() {
    return new Path(System.getProperty(TEST_DIRECTORY_KEY,
      DEFAULT_TEST_DIRECTORY));
  }

  /**
   * @param subdirName
   * @return Path to a subdirectory named <code>subdirName</code> under
   * {@link #getTestDir()}.
   * @see #setupClusterTestBuildDir()
   * @see #getTestFileSystem()
   */
  public static Path getTestDir(final String subdirName) {
    return new Path(getTestDir(), subdirName);
  }

  /**
   * Home our cluster in a dir under {@link #DEFAULT_TEST_DIRECTORY}.  Give it
   * a random name so we can have many concurrent clusters running if need be.
   * Callers need to amend the {@link #TEST_DIRECTORY_KEY} System property,
   * which is what minidfscluster bases its data dir on.  Modifying a System
   * property is not the way to do concurrent instances -- another instance
   * could grab the temporary value unintentionally -- but there is nothing we
   * can do about it at the moment; single instance only is how the
   * minidfscluster works.
   * @return The calculated cluster test build directory.
   */
  public File setupClusterTestBuildDir() {
    String randomStr = UUID.randomUUID().toString();
    String dirStr = getTestDir(randomStr).toString();
    File dir = new File(dirStr).getAbsoluteFile();
    // Have it cleaned up on exit
    dir.deleteOnExit();
    return dir;
  }

  /**
   * @throws IOException If a cluster -- zk, dfs, or hbase -- is already running.
   */
  void isRunningCluster(String passedBuildPath) throws IOException {
    if (this.clusterTestBuildDir == null || passedBuildPath != null) return;
    throw new IOException("Cluster already running at " +
      this.clusterTestBuildDir);
  }

  /**
   * Start a minidfscluster.
   * @param servers How many DNs to start.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  /**
   * Start a minidfscluster.
   * Can only create one.
   * @param servers How many DNs to start.
   * @param dir Where to home your dfs cluster.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final File dir)
  throws Exception {
    // This does the following to home the minidfscluster
    //     base_dir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/");
    // Some tests also do this:
    //  System.getProperty("test.cache.data", "build/test/cache");
    if (dir == null) {
      this.clusterTestBuildDir = setupClusterTestBuildDir();
    } else {
      this.clusterTestBuildDir = dir;
    }
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.toString());
    System.setProperty("test.cache.data", this.clusterTestBuildDir.toString());
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, null, null);
    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());
    // Do old style too just to be safe.
    this.conf.set("fs.default.name", fs.getUri().toString());
    return this.dfsCluster;
  }

  /**
   * Shuts down instance created by call to {@link #startMiniDFSCluster(int, File)}
   * or does nothing.
   * @throws Exception
   */
  public void shutdownMiniDFSCluster() throws Exception {
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
    }
  }

  /**
   * Call this if you only want a zk cluster.
   * See {@link #startMiniCluster()} if you want a zk + dfs + hbase mini
   * cluster.
   * @throws Exception
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(setupClusterTestBuildDir());
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
  throws Exception {
    this.passedZkCluster = false;
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.zkCluster = new MiniZooKeeperCluster();
    int clientPort = this.zkCluster.startup(dir);
    this.conf.set("hbase.zookeeper.property.clientPort",
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  /**
   * Shuts down zk cluster created by call to {@link #startMiniZKCluster(File)}
   * or does nothing.
   * @throws IOException
   * @see #startMiniZKCluster()
   */
  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @throws Exception
   * @return Mini hbase cluster instance created.
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under System property test.build.data.
   * Directory is cleaned up on exit.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * datanodes and regionservers.  If numSlaves is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver),
   * otherwise you will get bind errors.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
  throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  /**
   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under System property test.build.data.
   * Directory is cleaned up on exit.
   * @param numMasters Number of masters to start up.  We'll start this many
   * hbase masters.  If numMasters > 1, you can find the active/primary master
   * with {@link MiniHBaseCluster#getMaster()}.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * datanodes and regionservers.  If numSlaves is > 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver),
   * otherwise you will get bind errors.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
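   *
   * <p>For example, a two-master, three-regionserver start (values
   * illustrative; remember to set hbase.regionserver.info.port to -1 first):
   * <pre>
   *   MiniHBaseCluster cluster = testUtil.startMiniCluster(2, 3);
   * </pre>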
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves)
  throws Exception {
    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and datanode(s)");
    // If we already put up a cluster, fail.
    String testBuildPath = conf.get(TEST_DIRECTORY_KEY, null);
    isRunningCluster(testBuildPath);
    if (testBuildPath != null) {
      LOG.info("Using passed path: " + testBuildPath);
    }
    // Make a new random dir to home everything in.  Set it as system property.
    // minidfs reads home from system property.
    this.clusterTestBuildDir = testBuildPath == null ?
      setupClusterTestBuildDir() : new File(testBuildPath);
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.getPath());
    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
    startMiniDFSCluster(numSlaves, this.clusterTestBuildDir);
    this.dfsCluster.waitClusterUp();

    // Start up a zk cluster.
    if (this.zkCluster == null) {
      startMiniZKCluster(this.clusterTestBuildDir);
    }
    return startMiniHBaseCluster(numMasters, numSlaves);
  }

  /**
   * Starts up mini hbase cluster.  Usually used after call to
   * {@link #startMiniCluster(int, int)} when doing stepped startup of clusters.
   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
   * @param numMasters
   * @param numSlaves
   * @return Reference to the mini hbase cluster.
   * @throws IOException
   * @throws InterruptedException
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
      final int numSlaves)
  throws IOException, InterruptedException {
    // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
    createRootDir();
    Configuration c = new Configuration(this.conf);
    this.hbaseCluster = new MiniHBaseCluster(c, numMasters, numSlaves);
    // Don't leave here till we've done a successful scan of the .META.
    HTable t = new HTable(c, HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    LOG.info("Minicluster is up");
    return this.hbaseCluster;
  }

  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
   * @param servers number of region servers
   * @throws IOException
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the .META.
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    LOG.info("HBase has been restarted");
  }

  /**
   * @return Current mini hbase cluster. Only has something in it after a call
   * to {@link #startMiniCluster()}.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    return this.hbaseCluster;
  }

  /**
   * Stops mini hbase, zk, and hdfs clusters.
   * @throws IOException
   * @see #startMiniCluster(int)
   */
  public void shutdownMiniCluster() throws IOException {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) shutdownMiniZKCluster();
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
    }
    // Clean up our directory.
    if (this.clusterTestBuildDir != null && this.clusterTestBuildDir.exists()) {
      // Need to use deleteDirectory because File.delete requires that the dir
      // be empty.
      if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
          new Path(this.clusterTestBuildDir.toString()))) {
        LOG.warn("Failed delete of " + this.clusterTestBuildDir.toString());
      }
      this.clusterTestBuildDir = null;
    }
    LOG.info("Minicluster is down");
  }

  /**
   * Shutdown HBase mini cluster.  Does not shut down zk or dfs if running.
   * @throws IOException
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shutdown zk.
      this.hbaseCluster.join();
    }
    this.hbaseCluster = null;
  }

  /**
   * Creates an hbase rootdir in the user's home directory.  Also creates the
   * hbase version file.  Normally you won't make use of this method; the root
   * hbasedir is created for you as part of mini cluster startup.  You'd only
   * use this method if you were doing manual operations.
   * @return Fully qualified path to hbase root dir
   * @throws IOException
   */
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = fs.makeQualified(fs.getHomeDirectory());
    this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  /**
   * Flushes all caches in the mini hbase cluster
   * @throws IOException
   */
  public void flush() throws IOException {
    this.hbaseCluster.flushcache();
  }

  /**
   * Flushes all caches of the given table in the mini hbase cluster
   * @param tableName table to flush
   * @throws IOException
   */
  public void flush(byte [] tableName) throws IOException {
    this.hbaseCluster.flushcache(tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException {
    return createTable(tableName, new byte[][]{family});
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param c Configuration to use
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      desc.addFamily(new HColumnDescriptor(family));
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions,
          HColumnDescriptor.DEFAULT_COMPRESSION,
          HColumnDescriptor.DEFAULT_IN_MEMORY,
          HColumnDescriptor.DEFAULT_BLOCKCACHE,
          Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL,
          HColumnDescriptor.DEFAULT_BLOOMFILTER,
          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions Number of versions to keep, one entry per family.
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions[i],
          HColumnDescriptor.DEFAULT_COMPRESSION,
          HColumnDescriptor.DEFAULT_IN_MEMORY,
          HColumnDescriptor.DEFAULT_BLOCKCACHE,
          Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL,
          HColumnDescriptor.DEFAULT_BLOOMFILTER,
          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Drop an existing table
   * @param tableName existing table
   * @throws IOException
   */
  public void deleteTable(byte[] tableName) throws IOException {
    HBaseAdmin admin = new HBaseAdmin(getConfiguration());
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  /**
   * Truncate a table by deleting all rows it contains.
   * @param tableName existing table
   * @return HTable for the truncated table
   * @throws IOException
   */
  public HTable truncateTable(byte [] tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan.close();
    return table;
  }

  /**
   * Load table with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Family
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
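    // Three nested loops over 'a'..'z' generate all 26^3 = 17,576
    // three-letter row keys, 'aaa' through 'zzz'.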
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

  /**
   * Load region with rows from 'aaa' to 'zzz'.
   * @param r Region
   * @param f Family
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadRegion(final HRegion r, final byte[] f)
  throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
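    // Same key space as loadTable: all three-letter keys 'aaa' through 'zzz'.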
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          if (r.getLog() == null) put.setWriteToWAL(false);
          r.put(put);
          rowCount++;
        }
      }
    }
    return rowCount;
  }

  /**
   * Return the number of rows in the given table.
   */
  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  /**
   * Return an md5 digest of the entire contents of a table.
   */
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // MessageDigest.toString() does not return the digest value; hex-encode
    // the digest bytes instead.
    StringBuilder sb = new StringBuilder();
    for (byte b : digest.digest()) {
      sb.append(String.format("%02x", b & 0xff));
    }
    return sb.toString();
  }

  /**
   * Creates many regions using the start keys in {@link #KEYS}: an empty
   * start key plus "bbb" through "yyy".
   *
   * @param table  The table to use for the data.
   * @param columnFamily  The family to insert the data into.
   * @return count of regions created.
   * @throws IOException When creating the regions fails.
   */
  public int createMultiRegions(HTable table, byte[] columnFamily)
  throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily);
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  /**
   * Creates many regions using the start keys in {@link #KEYS}: an empty
   * start key plus "bbb" through "yyy".
   * @param c Configuration to use.
   * @param table  The table to use for the data.
   * @param columnFamily  The family to insert the data into.
   * @return count of regions created.
   * @throws IOException When creating the regions fails.
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily)
  throws IOException {
    return createMultiRegions(c, table, columnFamily, KEYS);
  }

  /**
   * Creates the specified number of regions in the specified table.
   * @param c Configuration to use.
   * @param table The table to create regions in.
   * @param family The family to use.
   * @param numRegions How many regions to create; must be at least 3.
   * @return count of regions created.
   * @throws IOException When creating the regions fails.
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte [] family, int numRegions)
  throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte [] startKey = Bytes.toBytes("aaaaa");
    byte [] endKey = Bytes.toBytes("zzzzz");
    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] regionStartKeys = new byte[splitKeys.length + 1][];
    for (int i = 0; i < splitKeys.length; i++) {
      regionStartKeys[i + 1] = splitKeys[i];
    }
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }

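  /**
   * Creates regions in the passed table with the passed start keys, replacing
   * the table's existing single region row in .META. and assigning the new
   * regions if the table is enabled.
   * @param c Configuration to use.
   * @param table The table to add regions to.
   * @param columnFamily The family to use; added to the table if missing.
   * @param startKeys Start keys for the regions; sorted here before use.
   * @return count of regions created.
   * @throws IOException When creating the regions fails.
   */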
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte [][] startKeys)
  throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }
    // remove empty region - this is tricky as the mini cluster during the test
    // setup already has the "<tablename>,,123456789" row with an empty start
    // and end key. Adding the custom regions below adds those blindly,
    // including the new start region from empty to "bbb". lg
    List<byte[]> rows = getMetaTableRows(htd.getName());
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // add custom ones
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getTableDescriptor(),
        startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegions: inserted " + hri.toString());
      newRegions.add(hri);
      count++;
    }
    // see comment above, remove "old" (or previous) single region
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    // flush cache of regions
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    // assign all the new regions IF table is enabled.
    if (getHBaseAdmin().isTableEnabled(table.getTableName())) {
      for (HRegionInfo hri : newRegions) {
        hbaseCluster.getMaster().assignRegion(hri);
      }
    }
    return count;
  }

  /**
   * Create rows in META for regions of the specified table with the specified
   * start keys.  The first startKey should be a 0 length byte array if you
   * want to form a proper range of regions.
   * @param conf
   * @param htd
   * @param startKeys
   * @return list of region info for regions added to meta
   * @throws IOException
   */
  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final HTableDescriptor htd, byte [][] startKeys)
  throws IOException {
    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // add custom ones
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd, startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegionsInMeta: inserted " + hri.toString());
      newRegions.add(hri);
    }
    return newRegions;
  }

  /**
   * Returns all rows from the .META. table.
   *
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows() throws IOException {
    // TODO: Redo using MetaReader class
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    return rows;
  }

  /**
   * Returns all rows from the .META. table for a given user table
   *
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
    // TODO: Redo using MetaReader.
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      HRegionInfo info = Writables.getHRegionInfo(
          result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
      HTableDescriptor desc = info.getTableDesc();
      if (Bytes.compareTo(desc.getName(), tableName) == 0) {
        LOG.info("getMetaTableRows: row -> " +
            Bytes.toStringBinary(result.getRow()));
        rows.add(result.getRow());
      }
    }
    s.close();
    return rows;
  }

  /**
   * Tool to get the reference to the region server object that holds the
   * region of the specified user table.
   * It first searches for the meta rows that contain the region of the
   * specified table, then gets the index of that RS, and finally retrieves
   * the RS's reference.
   * @param tableName user table to lookup in .META.
   * @return region server that holds it, null if the table has no regions
   * in .META.
   * @throws IOException
   */
  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
      throws IOException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    int index = hbaseCluster.getServerWith(metaRows.get(0));
    return hbaseCluster.getRegionServerThreads().get(index).getRegionServer();
  }

  /**
   * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>s.
   *
   * @throws IOException When starting the cluster fails.
   */
  public void startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
  }

  /**
   * Starts a <code>MiniMRCluster</code>.
   *
   * @param servers  The number of <code>TaskTracker</code>s to start.
   * @throws IOException When starting the cluster fails.
   */
  public void startMiniMapReduceCluster(final int servers) throws IOException {
    LOG.info("Starting mini mapreduce cluster...");
    // These are needed for the new and improved Map/Reduce framework
    Configuration c = getConfiguration();
    System.setProperty("hadoop.log.dir", c.get("hadoop.log.dir"));
    c.set("mapred.output.dir", c.get("hadoop.tmp.dir"));
    mrCluster = new MiniMRCluster(servers,
      FileSystem.get(c).getUri().toString(), 1);
    LOG.info("Mini mapreduce cluster started");
    c.set("mapred.job.tracker",
        mrCluster.createJobConf().get("mapred.job.tracker"));
  }

  /**
   * Stops the previously started <code>MiniMRCluster</code>.
   */
  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
    // Restore configuration to point to local jobtracker
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }

  /**
   * Switches the logger for the given class to DEBUG level.
   *
   * @param clazz  The class for which to switch to debug logging.
   */
  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }

  /**
   * Expire the Master's session
   * @throws Exception
   */
  public void expireMasterSession() throws Exception {
    HMaster master = hbaseCluster.getMaster();
    expireSession(master.getZooKeeper(), master);
  }

  /**
   * Expire a region server's session
   * @param index which RS
   * @throws Exception
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = hbaseCluster.getRegionServer(index);
    expireSession(rs.getZooKeeper(), rs);
  }

  public void expireSession(ZooKeeperWatcher nodeZK, Server server)
  throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    int sessionTimeout = 5 * 1000; // 5 seconds
    ZooKeeper zk = nodeZK.getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

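    // Connecting with the same session id and password and then closing the
    // new handle makes the ZooKeeper ensemble expire the original session.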
    ZooKeeper newZK = new ZooKeeper(quorumServers,
        sessionTimeout, EmptyWatcher.instance, sessionID, password);
    newZK.close();
    final long sleep = sessionTimeout * 5L;
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID) +
      "; sleeping=" + sleep);

    Thread.sleep(sleep);

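    // Open a fresh connection against .META. to check the cluster is still
    // usable after the expiration.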
    new HTable(new Configuration(conf), HConstants.META_TABLE_NAME);
  }

  /**
   * Get the HBase cluster.
   *
   * @return hbase cluster
   */
  public MiniHBaseCluster getHBaseCluster() {
    return hbaseCluster;
  }

  /**
   * Returns an HBaseAdmin instance.
   *
   * @return The HBaseAdmin instance.
   * @throws IOException
   */
  public HBaseAdmin getHBaseAdmin()
  throws IOException {
    return new HBaseAdmin(new Configuration(getConfiguration()));
  }

  /**
   * Closes the named region.
   *
   * @param regionName  The region to close.
   * @throws IOException
   */
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  /**
   * Closes the named region.
   *
   * @param regionName  The region to close.
   * @throws IOException
   */
  public void closeRegion(byte[] regionName) throws IOException {
    HBaseAdmin admin = getHBaseAdmin();
    admin.closeRegion(regionName, null);
  }

  /**
   * Closes the region containing the given row.
   *
   * @param row  The row to find the containing region.
   * @param table  The table to find the region.
   * @throws IOException
   */
  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  /**
   * Closes the region containing the given row.
   *
   * @param row  The row to find the containing region.
   * @param table  The table to find the region.
   * @throws IOException
   */
  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return FileSystem.get(conf);
  }

  /**
   * @return True if we removed the test dir
   * @throws IOException
   */
  public boolean cleanupTestDir() throws IOException {
    return deleteDir(getTestDir());
  }

  /**
   * @param subdir Test subdir name.
   * @return True if we removed the test dir
   * @throws IOException
   */
  public boolean cleanupTestDir(final String subdir) throws IOException {
    return deleteDir(getTestDir(subdir));
  }

  /**
   * @param dir Directory to delete
   * @return True if we deleted it.
   * @throws IOException
   */
  public boolean deleteDir(final Path dir) throws IOException {
    FileSystem fs = getTestFileSystem();
    if (fs.exists(dir)) {
      return fs.delete(dir, true);
    }
    return false;
  }

  public void waitTableAvailable(byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    HBaseAdmin admin = getHBaseAdmin();
    long startWait = System.currentTimeMillis();
    while (!admin.isTableAvailable(table)) {
      assertTrue("Timed out waiting for table " + Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(500);
    }
  }

  /**
   * Make sure that at least the specified number of region servers
   * are running.
   * @param num minimum number of region servers that should be running
   * @return True if we started any servers
   * @throws IOException
   */
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    for (int i = this.getHBaseCluster().getLiveRegionServerThreads().size();
        i < num; i++) {
      // Need at least "num" servers; start another one.
      LOG.info("Started new server=" +
        this.getHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }

  /**
   * Creates a new user, derived from the current user, for use when getting
   * new instances of FileSystem.  The passed suffix is appended to the
   * current user's name to differentiate the new user from it.  Only works
   * for DistributedFileSystem.
   * @param c Initial configuration
   * @param differentiatingSuffix Suffix to differentiate this user from others.
   * @return A new user, distinct from the current user.
   * @throws IOException
   */
  public static User getDifferentUser(final Configuration c,
    final String differentiatingSuffix)
  throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    // Else distributed filesystem.  Make a new instance per daemon.  Below
    // code is taken from the AppendTestUtil over in hdfs.
    String username = User.getCurrent().getName() +
      differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
        new String[]{"supergroup"});
    return user;
  }

  /**
   * Set soft and hard limits in namenode.
   * You'll get a NPE if you call this before you've started a minidfscluster.
   * @param soft Soft limit
   * @param hard Hard limit
   * @throws NoSuchFieldException
   * @throws SecurityException
   * @throws IllegalAccessException
   * @throws IllegalArgumentException
   */
  public void setNameNodeNameSystemLeasePeriod(final int soft, final int hard)
  throws SecurityException, NoSuchFieldException, IllegalArgumentException, IllegalAccessException {
    // TODO: If 0.20 hadoop do one thing, if 0.21 hadoop do another.
    // Not available in 0.20 hdfs.  Use reflection to make it happen.

    // private NameNode nameNode;
    Field field = this.dfsCluster.getClass().getDeclaredField("nameNode");
    field.setAccessible(true);
    NameNode nn = (NameNode)field.get(this.dfsCluster);
    nn.namesystem.leaseManager.setLeasePeriod(soft, hard);
  }

  /**
   * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append it is hard-coded
   * to 5 and makes tests linger.  Here is the exception you'll see:
   * <pre>
   * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/hlog.1276627923013 block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683 failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
   * </pre>
   * @param stream A DFSClient.DFSOutputStream.
   * @param max
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
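      // DFSOutputStream is a non-public inner class of DFSClient, so we have
      // to locate it reflectively among DFSClient's declared classes.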
      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz: clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }

  /**
   * Wait until <code>countOfRegions</code> regions in .META. have a non-empty
   * info:server cell.  This means all regions have been deployed, and the
   * master has been informed and has updated .META. with each region's
   * deployed server.
   * @param countOfRegions How many regions in .META.
   * @throws IOException
   */
  public void waitUntilAllRegionsAssigned(final int countOfRegions)
  throws IOException {
    HTable meta = new HTable(getConfiguration(), HConstants.META_TABLE_NAME);
    while (true) {
      int rows = 0;
      Scan scan = new Scan();
      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
      ResultScanner s = meta.getScanner(scan);
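      // Count rows that already have a non-empty info:server cell; bail out
      // of the scan early at the first region that is still unassigned.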
      for (Result r = null; (r = s.next()) != null;) {
        byte [] b =
          r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
        if (b == null || b.length <= 0) {
          break;
        }
        rows++;
      }
      s.close();
      // If I get to here and all rows have a Server, then all have been assigned.
      if (rows == countOfRegions) {
        break;
      }
      LOG.info("Found=" + rows);
      Threads.sleep(1000);
    }
  }

  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
   */
  public static List<KeyValue> getFromStoreFile(Store store,
                                                Get get) throws IOException {
    ReadWriteConsistencyControl.resetThreadReadPoint();
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()));

    List<KeyValue> result = new ArrayList<KeyValue>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // verify that we are on the row we want:
      KeyValue kv = result.get(0);
      if (!Bytes.equals(kv.getRow(), get.getRow())) {
        result.clear();
      }
    }
    return result;
  }

  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
   */
  public static List<KeyValue> getFromStoreFile(Store store,
                                                byte [] row,
                                                NavigableSet<byte[]> columns
                                                ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }
}