1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import static org.codehaus.jackson.map.SerializationConfig.Feature.SORT_PROPERTIES_ALPHABETICALLY;
22  
23  import java.io.IOException;
24  import java.io.PrintStream;
25  import java.lang.reflect.Constructor;
26  import java.math.BigDecimal;
27  import java.math.MathContext;
28  import java.text.DecimalFormat;
29  import java.text.SimpleDateFormat;
30  import java.util.ArrayList;
31  import java.util.Arrays;
32  import java.util.Date;
33  import java.util.LinkedList;
34  import java.util.Map;
35  import java.util.Queue;
36  import java.util.Random;
37  import java.util.TreeMap;
38  import java.util.concurrent.Callable;
39  import java.util.concurrent.ExecutionException;
40  import java.util.concurrent.ExecutorService;
41  import java.util.concurrent.Executors;
42  import java.util.concurrent.Future;
43  
44  import com.google.common.base.Objects;
45  import com.google.common.util.concurrent.ThreadFactoryBuilder;
46  
47  import org.apache.commons.logging.Log;
48  import org.apache.commons.logging.LogFactory;
49  import org.apache.hadoop.conf.Configuration;
50  import org.apache.hadoop.conf.Configured;
51  import org.apache.hadoop.fs.FileSystem;
52  import org.apache.hadoop.fs.Path;
53  import org.apache.hadoop.hbase.classification.InterfaceAudience;
54  import org.apache.hadoop.hbase.client.Admin;
55  import org.apache.hadoop.hbase.client.BufferedMutator;
56  import org.apache.hadoop.hbase.client.Connection;
57  import org.apache.hadoop.hbase.client.ConnectionFactory;
58  import org.apache.hadoop.hbase.client.Consistency;
59  import org.apache.hadoop.hbase.client.Durability;
60  import org.apache.hadoop.hbase.client.Get;
61  import org.apache.hadoop.hbase.client.Put;
62  import org.apache.hadoop.hbase.client.Result;
63  import org.apache.hadoop.hbase.client.ResultScanner;
64  import org.apache.hadoop.hbase.client.Scan;
65  import org.apache.hadoop.hbase.client.Table;
66  import org.apache.hadoop.hbase.filter.BinaryComparator;
67  import org.apache.hadoop.hbase.filter.CompareFilter;
68  import org.apache.hadoop.hbase.filter.Filter;
69  import org.apache.hadoop.hbase.filter.FilterAllFilter;
70  import org.apache.hadoop.hbase.filter.FilterList;
71  import org.apache.hadoop.hbase.filter.PageFilter;
72  import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
73  import org.apache.hadoop.hbase.filter.WhileMatchFilter;
74  import org.apache.hadoop.hbase.io.compress.Compression;
75  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
76  import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
77  import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
78  import org.apache.hadoop.hbase.regionserver.BloomType;
79  import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
80  import org.apache.hadoop.hbase.trace.SpanReceiverHost;
81  import org.apache.hadoop.hbase.util.*;
82  import org.apache.hadoop.io.LongWritable;
83  import org.apache.hadoop.io.Text;
84  import org.apache.hadoop.mapreduce.Job;
85  import org.apache.hadoop.mapreduce.Mapper;
86  import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
87  import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
88  import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
89  import org.apache.hadoop.util.Tool;
90  import org.apache.hadoop.util.ToolRunner;
91  import org.codehaus.jackson.map.ObjectMapper;
92  
93  import com.yammer.metrics.core.Histogram;
94  import com.yammer.metrics.stats.UniformSample;
95  import com.yammer.metrics.stats.Snapshot;
96  
97  import org.apache.htrace.Sampler;
98  import org.apache.htrace.Trace;
99  import org.apache.htrace.TraceScope;
100 import org.apache.htrace.impl.ProbabilitySampler;
101 
102 /**
103  * Script used to evaluate HBase performance and scalability.  Runs an HBase
104  * client that steps through one of a set of hardcoded tests or 'experiments'
105  * (e.g. a random reads test, a random writes test, etc.). Pass on the
106  * command-line which test to run and how many clients are participating in
107  * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
108  *
109  * <p>This class sets up and runs the evaluation programs described in
110  * Section 7, <i>Performance Evaluation</i>, of the <a
111  * href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
112  * paper, pages 8-10.
113  *
114  * <p>By default, runs as a mapreduce job where each mapper runs a single test
115  * client. Can also run as a non-mapreduce, multithreaded application by
116  * specifying {@code --nomapred}. Each client handles about 1GB of data, unless
117  * specified otherwise.
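     *
     * <p>For example, a single local client running the random write test might be invoked as
     * {@code PerformanceEvaluation --nomapred randomWrite 1}; the argument order shown here is an
     * assumption, so run {@code PerformanceEvaluation --help} for the authoritative usage.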
118  */
119 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
120 public class PerformanceEvaluation extends Configured implements Tool {
121   protected static final Log LOG = LogFactory.getLog(PerformanceEvaluation.class.getName());
122   private static final ObjectMapper MAPPER = new ObjectMapper();
123   static {
124     MAPPER.configure(SORT_PROPERTIES_ALPHABETICALLY, true);
125   }
126 
127   public static final String TABLE_NAME = "TestTable";
128   public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
129   public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
130   public static final byte [] QUALIFIER_NAME = COLUMN_ZERO;
131   public static final int DEFAULT_VALUE_LENGTH = 1000;
132   public static final int ROW_LENGTH = 26;
133 
134   private static final int ONE_GB = 1024 * 1024 * 1000;
135   private static final int DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
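      // With the default 1000-byte value length, this works out to 1024 * 1024 = 1,048,576 rows
      // per client; ONE_GB is 1024 * 1024 * 1000 rather than a true gigabyte, presumably so the
      // division by DEFAULT_VALUE_LENGTH yields a whole, power-of-two row count.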
136   // TODO : should we make this configurable
137   private static final int TAG_LENGTH = 256;
138   private static final DecimalFormat FMT = new DecimalFormat("0.##");
139   private static final MathContext CXT = MathContext.DECIMAL64;
140   private static final BigDecimal MS_PER_SEC = BigDecimal.valueOf(1000);
141   private static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
142   private static final TestOptions DEFAULT_OPTS = new TestOptions();
143 
144   private static Map<String, CmdDescriptor> COMMANDS = new TreeMap<String, CmdDescriptor>();
145   private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
146 
147   static {
148     addCommandDescriptor(RandomReadTest.class, "randomRead",
149       "Run random read test");
150     addCommandDescriptor(RandomSeekScanTest.class, "randomSeekScan",
151       "Run random seek and scan 100 test");
152     addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
153       "Run random seek scan with both start and stop row (max 10 rows)");
154     addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
155       "Run random seek scan with both start and stop row (max 100 rows)");
156     addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
157       "Run random seek scan with both start and stop row (max 1000 rows)");
158     addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000",
159       "Run random seek scan with both start and stop row (max 10000 rows)");
160     addCommandDescriptor(RandomWriteTest.class, "randomWrite",
161       "Run random write test");
162     addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
163       "Run sequential read test");
164     addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
165       "Run sequential write test");
166     addCommandDescriptor(ScanTest.class, "scan",
167       "Run scan test (read every row)");
168     addCommandDescriptor(FilteredScanTest.class, "filterScan",
169       "Run scan test using a filter to find a specific row based on its value " +
170         "(make sure to use --rows=20)");
171   }
172 
173   /**
174    * Enum for map metrics.  Keep it out here rather than inside the Map
175    * inner-class so we can find associated properties.
176    */
177   protected static enum Counter {
178     /** elapsed time */
179     ELAPSED_TIME,
180     /** number of rows */
181     ROWS
182   }
183 
184   protected static class RunResult implements Comparable<RunResult> {
185     public RunResult(long duration, Histogram hist) {
186       this.duration = duration;
187       this.hist = hist;
188     }
189 
190     public final long duration;
191     public final Histogram hist;
192 
193     @Override
194     public String toString() {
195       return Long.toString(duration);
196     }
197 
198     @Override public int compareTo(RunResult o) {
199       return Long.compare(this.duration, o.duration);
200     }
201   }
202 
203   /**
204    * Constructor
205    * @param conf Configuration object
206    */
207   public PerformanceEvaluation(final Configuration conf) {
208     super(conf);
209   }
210 
211   protected static void addCommandDescriptor(Class<? extends Test> cmdClass,
212       String name, String description) {
213     CmdDescriptor cmdDescriptor = new CmdDescriptor(cmdClass, name, description);
214     COMMANDS.put(name, cmdDescriptor);
215   }
216 
217   /**
218    * Implementations can have their status set.
219    */
220   interface Status {
221     /**
222      * Sets status
223      * @param msg status message
224      * @throws IOException
225      */
226     void setStatus(final String msg) throws IOException;
227   }
228 
229   /**
230    * MapReduce job that runs a performance evaluation client in each map task.
231    */
232   public static class EvaluationMapTask
233       extends Mapper<LongWritable, Text, LongWritable, LongWritable> {
234 
235     /** configuration parameter name that contains the command */
236     public final static String CMD_KEY = "EvaluationMapTask.command";
237     /** configuration parameter name that contains the PE impl */
238     public static final String PE_KEY = "EvaluationMapTask.performanceEvalImpl";
239 
240     private Class<? extends Test> cmd;
241 
242     @Override
243     protected void setup(Context context) throws IOException, InterruptedException {
244       this.cmd = forName(context.getConfiguration().get(CMD_KEY), Test.class);
245 
246       // this is required so that extensions of PE are instantiated within the
247       // map reduce task...
248       Class<? extends PerformanceEvaluation> peClass =
249           forName(context.getConfiguration().get(PE_KEY), PerformanceEvaluation.class);
250       try {
251         peClass.getConstructor(Configuration.class).newInstance(context.getConfiguration());
252       } catch (Exception e) {
253         throw new IllegalStateException("Could not instantiate PE instance", e);
254       }
255     }
256 
257     private <Type> Class<? extends Type> forName(String className, Class<Type> type) {
258       try {
259         return Class.forName(className).asSubclass(type);
260       } catch (ClassNotFoundException e) {
261         throw new IllegalStateException("Could not find class for name: " + className, e);
262       }
263     }
264 
265     @Override
266     protected void map(LongWritable key, Text value, final Context context)
267            throws IOException, InterruptedException {
268 
269       Status status = new Status() {
270         @Override
271         public void setStatus(String msg) {
272            context.setStatus(msg);
273         }
274       };
275 
276       ObjectMapper mapper = new ObjectMapper();
277       TestOptions opts = mapper.readValue(value.toString(), TestOptions.class);
278       Configuration conf = HBaseConfiguration.create(context.getConfiguration());
279       final Connection con = ConnectionFactory.createConnection(conf);
280 
281       // Evaluation task
282       RunResult result = PerformanceEvaluation.runOneClient(this.cmd, conf, con, opts, status);
283       // Collect how much time the thing took. Report as map output and
284       // to the ELAPSED_TIME counter.
285       context.getCounter(Counter.ELAPSED_TIME).increment(result.duration);
286       context.getCounter(Counter.ROWS).increment(opts.perClientRunRows);
287       context.write(new LongWritable(opts.startRow), new LongWritable(result.duration));
288       context.progress();
289     }
290   }
291 
292   /*
293    * If the table does not already exist, create it. Also recreate the table when
294    * {@code opts.presplitRegions} is specified, or when the existing table's
295    * split policy or region replica count doesn't match what was requested.
296    */
297   static boolean checkTable(Admin admin, TestOptions opts) throws IOException {
298     TableName tableName = TableName.valueOf(opts.tableName);
299     boolean needsDelete = false, exists = admin.tableExists(tableName);
300     boolean isReadCmd = opts.cmdName.toLowerCase().contains("read")
301       || opts.cmdName.toLowerCase().contains("scan");
302     if (!exists && isReadCmd) {
303       throw new IllegalStateException(
304         "Must specify an existing table for read commands. Run a write command first.");
305     }
306     HTableDescriptor desc =
307       exists ? admin.getTableDescriptor(TableName.valueOf(opts.tableName)) : null;
308     byte[][] splits = getSplits(opts);
309 
310     // recreate the table when the user has requested a presplit, or when the existing
311     // {RegionSplitPolicy, replica count} does not match what was requested.
312     if ((exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions)
313       || (!isReadCmd && desc != null && desc.getRegionSplitPolicyClassName() != opts.splitPolicy)
314       || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas)) {
315       needsDelete = true;
316       // Log the inputs to this decision so users can see why their table was recreated.
317       LOG.debug(Objects.toStringHelper("needsDelete")
318         .add("needsDelete", needsDelete)
319         .add("isReadCmd", isReadCmd)
320         .add("exists", exists)
321         .add("desc", desc)
322         .add("presplit", opts.presplitRegions)
323         .add("splitPolicy", opts.splitPolicy)
324         .add("replicas", opts.replicas));
325     }
326 
327     // remove an existing table
328     if (needsDelete) {
329       if (admin.isTableEnabled(tableName)) {
330         admin.disableTable(tableName);
331       }
332       admin.deleteTable(tableName);
333     }
334 
335     // table creation is necessary
336     if (!exists || needsDelete) {
337       desc = getTableDescriptor(opts);
338       if (splits != null) {
339         if (LOG.isDebugEnabled()) {
340           for (int i = 0; i < splits.length; i++) {
341             LOG.debug(" split " + i + ": " + Bytes.toStringBinary(splits[i]));
342           }
343         }
344       }
345       admin.createTable(desc, splits);
346       LOG.info("Table " + desc + " created");
347     }
348     return admin.tableExists(tableName);
349   }
350 
351   /**
352    * Create an HTableDescriptor from provided TestOptions.
353    */
354   protected static HTableDescriptor getTableDescriptor(TestOptions opts) {
355     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(opts.tableName));
356     HColumnDescriptor family = new HColumnDescriptor(FAMILY_NAME);
357     family.setDataBlockEncoding(opts.blockEncoding);
358     family.setCompressionType(opts.compression);
359     family.setBloomFilterType(opts.bloomType);
360     if (opts.inMemoryCF) {
361       family.setInMemory(true);
362     }
363     desc.addFamily(family);
364     if (opts.replicas != DEFAULT_OPTS.replicas) {
365       desc.setRegionReplication(opts.replicas);
366     }
367     if (opts.splitPolicy != DEFAULT_OPTS.splitPolicy) {
368       desc.setRegionSplitPolicyClassName(opts.splitPolicy);
369     }
370     return desc;
371   }
372 
373   /**
374    * Generates split keys based on the total number of rows and the requested number of pre-split regions.
375    */
376   protected static byte[][] getSplits(TestOptions opts) {
377     if (opts.presplitRegions == DEFAULT_OPTS.presplitRegions)
378       return null;
379 
380     int numSplitPoints = opts.presplitRegions - 1;
381     byte[][] splits = new byte[numSplitPoints][];
382     int jump = opts.totalRows / opts.presplitRegions;
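        // Split keys at jump, 2 * jump, ..., (presplitRegions - 1) * jump carve the row space
        // into opts.presplitRegions roughly equal regions.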
383     for (int i = 0; i < numSplitPoints; i++) {
384       int rowkey = jump * (1 + i);
385       splits[i] = format(rowkey);
386     }
387     return splits;
388   }
389 
390   /*
391    * Run all clients in this VM, each in its own thread.
392    */
393   static RunResult[] doLocalClients(final TestOptions opts, final Configuration conf)
394       throws IOException, InterruptedException {
395     final Class<? extends Test> cmd = determineCommandClass(opts.cmdName);
396     assert cmd != null;
397     @SuppressWarnings("unchecked")
398     Future<RunResult>[] threads = new Future[opts.numClientThreads];
399     RunResult[] results = new RunResult[opts.numClientThreads];
400     ExecutorService pool = Executors.newFixedThreadPool(opts.numClientThreads,
401       new ThreadFactoryBuilder().setNameFormat("TestClient-%s").build());
402     final Connection con = ConnectionFactory.createConnection(conf);
403     for (int i = 0; i < threads.length; i++) {
404       final int index = i;
405       threads[i] = pool.submit(new Callable<RunResult>() {
406         @Override
407         public RunResult call() throws Exception {
408           TestOptions threadOpts = new TestOptions(opts);
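              // When no explicit start row was set (startRow == 0), give each client thread its
              // own contiguous slice of the key space: thread index * rows-per-client.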
409           if (threadOpts.startRow == 0) threadOpts.startRow = index * threadOpts.perClientRunRows;
410           RunResult run = runOneClient(cmd, conf, con, threadOpts, new Status() {
411             @Override
412             public void setStatus(final String msg) throws IOException {
413               LOG.info(msg);
414             }
415           });
416           LOG.info("Finished " + Thread.currentThread().getName() + " in " + run.duration +
417             "ms over " + threadOpts.perClientRunRows + " rows");
418           return run;
419         }
420       });
421     }
422     pool.shutdown();
423 
424     for (int i = 0; i < threads.length; i++) {
425       try {
426         results[i] = threads[i].get();
427       } catch (ExecutionException e) {
428         throw new IOException(e.getCause());
429       }
430     }
431     final String test = cmd.getSimpleName();
432     LOG.info("[" + test + "] Summary of timings (ms): "
433              + Arrays.toString(results));
434     Arrays.sort(results);
435     long total = 0;
436     for (RunResult result : results) {
437       total += result.duration;
438     }
439     LOG.info("[" + test + "]"
440       + "\tMin: " + results[0] + "ms"
441       + "\tMax: " + results[results.length - 1] + "ms"
442       + "\tAvg: " + (total / results.length) + "ms");
443 
444     con.close();
445 
446     return results;
447   }
448 
449   /*
450    * Run a mapreduce job.  Run as many maps as asked-for clients.
451    * Before we start up the job, write out an input file with an instruction
452    * per client regarding which row each is to start on.
453    * @param cmd Command to run.
454    * @throws IOException
455    */
456   static Job doMapReduce(TestOptions opts, final Configuration conf)
457       throws IOException, InterruptedException, ClassNotFoundException {
458     final Class<? extends Test> cmd = determineCommandClass(opts.cmdName);
459     assert cmd != null;
460     Path inputDir = writeInputFile(conf, opts);
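        // The input file written above holds one JSON-serialized TestOptions per work unit; with
        // NLineInputFormat configured for one line per split below, each line becomes its own map task.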
461     conf.set(EvaluationMapTask.CMD_KEY, cmd.getName());
462     conf.set(EvaluationMapTask.PE_KEY, PerformanceEvaluation.class.getName());
463     Job job = Job.getInstance(conf);
464     job.setJarByClass(PerformanceEvaluation.class);
465     job.setJobName("HBase Performance Evaluation - " + opts.cmdName);
466 
467     job.setInputFormatClass(NLineInputFormat.class);
468     NLineInputFormat.setInputPaths(job, inputDir);
469     // this is the default, but be explicit about it just in case.
470     NLineInputFormat.setNumLinesPerSplit(job, 1);
471 
472     job.setOutputKeyClass(LongWritable.class);
473     job.setOutputValueClass(LongWritable.class);
474 
475     job.setMapperClass(EvaluationMapTask.class);
476     job.setReducerClass(LongSumReducer.class);
477 
478     job.setNumReduceTasks(1);
479 
480     job.setOutputFormatClass(TextOutputFormat.class);
481     TextOutputFormat.setOutputPath(job, new Path(inputDir.getParent(), "outputs"));
482 
483     TableMapReduceUtil.addDependencyJars(job);
484     TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
485       Histogram.class,     // yammer metrics
486       ObjectMapper.class); // jackson-mapper-asl
487 
488     TableMapReduceUtil.initCredentials(job);
489 
490     job.waitForCompletion(true);
491     return job;
492   }
493 
494   /*
495    * Write the input file of per-client offsets for the mapreduce job.
496    * @param c Configuration
497    * @return Directory that contains the written file.
498    * @throws IOException
499    */
500   private static Path writeInputFile(final Configuration c, final TestOptions opts) throws IOException {
501     SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMddHHmmss");
502     Path jobdir = new Path(PERF_EVAL_DIR, formatter.format(new Date()));
503     Path inputDir = new Path(jobdir, "inputs");
504 
505     FileSystem fs = FileSystem.get(c);
506     fs.mkdirs(inputDir);
507 
508     Path inputFile = new Path(inputDir, "input.txt");
509     PrintStream out = new PrintStream(fs.create(inputFile));
510     // Make input random.
511     Map<Integer, String> m = new TreeMap<Integer, String>();
512     Hash h = MurmurHash.getInstance();
513     int perClientRows = (opts.totalRows / opts.numClientThreads);
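        // Each client's share of rows is cut into ten chunks; every chunk's serialized TestOptions
        // is keyed by its MurmurHash in the TreeMap, so the lines come out in hash order rather
        // than grouped by client.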
514     try {
515       for (int i = 0; i < 10; i++) {
516         for (int j = 0; j < opts.numClientThreads; j++) {
517           TestOptions next = new TestOptions(opts);
518           next.startRow = (j * perClientRows) + (i * (perClientRows/10));
519           next.perClientRunRows = perClientRows / 10;
520           String s = MAPPER.writeValueAsString(next);
521           LOG.info("maptask input=" + s);
522           int hash = h.hash(Bytes.toBytes(s));
523           m.put(hash, s);
524         }
525       }
526       for (Map.Entry<Integer, String> e: m.entrySet()) {
527         out.println(e.getValue());
528       }
529     } finally {
530       out.close();
531     }
532     return inputDir;
533   }
534 
535   /**
536    * Describes a command.
537    */
538   static class CmdDescriptor {
539     private Class<? extends Test> cmdClass;
540     private String name;
541     private String description;
542 
543     CmdDescriptor(Class<? extends Test> cmdClass, String name, String description) {
544       this.cmdClass = cmdClass;
545       this.name = name;
546       this.description = description;
547     }
548 
549     public Class<? extends Test> getCmdClass() {
550       return cmdClass;
551     }
552 
553     public String getName() {
554       return name;
555     }
556 
557     public String getDescription() {
558       return description;
559     }
560   }
561 
562   /**
563    * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation}.
564    * This makes tracking all these arguments a little easier.
565    * NOTE: When adding an option, you need to add a data member, a getter/setter (to make JSON
566    * serialization of this TestOptions class behave), and you need to update the clone constructor
567    * below, copying your new option from 'that' to 'this'.  Look for 'clone' below.
568    */
569   static class TestOptions {
570     String cmdName = null;
571     boolean nomapred = false;
572     boolean filterAll = false;
573     int startRow = 0;
574     float size = 1.0f;
575     int perClientRunRows = DEFAULT_ROWS_PER_GB;
576     int numClientThreads = 1;
577     int totalRows = DEFAULT_ROWS_PER_GB;
578     float sampleRate = 1.0f;
579     double traceRate = 0.0;
580     String tableName = TABLE_NAME;
581     boolean flushCommits = true;
582     boolean writeToWAL = true;
583     boolean autoFlush = false;
584     boolean oneCon = false;
585     boolean useTags = false;
586     int noOfTags = 1;
587     boolean reportLatency = false;
588     int multiGet = 0;
589     int randomSleep = 0;
590     boolean inMemoryCF = false;
591     int presplitRegions = 0;
592     int replicas = HTableDescriptor.DEFAULT_REGION_REPLICATION;
593     String splitPolicy = null;
594     Compression.Algorithm compression = Compression.Algorithm.NONE;
595     BloomType bloomType = BloomType.ROW;
596     DataBlockEncoding blockEncoding = DataBlockEncoding.NONE;
597     boolean valueRandom = false;
598     boolean valueZipf = false;
599     int valueSize = DEFAULT_VALUE_LENGTH;
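        // Status-reporting interval: one tenth of the per-client row count, falling back to the
        // full row count when a tenth would round down to zero.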
600     int period = (this.perClientRunRows / 10) == 0? perClientRunRows: perClientRunRows / 10;
601     int columns = 1;
602     int caching = 30;
603     boolean addColumns = true;
604 
605     public TestOptions() {}
606 
607     /**
608      * Clone constructor.
609      * @param that Object to copy from.
610      */
611     public TestOptions(TestOptions that) {
612       this.cmdName = that.cmdName;
613       this.nomapred = that.nomapred;
614       this.startRow = that.startRow;
615       this.size = that.size;
616       this.perClientRunRows = that.perClientRunRows;
617       this.numClientThreads = that.numClientThreads;
618       this.totalRows = that.totalRows;
619       this.sampleRate = that.sampleRate;
620       this.traceRate = that.traceRate;
621       this.tableName = that.tableName;
622       this.flushCommits = that.flushCommits;
623       this.writeToWAL = that.writeToWAL;
624       this.autoFlush = that.autoFlush;
625       this.oneCon = that.oneCon;
626       this.useTags = that.useTags;
627       this.noOfTags = that.noOfTags;
628       this.reportLatency = that.reportLatency;
629       this.multiGet = that.multiGet;
630       this.inMemoryCF = that.inMemoryCF;
631       this.presplitRegions = that.presplitRegions;
632       this.replicas = that.replicas;
633       this.splitPolicy = that.splitPolicy;
634       this.compression = that.compression;
635       this.blockEncoding = that.blockEncoding;
636       this.filterAll = that.filterAll;
637       this.bloomType = that.bloomType;
638       this.valueRandom = that.valueRandom;
639       this.valueZipf = that.valueZipf;
640       this.valueSize = that.valueSize;
641       this.period = that.period;
642       this.randomSleep = that.randomSleep;
643       this.addColumns = that.addColumns;
644       this.columns = that.columns;
645       this.caching = that.caching;
646     }
647 
648     public int getCaching() {
649       return this.caching;
650     }
651 
652     public void setCaching(final int caching) {
653       this.caching = caching;
654     }
655 
656     public int getColumns() {
657       return this.columns;
658     }
659 
660     public void setColumns(final int columns) {
661       this.columns = columns;
662     }
663 
664     public boolean isValueZipf() {
665       return valueZipf;
666     }
667 
668     public void setValueZipf(boolean valueZipf) {
669       this.valueZipf = valueZipf;
670     }
671 
672     public String getCmdName() {
673       return cmdName;
674     }
675 
676     public void setCmdName(String cmdName) {
677       this.cmdName = cmdName;
678     }
679 
680     public int getRandomSleep() {
681       return randomSleep;
682     }
683 
684     public void setRandomSleep(int randomSleep) {
685       this.randomSleep = randomSleep;
686     }
687 
688     public int getReplicas() {
689       return replicas;
690     }
691 
692     public void setReplicas(int replicas) {
693       this.replicas = replicas;
694     }
695 
696     public String getSplitPolicy() {
697       return splitPolicy;
698     }
699 
700     public void setSplitPolicy(String splitPolicy) {
701       this.splitPolicy = splitPolicy;
702     }
703 
704     public void setNomapred(boolean nomapred) {
705       this.nomapred = nomapred;
706     }
707 
708     public void setFilterAll(boolean filterAll) {
709       this.filterAll = filterAll;
710     }
711 
712     public void setStartRow(int startRow) {
713       this.startRow = startRow;
714     }
715 
716     public void setSize(float size) {
717       this.size = size;
718     }
719 
720     public void setPerClientRunRows(int perClientRunRows) {
721       this.perClientRunRows = perClientRunRows;
722     }
723 
724     public void setNumClientThreads(int numClientThreads) {
725       this.numClientThreads = numClientThreads;
726     }
727 
728     public void setTotalRows(int totalRows) {
729       this.totalRows = totalRows;
730     }
731 
732     public void setSampleRate(float sampleRate) {
733       this.sampleRate = sampleRate;
734     }
735 
736     public void setTraceRate(double traceRate) {
737       this.traceRate = traceRate;
738     }
739 
740     public void setTableName(String tableName) {
741       this.tableName = tableName;
742     }
743 
744     public void setFlushCommits(boolean flushCommits) {
745       this.flushCommits = flushCommits;
746     }
747 
748     public void setWriteToWAL(boolean writeToWAL) {
749       this.writeToWAL = writeToWAL;
750     }
751 
752     public void setAutoFlush(boolean autoFlush) {
753       this.autoFlush = autoFlush;
754     }
755 
756     public void setOneCon(boolean oneCon) {
757       this.oneCon = oneCon;
758     }
759 
760     public void setUseTags(boolean useTags) {
761       this.useTags = useTags;
762     }
763 
764     public void setNoOfTags(int noOfTags) {
765       this.noOfTags = noOfTags;
766     }
767 
768     public void setReportLatency(boolean reportLatency) {
769       this.reportLatency = reportLatency;
770     }
771 
772     public void setMultiGet(int multiGet) {
773       this.multiGet = multiGet;
774     }
775 
776     public void setInMemoryCF(boolean inMemoryCF) {
777       this.inMemoryCF = inMemoryCF;
778     }
779 
780     public void setPresplitRegions(int presplitRegions) {
781       this.presplitRegions = presplitRegions;
782     }
783 
784     public void setCompression(Compression.Algorithm compression) {
785       this.compression = compression;
786     }
787 
788     public void setBloomType(BloomType bloomType) {
789       this.bloomType = bloomType;
790     }
791 
792     public void setBlockEncoding(DataBlockEncoding blockEncoding) {
793       this.blockEncoding = blockEncoding;
794     }
795 
796     public void setValueRandom(boolean valueRandom) {
797       this.valueRandom = valueRandom;
798     }
799 
800     public void setValueSize(int valueSize) {
801       this.valueSize = valueSize;
802     }
803 
804     public void setPeriod(int period) {
805       this.period = period;
806     }
807 
808     public boolean isNomapred() {
809       return nomapred;
810     }
811 
812     public boolean isFilterAll() {
813       return filterAll;
814     }
815 
816     public int getStartRow() {
817       return startRow;
818     }
819 
820     public float getSize() {
821       return size;
822     }
823 
824     public int getPerClientRunRows() {
825       return perClientRunRows;
826     }
827 
828     public int getNumClientThreads() {
829       return numClientThreads;
830     }
831 
832     public int getTotalRows() {
833       return totalRows;
834     }
835 
836     public float getSampleRate() {
837       return sampleRate;
838     }
839 
840     public double getTraceRate() {
841       return traceRate;
842     }
843 
844     public String getTableName() {
845       return tableName;
846     }
847 
848     public boolean isFlushCommits() {
849       return flushCommits;
850     }
851 
852     public boolean isWriteToWAL() {
853       return writeToWAL;
854     }
855 
856     public boolean isAutoFlush() {
857       return autoFlush;
858     }
859 
860     public boolean isUseTags() {
861       return useTags;
862     }
863 
864     public int getNoOfTags() {
865       return noOfTags;
866     }
867 
868     public boolean isReportLatency() {
869       return reportLatency;
870     }
871 
872     public int getMultiGet() {
873       return multiGet;
874     }
875 
876     public boolean isInMemoryCF() {
877       return inMemoryCF;
878     }
879 
880     public int getPresplitRegions() {
881       return presplitRegions;
882     }
883 
884     public Compression.Algorithm getCompression() {
885       return compression;
886     }
887 
888     public DataBlockEncoding getBlockEncoding() {
889       return blockEncoding;
890     }
891 
892     public boolean isValueRandom() {
893       return valueRandom;
894     }
895 
896     public int getValueSize() {
897       return valueSize;
898     }
899 
900     public int getPeriod() {
901       return period;
902     }
903 
904     public BloomType getBloomType() {
905       return bloomType;
906     }
907 
908     public boolean isOneCon() {
909       return oneCon;
910     }
911 
912     public boolean getAddColumns() {
913       return addColumns;
914     }
915 
916     public void setAddColumns(boolean addColumns) {
917       this.addColumns = addColumns;
918     }
919   }
920 
921   /*
922    * A test.
923    * Subclass to particularize what happens per row.
924    */
925   static abstract class Test {
926     // The below makes it so that when Tests are all running in the one
927     // JVM, they each have a differently seeded Random.
928     private static final Random randomSeed = new Random(System.currentTimeMillis());
929 
930     private static long nextRandomSeed() {
931       return randomSeed.nextLong();
932     }
933     private final int everyN;
934 
935     protected final Random rand = new Random(nextRandomSeed());
936     protected final Configuration conf;
937     protected final TestOptions opts;
938 
939     private final Status status;
940     private final Sampler<?> traceSampler;
941     private final SpanReceiverHost receiverHost;
942     protected Connection connection;
943 
944     private String testName;
945     private Histogram latency;
946     private Histogram valueSize;
947     private RandomDistribution.Zipf zipf;
948 
949     /**
950      * Note that all subclasses of this class must provide a public constructor
951      * that has the exact same list of arguments.
952      */
953     Test(final Connection con, final TestOptions options, final Status status) {
954       this.connection = con;
955       this.conf = con == null ? HBaseConfiguration.create() : this.connection.getConfiguration();
956       this.opts = options;
957       this.status = status;
958       this.testName = this.getClass().getSimpleName();
959       receiverHost = SpanReceiverHost.getInstance(conf);
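          // Pick the HTrace sampler from the requested trace rate: >= 1.0 traces every row,
          // a fraction in (0, 1) samples probabilistically, and anything else disables tracing.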
960       if (options.traceRate >= 1.0) {
961         this.traceSampler = Sampler.ALWAYS;
962       } else if (options.traceRate > 0.0) {
963         conf.setDouble("hbase.sampler.fraction", options.traceRate);
964         this.traceSampler = new ProbabilitySampler(new HBaseHTraceConfiguration(conf));
965       } else {
966         this.traceSampler = Sampler.NEVER;
967       }
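          // The expression below reduces to (int) (1 / sampleRate); testTimed() only calls
          // testRow() for indexes divisible by everyN, so roughly sampleRate of the rows run.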
968       everyN = (int) (opts.totalRows / (opts.totalRows * opts.sampleRate));
969       if (options.isValueZipf()) {
970         this.zipf = new RandomDistribution.Zipf(this.rand, 1, options.getValueSize(), 1.1);
971       }
972       LOG.info("Sampling 1 every " + everyN + " out of " + opts.perClientRunRows + " total rows.");
973     }
974 
975     int getValueLength(final Random r) {
976       if (this.opts.isValueRandom()) return Math.abs(r.nextInt() % opts.valueSize);
977       else if (this.opts.isValueZipf()) return Math.abs(this.zipf.nextInt());
978       else return opts.valueSize;
979     }
980 
981     void updateValueSize(final Result [] rs) throws IOException {
982       if (rs == null || !isRandomValueSize()) return;
983       for (Result r: rs) updateValueSize(r);
984     }
985 
986     void updateValueSize(final Result r) throws IOException {
987       if (r == null || !isRandomValueSize()) return;
988       int size = 0;
989       for (CellScanner scanner = r.cellScanner(); scanner.advance();) {
990         size += scanner.current().getValueLength();
991       }
992       updateValueSize(size);
993     }
994 
995     void updateValueSize(final int valueSize) {
996       if (!isRandomValueSize()) return;
997       this.valueSize.update(valueSize);
998     }
999 
1000     String generateStatus(final int sr, final int i, final int lr) {
1001       return sr + "/" + i + "/" + lr + ", latency " + getShortLatencyReport() +
1002         (!isRandomValueSize()? "": ", value size " + getShortValueSizeReport());
1003     }
1004 
1005     boolean isRandomValueSize() {
1006       return opts.valueRandom;
1007     }
1008 
1009     protected int getReportingPeriod() {
1010       return opts.period;
1011     }
1012 
1013     /**
1014      * Populated by testTakedown. Only implemented by RandomReadTest at the moment.
1015      */
1016     public Histogram getLatency() {
1017       return latency;
1018     }
1019 
1020     void testSetup() throws IOException {
1021       if (!opts.oneCon) {
1022         this.connection = ConnectionFactory.createConnection(conf);
1023       }
1024       onStartup();
1025       latency = YammerHistogramUtils.newHistogram(new UniformSample(1024 * 500));
1026       valueSize = YammerHistogramUtils.newHistogram(new UniformSample(1024 * 500));
1027     }
1028 
1029     abstract void onStartup() throws IOException;
1030 
1031     void testTakedown() throws IOException {
1032       reportLatency();
1033       reportValueSize();
1034       onTakedown();
1035       if (!opts.oneCon) {
1036         connection.close();
1037       }
1038       receiverHost.closeReceivers();
1039     }
1040 
1041     abstract void onTakedown() throws IOException;
1042 
1043     /*
1044      * Run the test.
1045      * @return Elapsed time in milliseconds.
1046      * @throws IOException
1047      */
1048     long test() throws IOException, InterruptedException {
1049       testSetup();
1050       LOG.info("Timed test starting in thread " + Thread.currentThread().getName());
1051       final long startTime = System.nanoTime();
1052       try {
1053         testTimed();
1054       } finally {
1055         testTakedown();
1056       }
1057       return (System.nanoTime() - startTime) / 1000000;
1058     }
1059 
1060     /**
1061      * Provides an extension point for tests that don't want a per-row invocation; override this to drive the timed portion of the test yourself.
1062      */
1063     void testTimed() throws IOException, InterruptedException {
1064       int lastRow = opts.startRow + opts.perClientRunRows;
1065       // Report on completion of 1/10th of total.
1066       for (int i = opts.startRow; i < lastRow; i++) {
1067         if (i % everyN != 0) continue;
1068         long startTime = System.nanoTime();
1069         TraceScope scope = Trace.startSpan("test row", traceSampler);
1070         try {
1071           testRow(i);
1072         } finally {
1073           scope.close();
1074         }
1075         latency.update((System.nanoTime() - startTime) / 1000);
1076         if (status != null && i > 0 && (i % getReportingPeriod()) == 0) {
1077           status.setStatus(generateStatus(opts.startRow, i, lastRow));
1078         }
1079       }
1080     }
1081     /**
1082      * report percentiles of latency
1083      * @throws IOException
1084      */
1085     private void reportLatency() throws IOException {
1086       status.setStatus(testName + " latency log (microseconds), on " +
1087           latency.count() + " measures");
1088       reportHistogram(this.latency);
1089     }
1090 
1091     private void reportValueSize() throws IOException {
1092       status.setStatus(testName + " valueSize after " +
1093           valueSize.count() + " measures");
1094       reportHistogram(this.valueSize);
1095     }
1096 
1097     private void reportHistogram(final Histogram h) throws IOException {
1098       Snapshot sn = h.getSnapshot();
1099       status.setStatus(testName + " Min      = " + h.min());
1100       status.setStatus(testName + " Avg      = " + h.mean());
1101       status.setStatus(testName + " StdDev   = " + h.stdDev());
1102       status.setStatus(testName + " 50th     = " + sn.getMedian());
1103       status.setStatus(testName + " 75th     = " + sn.get75thPercentile());
1104       status.setStatus(testName + " 95th     = " + sn.get95thPercentile());
1105       status.setStatus(testName + " 99th     = " + sn.get99thPercentile());
1106       status.setStatus(testName + " 99.9th   = " + sn.get999thPercentile());
1107       status.setStatus(testName + " 99.99th  = " + sn.getValue(0.9999));
1108       status.setStatus(testName + " 99.999th = " + sn.getValue(0.99999));
1109       status.setStatus(testName + " Max      = " + h.max());
1110     }
1111 
1112     /**
1113      * @return Subset of the histogram's calculation.
1114      */
1115     public String getShortLatencyReport() {
1116       return YammerHistogramUtils.getShortHistogramReport(this.latency);
1117     }
1118 
1119     /**
1120      * @return Subset of the histogram's calculation.
1121      */
1122     public String getShortValueSizeReport() {
1123       return YammerHistogramUtils.getShortHistogramReport(this.valueSize);
1124     }
1125 
1126     /*
1127      * Test for individual row.
1128      * @param i Row index.
1129      */
1130     abstract void testRow(final int i) throws IOException, InterruptedException;
1131   }
1132 
1133   static abstract class TableTest extends Test {
1134     protected Table table;
1135 
1136     TableTest(Connection con, TestOptions options, Status status) {
1137       super(con, options, status);
1138     }
1139 
1140     @Override
1141     void onStartup() throws IOException {
1142       this.table = connection.getTable(TableName.valueOf(opts.tableName));
1143     }
1144 
1145     @Override
1146     void onTakedown() throws IOException {
1147       table.close();
1148     }
1149   }
1150 
1151   static abstract class BufferedMutatorTest extends Test {
1152     protected BufferedMutator mutator;
1153 
1154     BufferedMutatorTest(Connection con, TestOptions options, Status status) {
1155       super(con, options, status);
1156     }
1157 
1158     @Override
1159     void onStartup() throws IOException {
1160       this.mutator = connection.getBufferedMutator(TableName.valueOf(opts.tableName));
1161     }
1162 
1163     @Override
1164     void onTakedown() throws IOException {
1165       mutator.close();
1166     }
1167   }
1168 
1169   static class RandomSeekScanTest extends TableTest {
1170     RandomSeekScanTest(Connection con, TestOptions options, Status status) {
1171       super(con, options, status);
1172     }
1173 
1174     @Override
1175     void testRow(final int i) throws IOException {
1176       Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
1177       scan.setCaching(opts.caching);
1178       FilterList list = new FilterList();
1179       if (opts.addColumns) {
1180         scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
1181       } else {
1182         scan.addFamily(FAMILY_NAME);
1183       }
1184       if (opts.filterAll) {
1185         list.addFilter(new FilterAllFilter());
1186       }
1187       list.addFilter(new WhileMatchFilter(new PageFilter(120)));
1188       scan.setFilter(list);
1189       ResultScanner s = this.table.getScanner(scan);
1190       for (Result rr; (rr = s.next()) != null;) {
1191         updateValueSize(rr);
1192       }
1193       s.close();
1194     }
1195 
1196     @Override
1197     protected int getReportingPeriod() {
1198       int period = opts.perClientRunRows / 100;
1199       return period == 0 ? opts.perClientRunRows : period;
1200     }
1201 
1202   }
1203 
1204   static abstract class RandomScanWithRangeTest extends TableTest {
1205     RandomScanWithRangeTest(Connection con, TestOptions options, Status status) {
1206       super(con, options, status);
1207     }
1208 
1209     @Override
1210     void testRow(final int i) throws IOException {
1211       Pair<byte[], byte[]> startAndStopRow = getStartAndStopRow();
1212       Scan scan = new Scan(startAndStopRow.getFirst(), startAndStopRow.getSecond());
1213       scan.setCaching(opts.caching);
1214       if (opts.filterAll) {
1215         scan.setFilter(new FilterAllFilter());
1216       }
1217       if (opts.addColumns) {
1218         scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
1219       } else {
1220         scan.addFamily(FAMILY_NAME);
1221       }
1222       Result r = null;
1223       int count = 0;
1224       ResultScanner s = this.table.getScanner(scan);
1225       for (; (r = s.next()) != null;) {
1226         updateValueSize(r);
1227         count++;
1228       }
1229       if (i % 100 == 0) {
1230         LOG.info(String.format("Scan for key range %s - %s returned %s rows",
1231             Bytes.toString(startAndStopRow.getFirst()),
1232             Bytes.toString(startAndStopRow.getSecond()), count));
1233       }
1234 
1235       s.close();
1236     }
1237 
1238     protected abstract Pair<byte[],byte[]> getStartAndStopRow();
1239 
1240     protected Pair<byte[], byte[]> generateStartAndStopRows(int maxRange) {
1241       int start = this.rand.nextInt(Integer.MAX_VALUE) % opts.totalRows;
1242       int stop = start + maxRange;
1243       return new Pair<byte[],byte[]>(format(start), format(stop));
1244     }
1245 
1246     @Override
1247     protected int getReportingPeriod() {
1248       int period = opts.perClientRunRows / 100;
1249       return period == 0? opts.perClientRunRows: period;
1250     }
1251   }
1252 
1253   static class RandomScanWithRange10Test extends RandomScanWithRangeTest {
1254     RandomScanWithRange10Test(Connection con, TestOptions options, Status status) {
1255       super(con, options, status);
1256     }
1257 
1258     @Override
1259     protected Pair<byte[], byte[]> getStartAndStopRow() {
1260       return generateStartAndStopRows(10);
1261     }
1262   }
1263 
1264   static class RandomScanWithRange100Test extends RandomScanWithRangeTest {
1265     RandomScanWithRange100Test(Connection con, TestOptions options, Status status) {
1266       super(con, options, status);
1267     }
1268 
1269     @Override
1270     protected Pair<byte[], byte[]> getStartAndStopRow() {
1271       return generateStartAndStopRows(100);
1272     }
1273   }
1274 
1275   static class RandomScanWithRange1000Test extends RandomScanWithRangeTest {
1276     RandomScanWithRange1000Test(Connection con, TestOptions options, Status status) {
1277       super(con, options, status);
1278     }
1279 
1280     @Override
1281     protected Pair<byte[], byte[]> getStartAndStopRow() {
1282       return generateStartAndStopRows(1000);
1283     }
1284   }
1285 
1286   static class RandomScanWithRange10000Test extends RandomScanWithRangeTest {
1287     RandomScanWithRange10000Test(Connection con, TestOptions options, Status status) {
1288       super(con, options, status);
1289     }
1290 
1291     @Override
1292     protected Pair<byte[], byte[]> getStartAndStopRow() {
1293       return generateStartAndStopRows(10000);
1294     }
1295   }
1296 
1297   static class RandomReadTest extends TableTest {
1298     private final Consistency consistency;
1299     private ArrayList<Get> gets;
1300     private Random rd = new Random();
1301 
1302     RandomReadTest(Connection con, TestOptions options, Status status) {
1303       super(con, options, status);
1304       consistency = options.replicas == DEFAULT_OPTS.replicas ? null : Consistency.TIMELINE;
1305       if (opts.multiGet > 0) {
1306         LOG.info("MultiGet enabled. Sending GETs in batches of " + opts.multiGet + ".");
1307         this.gets = new ArrayList<Get>(opts.multiGet);
1308       }
1309     }
1310 
1311     @Override
1312     void testRow(final int i) throws IOException, InterruptedException {
1313       if (opts.randomSleep > 0) {
1314         Thread.sleep(rd.nextInt(opts.randomSleep));
1315       }
1316       Get get = new Get(getRandomRow(this.rand, opts.totalRows));
1317       if (opts.addColumns) {
1318         get.addColumn(FAMILY_NAME, QUALIFIER_NAME);
1319       } else {
1320         get.addFamily(FAMILY_NAME);
1321       }
1322       if (opts.filterAll) {
1323         get.setFilter(new FilterAllFilter());
1324       }
1325       get.setConsistency(consistency);
1326       if (LOG.isTraceEnabled()) LOG.trace(get.toString());
1327       if (opts.multiGet > 0) {
1328         this.gets.add(get);
1329         if (this.gets.size() == opts.multiGet) {
1330           Result [] rs = this.table.get(this.gets);
1331           updateValueSize(rs);
1332           this.gets.clear();
1333         }
1334       } else {
1335         updateValueSize(this.table.get(get));
1336       }
1337     }
1338 
1339     @Override
1340     protected int getReportingPeriod() {
1341       int period = opts.perClientRunRows / 10;
1342       return period == 0 ? opts.perClientRunRows : period;
1343     }
1344 
1345     @Override
1346     protected void testTakedown() throws IOException {
1347       if (this.gets != null && this.gets.size() > 0) {
1348         this.table.get(gets);
1349         this.gets.clear();
1350       }
1351       super.testTakedown();
1352     }
1353   }
1354 
1355   static class RandomWriteTest extends BufferedMutatorTest {
1356     RandomWriteTest(Connection con, TestOptions options, Status status) {
1357       super(con, options, status);
1358     }
1359 
1360     @Override
1361     void testRow(final int i) throws IOException {
1362       byte[] row = getRandomRow(this.rand, opts.totalRows);
1363       Put put = new Put(row);
1364       for (int column = 0; column < opts.columns; column++) {
1365         byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column);
1366         byte[] value = generateData(this.rand, getValueLength(this.rand));
1367         if (opts.useTags) {
1368           byte[] tag = generateData(this.rand, TAG_LENGTH);
1369           Tag[] tags = new Tag[opts.noOfTags];
1370           for (int n = 0; n < opts.noOfTags; n++) {
1371             Tag t = new Tag((byte) n, tag);
1372             tags[n] = t;
1373           }
1374           KeyValue kv = new KeyValue(row, FAMILY_NAME, qualifier, HConstants.LATEST_TIMESTAMP,
1375               value, tags);
1376           put.add(kv);
1377           updateValueSize(kv.getValueLength());
1378         } else {
1379           put.add(FAMILY_NAME, qualifier, value);
1380           updateValueSize(value.length);
1381         }
1382       }
1383       put.setDurability(opts.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
1384       mutator.mutate(put);
1385     }
1386   }
1387 
1388   static class ScanTest extends TableTest {
1389     private ResultScanner testScanner;
1390 
1391     ScanTest(Connection con, TestOptions options, Status status) {
1392       super(con, options, status);
1393     }
1394 
1395     @Override
1396     void testTakedown() throws IOException {
1397       if (this.testScanner != null) {
1398         this.testScanner.close();
1399       }
1400       super.testTakedown();
1401     }
1402 
1403 
1404     @Override
1405     void testRow(final int i) throws IOException {
1406       if (this.testScanner == null) {
1407         Scan scan = new Scan(format(opts.startRow));
1408         scan.setCaching(opts.caching);
1409         if (opts.addColumns) {
1410           scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
1411         } else {
1412           scan.addFamily(FAMILY_NAME);
1413         }
1414         if (opts.filterAll) {
1415           scan.setFilter(new FilterAllFilter());
1416         }
1417         this.testScanner = table.getScanner(scan);
1418       }
1419       Result r = testScanner.next();
1420       updateValueSize(r);
1421     }
1422 
1423   }
1424 
1425   static class SequentialReadTest extends TableTest {
1426     SequentialReadTest(Connection con, TestOptions options, Status status) {
1427       super(con, options, status);
1428     }
1429 
1430     @Override
1431     void testRow(final int i) throws IOException {
1432       Get get = new Get(format(i));
1433       if (opts.addColumns) {
1434         get.addColumn(FAMILY_NAME, QUALIFIER_NAME);
1435       }
1436       if (opts.filterAll) {
1437         get.setFilter(new FilterAllFilter());
1438       }
1439       updateValueSize(table.get(get));
1440     }
1441   }
1442 
1443   static class SequentialWriteTest extends BufferedMutatorTest {
1444     SequentialWriteTest(Connection con, TestOptions options, Status status) {
1445       super(con, options, status);
1446     }
1447 
1448     @Override
1449     void testRow(final int i) throws IOException {
1450       byte[] row = format(i);
1451       Put put = new Put(row);
1452       for (int column = 0; column < opts.columns; column++) {
1453         byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column);
1454         byte[] value = generateData(this.rand, getValueLength(this.rand));
1455         if (opts.useTags) {
1456           byte[] tag = generateData(this.rand, TAG_LENGTH);
1457           Tag[] tags = new Tag[opts.noOfTags];
1458           for (int n = 0; n < opts.noOfTags; n++) {
1459             Tag t = new Tag((byte) n, tag);
1460             tags[n] = t;
1461           }
1462           KeyValue kv = new KeyValue(row, FAMILY_NAME, qualifier, HConstants.LATEST_TIMESTAMP,
1463               value, tags);
1464           put.add(kv);
1465           updateValueSize(kv.getValueLength());
1466         } else {
1467           put.add(FAMILY_NAME, qualifier, value);
1468           updateValueSize(value.length);
1469         }
1470       }
1471       put.setDurability(opts.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
1472       mutator.mutate(put);
1473     }
1474   }
1475 
1476   static class FilteredScanTest extends TableTest {
1477     protected static final Log LOG = LogFactory.getLog(FilteredScanTest.class.getName());
1478 
1479     FilteredScanTest(Connection con, TestOptions options, Status status) {
1480       super(con, options, status);
1481     }
1482 
1483     @Override
1484     void testRow(int i) throws IOException {
1485       byte[] value = generateData(this.rand, getValueLength(this.rand));
1486       Scan scan = constructScan(value);
1487       ResultScanner scanner = null;
1488       try {
1489         scanner = this.table.getScanner(scan);
1490         for (Result r = null; (r = scanner.next()) != null;) {
1491           updateValueSize(r);
1492         }
1493       } finally {
1494         if (scanner != null) scanner.close();
1495       }
1496     }
1497 
1498     protected Scan constructScan(byte[] valuePrefix) throws IOException {
1499       FilterList list = new FilterList();
1500       Filter filter = new SingleColumnValueFilter(
1501           FAMILY_NAME, COLUMN_ZERO, CompareFilter.CompareOp.EQUAL,
1502           new BinaryComparator(valuePrefix)
1503       );
1504       list.addFilter(filter);
1505       if(opts.filterAll) {
1506         list.addFilter(new FilterAllFilter());
1507       }
1508       Scan scan = new Scan();
1509       scan.setCaching(opts.caching);
1510       if (opts.addColumns) {
1511         scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
1512       } else {
1513         scan.addFamily(FAMILY_NAME);
1514       }
1515       scan.setFilter(list);
1516       return scan;
1517     }
1518   }
1519 
1520   /**
1521    * Compute a throughput rate in MB/s.
1522    * @param rows Number of records consumed.
1523    * @param timeMs Time taken in milliseconds.
1524    * @return String value with label, i.e. '123.76 MB/s'
1525    */
1526   private static String calculateMbps(int rows, long timeMs, final int valueSize, int columns) {
1527     BigDecimal rowSize = BigDecimal.valueOf(ROW_LENGTH +
1528       ((valueSize + FAMILY_NAME.length + COLUMN_ZERO.length) * columns));
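         // Throughput = rows * approximate-bytes-per-row / elapsed ms * 1000 ms/s / (1024 * 1024)
         // bytes per MB, where a row is estimated as the key plus (value + family + qualifier)
         // bytes for each column.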
1529     BigDecimal mbps = BigDecimal.valueOf(rows).multiply(rowSize, CXT)
1530       .divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT)
1531       .divide(BYTES_PER_MB, CXT);
1532     return FMT.format(mbps) + " MB/s";
1533   }
1534 
1535   /*
1536    * Format the passed integer.
1537    * @param number
1538    * @return Zero-prefixed, ROW_LENGTH-byte-wide decimal version of the passed
1539    * number (takes the absolute value in case the number is negative).
1540    */
1541   public static byte [] format(final int number) {
1542     byte [] b = new byte[ROW_LENGTH];
1543     int d = Math.abs(number);
1544     for (int i = b.length - 1; i >= 0; i--) {
1545       b[i] = (byte)((d % 10) + '0');
1546       d /= 10;
1547     }
1548     return b;
1549   }
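
       // For example, assuming ROW_LENGTH = 26, format(123) yields the 26-byte ASCII key
       // "00000000000000000000000123" (23 leading zeros followed by "123").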
1550 
1551   /*
1552    * This method takes some time and runs inline with the data upload.  For
1553    * example, in the mapfile test, generating the key and value
1554    * consumes about 30% of CPU time.
1555    * @return Generated random value to insert into a table cell.
1556    */
1557   public static byte[] generateData(final Random r, int length) {
1558     byte [] b = new byte [length];
1559     int i;
1560 
1561     for(i = 0; i < (length-8); i += 8) {
1562       b[i] = (byte) (65 + r.nextInt(26));
1563       b[i+1] = b[i];
1564       b[i+2] = b[i];
1565       b[i+3] = b[i];
1566       b[i+4] = b[i];
1567       b[i+5] = b[i];
1568       b[i+6] = b[i];
1569       b[i+7] = b[i];
1570     }
1571 
1572     byte a = (byte) (65 + r.nextInt(26));
1573     for(; i < length; i++) {
1574       b[i] = a;
1575     }
1576     return b;
1577   }
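
       // The generated value is a run of uppercase ASCII letters: each full 8-byte block repeats one
       // randomly chosen letter and any remaining tail bytes repeat a final letter, so for
       // length = 20 the result looks something like "AAAAAAAAQQQQQQQQCCCC".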
1578 
1579   /**
1580    * @deprecated Use {@link #generateData(java.util.Random, int)} instead.
1581    * @return Generated random value to insert into a table cell.
1582    */
1583   @Deprecated
1584   public static byte[] generateValue(final Random r) {
1585     return generateData(r, DEFAULT_VALUE_LENGTH);
1586   }
1587 
1588   static byte [] getRandomRow(final Random random, final int totalRows) {
1589     return format(random.nextInt(Integer.MAX_VALUE) % totalRows);
1590   }
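
       // Picks a pseudo-random, zero-padded key in [0, totalRows), i.e. anything from format(0)
       // through format(totalRows - 1).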
1591 
1592   static RunResult runOneClient(final Class<? extends Test> cmd, Configuration conf, Connection con,
1593                            TestOptions opts, final Status status)
1594       throws IOException, InterruptedException {
1595     status.setStatus("Start " + cmd + " at offset " + opts.startRow + " for " +
1596       opts.perClientRunRows + " rows");
1597     long totalElapsedTime;
1598 
1599     final Test t;
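         // Every Test subclass must expose a (Connection, TestOptions, Status) constructor, as
         // FilteredScanTest does above; it is looked up and invoked reflectively here.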
1600     try {
1601       Constructor<? extends Test> constructor =
1602         cmd.getDeclaredConstructor(Connection.class, TestOptions.class, Status.class);
1603       t = constructor.newInstance(con, opts, status);
1604     } catch (NoSuchMethodException e) {
1605       throw new IllegalArgumentException("Invalid command class: " +
1606           cmd.getName() + ".  It does not provide a constructor as described by " +
1607           "the javadoc comment.  Available constructors are: " +
1608           Arrays.toString(cmd.getConstructors()));
1609     } catch (Exception e) {
1610       throw new IllegalStateException("Failed to construct command class", e);
1611     }
1612     totalElapsedTime = t.test();
1613 
1614     status.setStatus("Finished " + cmd + " in " + totalElapsedTime +
1615       "ms at offset " + opts.startRow + " for " + opts.perClientRunRows + " rows" +
1616       " (" + calculateMbps((int)(opts.perClientRunRows * opts.sampleRate), totalElapsedTime,
1617           getAverageValueLength(opts), opts.columns) + ")");
1618 
1619     return new RunResult(totalElapsedTime, t.getLatency());
1620   }
1621 
1622   private static int getAverageValueLength(final TestOptions opts) {
1623     return opts.valueRandom? opts.valueSize/2: opts.valueSize;
1624   }
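
       // With --valueRandom the value sizes vary between 0 and valueSize, so the average reported
       // length is taken to be half of valueSize (e.g. 512 for the default valueSize of 1024).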
1625 
1626   private void runTest(final Class<? extends Test> cmd, TestOptions opts) throws IOException,
1627       InterruptedException, ClassNotFoundException {
1628     // Log the configuration we're going to run with. Uses JSON mapper because lazy. It'll do
1629     // the TestOptions introspection for us and dump the output in a readable format.
1630     LOG.info(cmd.getSimpleName() + " test run options=" + MAPPER.writeValueAsString(opts));
1631     try(Connection conn = ConnectionFactory.createConnection(getConf());
1632         Admin admin = conn.getAdmin()) {
1633       checkTable(admin, opts);
1634     }
1635     if (opts.nomapred) {
1636       doLocalClients(opts, getConf());
1637     } else {
1638       doMapReduce(opts, getConf());
1639     }
1640   }
1641 
1642   protected void printUsage() {
1643     printUsage(this.getClass().getName(), null);
1644   }
1645 
1646   protected static void printUsage(final String message) {
1647     printUsage(PerformanceEvaluation.class.getName(), message);
1648   }
1649 
1650   protected static void printUsageAndExit(final String message, final int exitCode) {
1651     printUsage(message);
1652     System.exit(exitCode);
1653   }
1654 
1655   protected static void printUsage(final String className, final String message) {
1656     if (message != null && message.length() > 0) {
1657       System.err.println(message);
1658     }
1659     System.err.println("Usage: java " + className + " \\");
1660     System.err.println("  <OPTIONS> [-D<property=value>]* <command> <nclients>");
1661     System.err.println();
1662     System.err.println("Options:");
1663     System.err.println(" nomapred        Run multiple clients using threads " +
1664       "(rather than use mapreduce)");
1665     System.err.println(" rows            Rows each client runs. Default: One million");
1666     System.err.println(" size            Total size in GiB. Mutually exclusive with --rows. " +
1667       "Default: 1.0.");
1668     System.err.println(" sampleRate      Execute test on a sample of total " +
1669       "rows. Only supported by randomRead. Default: 1.0");
1670     System.err.println(" traceRate       Enable HTrace spans. Initiate tracing every N rows. " +
1671       "Default: 0");
1672     System.err.println(" table           Alternate table name. Default: 'TestTable'");
1673     System.err.println(" multiGet        If >0, when doing RandomRead, batch multiple gets " +
1674       "together in groups of N instead of doing single gets. Default: 0");
1675     System.err.println(" compress        Compression type to use (GZ, LZO, ...). Default: 'NONE'");
1676     System.err.println(" flushCommits    Used to determine if the test should flush the table. " +
1677       "Default: false");
1678     System.err.println(" writeToWAL      Set writeToWAL on puts. Default: True");
1679     System.err.println(" autoFlush       Set autoFlush on htable. Default: False");
1680     System.err.println(" oneCon          All the threads share the same connection. Default: False");
1681     System.err.println(" presplit        Create presplit table. Recommended for accurate perf " +
1682       "analysis (see guide).  Default: disabled");
1683     System.err.println(" inmemory        Tries to keep the HFiles of the CF " +
1684       "in memory as much as possible. Not guaranteed that reads will always be served " +
1685       "from memory.  Default: false");
1686     System.err.println(" usetags         Writes tags along with KVs. Use with HFile V3. " +
1687       "Default: false");
1688     System.err.println(" numoftags       Specify the number of tags to write per KV. " +
1689       "This works only if usetags is true.");
1690     System.err.println(" filterAll       Filters out all the rows on the server side,"
1691         + " thereby returning nothing back to the client.  Helps to check the server side"
1692         + " performance.  Uses FilterAllFilter internally. ");
1693     System.err.println(" latency         Set to report operation latencies. Default: False");
1694     System.err.println(" bloomFilter     Bloom filter type, one of " + Arrays.toString(BloomType.values()));
1695     System.err.println(" valueSize       Pass value size to use. Default: 1024");
1696     System.err.println(" valueRandom     Set if we should vary value size between 0 and " +
1697         "'valueSize'; set on reads to collect stats on value size. Default: Not set.");
1698     System.err.println(" valueZipf       Set if we should vary value size between 0 and " +
1699         "'valueSize' following a Zipf distribution. Default: Not set.");
1700     System.err.println(" period          Report every 'period' rows. " +
1701       "Default: opts.perClientRunRows / 10");
1704     System.err.println(" addColumns      Adds columns to scans/gets explicitly. Default: true");
1705     System.err.println(" replicas        Enable region replica testing. Default: 1.");
1706     System.err.println(" splitPolicy     Specify a custom RegionSplitPolicy for the table.");
1707     System.err.println(" randomSleep     Sleep a random time between 0 and the entered value before each get. Default: 0");
1708     System.err.println(" columns         Columns to write per row. Default: 1");
1709     System.err.println(" caching         Scan caching to use. Default: 30");
1710     System.err.println();
1711     System.err.println(" Note: -D properties will be applied to the conf used. ");
1712     System.err.println("  For example: ");
1713     System.err.println("   -Dmapreduce.output.fileoutputformat.compress=true");
1714     System.err.println("   -Dmapreduce.task.timeout=60000");
1715     System.err.println();
1716     System.err.println("Command:");
1717     for (CmdDescriptor command : COMMANDS.values()) {
1718       System.err.println(String.format(" %-15s %s", command.getName(), command.getDescription()));
1719     }
1720     System.err.println();
1721     System.err.println("Args:");
1722     System.err.println(" nclients        Integer. Required. Total number of " +
1723       "clients (and HRegionServers)");
1724     System.err.println("                 running: 1 <= value <= 500");
1725     System.err.println("Examples:");
1726     System.err.println(" To run a single evaluation client:");
1727     System.err.println(" $ bin/hbase " + className + " sequentialWrite 1");
1728   }
1729 
1730   /**
1731    * Parse options passed in via an arguments array. Assumes that array has been split
1732    * on white-space and placed into a {@code Queue}. Any unknown arguments will remain
1733    * in the queue at the conclusion of this method call. It's up to the caller to deal
1734    * with these unrecognized arguments.
1735    */
1736   static TestOptions parseOpts(Queue<String> args) {
1737     TestOptions opts = new TestOptions();
1738 
1739     String cmd = null;
1740     while ((cmd = args.poll()) != null) {
1741       if (cmd.equals("-h") || cmd.startsWith("--h")) {
1742         // place item back onto queue so that caller knows parsing was incomplete
1743         args.add(cmd);
1744         break;
1745       }
1746 
1747       final String nmr = "--nomapred";
1748       if (cmd.startsWith(nmr)) {
1749         opts.nomapred = true;
1750         continue;
1751       }
1752 
1753       final String rows = "--rows=";
1754       if (cmd.startsWith(rows)) {
1755         opts.perClientRunRows = Integer.parseInt(cmd.substring(rows.length()));
1756         continue;
1757       }
1758 
1759       final String sampleRate = "--sampleRate=";
1760       if (cmd.startsWith(sampleRate)) {
1761         opts.sampleRate = Float.parseFloat(cmd.substring(sampleRate.length()));
1762         continue;
1763       }
1764 
1765       final String table = "--table=";
1766       if (cmd.startsWith(table)) {
1767         opts.tableName = cmd.substring(table.length());
1768         continue;
1769       }
1770 
1771       final String startRow = "--startRow=";
1772       if (cmd.startsWith(startRow)) {
1773         opts.startRow = Integer.parseInt(cmd.substring(startRow.length()));
1774         continue;
1775       }
1776 
1777       final String compress = "--compress=";
1778       if (cmd.startsWith(compress)) {
1779         opts.compression = Compression.Algorithm.valueOf(cmd.substring(compress.length()));
1780         continue;
1781       }
1782 
1783       final String traceRate = "--traceRate=";
1784       if (cmd.startsWith(traceRate)) {
1785         opts.traceRate = Double.parseDouble(cmd.substring(traceRate.length()));
1786         continue;
1787       }
1788 
1789       final String blockEncoding = "--blockEncoding=";
1790       if (cmd.startsWith(blockEncoding)) {
1791         opts.blockEncoding = DataBlockEncoding.valueOf(cmd.substring(blockEncoding.length()));
1792         continue;
1793       }
1794 
1795       final String flushCommits = "--flushCommits=";
1796       if (cmd.startsWith(flushCommits)) {
1797         opts.flushCommits = Boolean.parseBoolean(cmd.substring(flushCommits.length()));
1798         continue;
1799       }
1800 
1801       final String writeToWAL = "--writeToWAL=";
1802       if (cmd.startsWith(writeToWAL)) {
1803         opts.writeToWAL = Boolean.parseBoolean(cmd.substring(writeToWAL.length()));
1804         continue;
1805       }
1806 
1807       final String presplit = "--presplit=";
1808       if (cmd.startsWith(presplit)) {
1809         opts.presplitRegions = Integer.parseInt(cmd.substring(presplit.length()));
1810         continue;
1811       }
1812 
1813       final String inMemory = "--inmemory=";
1814       if (cmd.startsWith(inMemory)) {
1815         opts.inMemoryCF = Boolean.parseBoolean(cmd.substring(inMemory.length()));
1816         continue;
1817       }
1818 
1819       final String autoFlush = "--autoFlush=";
1820       if (cmd.startsWith(autoFlush)) {
1821         opts.autoFlush = Boolean.parseBoolean(cmd.substring(autoFlush.length()));
1822         continue;
1823       }
1824 
1825       final String onceCon = "--oneCon=";
1826       if (cmd.startsWith(onceCon)) {
1827         opts.oneCon = Boolean.parseBoolean(cmd.substring(onceCon.length()));
1828         continue;
1829       }
1830 
1831       final String latency = "--latency";
1832       if (cmd.startsWith(latency)) {
1833         opts.reportLatency = true;
1834         continue;
1835       }
1836 
1837       final String multiGet = "--multiGet=";
1838       if (cmd.startsWith(multiGet)) {
1839         opts.multiGet = Integer.parseInt(cmd.substring(multiGet.length()));
1840         continue;
1841       }
1842 
1843       final String useTags = "--usetags=";
1844       if (cmd.startsWith(useTags)) {
1845         opts.useTags = Boolean.parseBoolean(cmd.substring(useTags.length()));
1846         continue;
1847       }
1848 
1849       final String noOfTags = "--numoftags=";
1850       if (cmd.startsWith(noOfTags)) {
1851         opts.noOfTags = Integer.parseInt(cmd.substring(noOfTags.length()));
1852         continue;
1853       }
1854 
1855       final String replicas = "--replicas=";
1856       if (cmd.startsWith(replicas)) {
1857         opts.replicas = Integer.parseInt(cmd.substring(replicas.length()));
1858         continue;
1859       }
1860 
1861       final String filterOutAll = "--filterAll";
1862       if (cmd.startsWith(filterOutAll)) {
1863         opts.filterAll = true;
1864         continue;
1865       }
1866 
1867       final String size = "--size=";
1868       if (cmd.startsWith(size)) {
1869         opts.size = Float.parseFloat(cmd.substring(size.length()));
1870         continue;
1871       }
1872 
1873       final String splitPolicy = "--splitPolicy=";
1874       if (cmd.startsWith(splitPolicy)) {
1875         opts.splitPolicy = cmd.substring(splitPolicy.length());
1876         continue;
1877       }
1878 
1879       final String randomSleep = "--randomSleep=";
1880       if (cmd.startsWith(randomSleep)) {
1881         opts.randomSleep = Integer.parseInt(cmd.substring(randomSleep.length()));
1882         continue;
1883       }
1884 
1885       final String bloomFilter = "--bloomFilter=";
1886       if (cmd.startsWith(bloomFilter)) {
1887         opts.bloomType = BloomType.valueOf(cmd.substring(bloomFilter.length()));
1888         continue;
1889       }
1890 
1891       final String valueSize = "--valueSize=";
1892       if (cmd.startsWith(valueSize)) {
1893         opts.valueSize = Integer.parseInt(cmd.substring(valueSize.length()));
1894         continue;
1895       }
1896 
1897       final String valueRandom = "--valueRandom";
1898       if (cmd.startsWith(valueRandom)) {
1899         opts.valueRandom = true;
1900         if (opts.valueZipf) {
1901           throw new IllegalStateException("Either valueZipf or valueRandom but not both");
1902         }
1903         continue;
1904       }
1905 
1906       final String valueZipf = "--valueZipf";
1907       if (cmd.startsWith(valueZipf)) {
1908         opts.valueZipf = true;
1909         if (opts.valueRandom) {
1910           throw new IllegalStateException("Either valueZipf or valueRandom but not both");
1911         }
1912         continue;
1913       }
1914 
1915       final String period = "--period=";
1916       if (cmd.startsWith(period)) {
1917         opts.period = Integer.parseInt(cmd.substring(period.length()));
1918         continue;
1919       }
1920 
1921       final String addColumns = "--addColumns=";
1922       if (cmd.startsWith(addColumns)) {
1923         opts.addColumns = Boolean.parseBoolean(cmd.substring(addColumns.length()));
1924         continue;
1925       }
1926 
1927       final String columns = "--columns=";
1928       if (cmd.startsWith(columns)) {
1929         opts.columns = Integer.parseInt(cmd.substring(columns.length()));
1930         continue;
1931       }
1932 
1933       final String caching = "--caching=";
1934       if (cmd.startsWith(caching)) {
1935         opts.caching = Integer.parseInt(cmd.substring(caching.length()));
1936         continue;
1937       }
1938 
1939       if (isCommandClass(cmd)) {
1940         opts.cmdName = cmd;
1941         opts.numClientThreads = Integer.parseInt(args.remove());
1942         int rowsPerGB = getRowsPerGB(opts);
1943         if (opts.size != DEFAULT_OPTS.size &&
1944             opts.perClientRunRows != DEFAULT_OPTS.perClientRunRows) {
1945           throw new IllegalArgumentException(rows + " and " + size + " are mutually exclusive arguments.");
1946         }
1947         if (opts.size != DEFAULT_OPTS.size) {
1948           // total size in GB specified
1949           opts.totalRows = (int) (opts.size * rowsPerGB);
1950           opts.perClientRunRows = opts.totalRows / opts.numClientThreads;
1951         } else if (opts.perClientRunRows != DEFAULT_OPTS.perClientRunRows) {
1952           // number of rows specified
1953           opts.totalRows = opts.perClientRunRows * opts.numClientThreads;
1954           opts.size = (float) opts.totalRows / rowsPerGB;
1955         }
1956         break;
1957       } else {
1958         printUsageAndExit("ERROR: Unrecognized option/command: " + cmd, -1);
1959       }
1965     }
1966     return opts;
1967   }
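
       // Illustrative call to parseOpts above, assuming "randomRead" is one of the registered
       // COMMANDS:
       //   parseOpts(new LinkedList<String>(Arrays.asList("--rows=100", "randomRead", "4")))
       // yields perClientRunRows = 100, cmdName = "randomRead", numClientThreads = 4 and
       // totalRows = 400, with the argument queue left empty.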
1968 
1969   static int getRowsPerGB(final TestOptions opts) {
1970     return ONE_GB / ((opts.valueRandom? opts.valueSize/2: opts.valueSize) * opts.getColumns());
1971   }
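
       // For example, assuming ONE_GB = 1024 * 1024 * 1024, the default valueSize of 1024 with a
       // single column and fixed-size values gives 1024^3 / 1024 = 1,048,576 rows per GB.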
1972 
1973   @Override
1974   public int run(String[] args) throws Exception {
1975     // Process command-line args. TODO: Better cmd-line processing
1976     // (but hopefully something not as painful as cli options).
1977     int errCode = -1;
1978     if (args.length < 1) {
1979       printUsage();
1980       return errCode;
1981     }
1982 
1983     try {
1984       LinkedList<String> argv = new LinkedList<String>();
1985       argv.addAll(Arrays.asList(args));
1986       TestOptions opts = parseOpts(argv);
1987 
1988       // args remaining, print help and exit
1989       if (!argv.isEmpty()) {
1990         errCode = 0;
1991         printUsage();
1992         return errCode;
1993       }
1994 
1995       // must run at least 1 client
1996       if (opts.numClientThreads <= 0) {
1997         throw new IllegalArgumentException("Number of clients must be > 0");
1998       }
1999 
2000       Class<? extends Test> cmdClass = determineCommandClass(opts.cmdName);
2001       if (cmdClass != null) {
2002         runTest(cmdClass, opts);
2003         errCode = 0;
2004       }
2005 
2006     } catch (Exception e) {
2007       e.printStackTrace();
2008     }
2009 
2010     return errCode;
2011   }
2012 
2013   private static boolean isCommandClass(String cmd) {
2014     return COMMANDS.containsKey(cmd);
2015   }
2016 
2017   private static Class<? extends Test> determineCommandClass(String cmd) {
2018     CmdDescriptor descriptor = COMMANDS.get(cmd);
2019     return descriptor != null ? descriptor.getCmdClass() : null;
2020   }
2021 
2022   public static void main(final String[] args) throws Exception {
2023     int res = ToolRunner.run(new PerformanceEvaluation(HBaseConfiguration.create()), args);
2024     System.exit(res);
2025   }
2026 }