/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package org.apache.hadoop.hbase.util;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
import org.apache.hadoop.util.ToolRunner;

/**
 * A command-line utility that reads, writes, and verifies data. Unlike
 * {@link PerformanceEvaluation}, this tool validates the data written,
 * and supports simultaneously writing and reading the same set of keys.
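 *
 * <p>An illustrative invocation (flag values are examples; the flags
 * themselves correspond to the options registered in {@link #addOptions()}):
 * <pre>
 * hbase org.apache.hadoop.hbase.util.LoadTestTool -tn cluster_test \
 *     -write 2:110:20 -read 100:20 -num_keys 1000000
 * </pre>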
 */
public class LoadTestTool extends AbstractHBaseTool {

  private static final Log LOG = LogFactory.getLog(LoadTestTool.class);

  /** Table name for the test */
  private TableName tableName;

  /** Table name to use if not overridden on the command line */
  protected static final String DEFAULT_TABLE_NAME = "cluster_test";

  /** Column family used by the test */
  public static byte[] COLUMN_FAMILY = Bytes.toBytes("test_cf");

  /** Column families used by the test */
  protected static final byte[][] COLUMN_FAMILIES = { COLUMN_FAMILY };

  /** The number of reader/writer threads if not specified */
  protected static final int DEFAULT_NUM_THREADS = 20;

  /** Usage string for the load option */
  protected static final String OPT_USAGE_LOAD =
      "<avg_cols_per_key>:<avg_data_size>" +
      "[:<#threads=" + DEFAULT_NUM_THREADS + ">]";

  /** Usage string for the read option */
  protected static final String OPT_USAGE_READ =
      "<verify_percent>[:<#threads=" + DEFAULT_NUM_THREADS + ">]";

  /** Usage string for the update option */
  protected static final String OPT_USAGE_UPDATE =
      "<update_percent>[:<#threads=" + DEFAULT_NUM_THREADS + ">]";

  protected static final String OPT_USAGE_BLOOM = "Bloom filter type, one of " +
      Arrays.toString(BloomType.values());

  protected static final String OPT_USAGE_COMPRESSION = "Compression type, " +
      "one of " + Arrays.toString(Compression.Algorithm.values());

  public static final String OPT_DATA_BLOCK_ENCODING_USAGE =
      "Encoding algorithm (e.g. prefix "
          + "compression) to use for data blocks in the test column family, "
          + "one of " + Arrays.toString(DataBlockEncoding.values()) + ".";

  private static final String OPT_BLOOM = "bloom";
  private static final String OPT_COMPRESSION = "compression";
  public static final String OPT_DATA_BLOCK_ENCODING =
      HColumnDescriptor.DATA_BLOCK_ENCODING.toLowerCase();
  public static final String OPT_ENCODE_IN_CACHE_ONLY =
      "encode_in_cache_only";
  public static final String OPT_ENCODE_IN_CACHE_ONLY_USAGE =
      "If this is specified, data blocks will only be encoded in block " +
      "cache but not on disk";

  public static final String OPT_INMEMORY = "in_memory";
  public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " +
      "in memory as far as possible. It is not guaranteed that reads are always served " +
      "from memory.";

  protected static final String OPT_KEY_WINDOW = "key_window";
  protected static final String OPT_WRITE = "write";
  protected static final String OPT_MAX_READ_ERRORS = "max_read_errors";
  protected static final String OPT_MULTIPUT = "multiput";
  protected static final String OPT_NUM_KEYS = "num_keys";
  protected static final String OPT_READ = "read";
  protected static final String OPT_START_KEY = "start_key";
  protected static final String OPT_TABLE_NAME = "tn";
  protected static final String OPT_ZK_QUORUM = "zk";
  protected static final String OPT_SKIP_INIT = "skip_init";
  protected static final String OPT_INIT_ONLY = "init_only";
  private static final String NUM_TABLES = "num_tables";
  protected static final String OPT_BATCHUPDATE = "batchupdate";
  protected static final String OPT_UPDATE = "update";

  protected static final long DEFAULT_START_KEY = 0;

  /** This will be removed as we factor out the dependency on command line */
  protected CommandLine cmd;

  protected MultiThreadedWriter writerThreads = null;
  protected MultiThreadedReader readerThreads = null;
  protected MultiThreadedUpdater updaterThreads = null;

  protected long startKey, endKey;

  protected boolean isWrite, isRead, isUpdate;

  // Column family options
  protected DataBlockEncoding dataBlockEncodingAlgo;
  protected boolean encodeInCacheOnly;
  protected Compression.Algorithm compressAlgo;
  protected BloomType bloomType;
  private boolean inMemoryCF;

  // Writer options
  protected int numWriterThreads = DEFAULT_NUM_THREADS;
  protected int minColsPerKey, maxColsPerKey;
  protected int minColDataSize, maxColDataSize;
  protected boolean isMultiPut;

  // Updater options
  protected int numUpdaterThreads = DEFAULT_NUM_THREADS;
  protected int updatePercent;
  protected boolean isBatchUpdate;

  // Reader options
  private int numReaderThreads = DEFAULT_NUM_THREADS;
  private int keyWindow = MultiThreadedReader.DEFAULT_KEY_WINDOW;
  private int maxReadErrors = MultiThreadedReader.DEFAULT_MAX_ERRORS;
  private int verifyPercent;

  private int numTables = 1;

  // TODO: refactor LoadTestToolImpl somewhere to make the usage from tests less bad,
  //       console tool itself should only be used from console.
  protected boolean isSkipInit = false;
  protected boolean isInitOnly = false;

  protected String[] splitColonSeparated(String option,
      int minNumCols, int maxNumCols) {
    String optVal = cmd.getOptionValue(option);
    String[] cols = optVal.split(":");
    if (cols.length < minNumCols || cols.length > maxNumCols) {
      throw new IllegalArgumentException("Expected at least "
          + minNumCols + " columns but no more than " + maxNumCols +
          " in the colon-separated value '" + optVal + "' of the " +
          "-" + option + " option");
    }
    return cols;
  }

  protected int getNumThreads(String numThreadsStr) {
    return parseInt(numThreadsStr, 1, Short.MAX_VALUE);
  }

  /**
   * Apply column family options such as Bloom filters, compression, and data
   * block encoding.
   */
  protected void applyColumnFamilyOptions(TableName tableName,
      byte[][] columnFamilies) throws IOException {
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTableDescriptor tableDesc = admin.getTableDescriptor(tableName);
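    // Take the table offline while altering its column families, then bring
    // it back online once all the changes have been applied.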
    LOG.info("Disabling table " + tableName);
    admin.disableTable(tableName);
    for (byte[] cf : columnFamilies) {
      HColumnDescriptor columnDesc = tableDesc.getFamily(cf);
      boolean isNewCf = columnDesc == null;
      if (isNewCf) {
        columnDesc = new HColumnDescriptor(cf);
      }
      if (bloomType != null) {
        columnDesc.setBloomFilterType(bloomType);
      }
      if (compressAlgo != null) {
        columnDesc.setCompressionType(compressAlgo);
      }
      if (dataBlockEncodingAlgo != null) {
        columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo);
        columnDesc.setEncodeOnDisk(!encodeInCacheOnly);
      }
      if (inMemoryCF) {
        columnDesc.setInMemory(inMemoryCF);
      }
      if (isNewCf) {
        admin.addColumn(tableName, columnDesc);
      } else {
        admin.modifyColumn(tableName, columnDesc);
      }
    }
    LOG.info("Enabling table " + tableName);
    admin.enableTable(tableName);
  }

  @Override
  protected void addOptions() {
    addOptWithArg(OPT_ZK_QUORUM, "ZK quorum as comma-separated host names " +
        "without port numbers");
    addOptWithArg(OPT_TABLE_NAME, "The name of the table to read or write");
    addOptWithArg(OPT_WRITE, OPT_USAGE_LOAD);
    addOptWithArg(OPT_READ, OPT_USAGE_READ);
    addOptWithArg(OPT_UPDATE, OPT_USAGE_UPDATE);
    addOptNoArg(OPT_INIT_ONLY, "Initialize the test table only, don't do any loading");
    addOptWithArg(OPT_BLOOM, OPT_USAGE_BLOOM);
    addOptWithArg(OPT_COMPRESSION, OPT_USAGE_COMPRESSION);
    addOptWithArg(OPT_DATA_BLOCK_ENCODING, OPT_DATA_BLOCK_ENCODING_USAGE);
    addOptWithArg(OPT_MAX_READ_ERRORS, "The maximum number of read errors " +
        "to tolerate before terminating all reader threads. The default is " +
        MultiThreadedReader.DEFAULT_MAX_ERRORS + ".");
    addOptWithArg(OPT_KEY_WINDOW, "The 'key window' to maintain between " +
        "reads and writes for concurrent write/read workload. The default " +
        "is " + MultiThreadedReader.DEFAULT_KEY_WINDOW + ".");

    addOptNoArg(OPT_MULTIPUT, "Whether to use multi-puts as opposed to " +
        "separate puts for every column in a row");
    addOptNoArg(OPT_BATCHUPDATE, "Whether to use batch as opposed to " +
        "separate updates for every column in a row");
    addOptNoArg(OPT_ENCODE_IN_CACHE_ONLY, OPT_ENCODE_IN_CACHE_ONLY_USAGE);
    addOptNoArg(OPT_INMEMORY, OPT_USAGE_IN_MEMORY);

    addOptWithArg(OPT_NUM_KEYS, "The number of keys to read/write");
    addOptWithArg(OPT_START_KEY, "The first key to read/write " +
        "(a 0-based index). The default value is " +
        DEFAULT_START_KEY + ".");
    addOptNoArg(OPT_SKIP_INIT, "Skip the initialization; assume test table "
        + "already exists");

    addOptWithArg(NUM_TABLES,
      "A positive integer number. When a number n is specified, the load test "
          + "tool will load n tables in parallel. The -tn parameter value becomes "
          + "the table name prefix. Each table name is in the format <tn>_1...<tn>_n");
  }

  @Override
  protected void processOptions(CommandLine cmd) {
    this.cmd = cmd;

    tableName = TableName.valueOf(cmd.getOptionValue(OPT_TABLE_NAME,
        DEFAULT_TABLE_NAME));

    isWrite = cmd.hasOption(OPT_WRITE);
    isRead = cmd.hasOption(OPT_READ);
    isUpdate = cmd.hasOption(OPT_UPDATE);
    isInitOnly = cmd.hasOption(OPT_INIT_ONLY);

    if (!isWrite && !isRead && !isUpdate && !isInitOnly) {
      throw new IllegalArgumentException("Either -" + OPT_WRITE + ", -" +
        OPT_UPDATE + ", or -" + OPT_READ + " has to be specified");
    }

    if (isInitOnly && (isRead || isWrite || isUpdate)) {
      throw new IllegalArgumentException(OPT_INIT_ONLY + " cannot be specified with"
          + " either -" + OPT_WRITE + " or -" + OPT_UPDATE + " or -" + OPT_READ);
    }

    if (!isInitOnly) {
      if (!cmd.hasOption(OPT_NUM_KEYS)) {
        throw new IllegalArgumentException(OPT_NUM_KEYS + " must be specified in "
            + "read or write mode");
      }
      startKey = parseLong(cmd.getOptionValue(OPT_START_KEY,
          String.valueOf(DEFAULT_START_KEY)), 0, Long.MAX_VALUE);
      long numKeys = parseLong(cmd.getOptionValue(OPT_NUM_KEYS), 1,
          Long.MAX_VALUE - startKey);
      endKey = startKey + numKeys;
      isSkipInit = cmd.hasOption(OPT_SKIP_INIT);
      System.out.println("Key range: [" + startKey + ".." + (endKey - 1) + "]");
    }

    encodeInCacheOnly = cmd.hasOption(OPT_ENCODE_IN_CACHE_ONLY);
    parseColumnFamilyOptions(cmd);

    if (isWrite) {
      String[] writeOpts = splitColonSeparated(OPT_WRITE, 2, 3);

      int colIndex = 0;
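      // The option supplies averages; derive ranges around them so the
      // requested values are the approximate means: [1, 2 * avg] columns
      // per key and [avg / 2, 3 * avg / 2] bytes per column.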
      minColsPerKey = 1;
      maxColsPerKey = 2 * Integer.parseInt(writeOpts[colIndex++]);
      int avgColDataSize =
          parseInt(writeOpts[colIndex++], 1, Integer.MAX_VALUE);
      minColDataSize = avgColDataSize / 2;
      maxColDataSize = avgColDataSize * 3 / 2;

      if (colIndex < writeOpts.length) {
        numWriterThreads = getNumThreads(writeOpts[colIndex++]);
      }

      isMultiPut = cmd.hasOption(OPT_MULTIPUT);

      System.out.println("Multi-puts: " + isMultiPut);
      System.out.println("Columns per key: " + minColsPerKey + ".."
          + maxColsPerKey);
      System.out.println("Data size per column: " + minColDataSize + ".."
          + maxColDataSize);
    }

    if (isUpdate) {
      String[] mutateOpts = splitColonSeparated(OPT_UPDATE, 1, 2);
      int colIndex = 0;
      updatePercent = parseInt(mutateOpts[colIndex++], 0, 100);
      if (colIndex < mutateOpts.length) {
        numUpdaterThreads = getNumThreads(mutateOpts[colIndex++]);
      }

      isBatchUpdate = cmd.hasOption(OPT_BATCHUPDATE);

      System.out.println("Batch updates: " + isBatchUpdate);
      System.out.println("Percent of keys to update: " + updatePercent);
      System.out.println("Updater threads: " + numUpdaterThreads);
    }

    if (isRead) {
      String[] readOpts = splitColonSeparated(OPT_READ, 1, 2);
      int colIndex = 0;
      verifyPercent = parseInt(readOpts[colIndex++], 0, 100);
      if (colIndex < readOpts.length) {
        numReaderThreads = getNumThreads(readOpts[colIndex++]);
      }

      if (cmd.hasOption(OPT_MAX_READ_ERRORS)) {
        maxReadErrors = parseInt(cmd.getOptionValue(OPT_MAX_READ_ERRORS),
            0, Integer.MAX_VALUE);
      }

      if (cmd.hasOption(OPT_KEY_WINDOW)) {
        keyWindow = parseInt(cmd.getOptionValue(OPT_KEY_WINDOW),
            0, Integer.MAX_VALUE);
      }

      System.out.println("Percent of keys to verify: " + verifyPercent);
      System.out.println("Reader threads: " + numReaderThreads);
    }

    numTables = 1;
    if (cmd.hasOption(NUM_TABLES)) {
      numTables = parseInt(cmd.getOptionValue(NUM_TABLES), 1, Short.MAX_VALUE);
    }
  }

  private void parseColumnFamilyOptions(CommandLine cmd) {
    String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
    dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
        DataBlockEncoding.valueOf(dataBlockEncodingStr);
    if (dataBlockEncodingAlgo == DataBlockEncoding.NONE && encodeInCacheOnly) {
      throw new IllegalArgumentException("-" + OPT_ENCODE_IN_CACHE_ONLY + " " +
          "does not make sense when data block encoding is not used");
    }

    String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
    compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
        Compression.Algorithm.valueOf(compressStr);

    String bloomStr = cmd.getOptionValue(OPT_BLOOM);
    bloomType = bloomStr == null ? null :
        BloomType.valueOf(bloomStr);

    inMemoryCF = cmd.hasOption(OPT_INMEMORY);
  }

  public void initTestTable() throws IOException {
    HBaseTestingUtility.createPreSplitLoadTestTable(conf, tableName,
        COLUMN_FAMILY, compressAlgo, dataBlockEncodingAlgo);
    applyColumnFamilyOptions(tableName, COLUMN_FAMILIES);
  }

  @Override
  protected int doWork() throws IOException {
    if (numTables > 1) {
      return parallelLoadTables();
    } else {
      return loadTable();
    }
  }

  protected int loadTable() throws IOException {
    if (cmd.hasOption(OPT_ZK_QUORUM)) {
      conf.set(HConstants.ZOOKEEPER_QUORUM, cmd.getOptionValue(OPT_ZK_QUORUM));
    }

    if (isInitOnly) {
      LOG.info("Initializing only; no reads or writes");
      initTestTable();
      return 0;
    }

    if (!isSkipInit) {
      initTestTable();
    }

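    // The generator derives each value deterministically from its row key
    // and qualifier, which is what allows reader threads to verify the data
    // produced by the writer and updater threads.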
    LoadTestDataGenerator dataGen = new MultiThreadedAction.DefaultDataGenerator(
        minColDataSize, maxColDataSize, minColsPerKey, maxColsPerKey, COLUMN_FAMILY);

    if (isWrite) {
      writerThreads = new MultiThreadedWriter(dataGen, conf, tableName);
      writerThreads.setMultiPut(isMultiPut);
    }

    if (isUpdate) {
      updaterThreads = new MultiThreadedUpdater(dataGen, conf, tableName, updatePercent);
      updaterThreads.setBatchUpdate(isBatchUpdate);
    }

    if (isRead) {
      readerThreads = new MultiThreadedReader(dataGen, conf, tableName, verifyPercent);
      readerThreads.setMaxErrors(maxReadErrors);
      readerThreads.setKeyWindow(keyWindow);
    }

    if (isUpdate && isWrite) {
      LOG.info("Concurrent write/update workload: making updaters aware of the " +
        "write point");
      updaterThreads.linkToWriter(writerThreads);
    }

    if (isRead && (isUpdate || isWrite)) {
      LOG.info("Concurrent write/read workload: making readers aware of the " +
        "write point");
      readerThreads.linkToWriter(isUpdate ? updaterThreads : writerThreads);
    }

    if (isWrite) {
      System.out.println("Starting to write data...");
      writerThreads.start(startKey, endKey, numWriterThreads);
    }

    if (isUpdate) {
      System.out.println("Starting to mutate data...");
      updaterThreads.start(startKey, endKey, numUpdaterThreads);
    }

    if (isRead) {
      System.out.println("Starting to read data...");
      readerThreads.start(startKey, endKey, numReaderThreads);
    }

    if (isWrite) {
      writerThreads.waitForFinish();
    }

    if (isUpdate) {
      updaterThreads.waitForFinish();
    }

    if (isRead) {
      readerThreads.waitForFinish();
    }

    boolean success = true;
    if (isWrite) {
      success = success && writerThreads.getNumWriteFailures() == 0;
    }
    if (isUpdate) {
      success = success && updaterThreads.getNumWriteFailures() == 0;
    }
    if (isRead) {
      success = success && readerThreads.getNumReadErrors() == 0
          && readerThreads.getNumReadFailures() == 0;
    }
    return success ? EXIT_SUCCESS : EXIT_FAILURE;
  }

  public static void main(String[] args) {
    new LoadTestTool().doStaticMain(args);
  }

  /**
   * When NUM_TABLES is specified, starts multiple worker threads, each of
   * which runs a LoadTestTool instance to load one table. Each table name is
   * in the format <tn>_<index>. For example, with "-tn test -num_tables 2",
   * the table names will be "test_1" and "test_2".
   *
   * @throws IOException if a worker thread fails or is interrupted
   */
  private int parallelLoadTables()
      throws IOException {
    // create new command args
    String tableName = cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME);
    String[] newArgs = null;
    if (!cmd.hasOption(LoadTestTool.OPT_TABLE_NAME)) {
      newArgs = new String[cmdLineArgs.length + 2];
      newArgs[0] = "-" + LoadTestTool.OPT_TABLE_NAME;
      // Placeholder value; each worker substitutes its own table name below.
      // Without it, the scan over newArgs would hit a null element.
      newArgs[1] = DEFAULT_TABLE_NAME;
      for (int i = 0; i < cmdLineArgs.length; i++) {
        newArgs[i + 2] = cmdLineArgs[i];
      }
    } else {
      newArgs = cmdLineArgs;
    }
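    // Locate the slot holding the table name value so that each worker can
    // substitute its own table name below.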
    int tableNameValueIndex = -1;
    for (int j = 0; j < newArgs.length; j++) {
      if (newArgs[j].endsWith(OPT_TABLE_NAME)) {
        tableNameValueIndex = j + 1;
      } else if (newArgs[j].endsWith(NUM_TABLES)) {
        // change NUM_TABLES to 1 so that each worker loads one table
        newArgs[j + 1] = "1";
      }
    }

    // starting to load multiple tables
    List<WorkerThread> workers = new ArrayList<WorkerThread>();
    for (int i = 0; i < numTables; i++) {
      String[] workerArgs = newArgs.clone();
      workerArgs[tableNameValueIndex] = tableName + "_" + (i + 1);
      WorkerThread worker = new WorkerThread(i, workerArgs);
      workers.add(worker);
      LOG.info(worker + " starting");
      worker.start();
    }

    // wait for all workers to finish
    LOG.info("Waiting for worker threads to finish");
    for (WorkerThread t : workers) {
      try {
        t.join();
      } catch (InterruptedException ie) {
        IOException iie = new InterruptedIOException();
        iie.initCause(ie);
        throw iie;
      }
      checkForErrors();
    }

    return EXIT_SUCCESS;
  }

  // If an exception is thrown by one of the worker threads, it will be
  // stored here.
  protected AtomicReference<Throwable> thrown = new AtomicReference<Throwable>();

  private void workerThreadError(Throwable t) {
    thrown.compareAndSet(null, t);
  }

  /**
   * Check for errors in the worker threads. If any is found, rethrow it.
   */
  private void checkForErrors() throws IOException {
    Throwable thrown = this.thrown.get();
    if (thrown == null) return;
    if (thrown instanceof IOException) {
      throw (IOException) thrown;
    } else {
      throw new RuntimeException(thrown);
    }
  }

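  /**
   * Runs a single LoadTestTool instance against one table; any failure is
   * recorded via workerThreadError so the parent can rethrow it.
   */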
  class WorkerThread extends Thread {
    private String[] workerArgs;

    WorkerThread(int i, String[] args) {
      super("WorkerThread-" + i);
      workerArgs = args;
    }

    @Override
    public void run() {
      try {
        int ret = ToolRunner.run(HBaseConfiguration.create(), new LoadTestTool(), workerArgs);
        if (ret != 0) {
          throw new RuntimeException("LoadTestTool exited with a non-zero return code.");
        }
      } catch (Exception ex) {
        LOG.error("Error in worker thread", ex);
        workerThreadError(ex);
      }
    }
  }
}