package org.apache.hadoop.hbase.util;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.concurrent.atomic.AtomicReference;

import javax.crypto.spec.SecretKeySpec;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.crypto.Cipher;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
import org.apache.hadoop.hbase.util.test.LoadTestDataGeneratorWithACL;
import org.apache.hadoop.util.ToolRunner;
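
/**
 * A command-line utility that reads, writes, and verifies data. Unlike
 * {@link PerformanceEvaluation}, this tool validates the data written, and
 * supports simultaneously writing and reading the same set of keys.
 *
 * <p>Illustrative invocation (the argument values below are examples, not
 * defaults):
 *
 * <pre>
 * hbase org.apache.hadoop.hbase.util.LoadTestTool -tn cluster_test \
 *     -write 5:1024:20 -num_keys 1000000 -read 100:20
 * </pre>
 */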
public class LoadTestTool extends AbstractHBaseTool {

  private static final Log LOG = LogFactory.getLog(LoadTestTool.class);
  private static final String COLON = ":";
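
  /** Table name for the test */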
  private TableName tableName;
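
  /** Table name to use if not overridden on the command line */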
  protected static final String DEFAULT_TABLE_NAME = "cluster_test";
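
  /** Column family used by the test */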
  public static byte[] COLUMN_FAMILY = Bytes.toBytes("test_cf");
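
  /** Column families used by the test */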
  protected static final byte[][] COLUMN_FAMILIES = { COLUMN_FAMILY };
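
  /** The default data size if not specified */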
  protected static final int DEFAULT_DATA_SIZE = 64;
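
  /** The number of reader/writer threads if not specified */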
  protected static final int DEFAULT_NUM_THREADS = 20;
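
  /** Usage string for the load option */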
  protected static final String OPT_USAGE_LOAD =
      "<avg_cols_per_key>:<avg_data_size>" +
      "[:<#threads=" + DEFAULT_NUM_THREADS + ">]";
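
  /** Usage string for the read option */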
  protected static final String OPT_USAGE_READ =
      "<verify_percent>[:<#threads=" + DEFAULT_NUM_THREADS + ">]";
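
  /** Usage string for the update option */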
  protected static final String OPT_USAGE_UPDATE =
      "<update_percent>[:<#threads=" + DEFAULT_NUM_THREADS
      + ">][:<#whether to ignore nonce collisions=0>]";

  protected static final String OPT_USAGE_BLOOM = "Bloom filter type, one of " +
      Arrays.toString(BloomType.values());

  protected static final String OPT_USAGE_COMPRESSION = "Compression type, " +
      "one of " + Arrays.toString(Compression.Algorithm.values());

  public static final String OPT_DATA_BLOCK_ENCODING_USAGE =
      "Encoding algorithm (e.g. prefix "
          + "compression) to use for data blocks in the test column family, "
          + "one of " + Arrays.toString(DataBlockEncoding.values()) + ".";

  private static final String OPT_BLOOM = "bloom";
  private static final String OPT_COMPRESSION = "compression";
  public static final String OPT_DATA_BLOCK_ENCODING =
      HColumnDescriptor.DATA_BLOCK_ENCODING.toLowerCase();

  public static final String OPT_INMEMORY = "in_memory";
  public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " +
      "in memory as far as possible. It is not guaranteed that reads are always served " +
      "from memory";

  public static final String OPT_GENERATOR = "generator";
  public static final String OPT_GENERATOR_USAGE = "The class which generates load for the tool."
      + " Any args for this class can be passed as colon-separated after the class name";

  protected static final String OPT_KEY_WINDOW = "key_window";
  protected static final String OPT_WRITE = "write";
  protected static final String OPT_MAX_READ_ERRORS = "max_read_errors";
  protected static final String OPT_MULTIPUT = "multiput";
  protected static final String OPT_NUM_KEYS = "num_keys";
  protected static final String OPT_READ = "read";
  protected static final String OPT_START_KEY = "start_key";
  protected static final String OPT_TABLE_NAME = "tn";
  protected static final String OPT_ZK_QUORUM = "zk";
  protected static final String OPT_ZK_PARENT_NODE = "zk_root";
  protected static final String OPT_SKIP_INIT = "skip_init";
  protected static final String OPT_INIT_ONLY = "init_only";
  private static final String NUM_TABLES = "num_tables";
  protected static final String OPT_BATCHUPDATE = "batchupdate";
  protected static final String OPT_UPDATE = "update";

  protected static final String OPT_ENCRYPTION = "encryption";
  protected static final String OPT_ENCRYPTION_USAGE =
      "Enables transparent encryption on the test table, one of " +
      Arrays.toString(Encryption.getSupportedCiphers());

  protected static final long DEFAULT_START_KEY = 0;
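
  /** This will be removed as we factor out the dependency on command line */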
  protected CommandLine cmd;

  protected MultiThreadedWriter writerThreads = null;
  protected MultiThreadedReader readerThreads = null;
  protected MultiThreadedUpdater updaterThreads = null;

  protected long startKey, endKey;

  protected boolean isWrite, isRead, isUpdate;
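
  // Column family options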
  protected DataBlockEncoding dataBlockEncodingAlgo;
  protected Compression.Algorithm compressAlgo;
  protected BloomType bloomType;
  private boolean inMemoryCF;

  private User userOwner;
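
  // Writer options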
  protected int numWriterThreads = DEFAULT_NUM_THREADS;
  protected int minColsPerKey, maxColsPerKey;
  protected int minColDataSize = DEFAULT_DATA_SIZE, maxColDataSize = DEFAULT_DATA_SIZE;
  protected boolean isMultiPut;
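
  // Updater options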
  protected int numUpdaterThreads = DEFAULT_NUM_THREADS;
  protected int updatePercent;
  protected boolean ignoreConflicts = false;
  protected boolean isBatchUpdate;
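
  // Reader options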
  private int numReaderThreads = DEFAULT_NUM_THREADS;
  private int keyWindow = MultiThreadedReader.DEFAULT_KEY_WINDOW;
  private int maxReadErrors = MultiThreadedReader.DEFAULT_MAX_ERRORS;
  private int verifyPercent;

  private int numTables = 1;

  protected boolean isSkipInit = false;
  protected boolean isInitOnly = false;

  protected Cipher cipher = null;

  protected String[] splitColonSeparated(String option,
      int minNumCols, int maxNumCols) {
    String optVal = cmd.getOptionValue(option);
    String[] cols = optVal.split(COLON);
    if (cols.length < minNumCols || cols.length > maxNumCols) {
      throw new IllegalArgumentException("Expected at least "
          + minNumCols + " columns but no more than " + maxNumCols +
          " in the colon-separated value '" + optVal + "' of the " +
          "-" + option + " option");
    }
    return cols;
  }

  protected int getNumThreads(String numThreadsStr) {
    return parseInt(numThreadsStr, 1, Short.MAX_VALUE);
  }
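
  /**
   * Apply column family options such as Bloom filters, compression, and data
   * block encoding.
   */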
  protected void applyColumnFamilyOptions(TableName tableName,
      byte[][] columnFamilies) throws IOException {
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      HTableDescriptor tableDesc = admin.getTableDescriptor(tableName);
      LOG.info("Disabling table " + tableName);
      admin.disableTable(tableName);
      for (byte[] cf : columnFamilies) {
        HColumnDescriptor columnDesc = tableDesc.getFamily(cf);
        boolean isNewCf = columnDesc == null;
        if (isNewCf) {
          columnDesc = new HColumnDescriptor(cf);
        }
        if (bloomType != null) {
          columnDesc.setBloomFilterType(bloomType);
        }
        if (compressAlgo != null) {
          columnDesc.setCompressionType(compressAlgo);
        }
        if (dataBlockEncodingAlgo != null) {
          columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo);
        }
        if (inMemoryCF) {
          columnDesc.setInMemory(inMemoryCF);
        }
        if (cipher != null) {
          byte[] keyBytes = new byte[cipher.getKeyLength()];
          new SecureRandom().nextBytes(keyBytes);
          columnDesc.setEncryptionType(cipher.getName());
          columnDesc.setEncryptionKey(EncryptionUtil.wrapKey(conf,
              User.getCurrent().getShortName(),
              new SecretKeySpec(keyBytes, cipher.getName())));
        }
        if (isNewCf) {
          admin.addColumn(tableName, columnDesc);
        } else {
          admin.modifyColumn(tableName, columnDesc);
        }
      }
      LOG.info("Enabling table " + tableName);
      admin.enableTable(tableName);
    } finally {
      // Close the admin connection even if the table modification fails.
      admin.close();
    }
  }

  @Override
  protected void addOptions() {
    addOptWithArg(OPT_ZK_QUORUM, "ZK quorum as comma-separated host names " +
        "without port numbers");
    addOptWithArg(OPT_ZK_PARENT_NODE, "name of parent znode in zookeeper");
    addOptWithArg(OPT_TABLE_NAME, "The name of the table to read or write");
    addOptWithArg(OPT_WRITE, OPT_USAGE_LOAD);
    addOptWithArg(OPT_READ, OPT_USAGE_READ);
    addOptWithArg(OPT_UPDATE, OPT_USAGE_UPDATE);
    addOptNoArg(OPT_INIT_ONLY, "Initialize the test table only, don't do any loading");
    addOptWithArg(OPT_BLOOM, OPT_USAGE_BLOOM);
    addOptWithArg(OPT_COMPRESSION, OPT_USAGE_COMPRESSION);
    addOptWithArg(OPT_DATA_BLOCK_ENCODING, OPT_DATA_BLOCK_ENCODING_USAGE);
    addOptWithArg(OPT_MAX_READ_ERRORS, "The maximum number of read errors " +
        "to tolerate before terminating all reader threads. The default is " +
        MultiThreadedReader.DEFAULT_MAX_ERRORS + ".");
    addOptWithArg(OPT_KEY_WINDOW, "The 'key window' to maintain between " +
        "reads and writes for concurrent write/read workload. The default " +
        "is " + MultiThreadedReader.DEFAULT_KEY_WINDOW + ".");

    addOptNoArg(OPT_MULTIPUT, "Whether to use multi-puts as opposed to " +
        "separate puts for every column in a row");
    addOptNoArg(OPT_BATCHUPDATE, "Whether to use batch as opposed to " +
        "separate updates for every column in a row");
    addOptNoArg(OPT_INMEMORY, OPT_USAGE_IN_MEMORY);
    addOptWithArg(OPT_GENERATOR, OPT_GENERATOR_USAGE);

    addOptWithArg(OPT_NUM_KEYS, "The number of keys to read/write");
    addOptWithArg(OPT_START_KEY, "The first key to read/write " +
        "(a 0-based index). The default value is " +
        DEFAULT_START_KEY + ".");
    addOptNoArg(OPT_SKIP_INIT, "Skip the initialization; assume test table "
        + "already exists");

    addOptWithArg(NUM_TABLES,
        "A positive integer number. When a number n is specified, the load test "
            + "tool will load n tables in parallel. The -tn parameter value becomes "
            + "the table name prefix. Each table name is in format <tn>_1...<tn>_n");

    addOptWithArg(OPT_ENCRYPTION, OPT_ENCRYPTION_USAGE);
  }

  @Override
  protected void processOptions(CommandLine cmd) {
    this.cmd = cmd;

    tableName = TableName.valueOf(cmd.getOptionValue(OPT_TABLE_NAME,
        DEFAULT_TABLE_NAME));

    isWrite = cmd.hasOption(OPT_WRITE);
    isRead = cmd.hasOption(OPT_READ);
    isUpdate = cmd.hasOption(OPT_UPDATE);
    isInitOnly = cmd.hasOption(OPT_INIT_ONLY);

    if (!isWrite && !isRead && !isUpdate && !isInitOnly) {
      throw new IllegalArgumentException("Either -" + OPT_WRITE + ", -" +
          OPT_UPDATE + " or -" + OPT_READ + " has to be specified");
    }

    if (isInitOnly && (isRead || isWrite || isUpdate)) {
      throw new IllegalArgumentException(OPT_INIT_ONLY + " cannot be specified with"
          + " either -" + OPT_WRITE + " or -" + OPT_UPDATE + " or -" + OPT_READ);
    }

    if (!isInitOnly) {
      if (!cmd.hasOption(OPT_NUM_KEYS)) {
        throw new IllegalArgumentException(OPT_NUM_KEYS + " must be specified in "
            + "read or write mode");
      }
      startKey = parseLong(cmd.getOptionValue(OPT_START_KEY,
          String.valueOf(DEFAULT_START_KEY)), 0, Long.MAX_VALUE);
      long numKeys = parseLong(cmd.getOptionValue(OPT_NUM_KEYS), 1,
          Long.MAX_VALUE - startKey);
      endKey = startKey + numKeys;
      isSkipInit = cmd.hasOption(OPT_SKIP_INIT);
      System.out.println("Key range: [" + startKey + ".." + (endKey - 1) + "]");
    }

    parseColumnFamilyOptions(cmd);

    if (isWrite) {
      String[] writeOpts = splitColonSeparated(OPT_WRITE, 2, 3);

      int colIndex = 0;
      minColsPerKey = 1;
      maxColsPerKey = 2 * Integer.parseInt(writeOpts[colIndex++]);
      int avgColDataSize =
          parseInt(writeOpts[colIndex++], 1, Integer.MAX_VALUE);
      minColDataSize = avgColDataSize / 2;
      maxColDataSize = avgColDataSize * 3 / 2;

      if (colIndex < writeOpts.length) {
        numWriterThreads = getNumThreads(writeOpts[colIndex++]);
      }

      isMultiPut = cmd.hasOption(OPT_MULTIPUT);

      System.out.println("Multi-puts: " + isMultiPut);
      System.out.println("Columns per key: " + minColsPerKey + ".."
          + maxColsPerKey);
      System.out.println("Data size per column: " + minColDataSize + ".."
          + maxColDataSize);
    }

    if (isUpdate) {
      String[] mutateOpts = splitColonSeparated(OPT_UPDATE, 1, 3);
      int colIndex = 0;
      updatePercent = parseInt(mutateOpts[colIndex++], 0, 100);
      if (colIndex < mutateOpts.length) {
        numUpdaterThreads = getNumThreads(mutateOpts[colIndex++]);
      }
      if (colIndex < mutateOpts.length) {
        ignoreConflicts = parseInt(mutateOpts[colIndex++], 0, 1) == 1;
      }

      isBatchUpdate = cmd.hasOption(OPT_BATCHUPDATE);

      System.out.println("Batch updates: " + isBatchUpdate);
      System.out.println("Percent of keys to update: " + updatePercent);
      System.out.println("Updater threads: " + numUpdaterThreads);
      System.out.println("Ignore nonce conflicts: " + ignoreConflicts);
    }

    if (isRead) {
      String[] readOpts = splitColonSeparated(OPT_READ, 1, 2);
      int colIndex = 0;
      verifyPercent = parseInt(readOpts[colIndex++], 0, 100);
      if (colIndex < readOpts.length) {
        numReaderThreads = getNumThreads(readOpts[colIndex++]);
      }

      if (cmd.hasOption(OPT_MAX_READ_ERRORS)) {
        maxReadErrors = parseInt(cmd.getOptionValue(OPT_MAX_READ_ERRORS),
            0, Integer.MAX_VALUE);
      }

      if (cmd.hasOption(OPT_KEY_WINDOW)) {
        keyWindow = parseInt(cmd.getOptionValue(OPT_KEY_WINDOW),
            0, Integer.MAX_VALUE);
      }

      System.out.println("Percent of keys to verify: " + verifyPercent);
      System.out.println("Reader threads: " + numReaderThreads);
    }

    numTables = 1;
    if (cmd.hasOption(NUM_TABLES)) {
      numTables = parseInt(cmd.getOptionValue(NUM_TABLES), 1, Short.MAX_VALUE);
    }
  }

  private void parseColumnFamilyOptions(CommandLine cmd) {
    String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
    dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
        DataBlockEncoding.valueOf(dataBlockEncodingStr);

    String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
    compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
        Compression.Algorithm.valueOf(compressStr);

    String bloomStr = cmd.getOptionValue(OPT_BLOOM);
    bloomType = bloomStr == null ? null :
        BloomType.valueOf(bloomStr);

    inMemoryCF = cmd.hasOption(OPT_INMEMORY);
    if (cmd.hasOption(OPT_ENCRYPTION)) {
      cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
    }
  }

  public void initTestTable() throws IOException {
    HBaseTestingUtility.createPreSplitLoadTestTable(conf, tableName,
        COLUMN_FAMILY, compressAlgo, dataBlockEncodingAlgo);
    applyColumnFamilyOptions(tableName, COLUMN_FAMILIES);
  }

  @Override
  protected int doWork() throws IOException {
    if (numTables > 1) {
      return parallelLoadTables();
    } else {
      return loadTable();
    }
  }

  protected int loadTable() throws IOException {
    if (cmd.hasOption(OPT_ZK_QUORUM)) {
      conf.set(HConstants.ZOOKEEPER_QUORUM, cmd.getOptionValue(OPT_ZK_QUORUM));
    }
    if (cmd.hasOption(OPT_ZK_PARENT_NODE)) {
      conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, cmd.getOptionValue(OPT_ZK_PARENT_NODE));
    }

    if (isInitOnly) {
      LOG.info("Initializing only; no reads or writes");
      initTestTable();
      return 0;
    }

    if (!isSkipInit) {
      initTestTable();
    }
    LoadTestDataGenerator dataGen = null;
    if (cmd.hasOption(OPT_GENERATOR)) {
      String[] clazzAndArgs = cmd.getOptionValue(OPT_GENERATOR).split(COLON);
      dataGen = getLoadGeneratorInstance(clazzAndArgs[0]);
      if (dataGen instanceof LoadTestDataGeneratorWithACL) {
        LOG.info("ACL is on");
        userOwner = User.createUserForTesting(conf, "owner", new String[0]);
      }
      String[] args = clazzAndArgs.length == 1 ? new String[0] : Arrays.copyOfRange(clazzAndArgs,
          1, clazzAndArgs.length);
      dataGen.initialize(args);
    } else {
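      // No custom generator class given; fall back to the default data generator.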
      dataGen = new MultiThreadedAction.DefaultDataGenerator(minColDataSize, maxColDataSize,
          minColsPerKey, maxColsPerKey, COLUMN_FAMILY);
    }

    if (userOwner != null) {
      conf.set("hadoop.security.authorization", "false");
      conf.set("hadoop.security.authentication", "simple");
      LOG.info("Granting permission for the user " + userOwner.getShortName());
      HTable table = new HTable(conf, tableName);
      AccessControlProtos.Permission.Action[] actions = {
          AccessControlProtos.Permission.Action.ADMIN,
          AccessControlProtos.Permission.Action.CREATE, AccessControlProtos.Permission.Action.READ,
          AccessControlProtos.Permission.Action.WRITE };

      try {
        AccessControlClient.grant(conf, table.getName(), userOwner.getShortName(), COLUMN_FAMILY,
            null, actions);
      } catch (Throwable e) {
        LOG.fatal("Error in granting permission for the user " + userOwner.getShortName(), e);
        return EXIT_FAILURE;
      } finally {
        // Close the table handle; it was only needed to resolve the table name for the grant.
        table.close();
      }
    }

    if (isWrite) {
      if (userOwner != null) {
        writerThreads = new MultiThreadedWriterWithACL(dataGen, conf, tableName, userOwner);
      } else {
        writerThreads = new MultiThreadedWriter(dataGen, conf, tableName);
      }
      writerThreads.setMultiPut(isMultiPut);
    }

    if (isUpdate) {
      if (userOwner != null) {
        updaterThreads = new MultiThreadedUpdaterWithACL(dataGen, conf, tableName, updatePercent,
            userOwner);
      } else {
        updaterThreads = new MultiThreadedUpdater(dataGen, conf, tableName, updatePercent);
      }
      updaterThreads.setBatchUpdate(isBatchUpdate);
      updaterThreads.setIgnoreNonceConflicts(ignoreConflicts);
    }

    if (isRead) {
      if (userOwner != null) {
        readerThreads = new MultiThreadedReaderWithACL(dataGen, conf, tableName, verifyPercent);
      } else {
        readerThreads = new MultiThreadedReader(dataGen, conf, tableName, verifyPercent);
      }
      readerThreads.setMaxErrors(maxReadErrors);
      readerThreads.setKeyWindow(keyWindow);
    }

    if (isUpdate && isWrite) {
      LOG.info("Concurrent write/update workload: making updaters aware of the " +
          "write point");
      updaterThreads.linkToWriter(writerThreads);
    }

    if (isRead && (isUpdate || isWrite)) {
      LOG.info("Concurrent write/read workload: making readers aware of the " +
          "write point");
      readerThreads.linkToWriter(isUpdate ? updaterThreads : writerThreads);
    }

    if (isWrite) {
      System.out.println("Starting to write data...");
      writerThreads.start(startKey, endKey, numWriterThreads);
    }

    if (isUpdate) {
      LOG.info("Starting to mutate data...");
      System.out.println("Starting to mutate data...");
      updaterThreads.start(startKey, endKey, numUpdaterThreads);
    }

    if (isRead) {
      System.out.println("Starting to read data...");
      readerThreads.start(startKey, endKey, numReaderThreads);
    }

    if (isWrite) {
      writerThreads.waitForFinish();
    }

    if (isUpdate) {
      updaterThreads.waitForFinish();
    }

    if (isRead) {
      readerThreads.waitForFinish();
    }

    boolean success = true;
    if (isWrite) {
      success = success && writerThreads.getNumWriteFailures() == 0;
    }
    if (isUpdate) {
      success = success && updaterThreads.getNumWriteFailures() == 0;
    }
    if (isRead) {
      success = success && readerThreads.getNumReadErrors() == 0
          && readerThreads.getNumReadFailures() == 0;
    }
    return success ? EXIT_SUCCESS : EXIT_FAILURE;
  }

  private LoadTestDataGenerator getLoadGeneratorInstance(String clazzName) throws IOException {
    try {
      Class<?> clazz = Class.forName(clazzName);
      Constructor<?> constructor = clazz.getConstructor(int.class, int.class, int.class, int.class,
          byte[][].class);
      return (LoadTestDataGenerator) constructor.newInstance(minColDataSize, maxColDataSize,
          minColsPerKey, maxColsPerKey, COLUMN_FAMILIES);
    } catch (Exception e) {
      throw new IOException(e);
    }
  }

  public static byte[] generateData(final Random r, int length) {
    byte[] b = new byte[length];
    int i = 0;

    // Fill eight bytes at a time with the same random letter (A-Z).
    for (i = 0; i < (length - 8); i += 8) {
      b[i] = (byte) (65 + r.nextInt(26));
      b[i + 1] = b[i];
      b[i + 2] = b[i];
      b[i + 3] = b[i];
      b[i + 4] = b[i];
      b[i + 5] = b[i];
      b[i + 6] = b[i];
      b[i + 7] = b[i];
    }

    // Fill the remaining tail bytes with a single random letter.
    byte a = (byte) (65 + r.nextInt(26));
    for (; i < length; i++) {
      b[i] = a;
    }
    return b;
  }

  public static void main(String[] args) {
    new LoadTestTool().doStaticMain(args);
  }
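
  /**
   * When NUM_TABLES is specified, the tool starts multiple worker threads,
   * each running its own LoadTestTool instance against one table. Each table
   * name is in format <tn>_1...<tn>_n.
   */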
  private int parallelLoadTables()
      throws IOException {
    // Prepend a default -tn argument if the table name was not specified.
    String tableName = cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME);
    String[] newArgs = null;
    if (!cmd.hasOption(LoadTestTool.OPT_TABLE_NAME)) {
      newArgs = new String[cmdLineArgs.length + 2];
      newArgs[0] = "-" + LoadTestTool.OPT_TABLE_NAME;
      newArgs[1] = LoadTestTool.DEFAULT_TABLE_NAME;
      for (int i = 0; i < cmdLineArgs.length; i++) {
        newArgs[i + 2] = cmdLineArgs[i];
      }
    } else {
      newArgs = cmdLineArgs;
    }

    int tableNameValueIndex = -1;
    for (int j = 0; j < newArgs.length; j++) {
      if (newArgs[j].endsWith(OPT_TABLE_NAME)) {
        tableNameValueIndex = j + 1;
      } else if (newArgs[j].endsWith(NUM_TABLES)) {
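        // Change NUM_TABLES to 1 so that each worker loads exactly one table.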
        newArgs[j + 1] = "1";
      }
    }
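
    // Start one worker thread per table.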
    List<WorkerThread> workers = new ArrayList<WorkerThread>();
    for (int i = 0; i < numTables; i++) {
      String[] workerArgs = newArgs.clone();
      workerArgs[tableNameValueIndex] = tableName + "_" + (i + 1);
      WorkerThread worker = new WorkerThread(i, workerArgs);
      workers.add(worker);
      LOG.info(worker + " starting");
      worker.start();
    }

    LOG.info("Waiting for worker threads to finish");
    for (WorkerThread t : workers) {
      try {
        t.join();
      } catch (InterruptedException ie) {
        IOException iie = new InterruptedIOException();
        iie.initCause(ie);
        throw iie;
      }
      checkForErrors();
    }

    return EXIT_SUCCESS;
  }
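
  // If an exception is thrown by one of the worker threads, it is stored here.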
  protected AtomicReference<Throwable> thrown = new AtomicReference<Throwable>();

  private void workerThreadError(Throwable t) {
    thrown.compareAndSet(null, t);
  }
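
  /**
   * Check for errors in the worker threads and rethrow the first one recorded,
   * wrapping anything that is not an IOException in a RuntimeException.
   */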
  private void checkForErrors() throws IOException {
    Throwable thrown = this.thrown.get();
    if (thrown == null) return;
    if (thrown instanceof IOException) {
      throw (IOException) thrown;
    } else {
      throw new RuntimeException(thrown);
    }
  }
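
  /** A worker thread that runs one LoadTestTool instance against a single table. */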
  class WorkerThread extends Thread {
    private String[] workerArgs;

    WorkerThread(int i, String[] args) {
      super("WorkerThread-" + i);
      workerArgs = args;
    }

    @Override
    public void run() {
      try {
        int ret = ToolRunner.run(HBaseConfiguration.create(), new LoadTestTool(), workerArgs);
        if (ret != 0) {
          throw new RuntimeException("LoadTestTool exited with a non-zero return code.");
        }
      } catch (Exception ex) {
        LOG.error("Error in worker thread", ex);
        workerThreadError(ex);
      }
    }
  }
}