/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package org.apache.hadoop.hbase.util;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
import java.net.InetAddress;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.atomic.AtomicReference;

import javax.crypto.spec.SecretKeySpec;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.crypto.Cipher;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
import org.apache.hadoop.hbase.util.test.LoadTestDataGeneratorWithACL;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ToolRunner;

/**
 * A command-line utility that reads, writes, and verifies data. Unlike
 * {@link PerformanceEvaluation}, this tool validates the data written,
 * and supports simultaneously writing and reading the same set of keys.
 */
public class LoadTestTool extends AbstractHBaseTool {

  private static final Log LOG = LogFactory.getLog(LoadTestTool.class);
  private static final String COLON = ":";

  /** Table name for the test */
  private TableName tableName;

  /** Table name to use if not overridden on the command line */
  protected static final String DEFAULT_TABLE_NAME = "cluster_test";

  /** Column family used by the test */
  public static byte[] COLUMN_FAMILY = Bytes.toBytes("test_cf");

  /** Column families used by the test */
  protected static final byte[][] COLUMN_FAMILIES = { COLUMN_FAMILY };

  /** The default data size if not specified */
  protected static final int DEFAULT_DATA_SIZE = 64;

  /** The number of reader/writer threads if not specified */
  protected static final int DEFAULT_NUM_THREADS = 20;

  /** Usage string for the load option */
  protected static final String OPT_USAGE_LOAD =
      "<avg_cols_per_key>:<avg_data_size>" +
      "[:<#threads=" + DEFAULT_NUM_THREADS + ">]";

  /** Usage string for the read option */
  protected static final String OPT_USAGE_READ =
      "<verify_percent>[:<#threads=" + DEFAULT_NUM_THREADS + ">]";

  /** Usage string for the update option */
  protected static final String OPT_USAGE_UPDATE =
      "<update_percent>[:<#threads=" + DEFAULT_NUM_THREADS
      + ">][:<#whether to ignore nonce collisions=0>]";

  protected static final String OPT_USAGE_BLOOM = "Bloom filter type, one of " +
      Arrays.toString(BloomType.values());

  protected static final String OPT_USAGE_COMPRESSION = "Compression type, " +
      "one of " + Arrays.toString(Compression.Algorithm.values());

  public static final String OPT_DATA_BLOCK_ENCODING_USAGE =
    "Encoding algorithm (e.g. prefix "
        + "compression) to use for data blocks in the test column family, "
        + "one of " + Arrays.toString(DataBlockEncoding.values()) + ".";

  private static final String OPT_BLOOM = "bloom";
  private static final String OPT_COMPRESSION = "compression";
  public static final String OPT_DATA_BLOCK_ENCODING =
      HColumnDescriptor.DATA_BLOCK_ENCODING.toLowerCase();

  public static final String OPT_INMEMORY = "in_memory";
  public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " +
      "in memory as far as possible. Not guaranteed that reads are always served from memory";

  public static final String OPT_GENERATOR = "generator";
  public static final String OPT_GENERATOR_USAGE = "The class which generates load for the tool."
      + " Any arguments for this class can be passed, colon-separated, after the class name";

  protected static final String OPT_KEY_WINDOW = "key_window";
  protected static final String OPT_WRITE = "write";
  protected static final String OPT_MAX_READ_ERRORS = "max_read_errors";
  protected static final String OPT_MULTIPUT = "multiput";
  protected static final String OPT_NUM_KEYS = "num_keys";
  protected static final String OPT_READ = "read";
  protected static final String OPT_START_KEY = "start_key";
  protected static final String OPT_TABLE_NAME = "tn";
  protected static final String OPT_ZK_QUORUM = "zk";
  protected static final String OPT_ZK_PARENT_NODE = "zk_root";
  protected static final String OPT_SKIP_INIT = "skip_init";
  protected static final String OPT_INIT_ONLY = "init_only";
  protected static final String NUM_TABLES = "num_tables";
  protected static final String OPT_REGIONS_PER_SERVER = "regions_per_server";
  protected static final String OPT_BATCHUPDATE = "batchupdate";
  protected static final String OPT_UPDATE = "update";

  protected static final String OPT_ENCRYPTION = "encryption";
  protected static final String OPT_ENCRYPTION_USAGE =
    "Enables transparent encryption on the test table, one of " +
    Arrays.toString(Encryption.getSupportedCiphers());

  protected static final long DEFAULT_START_KEY = 0;

  /** This will be removed as we factor out the dependency on command line */
  protected CommandLine cmd;

  protected MultiThreadedWriter writerThreads = null;
  protected MultiThreadedReader readerThreads = null;
  protected MultiThreadedUpdater updaterThreads = null;

  protected long startKey, endKey;

  protected boolean isWrite, isRead, isUpdate;

  // Column family options
  protected DataBlockEncoding dataBlockEncodingAlgo;
  protected Compression.Algorithm compressAlgo;
  protected BloomType bloomType;
  private boolean inMemoryCF;

  private User userOwner;
  // Writer options
  protected int numWriterThreads = DEFAULT_NUM_THREADS;
  protected int minColsPerKey, maxColsPerKey;
  protected int minColDataSize = DEFAULT_DATA_SIZE, maxColDataSize = DEFAULT_DATA_SIZE;
  protected boolean isMultiPut;

  // Updater options
  protected int numUpdaterThreads = DEFAULT_NUM_THREADS;
  protected int updatePercent;
  protected boolean ignoreConflicts = false;
  protected boolean isBatchUpdate;

  // Reader options
  private int numReaderThreads = DEFAULT_NUM_THREADS;
  private int keyWindow = MultiThreadedReader.DEFAULT_KEY_WINDOW;
  private int maxReadErrors = MultiThreadedReader.DEFAULT_MAX_ERRORS;
  private int verifyPercent;

  private int numTables = 1;
  private int regionsPerServer = HBaseTestingUtility.DEFAULT_REGIONS_PER_SERVER;

  private String superUser;

  private String userNames = "user1, user2, user3, user4";
  // This file is used to read authentication information in secure clusters.
  private String authnFileName;

  // TODO: refactor LoadTestToolImpl somewhere to make the usage from tests less bad,
  //       console tool itself should only be used from console.
  protected boolean isSkipInit = false;
  protected boolean isInitOnly = false;

  protected Cipher cipher = null;

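  /**
   * Splits the given option's value on colons and verifies that the number of
   * fields falls within the expected range.
   */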
  protected String[] splitColonSeparated(String option,
      int minNumCols, int maxNumCols) {
    String optVal = cmd.getOptionValue(option);
    String[] cols = optVal.split(COLON);
    if (cols.length < minNumCols || cols.length > maxNumCols) {
      throw new IllegalArgumentException("Expected at least "
          + minNumCols + " columns but no more than " + maxNumCols +
          " in the colon-separated value '" + optVal + "' of the " +
          "-" + option + " option");
    }
    return cols;
  }

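  /** Parses a thread-count argument, accepting values between 1 and Short.MAX_VALUE. */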
  protected int getNumThreads(String numThreadsStr) {
    return parseInt(numThreadsStr, 1, Short.MAX_VALUE);
  }

  /**
   * Apply column family options such as Bloom filters, compression, and data
   * block encoding.
   */
  protected void applyColumnFamilyOptions(TableName tableName,
      byte[][] columnFamilies) throws IOException {
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      HTableDescriptor tableDesc = admin.getTableDescriptor(tableName);
      LOG.info("Disabling table " + tableName);
      admin.disableTable(tableName);
      for (byte[] cf : columnFamilies) {
        HColumnDescriptor columnDesc = tableDesc.getFamily(cf);
        boolean isNewCf = columnDesc == null;
        if (isNewCf) {
          columnDesc = new HColumnDescriptor(cf);
        }
        if (bloomType != null) {
          columnDesc.setBloomFilterType(bloomType);
        }
        if (compressAlgo != null) {
          columnDesc.setCompressionType(compressAlgo);
        }
        if (dataBlockEncodingAlgo != null) {
          columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo);
        }
        if (inMemoryCF) {
          columnDesc.setInMemory(inMemoryCF);
        }
        if (cipher != null) {
          // Generate a random key for the family and wrap it for the current user.
          byte[] keyBytes = new byte[cipher.getKeyLength()];
          new SecureRandom().nextBytes(keyBytes);
          columnDesc.setEncryptionType(cipher.getName());
          columnDesc.setEncryptionKey(EncryptionUtil.wrapKey(conf,
            User.getCurrent().getShortName(),
            new SecretKeySpec(keyBytes, cipher.getName())));
        }
        if (isNewCf) {
          admin.addColumn(tableName, columnDesc);
        } else {
          admin.modifyColumn(tableName, columnDesc);
        }
      }
      LOG.info("Enabling table " + tableName);
      admin.enableTable(tableName);
    } finally {
      // Close the admin to avoid leaking its connection; the original code left it open.
      admin.close();
    }
  }

  @Override
  protected void addOptions() {
    addOptWithArg(OPT_ZK_QUORUM, "ZK quorum as comma-separated host names " +
        "without port numbers");
    addOptWithArg(OPT_ZK_PARENT_NODE, "name of parent znode in zookeeper");
    addOptWithArg(OPT_TABLE_NAME, "The name of the table to read or write");
    addOptWithArg(OPT_WRITE, OPT_USAGE_LOAD);
    addOptWithArg(OPT_READ, OPT_USAGE_READ);
    addOptWithArg(OPT_UPDATE, OPT_USAGE_UPDATE);
    addOptNoArg(OPT_INIT_ONLY, "Initialize the test table only, don't do any loading");
    addOptWithArg(OPT_BLOOM, OPT_USAGE_BLOOM);
    addOptWithArg(OPT_COMPRESSION, OPT_USAGE_COMPRESSION);
    addOptWithArg(OPT_DATA_BLOCK_ENCODING, OPT_DATA_BLOCK_ENCODING_USAGE);
    addOptWithArg(OPT_MAX_READ_ERRORS, "The maximum number of read errors " +
        "to tolerate before terminating all reader threads. The default is " +
        MultiThreadedReader.DEFAULT_MAX_ERRORS + ".");
    addOptWithArg(OPT_KEY_WINDOW, "The 'key window' to maintain between " +
        "reads and writes for concurrent write/read workload. The default " +
        "is " + MultiThreadedReader.DEFAULT_KEY_WINDOW + ".");

    addOptNoArg(OPT_MULTIPUT, "Whether to use multi-puts as opposed to " +
        "separate puts for every column in a row");
    addOptNoArg(OPT_BATCHUPDATE, "Whether to use batch as opposed to " +
        "separate updates for every column in a row");
    addOptNoArg(OPT_INMEMORY, OPT_USAGE_IN_MEMORY);
    addOptWithArg(OPT_GENERATOR, OPT_GENERATOR_USAGE);

    addOptWithArg(OPT_NUM_KEYS, "The number of keys to read/write");
    addOptWithArg(OPT_START_KEY, "The first key to read/write " +
        "(a 0-based index). The default value is " +
        DEFAULT_START_KEY + ".");
    addOptNoArg(OPT_SKIP_INIT, "Skip the initialization; assume test table "
        + "already exists");

    addOptWithArg(NUM_TABLES,
      "A positive integer number. When a number n is specified, load test "
          + "tool will load n tables in parallel. -tn parameter value becomes "
          + "table name prefix. Each table name is in format <tn>_1...<tn>_n");

    addOptWithArg(OPT_REGIONS_PER_SERVER,
      "A positive integer number. When a number n is specified, load test "
          + "tool will create the test table with n regions per server");

    addOptWithArg(OPT_ENCRYPTION, OPT_ENCRYPTION_USAGE);
  }

  @Override
  protected void processOptions(CommandLine cmd) {
    this.cmd = cmd;

    tableName = TableName.valueOf(cmd.getOptionValue(OPT_TABLE_NAME,
        DEFAULT_TABLE_NAME));

    isWrite = cmd.hasOption(OPT_WRITE);
    isRead = cmd.hasOption(OPT_READ);
    isUpdate = cmd.hasOption(OPT_UPDATE);
    isInitOnly = cmd.hasOption(OPT_INIT_ONLY);

    if (!isWrite && !isRead && !isUpdate && !isInitOnly) {
      throw new IllegalArgumentException("Either -" + OPT_WRITE + ", -" +
        OPT_UPDATE + " or -" + OPT_READ + " has to be specified");
    }

    if (isInitOnly && (isRead || isWrite || isUpdate)) {
      throw new IllegalArgumentException(OPT_INIT_ONLY + " cannot be specified with"
          + " either -" + OPT_WRITE + " or -" + OPT_UPDATE + " or -" + OPT_READ);
    }

    if (!isInitOnly) {
      if (!cmd.hasOption(OPT_NUM_KEYS)) {
        throw new IllegalArgumentException(OPT_NUM_KEYS + " must be specified in "
            + "read or write mode");
      }
      startKey = parseLong(cmd.getOptionValue(OPT_START_KEY,
          String.valueOf(DEFAULT_START_KEY)), 0, Long.MAX_VALUE);
      long numKeys = parseLong(cmd.getOptionValue(OPT_NUM_KEYS), 1,
          Long.MAX_VALUE - startKey);
      endKey = startKey + numKeys;
      isSkipInit = cmd.hasOption(OPT_SKIP_INIT);
      System.out.println("Key range: [" + startKey + ".." + (endKey - 1) + "]");
    }

    parseColumnFamilyOptions(cmd);

    if (isWrite) {
      String[] writeOpts = splitColonSeparated(OPT_WRITE, 2, 3);

      int colIndex = 0;
      minColsPerKey = 1;
      maxColsPerKey = 2 * Integer.parseInt(writeOpts[colIndex++]);
      int avgColDataSize =
          parseInt(writeOpts[colIndex++], 1, Integer.MAX_VALUE);
      minColDataSize = avgColDataSize / 2;
      maxColDataSize = avgColDataSize * 3 / 2;

      if (colIndex < writeOpts.length) {
        numWriterThreads = getNumThreads(writeOpts[colIndex++]);
      }

      isMultiPut = cmd.hasOption(OPT_MULTIPUT);

      System.out.println("Multi-puts: " + isMultiPut);
      System.out.println("Columns per key: " + minColsPerKey + ".."
          + maxColsPerKey);
      System.out.println("Data size per column: " + minColDataSize + ".."
          + maxColDataSize);
    }

    if (isUpdate) {
      String[] mutateOpts = splitColonSeparated(OPT_UPDATE, 1, 3);
      int colIndex = 0;
      updatePercent = parseInt(mutateOpts[colIndex++], 0, 100);
      if (colIndex < mutateOpts.length) {
        numUpdaterThreads = getNumThreads(mutateOpts[colIndex++]);
      }
      if (colIndex < mutateOpts.length) {
        ignoreConflicts = parseInt(mutateOpts[colIndex++], 0, 1) == 1;
      }

      isBatchUpdate = cmd.hasOption(OPT_BATCHUPDATE);

      System.out.println("Batch updates: " + isBatchUpdate);
      System.out.println("Percent of keys to update: " + updatePercent);
      System.out.println("Updater threads: " + numUpdaterThreads);
      System.out.println("Ignore nonce conflicts: " + ignoreConflicts);
    }

    if (isRead) {
      String[] readOpts = splitColonSeparated(OPT_READ, 1, 2);
      int colIndex = 0;
      verifyPercent = parseInt(readOpts[colIndex++], 0, 100);
      if (colIndex < readOpts.length) {
        numReaderThreads = getNumThreads(readOpts[colIndex++]);
      }

      if (cmd.hasOption(OPT_MAX_READ_ERRORS)) {
        maxReadErrors = parseInt(cmd.getOptionValue(OPT_MAX_READ_ERRORS),
            0, Integer.MAX_VALUE);
      }

      if (cmd.hasOption(OPT_KEY_WINDOW)) {
        keyWindow = parseInt(cmd.getOptionValue(OPT_KEY_WINDOW),
            0, Integer.MAX_VALUE);
      }

      System.out.println("Percent of keys to verify: " + verifyPercent);
      System.out.println("Reader threads: " + numReaderThreads);
    }

    numTables = 1;
    if (cmd.hasOption(NUM_TABLES)) {
      numTables = parseInt(cmd.getOptionValue(NUM_TABLES), 1, Short.MAX_VALUE);
    }
    regionsPerServer = HBaseTestingUtility.DEFAULT_REGIONS_PER_SERVER;
    if (cmd.hasOption(OPT_REGIONS_PER_SERVER)) {
      regionsPerServer = parseInt(cmd.getOptionValue(OPT_REGIONS_PER_SERVER), 1,
        Integer.MAX_VALUE);
      conf.setInt(HBaseTestingUtility.REGIONS_PER_SERVER_KEY, regionsPerServer);
    }
    System.out.println("Regions per server: " + regionsPerServer);
  }

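  /**
   * Parses the column family options: data block encoding, compression,
   * Bloom filter type, in-memory placement, and the encryption cipher.
   */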
  private void parseColumnFamilyOptions(CommandLine cmd) {
    String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
    dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
        DataBlockEncoding.valueOf(dataBlockEncodingStr);

    String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
    compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
        Compression.Algorithm.valueOf(compressStr);

    String bloomStr = cmd.getOptionValue(OPT_BLOOM);
    bloomType = bloomStr == null ? null :
        BloomType.valueOf(bloomStr);

    inMemoryCF = cmd.hasOption(OPT_INMEMORY);
    if (cmd.hasOption(OPT_ENCRYPTION)) {
      cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
    }
  }

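  /** Creates the pre-split test table and applies the configured column family options. */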
  public void initTestTable() throws IOException {
    HBaseTestingUtility.createPreSplitLoadTestTable(conf, tableName,
        COLUMN_FAMILY, compressAlgo, dataBlockEncodingAlgo);
    applyColumnFamilyOptions(tableName, COLUMN_FAMILIES);
  }

  @Override
  protected int doWork() throws IOException {
    if (numTables > 1) {
      return parallelLoadTables();
    } else {
      return loadTable();
    }
  }

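  /**
   * Runs the load test against a single table: initializes the table unless
   * skipped, sets up the data generator (and, on secure clusters, the test
   * users and their permissions), then starts the configured writer, updater,
   * and reader threads and waits for them to finish.
   *
   * @return EXIT_SUCCESS if no write failures or read errors were observed
   */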
  protected int loadTable() throws IOException {
    if (cmd.hasOption(OPT_ZK_QUORUM)) {
      conf.set(HConstants.ZOOKEEPER_QUORUM, cmd.getOptionValue(OPT_ZK_QUORUM));
    }
    if (cmd.hasOption(OPT_ZK_PARENT_NODE)) {
      conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, cmd.getOptionValue(OPT_ZK_PARENT_NODE));
    }

    if (isInitOnly) {
      LOG.info("Initializing only; no reads or writes");
      initTestTable();
      return 0;
    }

    if (!isSkipInit) {
      initTestTable();
    }
    LoadTestDataGenerator dataGen = null;
    if (cmd.hasOption(OPT_GENERATOR)) {
      String[] clazzAndArgs = cmd.getOptionValue(OPT_GENERATOR).split(COLON);
      dataGen = getLoadGeneratorInstance(clazzAndArgs[0]);
      String[] args;
      if (dataGen instanceof LoadTestDataGeneratorWithACL) {
        LOG.info("Using LoadTestDataGeneratorWithACL");
        if (User.isHBaseSecurityEnabled(conf)) {
          LOG.info("Security is enabled");
          authnFileName = clazzAndArgs[1];
          superUser = clazzAndArgs[2];
          userNames = clazzAndArgs[3];
          args = Arrays.copyOfRange(clazzAndArgs, 2, clazzAndArgs.length);
          Properties authConfig = new Properties();
          authConfig.load(this.getClass().getClassLoader().getResourceAsStream(authnFileName));
          try {
            addAuthInfoToConf(authConfig, conf, superUser, userNames);
          } catch (IOException exp) {
            LOG.error(exp);
            return EXIT_FAILURE;
          }
          userOwner = User.create(loginAndReturnUGI(conf, superUser));
        } else {
          superUser = clazzAndArgs[1];
          userNames = clazzAndArgs[2];
          args = Arrays.copyOfRange(clazzAndArgs, 1, clazzAndArgs.length);
          userOwner = User.createUserForTesting(conf, superUser, new String[0]);
        }
      } else {
        args = clazzAndArgs.length == 1 ? new String[0] : Arrays.copyOfRange(clazzAndArgs, 1,
            clazzAndArgs.length);
      }
      dataGen.initialize(args);
    } else {
      // Default DataGenerator is MultiThreadedAction.DefaultDataGenerator
      dataGen = new MultiThreadedAction.DefaultDataGenerator(minColDataSize, maxColDataSize,
          minColsPerKey, maxColsPerKey, COLUMN_FAMILY);
    }

    if (User.isHBaseSecurityEnabled(conf) && userOwner != null) {
      LOG.info("Granting permissions for user " + userOwner.getShortName());
      AccessControlProtos.Permission.Action[] actions = {
        AccessControlProtos.Permission.Action.ADMIN, AccessControlProtos.Permission.Action.CREATE,
        AccessControlProtos.Permission.Action.READ, AccessControlProtos.Permission.Action.WRITE };
      try {
        AccessControlClient.grant(conf, tableName, userOwner.getShortName(), null, null, actions);
      } catch (Throwable e) {
        LOG.fatal("Error in granting permission for the user " + userOwner.getShortName(), e);
        return EXIT_FAILURE;
      }
    }
    if (userNames != null) {
      // This will be comma separated list of expressions.
      String users[] = userNames.split(",");
      User user = null;
      if (User.isHBaseSecurityEnabled(conf)) {
        for (String userStr : users) {
          user = User.create(loginAndReturnUGI(conf, userStr));
          LOG.info("Granting READ permission for the user " + user.getShortName());
          AccessControlProtos.Permission.Action[] actions =
              { AccessControlProtos.Permission.Action.READ };
          try {
            AccessControlClient.grant(conf, tableName, user.getShortName(), null, null, actions);
          } catch (Throwable e) {
            LOG.fatal("Error in granting READ permission for the user " + user.getShortName(), e);
            return EXIT_FAILURE;
          }
        }
      } else {
        for (String userStr : users) {
          user = User.createUserForTesting(conf, userStr, new String[0]);
        }
      }
    }

    if (isWrite) {
      if (userOwner != null) {
        writerThreads = new MultiThreadedWriterWithACL(dataGen, conf, tableName, userOwner);
      } else {
        writerThreads = new MultiThreadedWriter(dataGen, conf, tableName);
      }
      writerThreads.setMultiPut(isMultiPut);
    }

    if (isUpdate) {
      if (userOwner != null) {
        updaterThreads = new MultiThreadedUpdaterWithACL(dataGen, conf, tableName, updatePercent,
            userOwner, userNames);
      } else {
        updaterThreads = new MultiThreadedUpdater(dataGen, conf, tableName, updatePercent);
      }
      updaterThreads.setBatchUpdate(isBatchUpdate);
      updaterThreads.setIgnoreNonceConflicts(ignoreConflicts);
    }

    if (isRead) {
      if (userOwner != null) {
        readerThreads = new MultiThreadedReaderWithACL(dataGen, conf, tableName, verifyPercent,
            userNames);
      } else {
        readerThreads = new MultiThreadedReader(dataGen, conf, tableName, verifyPercent);
      }
      readerThreads.setMaxErrors(maxReadErrors);
      readerThreads.setKeyWindow(keyWindow);
    }

    if (isUpdate && isWrite) {
      LOG.info("Concurrent write/update workload: making updaters aware of the " +
        "write point");
      updaterThreads.linkToWriter(writerThreads);
    }

    if (isRead && (isUpdate || isWrite)) {
      LOG.info("Concurrent write/read workload: making readers aware of the " +
        "write point");
      readerThreads.linkToWriter(isUpdate ? updaterThreads : writerThreads);
    }

    if (isWrite) {
      System.out.println("Starting to write data...");
      writerThreads.start(startKey, endKey, numWriterThreads);
    }

    if (isUpdate) {
      LOG.info("Starting to mutate data...");
      System.out.println("Starting to mutate data...");
      // TODO: append and increment operations are not yet tested with tags;
      // will update this after that work is done
      updaterThreads.start(startKey, endKey, numUpdaterThreads);
    }

    if (isRead) {
      System.out.println("Starting to read data...");
      readerThreads.start(startKey, endKey, numReaderThreads);
    }

    if (isWrite) {
      writerThreads.waitForFinish();
    }

    if (isUpdate) {
      updaterThreads.waitForFinish();
    }

    if (isRead) {
      readerThreads.waitForFinish();
    }

    boolean success = true;
    if (isWrite) {
      success = success && writerThreads.getNumWriteFailures() == 0;
    }
    if (isUpdate) {
      success = success && updaterThreads.getNumWriteFailures() == 0;
    }
    if (isRead) {
      success = success && readerThreads.getNumReadErrors() == 0
          && readerThreads.getNumReadFailures() == 0;
    }
    return success ? EXIT_SUCCESS : EXIT_FAILURE;
  }

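  /**
   * Reflectively instantiates the user-supplied data generator class via its
   * (minColDataSize, maxColDataSize, minColsPerKey, maxColsPerKey,
   * columnFamilies) constructor.
   */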
  private LoadTestDataGenerator getLoadGeneratorInstance(String clazzName) throws IOException {
    try {
      Class<?> clazz = Class.forName(clazzName);
      Constructor<?> constructor = clazz.getConstructor(int.class, int.class, int.class, int.class,
          byte[][].class);
      return (LoadTestDataGenerator) constructor.newInstance(minColDataSize, maxColDataSize,
          minColsPerKey, maxColsPerKey, COLUMN_FAMILIES);
    } catch (Exception e) {
      throw new IOException(e);
    }
  }

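  /**
   * Generates {@code length} bytes of highly compressible data: random letters
   * ('A'-'Z') written in runs of eight identical bytes, with any remainder
   * filled by a single letter.
   */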
  public static byte[] generateData(final Random r, int length) {
    byte[] b = new byte[length];
    int i = 0;

    for (i = 0; i < (length - 8); i += 8) {
      b[i] = (byte) (65 + r.nextInt(26));
      b[i + 1] = b[i];
      b[i + 2] = b[i];
      b[i + 3] = b[i];
      b[i + 4] = b[i];
      b[i + 5] = b[i];
      b[i + 6] = b[i];
      b[i + 7] = b[i];
    }

    byte a = (byte) (65 + r.nextInt(26));
    for (; i < length; i++) {
      b[i] = a;
    }
    return b;
  }
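
  /**
   * Command-line entry point. For example (the option values below are
   * illustrative only, not defaults), a concurrent write/read run over a
   * million keys could be launched with the standard hbase script as:
   *
   * <pre>
   * hbase org.apache.hadoop.hbase.util.LoadTestTool -tn cluster_test \
   *     -write 5:1024:10 -read 100:20 -num_keys 1000000 -zk zk1,zk2,zk3
   * </pre>
   */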
  public static void main(String[] args) {
    new LoadTestTool().doStaticMain(args);
  }

  /**
   * When NUM_TABLES is specified, starts multiple worker threads, each of
   * which runs its own LoadTestTool instance to load one table. Each table
   * name has the form &lt;tn&gt;_&lt;index&gt;; for example, with
   * "-tn test -num_tables 2" the table names will be "test_1" and "test_2".
   *
   * @throws IOException if a worker thread is interrupted or fails
   */
  private int parallelLoadTables()
      throws IOException {
    // create new command args
    String tableName = cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME);
    String[] newArgs = null;
    if (!cmd.hasOption(LoadTestTool.OPT_TABLE_NAME)) {
      newArgs = new String[cmdLineArgs.length + 2];
      newArgs[0] = "-" + LoadTestTool.OPT_TABLE_NAME;
      newArgs[1] = LoadTestTool.DEFAULT_TABLE_NAME;
      for (int i = 0; i < cmdLineArgs.length; i++) {
        newArgs[i + 2] = cmdLineArgs[i];
      }
    } else {
      newArgs = cmdLineArgs;
    }

    int tableNameValueIndex = -1;
    for (int j = 0; j < newArgs.length; j++) {
      if (newArgs[j].endsWith(OPT_TABLE_NAME)) {
        tableNameValueIndex = j + 1;
      } else if (newArgs[j].endsWith(NUM_TABLES)) {
        // change NUM_TABLES to 1 so that each worker loads one table
        newArgs[j + 1] = "1";
      }
    }

    // starting to load multiple tables
    List<WorkerThread> workers = new ArrayList<WorkerThread>();
    for (int i = 0; i < numTables; i++) {
      String[] workerArgs = newArgs.clone();
      workerArgs[tableNameValueIndex] = tableName + "_" + (i + 1);
      WorkerThread worker = new WorkerThread(i, workerArgs);
      workers.add(worker);
      LOG.info(worker + " starting");
      worker.start();
    }

    // wait for all workers to finish
    LOG.info("Waiting for worker threads to finish");
    for (WorkerThread t : workers) {
      try {
        t.join();
      } catch (InterruptedException ie) {
        IOException iie = new InterruptedIOException();
        iie.initCause(ie);
        throw iie;
      }
      checkForErrors();
    }

    return EXIT_SUCCESS;
  }

  // If an exception is thrown by one of the worker threads, it is stored here.
  protected AtomicReference<Throwable> thrown = new AtomicReference<Throwable>();

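  /** Records the first error thrown by a worker thread; later errors are dropped. */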
  private void workerThreadError(Throwable t) {
    thrown.compareAndSet(null, t);
  }

  /**
   * Check for errors in the worker threads. If any is found, rethrow it.
   */
  private void checkForErrors() throws IOException {
    Throwable thrown = this.thrown.get();
    if (thrown == null) return;
    if (thrown instanceof IOException) {
      throw (IOException) thrown;
    } else {
      throw new RuntimeException(thrown);
    }
  }

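  /** Runs a single LoadTestTool instance against one table and records any failure. */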
  class WorkerThread extends Thread {
    private String[] workerArgs;

    WorkerThread(int i, String[] args) {
      super("WorkerThread-" + i);
      workerArgs = args;
    }

    @Override
    public void run() {
      try {
        int ret = ToolRunner.run(HBaseConfiguration.create(), new LoadTestTool(), workerArgs);
        if (ret != 0) {
          throw new RuntimeException("LoadTestTool exited with a non-zero return code.");
        }
      } catch (Exception ex) {
        LOG.error("Error in worker thread", ex);
        workerThreadError(ex);
      }
    }
  }

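  /**
   * Copies the keytab and principal properties for the owner and every listed
   * user from the given Properties into the configuration, failing if any of
   * them are missing.
   */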
  private void addAuthInfoToConf(Properties authConfig, Configuration conf, String owner,
      String userList) throws IOException {
    // Arrays.asList returns a fixed-size list that cannot grow, so copy it
    // into an ArrayList before appending the owner.
    List<String> users = new ArrayList<String>(Arrays.asList(userList.split(",")));
    users.add(owner);
    for (String user : users) {
      String keyTabFileConfKey = "hbase." + user + ".keytab.file";
      String principalConfKey = "hbase." + user + ".kerberos.principal";
      if (!authConfig.containsKey(keyTabFileConfKey) || !authConfig.containsKey(principalConfKey)) {
        throw new IOException("Authentication configs missing for user: " + user);
      }
    }
    for (String key : authConfig.stringPropertyNames()) {
      conf.set(key, authConfig.getProperty(key));
    }
    LOG.debug("Added authentication properties to config successfully.");
  }

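  /**
   * Logs the given user in from the keytab file and principal configured under
   * "hbase.&lt;username&gt;.keytab.file" and "hbase.&lt;username&gt;.kerberos.principal".
   */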
  public static UserGroupInformation loginAndReturnUGI(Configuration conf, String username)
      throws IOException {
    String hostname = InetAddress.getLocalHost().getHostName();
    String keyTabFileConfKey = "hbase." + username + ".keytab.file";
    String keyTabFileLocation = conf.get(keyTabFileConfKey);
    String principalConfKey = "hbase." + username + ".kerberos.principal";
    String principal = SecurityUtil.getServerPrincipal(conf.get(principalConfKey), hostname);
    if (keyTabFileLocation == null || principal == null) {
      LOG.warn("Principal or key tab file null for : " + principalConfKey + ", "
          + keyTabFileConfKey);
    }
    UserGroupInformation ugi =
        UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keyTabFileLocation);
    return ugi;
  }
}