/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.apache.hadoop.hbase.util.test.LoadTestDataGenerator.INCREMENT;
import static org.apache.hadoop.hbase.util.test.LoadTestDataGenerator.MUTATE_INFO;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;

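/**
 * Multi-threaded writer for load testing: inserts the key range
 * [startKey, endKey) into the given table using a configurable number of
 * writer threads, with row contents produced by the supplied
 * {@link LoadTestDataGenerator}. A rough usage sketch (assumes an existing
 * data generator, configuration, and table):
 *
 * <pre>
 *   MultiThreadedWriter writer = new MultiThreadedWriter(dataGen, conf, tableName);
 *   writer.setMultiPut(true);       // one Put per row instead of per column
 *   writer.start(0, 1000000, 20);   // write keys [0, 1000000) with 20 threads
 *   writer.waitForFinish();         // block until done, then report failures
 * </pre>
 */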
public class MultiThreadedWriter extends MultiThreadedWriterBase {
  private static final Log LOG = LogFactory.getLog(MultiThreadedWriter.class);

  private Set<HBaseWriterThread> writers = new HashSet<HBaseWriterThread>();

  /** Whether to send all columns of a row in one Put (see {@link #setMultiPut}). */
  private boolean isMultiPut = false;

  public MultiThreadedWriter(LoadTestDataGenerator dataGen, Configuration conf,
      TableName tableName) {
    super(dataGen, conf, tableName, "W");
  }
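  /**
   * Controls batching: when set to true, all columns of a row are sent in a
   * single multi-column Put; otherwise each column is written as its own Put.
   */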
  public void setMultiPut(boolean isMultiPut) {
    this.isMultiPut = isMultiPut;
  }

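  /**
   * Starts {@code numThreads} writer threads that together insert the key
   * range [startKey, endKey).
   */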
  @Override
  public void start(long startKey, long endKey, int numThreads)
      throws IOException {
    super.start(startKey, endKey, numThreads);

    if (verbose) {
      LOG.debug("Inserting keys [" + startKey + ", " + endKey + ")");
    }

    for (int i = 0; i < numThreads; ++i) {
      HBaseWriterThread writer = new HBaseWriterThread(i);
      writers.add(writer);
    }

    startThreads(writers);
  }

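  /**
   * A single writer thread. Each thread owns its own {@link HTable} instance,
   * since HTable is not thread-safe, and claims keys to write from the shared
   * {@code nextKeyToWrite} counter.
   */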
  private class HBaseWriterThread extends Thread {
    private final HTable table;

    public HBaseWriterThread(int writerId) throws IOException {
      setName(getClass().getSimpleName() + "_" + writerId);
      table = new HTable(conf, tableName);
    }

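    /**
     * Claims keys from the shared counter and writes each row, either as one
     * Put per column or one Put per row depending on {@code isMultiPut}.
     */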
    @Override
    public void run() {
      try {
        long rowKeyBase;
        byte[][] columnFamilies = dataGenerator.getColumnFamilies();
        // Atomically claim the next key so each key is written by exactly one thread.
        while ((rowKeyBase = nextKeyToWrite.getAndIncrement()) < endKey) {
          byte[] rowKey = dataGenerator.getDeterministicUniqueKey(rowKeyBase);
          Put put = new Put(rowKey);
          numKeys.addAndGet(1);
          int columnCount = 0;
          for (byte[] cf : columnFamilies) {
            byte[][] columns = dataGenerator.generateColumnsForCf(rowKey, cf);
            for (byte[] column : columns) {
              byte[] value = dataGenerator.generateValue(rowKey, cf, column);
              put.add(cf, column, value);
              ++columnCount;
              if (!isMultiPut) {
                // Single-put mode: flush each column as its own Put.
                insert(table, put, rowKeyBase);
                numCols.addAndGet(1);
                put = new Put(rowKey);
              }
            }
            // Marker columns consumed by the companion updater/reader tools.
            long rowKeyHash = Arrays.hashCode(rowKey);
            put.add(cf, MUTATE_INFO, HConstants.EMPTY_BYTE_ARRAY);
            put.add(cf, INCREMENT, Bytes.toBytes(rowKeyHash));
            if (!isMultiPut) {
              insert(table, put, rowKeyBase);
              numCols.addAndGet(1);
              put = new Put(rowKey);
            }
          }
          if (isMultiPut) {
            // Multi-put mode: send all columns of the row in a single Put.
            if (verbose) {
              LOG.debug("Preparing put for key = [" + Bytes.toString(rowKey) + "], "
                  + columnCount + " columns");
            }
            insert(table, put, rowKeyBase);
            numCols.addAndGet(columnCount);
          }
          if (trackWroteKeys) {
            wroteKeys.add(rowKeyBase);
          }
        }
      } finally {
        try {
          table.close();
        } catch (IOException e) {
          LOG.error("Error closing table", e);
        }
        numThreadsWorking.decrementAndGet();
      }
    }
  }

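  /**
   * Blocks until all writer threads have finished, then reports any keys that
   * could not be written.
   */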
  @Override
  public void waitForFinish() {
    super.waitForFinish();
    System.out.println("Failed to write " + failedKeySet.size() + " keys");
    for (Long key : failedKeySet) {
      System.out.println("Failed to write key: " + key);
    }
  }
}