/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Random;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
import org.junit.experimental.categories.Category;

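/**
 * Test that puts issued from many threads in parallel against a single
 * HRegion all succeed and remain readable.
 */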
@Category(MediumTests.class)
public class TestParallelPut extends HBaseTestCase {
  static final Log LOG = LogFactory.getLog(TestParallelPut.class);

  private static HRegion region = null;
  private static HBaseTestingUtility hbtu = new HBaseTestingUtility();
  private static final String DIR = hbtu.getDataTestDir() + "/TestParallelPut/";

  // Test rows, families, qualifiers and values.
  static final byte[] tableName = Bytes.toBytes("testtable");
  static final byte[] qual1 = Bytes.toBytes("qual1");
  static final byte[] qual2 = Bytes.toBytes("qual2");
  static final byte[] qual3 = Bytes.toBytes("qual3");
  static final byte[] value1 = Bytes.toBytes("value1");
  static final byte[] value2 = Bytes.toBytes("value2");
  static final byte[] row = Bytes.toBytes("rowA");
  static final byte[] row2 = Bytes.toBytes("rowB");

  @Override
  protected void setUp() throws Exception {
    super.setUp();
  }

  @Override
  protected void tearDown() throws Exception {
    super.tearDown();
    EnvironmentEdgeManagerTestHelper.reset();
  }

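  /**
   * Test a single put command.
   */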
  public void testPut() throws IOException {
    LOG.info("Starting testPut");
    initHRegion(tableName, getName(), fam1);

    long value = 1L;

    Put put = new Put(row);
    put.add(fam1, qual1, Bytes.toBytes(value));
    region.put(put);

    assertGet(row, fam1, qual1, Bytes.toBytes(value));
  }

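  /**
   * Test multi-threaded puts: a hundred threads each issue a thousand
   * puts against the same region, and every put is read back.
   */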
  public void testParallelPuts() throws IOException {

    LOG.info("Starting testParallelPuts");
    initHRegion(tableName, getName(), fam1);
    int numOps = 1000; // these many operations per thread

    // create 100 threads for parallel puts
    int numThreads = 100;
    Putter[] all = new Putter[numThreads];

    // create all threads
    for (int i = 0; i < numThreads; i++) {
      all[i] = new Putter(region, i, numOps);
    }

    // run all threads
    for (int i = 0; i < numThreads; i++) {
      all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
      try {
        all[i].join();
      } catch (InterruptedException e) {
        LOG.warn("testParallelPuts encountered InterruptedException." +
            " Ignoring....", e);
      }
    }
    LOG.info("testParallelPuts successfully verified " +
        (numOps * numThreads) + " put operations.");
  }

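  /**
   * Assert that the value stored under the given row/family/qualifier
   * matches the expected value.
   */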
  private static void assertGet(byte[] row,
      byte[] family,
      byte[] qualifier,
      byte[] value) throws IOException {
    // run a get and see if the value matches
    Get get = new Get(row);
    get.addColumn(family, qualifier);
    Result result = region.get(get, null);
    assertEquals(1, result.size());

    KeyValue kv = result.raw()[0];
    byte[] r = kv.getValue();
    assertTrue(Bytes.compareTo(r, value) == 0);
  }

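  /**
   * Create a local test region for the given column families, deleting
   * any data left behind by a previous run.
   */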
  private void initHRegion(byte[] tableName, String callingMethod,
      byte[]... families) throws IOException {
    initHRegion(tableName, callingMethod, HBaseConfiguration.create(), families);
  }

  private void initHRegion(byte[] tableName, String callingMethod,
      Configuration conf, byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
    Path path = new Path(DIR + callingMethod);
    if (fs.exists(path)) {
      if (!fs.delete(path, true)) {
        throw new IOException("Failed delete of " + path);
      }
    }
    region = HRegion.createHRegion(info, path, conf, htd);
  }

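  /**
   * A thread that issues puts with random values against its own row
   * and reads each one back.
   */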
  public static class Putter extends Thread {

    private final HRegion region;
    private final int threadNumber;
    private final int numOps;
    private final Random rand = new Random();
    byte[] rowkey = null;

    public Putter(HRegion region, int threadNumber, int numOps) {
      this.region = region;
      this.threadNumber = threadNumber;
      this.numOps = numOps;
      this.rowkey = Bytes.toBytes((long)threadNumber); // unique rowid per thread
      setDaemon(true);
    }

    @Override
    public void run() {
      byte[] value = new byte[100];
      Put[] in = new Put[1];

      // iterate for the specified number of operations
      for (int i = 0; i < numOps; i++) {
        // generate random bytes
        rand.nextBytes(value);

        // put the random bytes and verify that we can read them back
        Put put = new Put(rowkey);
        put.add(fam1, qual1, value);
        in[0] = put;
        try {
          OperationStatus[] ret = region.put(in);
          assertEquals(1, ret.length);
          assertEquals(OperationStatusCode.SUCCESS, ret[0].getOperationStatusCode());
          assertGet(rowkey, fam1, qual1, value);
        } catch (IOException e) {
          fail("Thread id " + threadNumber + " operation " + i + " failed.");
        }
      }
    }
  }

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
      new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}