/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Deque;
import java.util.List;
import java.util.NavigableMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.ServerCallable;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;

import com.google.common.collect.Multimap;

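/**
 * Test cases for the atomic load error handling of the bulk load functionality
 * (LoadIncrementalHFiles) in the presence of splits and injected failures.
 */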
@Category(LargeTests.class)
public class TestLoadIncrementalHFilesSplitRecovery {
  final static Log LOG = LogFactory.getLog(TestLoadIncrementalHFilesSplitRecovery.class);

  static HBaseTestingUtility util;

  static boolean useSecure = false;

  final static int NUM_CFS = 10;
  final static byte[] QUAL = Bytes.toBytes("qual");
  final static int ROWCOUNT = 100;

  private final static byte[][] families = new byte[NUM_CFS][];
  static {
    for (int i = 0; i < NUM_CFS; i++) {
      families[i] = Bytes.toBytes(family(i));
    }
  }

  static byte[] rowkey(int i) {
    return Bytes.toBytes(String.format("row_%08d", i));
  }

  static String family(int i) {
    return String.format("family_%04d", i);
  }

  static byte[] value(int i) {
    return Bytes.toBytes(String.format("%010d", i));
  }

  public static void buildHFiles(FileSystem fs, Path dir, int value)
      throws IOException {
    byte[] val = value(value);
    for (int i = 0; i < NUM_CFS; i++) {
      Path testIn = new Path(dir, family(i));

      TestHRegionServerBulkLoad.createHFile(fs, new Path(testIn, "hfile_" + i),
          Bytes.toBytes(family(i)), QUAL, val, ROWCOUNT);
    }
  }

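  /**
   * Creates a table with the given name and the specified number of column
   * families if the table does not already exist.
   */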
  private void setupTable(String table, int cfs) throws IOException {
    try {
      LOG.info("Creating table " + table);
      HTableDescriptor htd = new HTableDescriptor(table);
      for (int i = 0; i < cfs; i++) {
        htd.addFamily(new HColumnDescriptor(family(i)));
      }

      util.getHBaseAdmin().createTable(htd);
    } catch (TableExistsException tee) {
      LOG.info("Table " + table + " already exists");
    }
  }

  private Path buildBulkFiles(String table, int value) throws Exception {
    Path dir = util.getDataTestDir(table);
    Path bulk1 = new Path(dir, table + value);
    FileSystem fs = util.getTestFileSystem();
    buildHFiles(fs, bulk1, value);
    return bulk1;
  }

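  /**
   * Populates the table with a bulk load of HFiles containing the given
   * known value.
   */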
  private void populateTable(String table, int value) throws Exception {
    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration(), useSecure);
    Path bulk1 = buildBulkFiles(table, value);
    HTable t = new HTable(util.getConfiguration(), Bytes.toBytes(table));
    lih.doBulkLoad(bulk1, t);
  }

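  /**
   * Splits the known table in half and waits until both daughter regions are
   * online. (The split point is hard coded for this test suite.)
   */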
  private void forceSplit(String table) {
    try {
      // find the region server hosting the test table and split its region
      HRegionServer hrs = util.getRSForFirstRegionInTable(Bytes
          .toBytes(table));

      for (HRegionInfo hri : hrs.getOnlineRegions()) {
        if (Bytes.equals(hri.getTableName(), Bytes.toBytes(table))) {
          // request a split at the midpoint of the loaded rows
          hrs.splitRegion(hri, rowkey(ROWCOUNT / 2));
        }
      }

      // wait until the split completes and both daughter regions are online
      int regions;
      do {
        regions = 0;
        for (HRegionInfo hri : hrs.getOnlineRegions()) {
          if (Bytes.equals(hri.getTableName(), Bytes.toBytes(table))) {
            regions++;
          }
        }
        if (regions != 2) {
          LOG.info("Taking some time to complete split...");
          Thread.sleep(250);
        }
      } while (regions != 2);
    } catch (IOException e) {
      e.printStackTrace();
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
  }

  @BeforeClass
  public static void setupCluster() throws Exception {
    util = new HBaseTestingUtility();
    util.startMiniCluster(1);
  }

  @AfterClass
  public static void teardownCluster() throws Exception {
    util.shutdownMiniCluster();
  }

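  /**
   * Checks that the table exists, that every cell holds the expected value,
   * and that the table contains the expected number of rows.
   */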
  void assertExpectedTable(String table, int count, int value) {
    try {
      assertEquals(1, util.getHBaseAdmin().listTables(table).length);

      HTable t = new HTable(util.getConfiguration(), table);
      Scan s = new Scan();
      ResultScanner sr = t.getScanner(s);
      int i = 0;
      for (Result r : sr) {
        i++;
        for (NavigableMap<byte[], byte[]> nm : r.getNoVersionMap().values()) {
          for (byte[] val : nm.values()) {
            assertTrue(Bytes.equals(val, value(value)));
          }
        }
      }
      assertEquals(count, i);
    } catch (IOException e) {
      fail("Failed due to exception");
    }
  }

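  /**
   * Tests that an exception thrown from the region server side surfaces as an
   * IOException on the LoadIncrementalHFiles client.
   */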
  @Test(expected = IOException.class)
  public void testBulkLoadPhaseFailure() throws Exception {
    String table = "bulkLoadPhaseFailure";
    setupTable(table, 10);

    final AtomicInteger attemptedCalls = new AtomicInteger();
    final AtomicInteger failedCalls = new AtomicInteger();
    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(
        util.getConfiguration(), useSecure) {

      protected List<LoadQueueItem> tryAtomicRegionLoad(final HConnection conn,
          byte[] tableName, final byte[] first, Collection<LoadQueueItem> lqis)
          throws IOException {
        int i = attemptedCalls.incrementAndGet();
        if (i == 1) {
          // on the first attempt, swap in a mocked connection that injects a
          // server-side bulk load failure
          HConnection errConn = null;
          try {
            errConn = getMockedConnection(util.getConfiguration());
          } catch (Exception e) {
            LOG.fatal("mocking cruft, should never happen", e);
            throw new RuntimeException("mocking cruft, should never happen");
          }
          failedCalls.incrementAndGet();
          return super.tryAtomicRegionLoad(errConn, tableName, first, lqis);
        }

        return super.tryAtomicRegionLoad(conn, tableName, first, lqis);
      }
    };

    // the injected failure should propagate out of doBulkLoad as an IOException
    Path dir = buildBulkFiles(table, 1);
    HTable t = new HTable(util.getConfiguration(), Bytes.toBytes(table));
    lih.doBulkLoad(dir, t);

    fail("doBulkLoad should have thrown an exception");
  }

  private HConnection getMockedConnection(final Configuration conf)
      throws IOException {
    HConnection c = Mockito.mock(HConnection.class);
    Mockito.when(c.getConfiguration()).thenReturn(conf);
    Mockito.doNothing().when(c).close();

    // every region lookup resolves to the same fixed location
    final HRegionLocation loc = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO,
        "example.org", 1234);
    Mockito.when(c.getRegionLocation((byte[]) Mockito.any(),
        (byte[]) Mockito.any(), Mockito.anyBoolean())).
        thenReturn(loc);
    Mockito.when(c.locateRegion((byte[]) Mockito.any(), (byte[]) Mockito.any())).
        thenReturn(loc);

    // and the region server at that location rejects every bulk load
    HRegionInterface hri = Mockito.mock(HRegionInterface.class);
    Mockito.when(hri.bulkLoadHFiles(Mockito.anyList(), (byte[]) Mockito.any())).
        thenThrow(new IOException("injecting bulk load error"));
    Mockito.when(c.getHRegionConnection(Mockito.anyString(), Mockito.anyInt())).
        thenReturn(hri);
    return c;
  }

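  /**
   * Exercises the path where the table is split after the HFiles have been
   * grouped for the current region boundaries but before the atomic region
   * loads are made. Pre-splitting cannot hit this path, so the split is
   * injected just before the bulk load phase.
   */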
  @Test
  public void testSplitWhileBulkLoadPhase() throws Exception {
    final String table = "splitWhileBulkloadPhase";
    setupTable(table, 10);
    populateTable(table, 1);
    assertExpectedTable(table, ROWCOUNT, 1);

    // Now cause trouble: force a split after the HFiles have been grouped but
    // before the atomic region loads are attempted.
    final AtomicInteger attemptedCalls = new AtomicInteger();
    LoadIncrementalHFiles lih2 = new LoadIncrementalHFiles(
        util.getConfiguration(), useSecure) {

      protected void bulkLoadPhase(final HTable htable, final HConnection conn,
          ExecutorService pool, Deque<LoadQueueItem> queue,
          final Multimap<ByteBuffer, LoadQueueItem> regionGroups) throws IOException {
        int i = attemptedCalls.incrementAndGet();
        if (i == 1) {
          // split the table on the first bulk load attempt only
          forceSplit(table);
        }

        super.bulkLoadPhase(htable, conn, pool, queue, regionGroups);
      }
    };

    // create new HFiles for the same rows and bulk load them
    HTable t = new HTable(util.getConfiguration(), Bytes.toBytes(table));
    Path bulk = buildBulkFiles(table, 2);
    lih2.doBulkLoad(bulk, t);

    // Check that the data was loaded. The bulk load phase should have run
    // three times: the initial attempt that hits the forced split, plus the
    // retries needed to load the re-queued, split HFiles into the daughter
    // regions.
    assertEquals(3, attemptedCalls.get());
    assertExpectedTable(table, ROWCOUNT, 2);
  }

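  /**
   * Splits the table first and then bulk loads. The bulk import HFiles should
   * be split to match the new region boundaries before they are atomically
   * imported.
   */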
  @Test
  public void testGroupOrSplitPresplit() throws Exception {
    final String table = "groupOrSplitPresplit";
    setupTable(table, 10);
    populateTable(table, 1);
    assertExpectedTable(table, ROWCOUNT, 1);
    forceSplit(table);

    // count the load queue items produced by splitting HFiles to fit the
    // new region boundaries
    final AtomicInteger countedLqis = new AtomicInteger();
    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(
        util.getConfiguration(), useSecure) {
      protected List<LoadQueueItem> groupOrSplit(
          Multimap<ByteBuffer, LoadQueueItem> regionGroups,
          final LoadQueueItem item, final HTable htable,
          final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
        List<LoadQueueItem> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
        if (lqis != null) {
          countedLqis.addAndGet(lqis.size());
        }
        return lqis;
      }
    };

    // create new HFiles for the same rows and bulk load them
    Path bulk = buildBulkFiles(table, 2);
    HTable ht = new HTable(util.getConfiguration(), Bytes.toBytes(table));
    lih.doBulkLoad(bulk, ht);

    assertExpectedTable(table, ROWCOUNT, 2);
    // each of the NUM_CFS HFiles spans the split point, so each one should be
    // split into two load queue items
    assertEquals(20, countedLqis.get());
  }

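  /**
   * Simulates a remote exception during the group-or-split phase, which should
   * cause LoadIncrementalHFiles to exit with an exception.
   */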
  @Test(expected = IOException.class)
  public void testGroupOrSplitFailure() throws Exception {
    String table = "groupOrSplitFailure";
    setupTable(table, 10);

    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(
        util.getConfiguration(), useSecure) {
      int i = 0;

      protected List<LoadQueueItem> groupOrSplit(
          Multimap<ByteBuffer, LoadQueueItem> regionGroups,
          final LoadQueueItem item, final HTable table,
          final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
        i++;

        // fail partway through the group-or-split phase
        if (i == 5) {
          throw new IOException("failure");
        }
        return super.groupOrSplit(regionGroups, item, table, startEndKeys);
      }
    };

    // the injected failure should propagate out of doBulkLoad as an IOException
    Path dir = buildBulkFiles(table, 1);
    HTable t = new HTable(util.getConfiguration(), Bytes.toBytes(table));
    lih.doBulkLoad(dir, t);

    fail("doBulkLoad should have thrown an exception");
  }

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
      new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}