/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.*;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;

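/**
 * Tests for {@link HLogSplitter}: splitting write-ahead logs into per-region
 * recovered-edits files, including corruption and failure-injection scenarios.
 */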
public class TestHLogSplit {

  private final static Log LOG = LogFactory.getLog(TestHLogSplit.class);

  private Configuration conf;
  private FileSystem fs;

  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private static final Path hbaseDir = new Path("/hbase");
  private static final Path hlogDir = new Path(hbaseDir, "hlog");
  private static final Path oldLogDir = new Path(hbaseDir, "hlog.old");
  private static final Path corruptDir = new Path(hbaseDir, ".corrupt");

  private static final int NUM_WRITERS = 10;
  private static final int ENTRIES = 10;

  private HLog.Writer[] writer = new HLog.Writer[NUM_WRITERS];
  private long seq = 0;
  private static final byte[] TABLE_NAME = "t1".getBytes();
  private static final byte[] FAMILY = "f1".getBytes();
  private static final byte[] QUALIFIER = "q1".getBytes();
  private static final byte[] VALUE = "v1".getBytes();
  private static final String HLOG_FILE_PREFIX = "hlog.dat.";
  private static List<String> regions;
  private static final String HBASE_SKIP_ERRORS = "hbase.hlog.split.skip.errors";
  private static final Path tabledir =
      new Path(hbaseDir, Bytes.toString(TABLE_NAME));

  static enum Corruptions {
    INSERT_GARBAGE_ON_FIRST_LINE,
    INSERT_GARBAGE_IN_THE_MIDDLE,
    APPEND_GARBAGE,
    TRUNCATE,
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.flushlogentries", 1);
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
    TEST_UTIL.getConfiguration().setStrings("hbase.rootdir", hbaseDir.toString());
    TEST_UTIL.getConfiguration().setClass("hbase.regionserver.hlog.writer.impl",
        InstrumentedSequenceFileLogWriter.class, HLog.Writer.class);

    TEST_UTIL.startMiniDFSCluster(2);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniDFSCluster();
  }

  @Before
  public void setUp() throws Exception {
    flushToConsole("Cleaning up cluster for new test\n"
        + "--------------------------");
    conf = TEST_UTIL.getConfiguration();
    fs = TEST_UTIL.getDFSCluster().getFileSystem();
    FileStatus[] entries = fs.listStatus(new Path("/"));
    flushToConsole("Num entries in /:" + entries.length);
    for (FileStatus dir : entries) {
      assertTrue("Deleting " + dir.getPath(),
          fs.delete(dir.getPath(), true));
    }
    seq = 0;
    regions = new ArrayList<String>();
    Collections.addAll(regions, "bbb", "ccc");
    InstrumentedSequenceFileLogWriter.activateFailure = false;

    TEST_UTIL.setNameNodeNameSystemLeasePeriod(100, 50000);
  }

  @After
  public void tearDown() throws Exception {
  }

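  /**
   * Verifies that getRegionSplitEditsPath() places recovered edits for an
   * entry belonging to the META region under the META region's own directory.
   */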
  @Test
  public void testRecoveredEditsPathForMeta() throws IOException {
    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    byte[] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
    Path tdir = new Path(hbaseDir, Bytes.toString(HConstants.META_TABLE_NAME));
    Path regiondir = new Path(tdir,
        HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
    fs.mkdirs(regiondir);
    long now = System.currentTimeMillis();
    HLog.Entry entry =
        new HLog.Entry(new HLogKey(encoded, HConstants.META_TABLE_NAME, 1, now),
            new WALEdit());
    Path p = HLogSplitter.getRegionSplitEditsPath(fs, entry, hbaseDir);
    String parentOfParent = p.getParent().getParent().getName();
    assertEquals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName(), parentOfParent);
  }

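  /**
   * A "zombie" region server creates a brand-new hlog after the split has
   * already scanned the log directory; the splitter should notice the orphan
   * file and fail with {@link OrphanHLogAfterSplitException}.
   */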
  @Test(expected = OrphanHLogAfterSplitException.class)
  public void testSplitFailsIfNewHLogGetsCreatedAfterSplitStarted()
      throws IOException {
    AtomicBoolean stop = new AtomicBoolean(false);

    FileStatus[] stats = fs.listStatus(new Path("/hbase/t1"));
    assertTrue("Previous test should clean up table dir",
        stats == null || stats.length == 0);

    generateHLogs(-1);

    try {
      (new ZombieNewLogWriterRegionServer(stop)).start();
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, hlogDir, oldLogDir, fs);
      logSplitter.splitLog();
    } finally {
      stop.set(true);
    }
  }

  @Test
  public void testSplitPreservesEdits() throws IOException {
    final String REGION = "region__1";
    regions.clear();
    regions.add(REGION);

    generateHLogs(1, 10, -1);
    // Re-initialize the FileSystem so the splitter gets a fresh DFS client
    // rather than one shared with the log writers above.
    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    Path originalLog = (fs.listStatus(oldLogDir))[0].getPath();
    Path splitLog = getLogForRegion(hbaseDir, TABLE_NAME, REGION);

    assertTrue("edits differ after split", logsAreEqual(originalLog, splitLog));
  }

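  /**
   * Properly closed, zero-length log files mixed in with real logs should be
   * skipped while every edit in the remaining files is still split.
   */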
  @Test
  public void testEmptyLogFiles() throws IOException {
    injectEmptyFile(".empty", true);
    generateHLogs(Integer.MAX_VALUE);
    injectEmptyFile("empty", true);

    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

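  /**
   * Same as {@link #testEmptyLogFiles()} but the injected zero-length files
   * are left open rather than closed, as after a writer crash.
   */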
  @Test
  public void testEmptyOpenLogFiles() throws IOException {
    injectEmptyFile(".empty", false);
    generateHLogs(Integer.MAX_VALUE);
    injectEmptyFile("empty", false);

    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testOpenZeroLengthReportedFileButWithDataGetsSplit() throws IOException {
    // Writer number 5 is left open; an open file may be reported as
    // zero-length by the namenode even though it contains data.
    generateHLogs(5);

    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testTrailingGarbageCorruptionFileSkipErrorsPasses() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, true);
    generateHLogs(Integer.MAX_VALUE);
    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
        Corruptions.APPEND_GARBAGE, true, fs);
    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();
    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testFirstLineCorruptionLogFileSkipErrorsPasses() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, true);
    generateHLogs(Integer.MAX_VALUE);
    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
        Corruptions.INSERT_GARBAGE_ON_FIRST_LINE, true, fs);
    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();
    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      // The corrupted log is dropped entirely, so one writer's worth of
      // entries is missing.
      assertEquals((NUM_WRITERS - 1) * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

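  /**
   * With skip-errors on, garbage inserted into the middle of a log should
   * still let the splitter recover at least everything up to the corrupted
   * area.
   */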
  @Test
  public void testMiddleGarbageCorruptionSkipErrorsReadsHalfOfFile() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, true);
    generateHLogs(Integer.MAX_VALUE);
    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
        Corruptions.INSERT_GARBAGE_IN_THE_MIDDLE, false, fs);
    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      // Everything from the intact files, plus at least the entries of the
      // corrupted file that precede the garbage, should have been recovered.
      int goodEntries = (NUM_WRITERS - 1) * ENTRIES;
      int firstHalfEntries = (int) Math.ceil(ENTRIES / 2.0) - 1;
      assertTrue("The file up to the corrupted area hasn't been parsed",
          goodEntries + firstHalfEntries <= countHLog(logfile, fs, conf));
    }
  }

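  /**
   * With skip-errors on, a log that fails to read should be moved aside into
   * the .corrupt directory rather than deleted or left in place.
   */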
  @Test
  public void testCorruptedFileGetsArchivedIfSkipErrors() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, true);
    Class<?> backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
        Reader.class);
    InstrumentedSequenceFileLogWriter.activateFailure = false;
    HLog.resetLogReaderClass();

    try {
      Path c1 = new Path(hlogDir, HLOG_FILE_PREFIX + "0");
      conf.setClass("hbase.regionserver.hlog.reader.impl",
          FaultySequenceFileLogReader.class, HLog.Reader.class);
      for (FaultySequenceFileLogReader.FailureType failureType :
          FaultySequenceFileLogReader.FailureType.values()) {
        conf.set("faultysequencefilelogreader.failuretype", failureType.name());
        generateHLogs(1, ENTRIES, -1);
        fs.initialize(fs.getUri(), conf);
        HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
            hbaseDir, hlogDir, oldLogDir, fs);
        logSplitter.splitLog();
        FileStatus[] archivedLogs = fs.listStatus(corruptDir);
        assertEquals("expected a different file", c1.getName(),
            archivedLogs[0].getPath().getName());
        assertEquals(1, archivedLogs.length);
        fs.delete(new Path(oldLogDir, HLOG_FILE_PREFIX + "0"), false);
      }
    } finally {
      conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
          Reader.class);
      HLog.resetLogReaderClass();
    }
  }

  @Test(expected = IOException.class)
  public void testTrailingGarbageCorruptionLogFileSkipErrorsFalseThrows()
      throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);
    Class<?> backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
        Reader.class);
    InstrumentedSequenceFileLogWriter.activateFailure = false;
    HLog.resetLogReaderClass();

    try {
      conf.setClass("hbase.regionserver.hlog.reader.impl",
          FaultySequenceFileLogReader.class, HLog.Reader.class);
      conf.set("faultysequencefilelogreader.failuretype",
          FaultySequenceFileLogReader.FailureType.BEGINNING.name());
      generateHLogs(Integer.MAX_VALUE);
      fs.initialize(fs.getUri(), conf);
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, hlogDir, oldLogDir, fs);
      logSplitter.splitLog();
    } finally {
      conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
          Reader.class);
      HLog.resetLogReaderClass();
    }
  }

  @Test
  public void testCorruptedLogFilesSkipErrorsFalseDoesNotTouchLogs()
      throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);
    Class<?> backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
        Reader.class);
    InstrumentedSequenceFileLogWriter.activateFailure = false;
    HLog.resetLogReaderClass();

    try {
      conf.setClass("hbase.regionserver.hlog.reader.impl",
          FaultySequenceFileLogReader.class, HLog.Reader.class);
      conf.set("faultysequencefilelogreader.failuretype",
          FaultySequenceFileLogReader.FailureType.BEGINNING.name());
      generateHLogs(-1);
      fs.initialize(fs.getUri(), conf);
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, hlogDir, oldLogDir, fs);
      try {
        logSplitter.splitLog();
      } catch (IOException e) {
        assertEquals(
            "if skip.errors is false all files should remain in place",
            NUM_WRITERS, fs.listStatus(hlogDir).length);
      }
    } finally {
      conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
          Reader.class);
      HLog.resetLogReaderClass();
    }
  }

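  /**
   * A log ending in a truncated (half-written) entry is the normal result of
   * a crashed writer: the EOF is ignored, the complete entries are split, and
   * the file is not treated as corrupt.
   */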
  @Test
  public void testEOFisIgnored() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);

    final String REGION = "region__1";
    regions.clear();
    regions.add(REGION);

    int entryCount = 10;
    Path c1 = new Path(hlogDir, HLOG_FILE_PREFIX + "0");
    generateHLogs(1, entryCount, -1);
    corruptHLog(c1, Corruptions.TRUNCATE, true, fs);

    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    Path originalLog = (fs.listStatus(oldLogDir))[0].getPath();
    Path splitLog = getLogForRegion(hbaseDir, TABLE_NAME, REGION);

    // The truncated trailing entry is lost; everything before it survives.
    int actualCount = 0;
    HLog.Reader in = HLog.getReader(fs, splitLog, conf);
    while (in.next() != null) ++actualCount;
    in.close();
    assertEquals(entryCount - 1, actualCount);

    // A truncated log is not considered corrupt, so nothing is archived there.
    FileStatus[] archivedLogs = fs.listStatus(corruptDir);
    assertEquals(0, archivedLogs.length);
  }

  @Test
  public void testLogsGetArchivedAfterSplit() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);

    generateHLogs(-1);

    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    FileStatus[] archivedLogs = fs.listStatus(oldLogDir);

    assertEquals("wrong number of files in the archive log", NUM_WRITERS,
        archivedLogs.length);
  }

  @Test
  public void testSplit() throws IOException {
    generateHLogs(-1);
    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testLogDirectoryShouldBeDeletedAfterSuccessfulSplit()
      throws IOException {
    generateHLogs(-1);
    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();
    FileStatus[] statuses = null;
    try {
      statuses = fs.listStatus(hlogDir);
      if (statuses != null) {
        Assert.fail("Files left in log dir: " +
            Joiner.on(",").join(FileUtil.stat2Paths(statuses)));
      }
    } catch (FileNotFoundException e) {
      // expected on Hadoop versions where listStatus throws FNFE for a
      // missing directory instead of returning null; both mean the dir is gone
    }
  }

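  /**
   * Like testSplitFailsIfNewHLogGetsCreatedAfterSplitStarted, but also checks
   * that the failed split leaves the original log files unarchived (plus the
   * one extra file the zombie wrote).
   */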
  @Test
  public void testSplitWillNotTouchLogsIfNewHLogGetsCreatedAfterSplitStarted()
      throws IOException {
    AtomicBoolean stop = new AtomicBoolean(false);
    generateHLogs(-1);
    fs.initialize(fs.getUri(), conf);
    Thread zombie = new ZombieNewLogWriterRegionServer(stop);

    try {
      zombie.start();
      try {
        HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
            hbaseDir, hlogDir, oldLogDir, fs);
        logSplitter.splitLog();
      } catch (IOException ex) {
        // expected: the extra log file makes the split fail
      }
      int logFilesNumber = fs.listStatus(hlogDir).length;

      assertEquals("Log files should not be archived if there's an extra file after split",
          NUM_WRITERS + 1, logFilesNumber);
    } finally {
      stop.set(true);
    }
  }

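  /**
   * If writing a recovered-edits file fails (failure injected via
   * InstrumentedSequenceFileLogWriter), the whole split must fail rather than
   * silently drop edits.
   */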
  @Test(expected = IOException.class)
  public void testSplitWillFailIfWritingToRegionFails() throws Exception {
    // Leave writer 4 open so one more edit can be appended to it below.
    generateHLogs(4);

    fs.initialize(fs.getUri(), conf);

    String region = "break";
    Path regiondir = new Path(tabledir, region);
    fs.mkdirs(regiondir);

    InstrumentedSequenceFileLogWriter.activateFailure = false;
    appendEntry(writer[4], TABLE_NAME, Bytes.toBytes(region),
        ("r" + 999).getBytes(), FAMILY, QUALIFIER, VALUE, 0);
    writer[4].close();

    try {
      InstrumentedSequenceFileLogWriter.activateFailure = true;
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, hlogDir, oldLogDir, fs);
      logSplitter.splitLog();
    } catch (IOException e) {
      assertEquals("This exception is instrumented and should only be thrown for testing",
          e.getMessage());
      throw e;
    } finally {
      InstrumentedSequenceFileLogWriter.activateFailure = false;
    }
  }

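  /**
   * Splits the same set of logs twice and verifies that both passes produce
   * identical recovered-edits output for every region.
   */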
  @Test
  public void testSplittingLargeNumberOfRegionsConsistency() throws IOException {
    regions.clear();
    for (int i = 0; i < 100; i++) {
      regions.add("region__" + i);
    }

    generateHLogs(1, 100, -1);
    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();
    // Move the archived logs back, stash the first split's output aside,
    // then split the same logs again.
    fs.rename(oldLogDir, hlogDir);
    Path firstSplitPath = new Path(hbaseDir, Bytes.toString(TABLE_NAME) + ".first");
    Path splitPath = new Path(hbaseDir, Bytes.toString(TABLE_NAME));
    fs.rename(splitPath, firstSplitPath);

    fs.initialize(fs.getUri(), conf);
    logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    assertEquals(0, compareHLogSplitDirs(firstSplitPath, splitPath));
  }

  @Test
  public void testSplitDeletedRegion() throws IOException {
    regions.clear();
    String region = "region_that_splits";
    regions.add(region);

    generateHLogs(1);

    fs.initialize(fs.getUri(), conf);

    // Simulate the region having been deleted (e.g. split away) before replay.
    Path regiondir = new Path(tabledir, region);
    fs.delete(regiondir, true);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    assertFalse(fs.exists(regiondir));
  }

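  /**
   * An IOException thrown by an output writer thread must surface from
   * splitLog() instead of being swallowed on the writer thread.
   */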
  @Test
  public void testIOEOnOutputThread() throws Exception {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);

    generateHLogs(-1);

    fs.initialize(fs.getUri(), conf);

    // Create a splitter whose writers always fail on append.
    HLogSplitter logSplitter = new HLogSplitter(
        conf, hbaseDir, hlogDir, oldLogDir, fs) {
      @Override
      protected HLog.Writer createWriter(FileSystem fs, Path logfile,
          Configuration conf) throws IOException {
        HLog.Writer mockWriter = Mockito.mock(HLog.Writer.class);
        Mockito.doThrow(new IOException("Injected")).when(mockWriter)
            .append(Mockito.<HLog.Entry>any());
        return mockWriter;
      }
    };
    try {
      logSplitter.splitLog();
      fail("Didn't throw!");
    } catch (IOException ioe) {
      assertTrue(ioe.toString().contains("Injected"));
    }
  }

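  /**
   * Simulates a log file having been moved or archived while the splitter
   * attempts lease recovery on it (fs.append fails with a
   * LeaseExpiredException); the split should still complete and archive all
   * the logs.
   */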
  @Test
  public void testMovedHLogDuringRecovery() throws Exception {
    generateHLogs(-1);

    fs.initialize(fs.getUri(), conf);

    // A spy that fails every append() call, as happens when the underlying
    // file has already been moved out from under the splitter.
    FileSystem spiedFs = Mockito.spy(fs);
    Mockito.doThrow(new LeaseExpiredException("Injected: File does not exist")).
        when(spiedFs).append(Mockito.<Path>any());

    HLogSplitter logSplitter = new HLogSplitter(
        conf, hbaseDir, hlogDir, oldLogDir, spiedFs);

    try {
      logSplitter.splitLog();
      assertEquals(NUM_WRITERS, fs.listStatus(oldLogDir).length);
      assertFalse(fs.exists(hlogDir));
    } catch (IOException e) {
      fail("There shouldn't be any exception but: " + e.toString());
    }
  }

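  // A large stream of edits with a big buffer and a writer that never sleeps.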
  @Test
  public void testThreading() throws Exception {
    doTestThreading(20000, 128 * 1024 * 1024, 0);
  }

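  // A small buffer and a deliberately slow writer, which exercises the
  // back-pressure path between the reader and the writer threads.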
  @Test
  public void testThreadingSlowWriterSmallBuffer() throws Exception {
    doTestThreading(200, 1024, 50);
  }

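  /**
   * Generates a fake log file via a mocked Reader, pushes it through the
   * splitter with mocked Writers, and verifies that every region receives its
   * edits in order and in the right quantity.
   *
   * @param numFakeEdits   number of fake edits to feed through the pipeline
   * @param bufferSize     size of the in-memory edit buffer, in bytes
   * @param writerSlowness milliseconds each mock writer sleeps per append
   */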
  private void doTestThreading(final int numFakeEdits,
      final int bufferSize,
      final int writerSlowness) throws Exception {

    Configuration localConf = new Configuration(conf);
    localConf.setInt("hbase.regionserver.hlog.splitlog.buffersize", bufferSize);

    // Create an empty placeholder log file; the mocked reader below supplies
    // its contents.
    FSDataOutputStream out = fs.create(new Path(hlogDir, HLOG_FILE_PREFIX + ".fake"));
    out.close();

    final List<String> regions = ImmutableList.of("r0", "r1", "r2", "r3", "r4");
    makeRegionDirs(fs, regions);

    HLogSplitter logSplitter = new HLogSplitter(
        localConf, hbaseDir, hlogDir, oldLogDir, fs) {

      /* Produce a mock writer that optionally sleeps per append, then checks
         that the edits for its region arrive in increasing row-key order. */
      @Override
      protected HLog.Writer createWriter(FileSystem fs, Path logfile,
          Configuration conf) throws IOException {
        HLog.Writer mockWriter = Mockito.mock(HLog.Writer.class);
        Mockito.doAnswer(new Answer<Void>() {
          int expectedIndex = 0;

          @Override
          public Void answer(InvocationOnMock invocation) {
            if (writerSlowness > 0) {
              try {
                Thread.sleep(writerSlowness);
              } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
              }
            }
            HLog.Entry entry = (Entry) invocation.getArguments()[0];
            WALEdit edit = entry.getEdit();
            List<KeyValue> keyValues = edit.getKeyValues();
            assertEquals(1, keyValues.size());
            KeyValue kv = keyValues.get(0);

            // Check that the edits come in the expected order.
            assertEquals(expectedIndex, Bytes.toInt(kv.getRow()));
            expectedIndex++;
            return null;
          }
        }).when(mockWriter).append(Mockito.<HLog.Entry>any());
        return mockWriter;
      }

      /* Produce a mock reader that generates numFakeEdits fake entries,
         round-robining across the regions. */
      @Override
      protected Reader getReader(FileSystem fs, Path curLogFile,
          Configuration conf) throws IOException {
        Reader mockReader = Mockito.mock(Reader.class);
        Mockito.doAnswer(new Answer<HLog.Entry>() {
          int index = 0;

          @Override
          public HLog.Entry answer(InvocationOnMock invocation) throws Throwable {
            if (index >= numFakeEdits) return null;

            // Round-robin the regions; the row key encodes the per-region index.
            int regionIdx = index % regions.size();
            byte[] region = new byte[] {(byte) 'r', (byte) (0x30 + regionIdx)};

            HLog.Entry ret = createTestEntry(TABLE_NAME, region,
                Bytes.toBytes((int) (index / regions.size())),
                FAMILY, QUALIFIER, VALUE, index);
            index++;
            return ret;
          }
        }).when(mockReader).next();
        return mockReader;
      }
    };

    logSplitter.splitLog();

    // Verify the statistics: every region should have received an equal share.
    Map<byte[], Long> outputCounts = logSplitter.getOutputCounts();
    for (Map.Entry<byte[], Long> entry : outputCounts.entrySet()) {
      LOG.info("Got " + entry.getValue() + " output edits for region " +
          Bytes.toString(entry.getKey()));
      assertEquals(numFakeEdits / regions.size(), (long) entry.getValue());
    }
    assertEquals(regions.size(), outputCounts.size());
  }

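  /**
   * A "zombie" region server that refuses to die: it keeps appending edits to
   * the last log file even after the split has started, until its writes are
   * fenced off with a RemoteException.
   */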
  class ZombieLastLogWriterRegionServer extends Thread {
    AtomicLong editsCount;
    AtomicBoolean stop;
    Path log;
    HLog.Writer lastLogWriter;

    public ZombieLastLogWriterRegionServer(HLog.Writer writer, AtomicLong counter,
        AtomicBoolean stop) {
      this.stop = stop;
      this.editsCount = counter;
      this.lastLogWriter = writer;
    }

    @Override
    public void run() {
      if (stop.get()) {
        return;
      }
      flushToConsole("starting");
      while (true) {
        try {
          String region = "juliet";

          fs.mkdirs(new Path(new Path(hbaseDir, region), region));
          appendEntry(lastLogWriter, TABLE_NAME, region.getBytes(),
              ("r" + editsCount).getBytes(), FAMILY, QUALIFIER, VALUE, 0);
          lastLogWriter.sync();
          editsCount.incrementAndGet();
          try {
            Thread.sleep(1);
          } catch (InterruptedException e) {
            // ignore and keep writing
          }
        } catch (IOException ex) {
          if (ex instanceof RemoteException) {
            flushToConsole("Juliet: got RemoteException " +
                ex.getMessage() + " while writing " + (editsCount.get() + 1));
            break;
          } else {
            fail("Failed to write " + editsCount.get());
          }
        }
      }
    }
  }

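  /**
   * A "zombie" region server that waits for the split to start (the
   * recovered-edits directory appears), then drops a brand-new log file into
   * the log directory, one the splitter never had on its list.
   */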
  class ZombieNewLogWriterRegionServer extends Thread {
    AtomicBoolean stop;

    public ZombieNewLogWriterRegionServer(AtomicBoolean stop) {
      super("ZombieNewLogWriterRegionServer");
      this.stop = stop;
    }

    @Override
    public void run() {
      if (stop.get()) {
        return;
      }
      Path tableDir = new Path(hbaseDir, new String(TABLE_NAME));
      Path regionDir = new Path(tableDir, regions.get(0));
      Path recoveredEdits = new Path(regionDir, HLogSplitter.RECOVERED_EDITS);
      String region = "juliet";
      Path julietLog = new Path(hlogDir, HLOG_FILE_PREFIX + ".juliet");
      try {
        while (!fs.exists(recoveredEdits) && !stop.get()) {
          flushToConsole("Juliet: split not started, sleeping a bit...");
          Threads.sleep(10);
        }

        fs.mkdirs(new Path(tableDir, region));
        HLog.Writer writer = HLog.createWriter(fs, julietLog, conf);
        appendEntry(writer, "juliet".getBytes(), ("juliet").getBytes(),
            ("r").getBytes(), FAMILY, QUALIFIER, VALUE, 0);
        writer.close();
        flushToConsole("Juliet file creator: created file " + julietLog);
      } catch (IOException e1) {
        fail("Failed to create file " + julietLog);
      }
    }
  }

  private void flushToConsole(String s) {
    System.out.println(s);
    System.out.flush();
  }

  private void generateHLogs(int leaveOpen) throws IOException {
    generateHLogs(NUM_WRITERS, ENTRIES, leaveOpen);
  }

  private void makeRegionDirs(FileSystem fs, List<String> regions) throws IOException {
    for (String region : regions) {
      flushToConsole("Creating dir for region " + region);
      fs.mkdirs(new Path(tabledir, region));
    }
  }

  /**
   * Writes out <code>writers</code> log files with <code>entries</code>
   * entries per region in each; every writer except the one at index
   * <code>leaveOpen</code> is closed (pass -1 to close them all).
   */
  private void generateHLogs(int writers, int entries, int leaveOpen) throws IOException {
    makeRegionDirs(fs, regions);
    for (int i = 0; i < writers; i++) {
      writer[i] = HLog.createWriter(fs, new Path(hlogDir, HLOG_FILE_PREFIX + i), conf);
      for (int j = 0; j < entries; j++) {
        int prefix = 0;
        for (String region : regions) {
          String row_key = region + prefix++ + i + j;
          appendEntry(writer[i], TABLE_NAME, region.getBytes(),
              row_key.getBytes(), FAMILY, QUALIFIER, VALUE, seq);
        }
      }
      if (i != leaveOpen) {
        writer[i].close();
        flushToConsole("Closing writer " + i);
      }
    }
  }

  private Path getLogForRegion(Path rootdir, byte[] table, String region)
      throws IOException {
    Path tdir = HTableDescriptor.getTableDir(rootdir, table);
    Path editsdir = HLog.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir,
        Bytes.toString(region.getBytes())));
    FileStatus[] files = this.fs.listStatus(editsdir);
    assertEquals(1, files.length);
    return files[0].getPath();
  }

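  /**
   * Rewrites the given log file according to the requested corruption mode:
   * garbage appended at the end, garbage at the start, garbage in the middle,
   * or the last 32 bytes chopped off.
   */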
  private void corruptHLog(Path path, Corruptions corruption, boolean close,
      FileSystem fs) throws IOException {

    FSDataOutputStream out;
    int fileSize = (int) fs.listStatus(path)[0].getLen();

    FSDataInputStream in = fs.open(path);
    byte[] corrupted_bytes = new byte[fileSize];
    in.readFully(0, corrupted_bytes, 0, fileSize);
    in.close();

    switch (corruption) {
      case APPEND_GARBAGE:
        out = fs.append(path);
        out.write("-----".getBytes());
        closeOrFlush(close, out);
        break;

      case INSERT_GARBAGE_ON_FIRST_LINE:
        fs.delete(path, false);
        out = fs.create(path);
        out.write(0);
        out.write(corrupted_bytes);
        closeOrFlush(close, out);
        break;

      case INSERT_GARBAGE_IN_THE_MIDDLE:
        fs.delete(path, false);
        out = fs.create(path);
        int middle = (int) Math.floor(corrupted_bytes.length / 2);
        out.write(corrupted_bytes, 0, middle);
        out.write(0);
        out.write(corrupted_bytes, middle, corrupted_bytes.length - middle);
        closeOrFlush(close, out);
        break;

      case TRUNCATE:
        fs.delete(path, false);
        out = fs.create(path);
        out.write(corrupted_bytes, 0, fileSize - 32);
        closeOrFlush(close, out);
        break;
    }
  }

  private void closeOrFlush(boolean close, FSDataOutputStream out)
      throws IOException {
    if (close) {
      out.close();
    } else {
      out.sync();
      // Leaving the stream unclosed keeps the file "open" from the
      // namenode's point of view, like the log of a crashed region server.
    }
  }

  @SuppressWarnings("unused")
  private void dumpHLog(Path log, FileSystem fs, Configuration conf) throws IOException {
    HLog.Entry entry;
    HLog.Reader in = HLog.getReader(fs, log, conf);
    while ((entry = in.next()) != null) {
      System.out.println(entry);
    }
  }

  private int countHLog(Path log, FileSystem fs, Configuration conf) throws IOException {
    int count = 0;
    HLog.Reader in = HLog.getReader(fs, log, conf);
    while (in.next() != null) {
      count++;
    }
    return count;
  }

  public long appendEntry(HLog.Writer writer, byte[] table, byte[] region,
      byte[] row, byte[] family, byte[] qualifier,
      byte[] value, long seq)
      throws IOException {
    writer.append(createTestEntry(table, region, row, family, qualifier, value, seq));
    writer.sync();
    return seq;
  }

  private HLog.Entry createTestEntry(
      byte[] table, byte[] region,
      byte[] row, byte[] family, byte[] qualifier,
      byte[] value, long seq) {
    long time = System.nanoTime();
    WALEdit edit = new WALEdit();
    seq++;
    edit.add(new KeyValue(row, family, qualifier, time, KeyValue.Type.Put, value));
    return new HLog.Entry(new HLogKey(region, table, seq, time), edit);
  }

  private void injectEmptyFile(String suffix, boolean closeFile)
      throws IOException {
    HLog.Writer writer = HLog.createWriter(
        fs, new Path(hlogDir, HLOG_FILE_PREFIX + suffix), conf);
    if (closeFile) writer.close();
  }

  @SuppressWarnings("unused")
  private void listLogs(FileSystem fs, Path dir) throws IOException {
    for (FileStatus file : fs.listStatus(dir)) {
      System.out.println(file.getPath());
    }
  }

  private int compareHLogSplitDirs(Path p1, Path p2) throws IOException {
    FileStatus[] f1 = fs.listStatus(p1);
    FileStatus[] f2 = fs.listStatus(p2);
    assertNotNull("Path " + p1 + " doesn't exist", f1);
    assertNotNull("Path " + p2 + " doesn't exist", f2);

    System.out.println("Files in " + p1 + ": " +
        Joiner.on(",").join(FileUtil.stat2Paths(f1)));
    System.out.println("Files in " + p2 + ": " +
        Joiner.on(",").join(FileUtil.stat2Paths(f2)));
    assertEquals(f1.length, f2.length);

    for (int i = 0; i < f1.length; i++) {
      // Both splits should produce exactly one recovered-edits file per
      // region, and the corresponding files should carry identical edits.
      Path rd1 = HLog.getRegionDirRecoveredEditsDir(f1[i].getPath());
      FileStatus[] rd1fs = fs.listStatus(rd1);
      assertEquals(1, rd1fs.length);
      Path rd2 = HLog.getRegionDirRecoveredEditsDir(f2[i].getPath());
      FileStatus[] rd2fs = fs.listStatus(rd2);
      assertEquals(1, rd2fs.length);
      if (!logsAreEqual(rd1fs[0].getPath(), rd2fs[0].getPath())) {
        return -1;
      }
    }
    return 0;
  }

  private boolean logsAreEqual(Path p1, Path p2) throws IOException {
    HLog.Reader in1, in2;
    in1 = HLog.getReader(fs, p1, conf);
    in2 = HLog.getReader(fs, p2, conf);
    HLog.Entry entry1;
    HLog.Entry entry2;
    while ((entry1 = in1.next()) != null) {
      entry2 = in2.next();
      if ((entry1.getKey().compareTo(entry2.getKey()) != 0) ||
          (!entry1.getEdit().toString().equals(entry2.getEdit().toString()))) {
        return false;
      }
    }
    return true;
  }
}