/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.CountDownLatch;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
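
/**
 * Test compactions.
 */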
@Category(SmallTests.class)
public class TestCompaction extends HBaseTestCase {
  static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  private HRegion r = null;
  private HTableDescriptor htd = null;
  private Path compactionDir = null;
  private Path regionCompactionDir = null;
  private static final byte [] COLUMN_FAMILY = fam1;
  private final byte [] STARTROW = Bytes.toBytes(START_KEY);
  private static final byte [] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
  private int compactionThreshold;
  private byte[] firstRowBytes, secondRowBytes, thirdRowBytes;
  private final byte[] col1, col2;
  private static final long MAX_FILES_TO_COMPACT = 10;

  /** Constructor. */
  public TestCompaction() throws Exception {
    super();

    // Set cache flush size to 1MB.
    conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 100);
    compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);

    firstRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
    secondRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
    // Increment the least significant character so we get to the next row.
    secondRowBytes[START_KEY_BYTES.length - 1]++;
    thirdRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
    thirdRowBytes[START_KEY_BYTES.length - 1]++;
    thirdRowBytes[START_KEY_BYTES.length - 1]++;
    col1 = "column1".getBytes(HConstants.UTF8_ENCODING);
    col2 = "column2".getBytes(HConstants.UTF8_ENCODING);
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
    this.htd = createTableDescriptor(getName());
    this.r = createNewHRegion(htd, null, null);
  }

  @Override
  public void tearDown() throws Exception {
    HLog hlog = r.getLog();
    this.r.close();
    hlog.closeAndDelete();
    super.tearDown();
  }
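
  /**
   * Test that on a major compaction, if all cells are expired or deleted,
   * then we'll end up with no product.  Make sure the scanner over the
   * region returns the right answer in this case - and that it just
   * basically works.
   * @throws IOException
   */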
  public void testMajorCompactingToNoOutput() throws IOException {
    createStoreFile(r);
    for (int i = 0; i < compactionThreshold; i++) {
      createStoreFile(r);
    }
    // Now delete everything.
    InternalScanner s = r.getScanner(new Scan());
    do {
      List<KeyValue> results = new ArrayList<KeyValue>();
      boolean result = s.next(results);
      if (!results.isEmpty()) {
        r.delete(new Delete(results.get(0).getRow()), null, false);
      }
      if (!result) break;
    } while (true);
    s.close();
    // Flush the deletes, then major compact; nothing should remain.
    r.flushcache();
    r.compactStores(true);
    s = r.getScanner(new Scan());
    int counter = 0;
    do {
      List<KeyValue> results = new ArrayList<KeyValue>();
      boolean result = s.next(results);
      if (!result) break;
      counter++;
    } while (true);
    s.close();
    assertEquals(0, counter);
  }
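
  /**
   * Run compaction and flushing memstore.
   * Assert deletes get cleaned up.
   * @throws Exception
   */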
  public void testMajorCompaction() throws Exception {
    majorCompaction();
  }

  public void testDataBlockEncodingInCacheOnly() throws Exception {
    majorCompactionWithDataBlockEncoding(true);
  }

  public void testDataBlockEncodingEverywhere() throws Exception {
    majorCompactionWithDataBlockEncoding(false);
  }

  public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly)
      throws Exception {
    Map<Store, HFileDataBlockEncoder> replaceBlockCache =
        new HashMap<Store, HFileDataBlockEncoder>();
    for (Entry<byte[], Store> pair : r.getStores().entrySet()) {
      Store store = pair.getValue();
      HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
      replaceBlockCache.put(pair.getValue(), blockEncoder);
      final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
      final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
          inCache;
      store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(
          onDisk, inCache));
    }

    majorCompaction();

    // Restore the original data block encoders.
    for (Entry<Store, HFileDataBlockEncoder> entry :
        replaceBlockCache.entrySet()) {
      entry.getKey().setDataBlockEncoderInTest(entry.getValue());
    }
  }
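
  /**
   * Create store files past the compaction threshold, major compact, and
   * verify that extra versions, a deleted row, and finally all TTL-expired
   * cells get cleaned up.
   */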
  private void majorCompaction() throws Exception {
    createStoreFile(r);
    for (int i = 0; i < compactionThreshold; i++) {
      createStoreFile(r);
    }
    // Add more content.
    addContent(new HRegionIncommon(r), Bytes.toString(COLUMN_FAMILY));

    // Now there are more versions of each column than the configured maximum.
    // Assert that only compactionThreshold (MAXVERSIONS) versions come back
    // when we ask for all of them.
    Result result = r.get(new Get(STARTROW).addFamily(COLUMN_FAMILY_TEXT).
        setMaxVersions(100), null);
    assertEquals(compactionThreshold, result.size());

    // See that CompactionProgress is in place but null before compacting.
    for (Store store : this.r.stores.values()) {
      assertNull(store.getCompactionProgress());
    }

    r.flushcache();
    r.compactStores(true);

    // See if CompactionProgress has done its thing on at least one store.
    int storeCount = 0;
    for (Store store : this.r.stores.values()) {
      CompactionProgress progress = store.getCompactionProgress();
      if (progress != null) {
        ++storeCount;
        assertTrue(progress.currentCompactedKVs > 0);
        assertTrue(progress.totalCompactingKVs > 0);
      }
    }
    assertTrue(storeCount > 0);

    // Look at the second row.
    // Increment the least significant character so we get to the next row.
    byte [] secondRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
    secondRowBytes[START_KEY_BYTES.length - 1]++;

    // Always compactionThreshold versions if that is what max versions is.
    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).
        setMaxVersions(100), null);
    LOG.debug("Row " + Bytes.toStringBinary(secondRowBytes) + " after " +
        "initial compaction: " + result);
    assertEquals("Invalid number of versions of row "
        + Bytes.toStringBinary(secondRowBytes) + ".", compactionThreshold,
        result.size());

    // Now add deletes to memstore and then flush it.  That will put us over
    // the compaction threshold of 3 store files.  Compacting these store
    // files should result in a compacted store file that has no references
    // to the deleted row.
    LOG.debug("Adding deletes to memstore and flushing");
    Delete delete = new Delete(secondRowBytes, System.currentTimeMillis(), null);
    byte [][] famAndQf = {COLUMN_FAMILY, null};
    delete.deleteFamily(famAndQf[0]);
    r.delete(delete, null, true);

    // Assert deleted.
    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).
        setMaxVersions(100), null);
    assertTrue("Second row should have been deleted", result.isEmpty());

    r.flushcache();

    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).
        setMaxVersions(100), null);
    assertTrue("Second row should have been deleted", result.isEmpty());

    // Add a bit of data and flush.  Start adding at 'bbb'.
    createSmallerStoreFile(this.r);
    r.flushcache();
    // Assert that the second row is still deleted.
    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).
        setMaxVersions(100), null);
    assertTrue("Second row should still be deleted", result.isEmpty());

    // Force major compaction.
    r.compactStores(true);
    assertEquals(1, r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size());

    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).
        setMaxVersions(100), null);
    assertTrue("Second row should still be deleted", result.isEmpty());

    // Make sure the store files do have some 'aaa' keys in them -- exactly
    // three.  Also, that the compacted store files have no secondRowBytes
    // entries because they were deleted.
    verifyCounts(3, 0);

    // Multiple versions are allowed for an entry, so the delete isn't enough.
    // Lower the TTL, let everything expire, and verify a major compaction
    // wipes it all.
    final int ttl = 1000;
    for (Store store : this.r.stores.values()) {
      Store.ScanInfo old = store.scanInfo;
      Store.ScanInfo si = new Store.ScanInfo(old.getFamily(),
          old.getMinVersions(), old.getMaxVersions(), ttl,
          old.getKeepDeletedCells(), 0, old.getComparator());
      store.scanInfo = si;
    }
    Thread.sleep(1000);

    r.compactStores(true);
    int count = count();
    assertEquals("Should not see anything after TTL has expired", 0, count);
  }
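
  /**
   * Verify that a compaction request is upgraded to a major compaction once
   * the configured compaction period (plus jitter) has elapsed.
   */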
  public void testTimeBasedMajorCompaction() throws Exception {
    // Create 2 store files and force a major compaction to reset the clock.
    int delay = 10 * 1000; // 10 sec
    float jitterPct = 0.20f; // 20%
    conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, delay);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", jitterPct);

    Store s = r.getStore(COLUMN_FAMILY);
    try {
      createStoreFile(r);
      createStoreFile(r);
      r.compactStores(true);

      // Add one more file and verify that a regular compaction won't work.
      createStoreFile(r);
      r.compactStores(false);
      assertEquals(2, s.getStorefilesCount());

      // Ensure that the major compaction time is deterministic.
      long mcTime = s.getNextMajorCompactTime();
      for (int i = 0; i < 10; ++i) {
        assertEquals(mcTime, s.getNextMajorCompactTime());
      }

      // Ensure that the major compaction time is within the variance.
      long jitter = Math.round(delay * jitterPct);
      assertTrue(delay - jitter <= mcTime && mcTime <= delay + jitter);

      // Wait until the time-based compaction interval has passed.
      Thread.sleep(mcTime);

      // Trigger a compaction request and ensure it's upgraded to major.
      r.compactStores(false);
      assertEquals(1, s.getStorefilesCount());
    } finally {
      // Reset the timed compaction settings so we don't mess up other tests.
      conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24);
      conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
      // Run a major compaction to reset the state.
      createStoreFile(r);
      r.compactStores(true);
      assertEquals(1, s.getStorefilesCount());
    }
  }
  public void testMinorCompactionWithDeleteRow() throws Exception {
    Delete deleteRow = new Delete(secondRowBytes);
    testMinorCompactionWithDelete(deleteRow);
  }

  public void testMinorCompactionWithDeleteColumn1() throws Exception {
    Delete dc = new Delete(secondRowBytes);
    /* Delete all timestamps in the column. */
    dc.deleteColumns(fam2, col2);
    testMinorCompactionWithDelete(dc);
  }

  public void testMinorCompactionWithDeleteColumn2() throws Exception {
    Delete dc = new Delete(secondRowBytes);
    dc.deleteColumn(fam2, col2);
    /* compactionThreshold is 3. The table has 4 versions: 0, 1, 2, and 3.
     * We only delete the latest version.  One might expect to see only
     * versions 1 and 2, but since there was no compaction done before the
     * delete, version 0 stays on as well, so we expect 3 results.
     */
    testMinorCompactionWithDelete(dc, 3);
  }

  public void testMinorCompactionWithDeleteColumnFamily() throws Exception {
    Delete deleteCF = new Delete(secondRowBytes);
    deleteCF.deleteFamily(fam2);
    testMinorCompactionWithDelete(deleteCF);
  }

  public void testMinorCompactionWithDeleteVersion1() throws Exception {
    Delete deleteVersion = new Delete(secondRowBytes);
    deleteVersion.deleteColumns(fam2, col2, 2);
    /* compactionThreshold is 3. The table has 4 versions: 0, 1, 2, and 3.
     * We delete versions 0 ... 2, so only one version remains.
     */
    testMinorCompactionWithDelete(deleteVersion, 1);
  }

  public void testMinorCompactionWithDeleteVersion2() throws Exception {
    Delete deleteVersion = new Delete(secondRowBytes);
    deleteVersion.deleteColumn(fam2, col2, 1);
    /* The table has 4 versions: 0, 1, 2, and 3.
     * We delete only version 1, so three versions remain.
     */
    testMinorCompactionWithDelete(deleteVersion, 3);
  }
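
  /*
   * Load the region with several flushes across two families, apply the
   * given delete, then minor compact and verify the delete is preserved
   * while untouched rows keep all their versions.
   */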
  private void testMinorCompactionWithDelete(Delete delete) throws Exception {
    testMinorCompactionWithDelete(delete, 0);
  }

  private void testMinorCompactionWithDelete(Delete delete,
      int expectedResultsAfterDelete) throws Exception {
    HRegionIncommon loader = new HRegionIncommon(r);
    for (int i = 0; i < compactionThreshold + 1; i++) {
      addContent(loader, Bytes.toString(fam1), Bytes.toString(col1),
          firstRowBytes, thirdRowBytes, i);
      addContent(loader, Bytes.toString(fam1), Bytes.toString(col2),
          firstRowBytes, thirdRowBytes, i);
      addContent(loader, Bytes.toString(fam2), Bytes.toString(col1),
          firstRowBytes, thirdRowBytes, i);
      addContent(loader, Bytes.toString(fam2), Bytes.toString(col2),
          firstRowBytes, thirdRowBytes, i);
      r.flushcache();
    }

    Result result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).
        setMaxVersions(100), null);
    assertEquals(compactionThreshold, result.size());
    result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).
        setMaxVersions(100), null);
    assertEquals(compactionThreshold, result.size());

    // Now add the delete to the memstore.
    r.delete(delete, null, true);

    // Make sure that we have only deleted family2 from secondRowBytes,
    result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).
        setMaxVersions(100), null);
    assertEquals(expectedResultsAfterDelete, result.size());
    // but we still have firstrow.
    result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).
        setMaxVersions(100), null);
    assertEquals(compactionThreshold, result.size());

    r.flushcache();
    // The flush should not change anything.  Check again.
    result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).
        setMaxVersions(100), null);
    assertEquals(expectedResultsAfterDelete, result.size());

    result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).
        setMaxVersions(100), null);
    assertEquals(compactionThreshold, result.size());

    // Do a compaction.
    Store store2 = this.r.stores.get(fam2);
    int numFiles1 = store2.getStorefiles().size();
    assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold);
    store2.compactRecentForTesting(compactionThreshold);
    int numFiles2 = store2.getStorefiles().size();
    // Check that we did compact ...
    assertTrue("Number of store files should go down", numFiles1 > numFiles2);
    // ... and that it was a minor compaction.
    assertTrue("Was not supposed to be a major compaction", numFiles2 > 1);

    // Make sure the delete survived the minor compaction.
    result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).
        setMaxVersions(100), null);
    assertEquals(expectedResultsAfterDelete, result.size());

    result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).
        setMaxVersions(100), null);
    assertEquals(compactionThreshold, result.size());
  }
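
  /**
   * Count the KeyValues for STARTROW and secondRowBytes across all store
   * files of the default family and assert they match the expected counts.
   */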
  private void verifyCounts(int countRow1, int countRow2) throws Exception {
    int count1 = 0;
    int count2 = 0;
    for (StoreFile f : this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles()) {
      HFileScanner scanner = f.getReader().getScanner(false, false);
      scanner.seekTo();
      do {
        byte [] row = scanner.getKeyValue().getRow();
        if (Bytes.equals(row, STARTROW)) {
          count1++;
        } else if (Bytes.equals(row, secondRowBytes)) {
          count2++;
        }
      } while (scanner.next());
    }
    assertEquals(countRow1, count1);
    assertEquals(countRow2, count2);
  }
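
  /**
   * Verify that you can stop a long-running compaction
   * (used during RS shutdown).
   * @throws Exception
   */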
  public void testInterruptCompaction() throws Exception {
    assertEquals(0, count());

    // Lower the polling interval for this test.
    int origWI = Store.closeCheckInterval;
    Store.closeCheckInterval = 10*1000; // 10 KB

    try {
      // Create a couple of store files with roughly 15 KB of padded data.
      int jmax = (int) Math.ceil(15.0/compactionThreshold);
      byte [] pad = new byte[1000]; // 1 KB chunk
      for (int i = 0; i < compactionThreshold; i++) {
        HRegionIncommon loader = new HRegionIncommon(r);
        Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
        p.setWriteToWAL(false);
        for (int j = 0; j < jmax; j++) {
          p.add(COLUMN_FAMILY, Bytes.toBytes(j), pad);
        }
        addContent(loader, Bytes.toString(COLUMN_FAMILY));
        loader.put(p);
        loader.flushcache();
      }

      // Disable writes as soon as compaction prep runs, which asks the
      // in-flight compaction to abort.
      HRegion spyR = spy(r);
      doAnswer(new Answer<Object>() {
        public Object answer(InvocationOnMock invocation) throws Throwable {
          r.writestate.writesEnabled = false;
          return invocation.callRealMethod();
        }
      }).when(spyR).doRegionCompactionPrep();

      // Force a minor compaction, but not before requesting a stop.
      spyR.compactStores();

      // Ensure that the compaction stopped and all old files are intact,
      Store s = r.stores.get(COLUMN_FAMILY);
      assertEquals(compactionThreshold, s.getStorefilesCount());
      assertTrue(s.getStorefilesSize() > 15*1000);
      // and that no new store files persisted past compactStores().
      FileStatus[] ls = FileSystem.get(conf).listStatus(r.getTmpDir());
      assertEquals(0, ls.length);
    } finally {
      // Don't mess up future tests.
      r.writestate.writesEnabled = true;
      Store.closeCheckInterval = origWI;

      // Delete all the content we added, then flush.
      for (int i = 0; i < compactionThreshold; i++) {
        Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i)));
        byte [][] famAndQf = {COLUMN_FAMILY, null};
        delete.deleteFamily(famAndQf[0]);
        r.delete(delete, null, true);
      }
      r.flushcache();

      // Multiple versions are allowed for an entry, so the delete isn't
      // enough.  Lower the TTL and expire to ensure all entries are wiped.
      final int ttl = 1000;
      for (Store store : this.r.stores.values()) {
        Store.ScanInfo old = store.scanInfo;
        Store.ScanInfo si = new Store.ScanInfo(old.getFamily(),
            old.getMinVersions(), old.getMaxVersions(), ttl,
            old.getKeepDeletedCells(), 0, old.getComparator());
        store.scanInfo = si;
      }
      Thread.sleep(ttl);

      r.compactStores(true);
      assertEquals(0, count());
    }
  }
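
  /** @return the total number of KeyValues across all store files of the default family */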
  private int count() throws IOException {
    int count = 0;
    for (StoreFile f : this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles()) {
      HFileScanner scanner = f.getReader().getScanner(false, false);
      if (!scanner.seekTo()) {
        continue;
      }
      do {
        count++;
      } while (scanner.next());
    }
    return count;
  }

  private void createStoreFile(final HRegion region) throws IOException {
    createStoreFile(region, Bytes.toString(COLUMN_FAMILY));
  }

  private void createStoreFile(final HRegion region, String family)
      throws IOException {
    HRegionIncommon loader = new HRegionIncommon(region);
    addContent(loader, family);
    loader.flushcache();
  }

  private void createSmallerStoreFile(final HRegion region) throws IOException {
    HRegionIncommon loader = new HRegionIncommon(region);
    // Start adding at row 'bbb' so we get a smaller store file.
    addContent(loader, Bytes.toString(COLUMN_FAMILY), Bytes.toBytes("bbb"), null);
    loader.flushcache();
  }
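
  /**
   * Overwrite the compaction product with garbage before completing the
   * compaction and verify that completeCompaction() fails, leaving the
   * corrupt file out of the store directory.
   */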
  public void testCompactionWithCorruptResult() throws Exception {
    int nfiles = 10;
    for (int i = 0; i < nfiles; i++) {
      createStoreFile(r);
    }
    Store store = r.getStore(COLUMN_FAMILY);

    List<StoreFile> storeFiles = store.getStorefiles();
    long maxId = StoreFile.getMaxSequenceIdInList(storeFiles, true);
    Compactor tool = new Compactor(this.conf);

    StoreFile.Writer compactedFile = tool.compactForTesting(store, this.conf,
        storeFiles, false, maxId);

    // Now let's corrupt the compacted file.
    FileSystem fs = FileSystem.get(conf);
    Path origPath = compactedFile.getPath();
    Path homedir = store.getHomedir();
    Path dstPath = new Path(homedir, origPath.getName());
    FSDataOutputStream stream = fs.create(origPath, null, true, 512, (short) 3,
        (long) 1024, null);
    stream.writeChars("CORRUPT FILE!!!!");
    stream.close();

    try {
      store.completeCompaction(storeFiles, compactedFile);
    } catch (Exception e) {
      // Completing the compaction should fail; the corrupt file stays put
      // and nothing is moved into the store directory.
      assertTrue(fs.exists(origPath));
      assertFalse(fs.exists(dstPath));
      System.out.println("testCompactionWithCorruptResult Passed");
      return;
    }
    fail("testCompactionWithCorruptResult failed since no exception was " +
        "thrown while completing a corrupt file");
  }
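
  /**
   * Create more store files than the compaction algorithm will consider at
   * once and verify that a system-requested major compaction is downgraded
   * to a minor one.
   */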
  public void testNonUserMajorCompactionRequest() throws Exception {
    Store store = r.getStore(COLUMN_FAMILY);
    createStoreFile(r);
    for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
      createStoreFile(r);
    }
    store.triggerMajorCompaction();

    CompactionRequest request = store.requestCompaction(Store.NO_PRIORITY, null);
    assertNotNull("Expected to receive a compaction request", request);
    assertEquals(
      "System-requested major compaction should not occur if there are too many store files",
      false,
      request.isMajor());
  }
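
  /**
   * Verify that a user-requested major compaction is honored even when
   * there are more store files than the compaction algorithm would
   * normally pick.
   */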
  public void testUserMajorCompactionRequest() throws IOException {
    Store store = r.getStore(COLUMN_FAMILY);
    createStoreFile(r);
    for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
      createStoreFile(r);
    }
    store.triggerMajorCompaction();
    CompactionRequest request = store.requestCompaction(Store.PRIORITY_USER, null);
    assertNotNull("Expected to receive a compaction request", request);
    assertEquals(
      "User-requested major compaction should always occur, even if there are too many store files",
      true,
      request.isMajor());
  }
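
  /**
   * Submit a trackable compaction request and wait on its latch to prove
   * that the custom request runs to completion.
   */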
  public void testTrackingCompactionRequest() throws Exception {
    // Set up a mock region server and a compact/split thread to run requests.
    HRegionServer mockServer = Mockito.mock(HRegionServer.class);
    Mockito.when(mockServer.getConfiguration()).thenReturn(r.getConf());
    CompactSplitThread thread = new CompactSplitThread(mockServer);
    Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);

    // Mock out the metrics so the compaction can report its progress.
    RegionServerMetrics mockMetrics = Mockito.mock(RegionServerMetrics.class);
    Mockito.when(mockServer.getMetrics()).thenReturn(mockMetrics);

    // Create a couple of store files so there is something to compact.
    Store store = r.getStore(COLUMN_FAMILY);
    createStoreFile(r);
    for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
      createStoreFile(r);
    }

    CountDownLatch latch = new CountDownLatch(1);
    TrackableCompactionRequest request = new TrackableCompactionRequest(r, store, latch);
    thread.requestCompaction(r, store, "test custom compaction", Store.PRIORITY_USER, request);
    // Wait for the compaction to complete.
    latch.await();

    thread.interruptIfNecessary();
  }
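
  /**
   * Submit a custom compaction request for every store in the region and
   * wait for all of them to complete.
   */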
  public void testMultipleCustomCompactionRequests() throws Exception {
    // Set up a mock region server and a compact/split thread to run requests.
    HRegionServer mockServer = Mockito.mock(HRegionServer.class);
    Mockito.when(mockServer.getConfiguration()).thenReturn(r.getConf());
    CompactSplitThread thread = new CompactSplitThread(mockServer);
    Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);

    // Mock out the metrics so the compactions can report their progress.
    RegionServerMetrics mockMetrics = Mockito.mock(RegionServerMetrics.class);
    Mockito.when(mockServer.getMetrics()).thenReturn(mockMetrics);

    // Create a couple of store files in each store and build a tracked
    // request per store.
    int numStores = r.getStores().size();
    List<CompactionRequest> requests = new ArrayList<CompactionRequest>(numStores);
    CountDownLatch latch = new CountDownLatch(numStores);
    for (Store store : r.getStores().values()) {
      createStoreFile(r, store.getColumnFamilyName());
      createStoreFile(r, store.getColumnFamilyName());
      createStoreFile(r, store.getColumnFamilyName());
      requests.add(new TrackableCompactionRequest(r, store, latch));
    }

    thread.requestCompaction(r, "test multiple custom compactions", Store.PRIORITY_USER,
      Collections.unmodifiableList(requests));

    // Wait for all compactions to complete.
    latch.await();

    thread.interruptIfNecessary();
  }
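
  /**
   * Simple {@link CompactionRequest} subclass that counts down a latch when
   * the compaction finishes, so tests can block until it completes.
   */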
  public static class TrackableCompactionRequest extends CompactionRequest {
    private CountDownLatch done;
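
    /**
     * @param region region to compact
     * @param store store to compact
     * @param finished latch counted down when the compaction completes
     */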
    public TrackableCompactionRequest(HRegion region, Store store,
        CountDownLatch finished) {
      super(region, store, Store.PRIORITY_USER);
      this.done = finished;
    }

    @Override
    public void run() {
      super.run();
      this.done.countDown();
    }
  }

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
      new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}