/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY;
import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY_BYTES;
import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.CountDownLatch;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestCase.HRegionIncommon;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;


/**
 * Test compactions
 */
@Category(MediumTests.class)
public class TestCompaction {
  @Rule public TestName name = new TestName();
  static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility().createLocalHTU();
  protected Configuration conf = UTIL.getConfiguration();

  private HRegion r = null;
  private HTableDescriptor htd = null;
  private Path compactionDir = null;
  private Path regionCompactionDir = null;
  private static final byte [] COLUMN_FAMILY = fam1;
  private final byte [] STARTROW = Bytes.toBytes(START_KEY);
  private static final byte [] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
  private int compactionThreshold;
  private byte[] firstRowBytes, secondRowBytes, thirdRowBytes;
  private final byte[] col1, col2;
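  // Assumed to mirror the default hbase.hstore.compaction.max (10): creating one more
  // file than this exceeds the per-compaction file limit in the tests below.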
  private static final long MAX_FILES_TO_COMPACT = 10;

  /** constructor */
  public TestCompaction() {
    super();

    // Set cache flush size to 1MB
    conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 100);
    compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);

    firstRowBytes = START_KEY_BYTES;
    secondRowBytes = START_KEY_BYTES.clone();
    // Increment the least significant character so we get to next row.
    secondRowBytes[START_KEY_BYTES.length - 1]++;
    thirdRowBytes = START_KEY_BYTES.clone();
    thirdRowBytes[START_KEY_BYTES.length - 1] += 2;
    col1 = Bytes.toBytes("column1");
    col2 = Bytes.toBytes("column2");
  }

  @Before
  public void setUp() throws Exception {
    this.htd = UTIL.createTableDescriptor(name.getMethodName());
    this.r = UTIL.createLocalHRegion(htd, null, null);
  }

  @After
  public void tearDown() throws Exception {
    HLog hlog = r.getLog();
    this.r.close();
    hlog.closeAndDelete();
  }

  /**
   * Test that on a major compaction, if all cells are expired or deleted, then
   * we'll end up with no product.  Make sure a scanner over the region returns
   * the right answer in this case - and that it just basically works.
   * @throws IOException
   */
  @Test
  public void testMajorCompactingToNoOutput() throws IOException {
    createStoreFile(r);
    for (int i = 0; i < compactionThreshold; i++) {
      createStoreFile(r);
    }
    // Now delete everything.
    InternalScanner s = r.getScanner(new Scan());
    do {
      List<Cell> results = new ArrayList<Cell>();
      boolean result = s.next(results);
      r.delete(new Delete(CellUtil.cloneRow(results.get(0))));
      if (!result) break;
    } while (true);
    s.close();
    // Flush
    r.flushcache();
    // Major compact.
    r.compactStores(true);
    s = r.getScanner(new Scan());
    int counter = 0;
    do {
      List<Cell> results = new ArrayList<Cell>();
      boolean result = s.next(results);
      if (!result) break;
      counter++;
    } while (true);
    assertEquals(0, counter);
  }

  /**
   * Run compaction and flush the memstore.
   * Assert deletes get cleaned up.
   * @throws Exception
   */
  @Test
  public void testMajorCompaction() throws Exception {
    majorCompaction();
  }

  @Test
  public void testDataBlockEncodingInCacheOnly() throws Exception {
    majorCompactionWithDataBlockEncoding(true);
  }

  @Test
  public void testDataBlockEncodingEverywhere() throws Exception {
    majorCompactionWithDataBlockEncoding(false);
  }

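  /**
   * Run a major compaction with PREFIX data block encoding, either only in the
   * block cache (NONE on disk) or both in cache and on disk.
   */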
  public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly)
      throws Exception {
    Map<HStore, HFileDataBlockEncoder> replaceBlockCache =
        new HashMap<HStore, HFileDataBlockEncoder>();
    for (Entry<byte[], Store> pair : r.getStores().entrySet()) {
      HStore store = (HStore) pair.getValue();
      HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
      replaceBlockCache.put(store, blockEncoder);
      final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
      final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
          inCache;
      store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk));
    }

    majorCompaction();

    // restore settings
    for (Entry<HStore, HFileDataBlockEncoder> entry :
        replaceBlockCache.entrySet()) {
      entry.getKey().setDataBlockEncoderInTest(entry.getValue());
    }
  }

  private void majorCompaction() throws Exception {
    createStoreFile(r);
    for (int i = 0; i < compactionThreshold; i++) {
      createStoreFile(r);
    }
    // Add more content.
    HBaseTestCase.addContent(new HRegionIncommon(r), Bytes.toString(COLUMN_FAMILY));

    // Now there are about 5 versions of each column.
    // Default is that only 3 (MAXVERSIONS) versions are allowed per column.
    //
    // Assert == 3 when we ask for versions.
    Result result = r.get(new Get(STARTROW).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
    assertEquals(compactionThreshold, result.size());

    // see if CompactionProgress is in place but null
    for (Store store : this.r.stores.values()) {
      assertNull(store.getCompactionProgress());
    }

    r.flushcache();
    r.compactStores(true);

    // see if CompactionProgress has done its thing on at least one store
    int storeCount = 0;
    for (Store store : this.r.stores.values()) {
      CompactionProgress progress = store.getCompactionProgress();
      if (progress != null) {
        ++storeCount;
        assertTrue(progress.currentCompactedKVs > 0);
        assertTrue(progress.totalCompactingKVs > 0);
      }
    }
    // At least one store must have gone through a compaction.
    assertTrue(storeCount > 0);

    // look at the second row
    // Increment the least significant character so we get to next row.
    byte [] secondRowBytes = START_KEY_BYTES.clone();
    secondRowBytes[START_KEY_BYTES.length - 1]++;

    // Always 3 versions if that is what max versions is.
    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).
        setMaxVersions(100));
    LOG.debug("Row " + Bytes.toStringBinary(secondRowBytes) + " after " +
        "initial compaction: " + result);
    assertEquals("Invalid number of versions of row "
        + Bytes.toStringBinary(secondRowBytes) + ".", compactionThreshold,
        result.size());

    // Now add deletes to memstore and then flush it.  That will put us over
    // the compaction threshold of 3 store files.  Compacting these store files
    // should result in a compacted store file that has no references to the
    // deleted row.
    LOG.debug("Adding deletes to memstore and flushing");
    Delete delete = new Delete(secondRowBytes, System.currentTimeMillis());
    delete.deleteFamily(COLUMN_FAMILY);
    r.delete(delete);

    // Assert deleted.
    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
    assertTrue("Second row should have been deleted", result.isEmpty());

    r.flushcache();

    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
    assertTrue("Second row should have been deleted", result.isEmpty());

    // Add a bit of data and flush.  Start adding at 'bbb'.
    createSmallerStoreFile(this.r);
    r.flushcache();
    // Assert that the second row is still deleted.
    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
    assertTrue("Second row should still be deleted", result.isEmpty());

    // Force major compaction.
    r.compactStores(true);
    assertEquals(1, r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size());

    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
    assertTrue("Second row should still be deleted", result.isEmpty());

    // Make sure the store files do have some 'aaa' keys in them -- exactly 3.
    // Also, that compacted store files do not have any secondRowBytes because
    // they were deleted.
    verifyCounts(3, 0);

    // Multiple versions allowed for an entry, so the delete isn't enough
    // Lower TTL and expire to ensure that all our entries have been wiped
    final int ttl = 1000;
    for (Store hstore : this.r.stores.values()) {
      HStore store = ((HStore) hstore);
      ScanInfo old = store.getScanInfo();
      ScanInfo si = new ScanInfo(old.getFamily(),
          old.getMinVersions(), old.getMaxVersions(), ttl,
          old.getKeepDeletedCells(), 0, old.getComparator());
      store.setScanInfo(si);
    }
    Thread.sleep(1000);

    r.compactStores(true);
    int count = count();
    assertEquals("Should not see anything after TTL has expired", 0, count);
  }

  @Test
  public void testTimeBasedMajorCompaction() throws Exception {
    // create 2 storefiles and force a major compaction to reset the time
    int delay = 10 * 1000; // 10 sec
    float jitterPct = 0.20f; // 20%
    conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, delay);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", jitterPct);

    HStore s = ((HStore) r.getStore(COLUMN_FAMILY));
    s.storeEngine.getCompactionPolicy().setConf(conf);
    try {
      createStoreFile(r);
      createStoreFile(r);
      r.compactStores(true);

      // add one more file & verify that a regular compaction won't work
      createStoreFile(r);
      r.compactStores(false);
      assertEquals(2, s.getStorefilesCount());

      // ensure that major compaction time is deterministic
      RatioBasedCompactionPolicy
          c = (RatioBasedCompactionPolicy)s.storeEngine.getCompactionPolicy();
      Collection<StoreFile> storeFiles = s.getStorefiles();
      long mcTime = c.getNextMajorCompactTime(storeFiles);
      for (int i = 0; i < 10; ++i) {
        assertEquals(mcTime, c.getNextMajorCompactTime(storeFiles));
      }

      // ensure that the major compaction time is within the variance
      long jitter = Math.round(delay * jitterPct);
      assertTrue(delay - jitter <= mcTime && mcTime <= delay + jitter);

      // wait until the time-based compaction interval
      Thread.sleep(mcTime);

      // trigger a compaction request and ensure that it's upgraded to major
      r.compactStores(false);
      assertEquals(1, s.getStorefilesCount());
    } finally {
      // reset the timed compaction settings
      conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24);
      conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
      // run a major to reset the cache
      createStoreFile(r);
      r.compactStores(true);
      assertEquals(1, s.getStorefilesCount());
    }
  }

  @Test
  public void testMinorCompactionWithDeleteRow() throws Exception {
    Delete deleteRow = new Delete(secondRowBytes);
    testMinorCompactionWithDelete(deleteRow);
  }

  @Test
  public void testMinorCompactionWithDeleteColumn1() throws Exception {
    Delete dc = new Delete(secondRowBytes);
    /* delete all timestamps in the column */
    dc.deleteColumns(fam2, col2);
    testMinorCompactionWithDelete(dc);
  }

  @Test
  public void testMinorCompactionWithDeleteColumn2() throws Exception {
    Delete dc = new Delete(secondRowBytes);
    dc.deleteColumn(fam2, col2);
    /* compactionThreshold is 3. The table has 4 versions: 0, 1, 2, and 3.
     * We only delete the latest version. One might expect to see only
     * versions 1 and 2. HBase differs, and gives us 0, 1 and 2.
     * This is okay as well. Since there was no compaction done before the
     * delete, version 0 seems to stay on.
     */
    //testMinorCompactionWithDelete(dc, 2);
    testMinorCompactionWithDelete(dc, 3);
  }

  @Test
  public void testMinorCompactionWithDeleteColumnFamily() throws Exception {
    Delete deleteCF = new Delete(secondRowBytes);
    deleteCF.deleteFamily(fam2);
    testMinorCompactionWithDelete(deleteCF);
  }

  @Test
  public void testMinorCompactionWithDeleteVersion1() throws Exception {
    Delete deleteVersion = new Delete(secondRowBytes);
    deleteVersion.deleteColumns(fam2, col2, 2);
    /* compactionThreshold is 3. The table has 4 versions: 0, 1, 2, and 3.
     * We delete versions 0 ... 2. So, we still have one remaining.
     */
    testMinorCompactionWithDelete(deleteVersion, 1);
  }

  @Test
  public void testMinorCompactionWithDeleteVersion2() throws Exception {
    Delete deleteVersion = new Delete(secondRowBytes);
    deleteVersion.deleteColumn(fam2, col2, 1);
    /*
     * the table has 4 versions: 0, 1, 2, and 3.
     * We delete 1.
     * Should have 3 remaining.
     */
    testMinorCompactionWithDelete(deleteVersion, 3);
  }

  /*
   * A helper function to test the minor compaction algorithm. We check that
   * the delete markers are left behind. Takes a delete as an argument, which
   * can be any delete (row, column, column family, etc.) that essentially
   * deletes row2 and column2. row1 and column1 should remain undeleted.
   */
  private void testMinorCompactionWithDelete(Delete delete) throws Exception {
    testMinorCompactionWithDelete(delete, 0);
  }

  private void testMinorCompactionWithDelete(Delete delete, int expectedResultsAfterDelete)
      throws Exception {
    HRegionIncommon loader = new HRegionIncommon(r);
    for (int i = 0; i < compactionThreshold + 1; i++) {
      HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col1),
          firstRowBytes, thirdRowBytes, i);
      HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col2),
          firstRowBytes, thirdRowBytes, i);
      HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col1),
          firstRowBytes, thirdRowBytes, i);
      HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col2),
          firstRowBytes, thirdRowBytes, i);
      r.flushcache();
    }

    Result result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
    assertEquals(compactionThreshold, result.size());
    result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
    assertEquals(compactionThreshold, result.size());

    // Now add deletes to memstore and then flush it.  That will put us over
    // the compaction threshold of 3 store files.  Compacting these store files
    // should result in a compacted store file that has no references to the
    // deleted row.
    r.delete(delete);

    // Make sure that we have only deleted family2 from secondRowBytes
    result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
    assertEquals(expectedResultsAfterDelete, result.size());
    // but we still have firstrow
    result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
    assertEquals(compactionThreshold, result.size());

    r.flushcache();
    // should not change anything.
    // Let us check again

    // Make sure that we have only deleted family2 from secondRowBytes
    result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
    assertEquals(expectedResultsAfterDelete, result.size());
    // but we still have firstrow
    result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
    assertEquals(compactionThreshold, result.size());

    // do a compaction
    Store store2 = this.r.stores.get(fam2);
    int numFiles1 = store2.getStorefiles().size();
    assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
    ((HStore)store2).compactRecentForTestingAssumingDefaultPolicy(compactionThreshold); // = 3
    int numFiles2 = store2.getStorefiles().size();
    // Check that we did compact
    assertTrue("Number of store files should go down", numFiles1 > numFiles2);
    // Check that it was a minor compaction.
    assertTrue("Was not supposed to be a major compaction", numFiles2 > 1);

    // Make sure that we have only deleted family2 from secondRowBytes
    result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
    assertEquals(expectedResultsAfterDelete, result.size());
    // but we still have firstrow
    result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
    assertEquals(compactionThreshold, result.size());
  }

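  /**
   * Scan all store files of the test column family and assert how many cells belong
   * to row1 (STARTROW) and row2 (secondRowBytes).
   */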
  private void verifyCounts(int countRow1, int countRow2) throws Exception {
    int count1 = 0;
    int count2 = 0;
    for (StoreFile f: this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles()) {
      HFileScanner scanner = f.getReader().getScanner(false, false);
      scanner.seekTo();
      do {
        byte [] row = scanner.getKeyValue().getRow();
        if (Bytes.equals(row, STARTROW)) {
          count1++;
        } else if (Bytes.equals(row, secondRowBytes)) {
          count2++;
        }
      } while (scanner.next());
    }
    assertEquals(countRow1, count1);
    assertEquals(countRow2, count2);
  }

  /**
   * Verify that you can stop a long-running compaction
   * (used during RS shutdown)
   * @throws Exception
   */
  @Test
  public void testInterruptCompaction() throws Exception {
    assertEquals(0, count());

    // lower the polling interval for this test
    int origWI = HStore.closeCheckInterval;
    HStore.closeCheckInterval = 10 * 1000; // 10 KB
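    // closeCheckInterval is denominated in bytes of compactor output, not milliseconds:
    // the compactor re-checks writestate after every interval's worth of bytes written,
    // so the small files below are still enough to trigger the check.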

    try {
      // Create a couple store files w/ 15KB (over 10KB interval)
      int jmax = (int) Math.ceil(15.0/compactionThreshold);
      byte [] pad = new byte[1000]; // 1 KB chunk
      for (int i = 0; i < compactionThreshold; i++) {
        HRegionIncommon loader = new HRegionIncommon(r);
        Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
        p.setDurability(Durability.SKIP_WAL);
        for (int j = 0; j < jmax; j++) {
          p.add(COLUMN_FAMILY, Bytes.toBytes(j), pad);
        }
        HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
        loader.put(p);
        loader.flushcache();
      }

      HRegion spyR = spy(r);
      doAnswer(new Answer<Object>() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
          r.writestate.writesEnabled = false;
          return invocation.callRealMethod();
        }
      }).when(spyR).doRegionCompactionPrep();

      // force a minor compaction, but not before requesting a stop
      spyR.compactStores();

      // ensure that the compaction stopped, all old files are intact,
      Store s = r.stores.get(COLUMN_FAMILY);
      assertEquals(compactionThreshold, s.getStorefilesCount());
      assertTrue(s.getStorefilesSize() > 15*1000);
      // and no new store files persisted past compactStores()
      FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir());
      assertEquals(0, ls.length);

    } finally {
      // don't mess up future tests
      r.writestate.writesEnabled = true;
      HStore.closeCheckInterval = origWI;

      // Delete all Store information once done using
      for (int i = 0; i < compactionThreshold; i++) {
        Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i)));
        delete.deleteFamily(COLUMN_FAMILY);
        r.delete(delete);
      }
      r.flushcache();

      // Multiple versions allowed for an entry, so the delete isn't enough
      // Lower TTL and expire to ensure that all our entries have been wiped
      final int ttl = 1000;
      for (Store hstore: this.r.stores.values()) {
        HStore store = (HStore)hstore;
        ScanInfo old = store.getScanInfo();
        ScanInfo si = new ScanInfo(old.getFamily(),
            old.getMinVersions(), old.getMaxVersions(), ttl,
            old.getKeepDeletedCells(), 0, old.getComparator());
        store.setScanInfo(si);
      }
      Thread.sleep(ttl);

      r.compactStores(true);
      assertEquals(0, count());
    }
  }

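  /** @return the total number of cells across all store files of the test column family. */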
  private int count() throws IOException {
    int count = 0;
    for (StoreFile f: this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles()) {
      HFileScanner scanner = f.getReader().getScanner(false, false);
      if (!scanner.seekTo()) {
        continue;
      }
      do {
        count++;
      } while (scanner.next());
    }
    return count;
  }

  private void createStoreFile(final HRegion region) throws IOException {
    createStoreFile(region, Bytes.toString(COLUMN_FAMILY));
  }

  private void createStoreFile(final HRegion region, String family) throws IOException {
    HRegionIncommon loader = new HRegionIncommon(region);
    HBaseTestCase.addContent(loader, family);
    loader.flushcache();
  }

  private void createSmallerStoreFile(final HRegion region) throws IOException {
    HRegionIncommon loader = new HRegionIncommon(region);
    HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY), Bytes.toBytes("bbb"), null);
    loader.flushcache();
  }

  @Test
  public void testCompactionWithCorruptResult() throws Exception {
    int nfiles = 10;
    for (int i = 0; i < nfiles; i++) {
      createStoreFile(r);
    }
    HStore store = (HStore) r.getStore(COLUMN_FAMILY);

    Collection<StoreFile> storeFiles = store.getStorefiles();
    DefaultCompactor tool = (DefaultCompactor)store.storeEngine.getCompactor();

    List<Path> newFiles = tool.compactForTesting(storeFiles, false);

    // Now let's corrupt the compacted file.
    FileSystem fs = store.getFileSystem();
    // default compaction policy created one and only one new compacted file
    Path dstPath = store.getRegionFileSystem().createTempName();
    FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short)3, (long)1024, null);
    stream.writeChars("CORRUPT FILE!!!!");
    stream.close();
    Path origPath = store.getRegionFileSystem().commitStoreFile(
      Bytes.toString(COLUMN_FAMILY), dstPath);

    try {
      store.moveFileIntoPlace(origPath);
    } catch (Exception e) {
      // The complete compaction should fail and the corrupt file should remain
      // in the 'tmp' directory.
      assertTrue(fs.exists(origPath));
      assertTrue(!fs.exists(dstPath));
      System.out.println("testCompactionWithCorruptResult Passed");
      return;
    }
    fail("testCompactionWithCorruptResult failed since no exception was " +
        "thrown while completing a corrupt file");
  }

  /**
   * Test for HBASE-5920 - Test that a system-requested major compaction is downgraded
   * when there are too many store files.
   */
  @Test
  public void testNonUserMajorCompactionRequest() throws Exception {
    Store store = r.getStore(COLUMN_FAMILY);
    createStoreFile(r);
    for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
      createStoreFile(r);
    }
    store.triggerMajorCompaction();

    CompactionRequest request = store.requestCompaction(Store.NO_PRIORITY, null).getRequest();
    assertNotNull("Expected to receive a compaction request", request);
    assertEquals(
      "System-requested major compaction should not occur if there are too many store files",
      false,
      request.isMajor());
  }

  /**
   * Test for HBASE-5920
   */
  @Test
  public void testUserMajorCompactionRequest() throws IOException {
    Store store = r.getStore(COLUMN_FAMILY);
    createStoreFile(r);
    for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
      createStoreFile(r);
    }
    store.triggerMajorCompaction();
    CompactionRequest request = store.requestCompaction(Store.PRIORITY_USER, null).getRequest();
    assertNotNull("Expected to receive a compaction request", request);
    assertEquals(
      "User-requested major compaction should always occur, even if there are too many store files",
      true,
      request.isMajor());
  }

  /**
   * Create a custom compaction request and be sure that we can track it through the queue, knowing
   * when the compaction is completed.
   */
  @Test
  public void testTrackingCompactionRequest() throws Exception {
    // setup a compact/split thread on a mock server
    HRegionServer mockServer = Mockito.mock(HRegionServer.class);
    Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf());
    CompactSplitThread thread = new CompactSplitThread(mockServer);
    Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);

    // setup a region/store with some files
    Store store = r.getStore(COLUMN_FAMILY);
    createStoreFile(r);
    for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
      createStoreFile(r);
    }

    CountDownLatch latch = new CountDownLatch(1);
    TrackableCompactionRequest request = new TrackableCompactionRequest(latch);
    thread.requestCompaction(r, store, "test custom compaction", Store.PRIORITY_USER, request);
    // wait for the latch to complete.
    latch.await();

    thread.interruptIfNecessary();
  }

  /**
   * HBASE-7947: Regression test to ensure adding to the correct list in the
   * {@link CompactSplitThread}
   * @throws Exception on failure
   */
  @Test
  public void testMultipleCustomCompactionRequests() throws Exception {
    // setup a compact/split thread on a mock server
    HRegionServer mockServer = Mockito.mock(HRegionServer.class);
    Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf());
    CompactSplitThread thread = new CompactSplitThread(mockServer);
    Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);

    // setup a region/store with some files
    int numStores = r.getStores().size();
    List<Pair<CompactionRequest, Store>> requests =
        new ArrayList<Pair<CompactionRequest, Store>>(numStores);
    CountDownLatch latch = new CountDownLatch(numStores);
    // create some store files and setup requests for each store on which we want to do a
    // compaction
    for (Store store : r.getStores().values()) {
      createStoreFile(r, store.getColumnFamilyName());
      createStoreFile(r, store.getColumnFamilyName());
      createStoreFile(r, store.getColumnFamilyName());
      requests
          .add(new Pair<CompactionRequest, Store>(new TrackableCompactionRequest(latch), store));
    }

    thread.requestCompaction(r, "test multiple custom compactions", Store.PRIORITY_USER,
      Collections.unmodifiableList(requests));

    // wait for the latch to complete.
    latch.await();

    thread.interruptIfNecessary();
  }

  /**
   * Test that on a major compaction, if all cells are expired or deleted, then we'll end up with no
   * product. Make sure a scanner over the region returns the right answer in this case - and that
   * it just basically works.
   * @throws IOException
   */
  @Test
  public void testMajorCompactingToNoOutputWithReverseScan() throws IOException {
    createStoreFile(r);
    for (int i = 0; i < compactionThreshold; i++) {
      createStoreFile(r);
    }
    // Now delete everything.
    Scan scan = new Scan();
    scan.setReversed(true);
    InternalScanner s = r.getScanner(scan);
    do {
      List<Cell> results = new ArrayList<Cell>();
      boolean result = s.next(results);
      assertTrue(!results.isEmpty());
      r.delete(new Delete(CellUtil.cloneRow(results.get(0))));
      if (!result) break;
    } while (true);
    s.close();
    // Flush
    r.flushcache();
    // Major compact.
    r.compactStores(true);
    scan = new Scan();
    scan.setReversed(true);
    s = r.getScanner(scan);
    int counter = 0;
    do {
      List<Cell> results = new ArrayList<Cell>();
      boolean result = s.next(results);
      if (!result) break;
      counter++;
    } while (true);
    s.close();
    assertEquals(0, counter);
  }

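  /**
   * Mock store whose compaction contexts shuffle files between the compacting and
   * notCompacting lists and record the size of each "compaction" in a shared results list.
   */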
  private class StoreMockMaker extends StatefulStoreMockMaker {
    public ArrayList<StoreFile> compacting = new ArrayList<StoreFile>();
    public ArrayList<StoreFile> notCompacting = new ArrayList<StoreFile>();
    private ArrayList<Integer> results;

    public StoreMockMaker(ArrayList<Integer> results) {
      this.results = results;
    }

    public class TestCompactionContext extends CompactionContext {
      private List<StoreFile> selectedFiles;

      public TestCompactionContext(List<StoreFile> selectedFiles) {
        super();
        this.selectedFiles = selectedFiles;
      }

      @Override
      public List<StoreFile> preSelect(List<StoreFile> filesCompacting) {
        return new ArrayList<StoreFile>();
      }

      @Override
      public boolean select(List<StoreFile> filesCompacting, boolean isUserCompaction,
          boolean mayUseOffPeak, boolean forceMajor) throws IOException {
        this.request = new CompactionRequest(selectedFiles);
        this.request.setPriority(getPriority());
        return true;
      }

      @Override
      public List<Path> compact() throws IOException {
        finishCompaction(this.selectedFiles);
        return new ArrayList<Path>();
      }
    }

    @Override
    public synchronized CompactionContext selectCompaction() {
      CompactionContext ctx = new TestCompactionContext(new ArrayList<StoreFile>(notCompacting));
      compacting.addAll(notCompacting);
      notCompacting.clear();
      try {
        ctx.select(null, false, false, false);
      } catch (IOException ex) {
        fail("Shouldn't happen");
      }
      return ctx;
    }

    @Override
    public synchronized void cancelCompaction(Object object) {
      TestCompactionContext ctx = (TestCompactionContext)object;
      compacting.removeAll(ctx.selectedFiles);
      notCompacting.addAll(ctx.selectedFiles);
    }

    public synchronized void finishCompaction(List<StoreFile> sfs) {
      if (sfs.isEmpty()) return;
      synchronized (results) {
        results.add(sfs.size());
      }
      compacting.removeAll(sfs);
    }

    @Override
    public int getPriority() {
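      // Presumably mirrors Store.getCompactPriority(): blocking file count (7 by default
      // via hbase.hstore.blockingStoreFiles) minus the store's current file count.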
      return 7 - compacting.size() - notCompacting.size();
    }
  }

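  /**
   * Mock store whose compact() call blocks until unblock() is invoked, letting a test
   * park the compaction thread while it rearranges the queue.
   */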
  public class BlockingStoreMockMaker extends StatefulStoreMockMaker {
    BlockingCompactionContext blocked = null;

    public class BlockingCompactionContext extends CompactionContext {
      public volatile boolean isInCompact = false;

      public void unblock() {
        synchronized (this) { this.notifyAll(); }
      }

      @Override
      public List<Path> compact() throws IOException {
        try {
          isInCompact = true;
          synchronized (this) { this.wait(); }
        } catch (InterruptedException e) {
          Assume.assumeNoException(e);
        }
        return new ArrayList<Path>();
      }

      @Override
      public List<StoreFile> preSelect(List<StoreFile> filesCompacting) {
        return new ArrayList<StoreFile>();
      }

      @Override
      public boolean select(List<StoreFile> f, boolean i, boolean m, boolean e)
          throws IOException {
        this.request = new CompactionRequest(new ArrayList<StoreFile>());
        return true;
      }
    }

    @Override
    public CompactionContext selectCompaction() {
      this.blocked = new BlockingCompactionContext();
      try {
        this.blocked.select(null, false, false, false);
      } catch (IOException ex) {
        fail("Shouldn't happen");
      }
      return this.blocked;
    }

    @Override
    public void cancelCompaction(Object object) {}

    @Override
    public int getPriority() {
      return Integer.MIN_VALUE; // some invalid value, see createStoreMock
    }

    public BlockingCompactionContext waitForBlocking() {
      while (this.blocked == null || !this.blocked.isInCompact) {
        Threads.sleepWithoutInterrupt(50);
      }
      BlockingCompactionContext ctx = this.blocked;
      this.blocked = null;
      return ctx;
    }

    @Override
    public Store createStoreMock(String name) throws Exception {
      return createStoreMock(Integer.MIN_VALUE, name);
    }

    public Store createStoreMock(int priority, String name) throws Exception {
      // Override the mock to always return the specified priority.
      Store s = super.createStoreMock(name);
      when(s.getCompactPriority()).thenReturn(priority);
      return s;
    }
  }

  /** Test compaction priority management and multiple compactions per store (HBASE-8665). */
  @Test
  public void testCompactionQueuePriorities() throws Exception {
    // Setup a compact/split thread on a mock server.
    final Configuration conf = HBaseConfiguration.create();
    HRegionServer mockServer = mock(HRegionServer.class);
    when(mockServer.isStopped()).thenReturn(false);
    when(mockServer.getConfiguration()).thenReturn(conf);
    CompactSplitThread cst = new CompactSplitThread(mockServer);
    when(mockServer.getCompactSplitThread()).thenReturn(cst);

    // Set up the region mock that redirects compactions.
    HRegion r = mock(HRegion.class);
    when(r.compact(any(CompactionContext.class), any(Store.class))).then(new Answer<Boolean>() {
      @Override
      public Boolean answer(InvocationOnMock invocation) throws Throwable {
        ((CompactionContext)invocation.getArguments()[0]).compact();
        return true;
      }
    });

    // Set up store mocks for 2 "real" stores and the one we use for blocking CST.
    ArrayList<Integer> results = new ArrayList<Integer>();
    StoreMockMaker sm = new StoreMockMaker(results), sm2 = new StoreMockMaker(results);
    Store store = sm.createStoreMock("store1"), store2 = sm2.createStoreMock("store2");
    BlockingStoreMockMaker blocker = new BlockingStoreMockMaker();

    // First, block the compaction thread so that we could muck with queue.
    cst.requestSystemCompaction(r, blocker.createStoreMock(1, "b-pri1"), "b-pri1");
    BlockingStoreMockMaker.BlockingCompactionContext currentBlock = blocker.waitForBlocking();

    // Add 4 files to store1, 3 to store2, and queue compactions; pri 3 and 4 respectively.
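    // (StoreMockMaker.getPriority() returns 7 minus the file count, so 4 files yield
    // priority 3 and 3 files yield priority 4.)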
    for (int i = 0; i < 4; ++i) {
      sm.notCompacting.add(createFile());
    }
    cst.requestSystemCompaction(r, store, "s1-pri3");
    for (int i = 0; i < 3; ++i) {
      sm2.notCompacting.add(createFile());
    }
    cst.requestSystemCompaction(r, store2, "s2-pri4");
    // Now add 2 more files to store1 and queue compaction - pri 1.
    for (int i = 0; i < 2; ++i) {
      sm.notCompacting.add(createFile());
    }
    cst.requestSystemCompaction(r, store, "s1-pri1");
    // Finally add blocking compaction with priority 2.
    cst.requestSystemCompaction(r, blocker.createStoreMock(2, "b-pri2"), "b-pri2");

    // Unblock the blocking compaction; we should run pri1 and become blocked again on pri2.
    currentBlock.unblock();
    currentBlock = blocker.waitForBlocking();
    // Pri1 should have "compacted" all 6 files.
    assertEquals(1, results.size());
    assertEquals(6, results.get(0).intValue());
    // Add 2 files to store 1 (it has 2 files now).
    for (int i = 0; i < 2; ++i) {
      sm.notCompacting.add(createFile());
    }
    // Now we have pri4 for store 2 in queue, and pri3 for store1; store1's current priority
    // is 5, however, so it must not preempt store 2. Add blocking compaction at the end.
    cst.requestSystemCompaction(r, blocker.createStoreMock(7, "b-pri7"), "b-pri7");
    currentBlock.unblock();
    currentBlock = blocker.waitForBlocking();
    assertEquals(3, results.size());
    assertEquals(3, results.get(1).intValue()); // 3 files should go before 2 files.
    assertEquals(2, results.get(2).intValue());

    currentBlock.unblock();
    cst.interruptIfNecessary();
  }
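  /** @return a mocked StoreFile whose reader reports a fixed 10-byte length. */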
  private static StoreFile createFile() throws Exception {
    StoreFile sf = mock(StoreFile.class);
    when(sf.getPath()).thenReturn(new Path("file"));
    StoreFile.Reader r = mock(StoreFile.Reader.class);
    when(r.length()).thenReturn(10L);
    when(sf.getReader()).thenReturn(r);
    return sf;
  }

  /**
   * Simple {@link CompactionRequest} on which you can wait until the requested compaction finishes.
   */
  public static class TrackableCompactionRequest extends CompactionRequest {
    private CountDownLatch done;

    /**
     * Constructor for a custom compaction. Use the setXXX methods to update the state of the
     * compaction before it is used.
     */
    public TrackableCompactionRequest(CountDownLatch finished) {
      super();
      this.done = finished;
    }

    @Override
    public void afterExecute() {
      super.afterExecute();
      this.done.countDown();
    }
  }
}