/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.CountDownLatch;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;


/**
 * Test compactions
 */
@Category(SmallTests.class)
public class TestCompaction extends HBaseTestCase {
  static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  private HRegion r = null;
  private HTableDescriptor htd = null;
  private Path compactionDir = null;
  private Path regionCompactionDir = null;
  private static final byte [] COLUMN_FAMILY = fam1;
  private final byte [] STARTROW = Bytes.toBytes(START_KEY);
  private static final byte [] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
  private int compactionThreshold;
  private byte[] firstRowBytes, secondRowBytes, thirdRowBytes;
  private final byte[] col1, col2;
  private static final long MAX_FILES_TO_COMPACT = 10;

  /** constructor */
  public TestCompaction() throws Exception {
    super();

    // Set cache flush size to 1MB
    conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 100);
    compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);

    firstRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
    secondRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
    // Increment the least significant character so we get to the next row.
    secondRowBytes[START_KEY_BYTES.length - 1]++;
    thirdRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
    thirdRowBytes[START_KEY_BYTES.length - 1]++;
    thirdRowBytes[START_KEY_BYTES.length - 1]++;
    col1 = "column1".getBytes(HConstants.UTF8_ENCODING);
    col2 = "column2".getBytes(HConstants.UTF8_ENCODING);
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
    this.htd = createTableDescriptor(getName());
    this.r = createNewHRegion(htd, null, null);
  }

  @Override
  public void tearDown() throws Exception {
    HLog hlog = r.getLog();
    this.r.close();
    hlog.closeAndDelete();
    super.tearDown();
  }

  /**
   * Test that on a major compaction, if all cells are expired or deleted, then
   * we'll end up with no product.  Make sure the scanner over the region returns
   * the right answer in this case - and that it just basically works.
   * @throws IOException
   */
  public void testMajorCompactingToNoOutput() throws IOException {
    createStoreFile(r);
    for (int i = 0; i < compactionThreshold; i++) {
      createStoreFile(r);
    }
    // Now delete everything.
    InternalScanner s = r.getScanner(new Scan());
    do {
      List<KeyValue> results = new ArrayList<KeyValue>();
      boolean result = s.next(results);
      r.delete(new Delete(results.get(0).getRow()), null, false);
      if (!result) break;
    } while (true);
    s.close();
    // Flush
    r.flushcache();
    // Major compact.
    r.compactStores(true);
    s = r.getScanner(new Scan());
    int counter = 0;
    do {
      List<KeyValue> results = new ArrayList<KeyValue>();
      boolean result = s.next(results);
      if (!result) break;
      counter++;
    } while (true);
    assertEquals(0, counter);
  }

  /**
   * Run compaction while flushing the memstore.
   * Assert that deletes get cleaned up.
   * @throws Exception
   */
  public void testMajorCompaction() throws Exception {
    majorCompaction();
  }

  public void testDataBlockEncodingInCacheOnly() throws Exception {
    majorCompactionWithDataBlockEncoding(true);
  }

  public void testDataBlockEncodingEverywhere() throws Exception {
    majorCompactionWithDataBlockEncoding(false);
  }

  public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly)
      throws Exception {
    Map<Store, HFileDataBlockEncoder> replaceBlockCache =
        new HashMap<Store, HFileDataBlockEncoder>();
    for (Entry<byte[], Store> pair : r.getStores().entrySet()) {
      Store store = pair.getValue();
      HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
      replaceBlockCache.put(pair.getValue(), blockEncoder);
      final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
      final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
          inCache;
      store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(
          onDisk, inCache));
    }

    majorCompaction();

    // restore settings
    for (Entry<Store, HFileDataBlockEncoder> entry :
        replaceBlockCache.entrySet()) {
      entry.getKey().setDataBlockEncoderInTest(entry.getValue());
    }
  }

  private void majorCompaction() throws Exception {
    createStoreFile(r);
    for (int i = 0; i < compactionThreshold; i++) {
      createStoreFile(r);
    }
    // Add more content.
    addContent(new HRegionIncommon(r), Bytes.toString(COLUMN_FAMILY));

    // Now there are about 5 versions of each column.
    // By default only 3 (MAXVERSIONS) versions are allowed per column.
    //
    // Assert == 3 when we ask for versions.
    Result result = r.get(new Get(STARTROW).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null);
    assertEquals(compactionThreshold, result.size());

    // Check that CompactionProgress is in place but still null (no compaction yet).
    for (Store store : this.r.stores.values()) {
      assertNull(store.getCompactionProgress());
    }

    r.flushcache();
    r.compactStores(true);

    // See if CompactionProgress has done its thing on at least one store.
    int storeCount = 0;
    for (Store store : this.r.stores.values()) {
      CompactionProgress progress = store.getCompactionProgress();
      if (progress != null) {
        ++storeCount;
        assertTrue(progress.currentCompactedKVs > 0);
        assertTrue(progress.totalCompactingKVs > 0);
      }
    }
    assertTrue(storeCount > 0);

    // Look at the second row.
    // Increment the least significant character so we get to the next row.
    byte [] secondRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
    secondRowBytes[START_KEY_BYTES.length - 1]++;

    // Always 3 versions if that is what max versions is.
    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).
        setMaxVersions(100), null);
    LOG.debug("Row " + Bytes.toStringBinary(secondRowBytes) + " after " +
        "initial compaction: " + result);
    assertEquals("Invalid number of versions of row "
        + Bytes.toStringBinary(secondRowBytes) + ".", compactionThreshold,
        result.size());

    // Now add deletes to memstore and then flush it.  That will put us over
    // the compaction threshold of 3 store files.  Compacting these store files
    // should result in a compacted store file that has no references to the
    // deleted row.
    LOG.debug("Adding deletes to memstore and flushing");
    Delete delete = new Delete(secondRowBytes, System.currentTimeMillis(), null);
    byte [][] famAndQf = {COLUMN_FAMILY, null};
    delete.deleteFamily(famAndQf[0]);
    r.delete(delete, null, true);

    // Assert deleted.
    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null);
    assertTrue("Second row should have been deleted", result.isEmpty());

    r.flushcache();

    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null);
    assertTrue("Second row should have been deleted", result.isEmpty());

    // Add a bit of data and flush.  Start adding at 'bbb'.
    createSmallerStoreFile(this.r);
    r.flushcache();
    // Assert that the second row is still deleted.
    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null);
    assertTrue("Second row should still be deleted", result.isEmpty());

    // Force major compaction.
    r.compactStores(true);
    assertEquals(1, r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size());

    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null);
    assertTrue("Second row should still be deleted", result.isEmpty());

    // Make sure the store files do have some 'aaa' keys in them -- exactly 3.
    // Also, that compacted store files do not have any secondRowBytes because
    // they were deleted.
    verifyCounts(3, 0);

    // Multiple versions are allowed for an entry, so the delete isn't enough.
    // Lower the TTL and let it expire to ensure that all our entries have been wiped.
    final int ttl = 1000;
    for (Store store : this.r.stores.values()) {
      Store.ScanInfo old = store.scanInfo;
      Store.ScanInfo si = new Store.ScanInfo(old.getFamily(),
          old.getMinVersions(), old.getMaxVersions(), ttl,
          old.getKeepDeletedCells(), 0, old.getComparator());
      store.scanInfo = si;
    }
    Thread.sleep(1000);

    r.compactStores(true);
    int count = count();
    assertEquals("Should not see anything after TTL has expired", 0, count);
  }

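  /**
   * Test time-based major compaction: after a major compaction resets the clock,
   * a compaction requested once the (shortened) major compaction period has elapsed
   * should be upgraded to a major compaction.
   */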
  public void testTimeBasedMajorCompaction() throws Exception {
    // create 2 storefiles and force a major compaction to reset the time
    int delay = 10 * 1000; // 10 sec
    float jitterPct = 0.20f; // 20%
    conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, delay);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", jitterPct);

    Store s = r.getStore(COLUMN_FAMILY);
    try {
      createStoreFile(r);
      createStoreFile(r);
      r.compactStores(true);

      // add one more file & verify that a regular compaction won't work
      createStoreFile(r);
      r.compactStores(false);
      assertEquals(2, s.getStorefilesCount());

      // ensure that major compaction time is deterministic
      long mcTime = s.getNextMajorCompactTime();
      for (int i = 0; i < 10; ++i) {
        assertEquals(mcTime, s.getNextMajorCompactTime());
      }

      // ensure that the major compaction time is within the variance
      long jitter = Math.round(delay * jitterPct);
      assertTrue(delay - jitter <= mcTime && mcTime <= delay + jitter);

      // wait until the time-based compaction interval
      Thread.sleep(mcTime);

      // trigger a compaction request and ensure that it's upgraded to major
      r.compactStores(false);
      assertEquals(1, s.getStorefilesCount());
    } finally {
      // reset the timed compaction settings
      conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24);
      conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
      // run a major to reset the cache
      createStoreFile(r);
      r.compactStores(true);
      assertEquals(1, s.getStorefilesCount());
    }
  }

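  /*
   * The tests below apply different flavors of Delete (whole row, column, column
   * family, specific versions) to the second row, then use
   * testMinorCompactionWithDelete() to verify that a minor compaction honors the
   * delete while leaving the first row intact.
   */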
  public void testMinorCompactionWithDeleteRow() throws Exception {
    Delete deleteRow = new Delete(secondRowBytes);
    testMinorCompactionWithDelete(deleteRow);
  }
  public void testMinorCompactionWithDeleteColumn1() throws Exception {
    Delete dc = new Delete(secondRowBytes);
    /* delete all timestamps in the column */
    dc.deleteColumns(fam2, col2);
    testMinorCompactionWithDelete(dc);
  }
  public void testMinorCompactionWithDeleteColumn2() throws Exception {
    Delete dc = new Delete(secondRowBytes);
    dc.deleteColumn(fam2, col2);
    /* compactionThreshold is 3. The table has 4 versions: 0, 1, 2, and 3.
     * We only delete the latest version. One might expect to see only
     * versions 1 and 2. HBase differs, and gives us 0, 1 and 2.
     * This is okay as well. Since there was no compaction done before the
     * delete, version 0 seems to stay on.
     */
    //testMinorCompactionWithDelete(dc, 2);
    testMinorCompactionWithDelete(dc, 3);
  }
  public void testMinorCompactionWithDeleteColumnFamily() throws Exception {
    Delete deleteCF = new Delete(secondRowBytes);
    deleteCF.deleteFamily(fam2);
    testMinorCompactionWithDelete(deleteCF);
  }
  public void testMinorCompactionWithDeleteVersion1() throws Exception {
    Delete deleteVersion = new Delete(secondRowBytes);
    deleteVersion.deleteColumns(fam2, col2, 2);
    /* compactionThreshold is 3. The table has 4 versions: 0, 1, 2, and 3.
     * We delete versions 0 ... 2. So, we still have one remaining.
     */
    testMinorCompactionWithDelete(deleteVersion, 1);
  }
  public void testMinorCompactionWithDeleteVersion2() throws Exception {
    Delete deleteVersion = new Delete(secondRowBytes);
    deleteVersion.deleteColumn(fam2, col2, 1);
    /*
     * The table has 4 versions: 0, 1, 2, and 3.
     * We delete 1.
     * Should have 3 remaining.
     */
    testMinorCompactionWithDelete(deleteVersion, 3);
  }

  /*
   * A helper function to test the minor compaction algorithm. We check that
   * the delete markers are left behind. Takes a delete as an argument, which
   * can be any delete (row, column, column family, etc.) that essentially
   * deletes row2 and column2. row1 and column1 should remain undeleted.
   */
  private void testMinorCompactionWithDelete(Delete delete) throws Exception {
    testMinorCompactionWithDelete(delete, 0);
  }
  private void testMinorCompactionWithDelete(Delete delete, int expectedResultsAfterDelete)
      throws Exception {
    HRegionIncommon loader = new HRegionIncommon(r);
    for (int i = 0; i < compactionThreshold + 1; i++) {
      addContent(loader, Bytes.toString(fam1), Bytes.toString(col1), firstRowBytes, thirdRowBytes, i);
      addContent(loader, Bytes.toString(fam1), Bytes.toString(col2), firstRowBytes, thirdRowBytes, i);
      addContent(loader, Bytes.toString(fam2), Bytes.toString(col1), firstRowBytes, thirdRowBytes, i);
      addContent(loader, Bytes.toString(fam2), Bytes.toString(col2), firstRowBytes, thirdRowBytes, i);
      r.flushcache();
    }

    Result result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100), null);
    assertEquals(compactionThreshold, result.size());
    result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100), null);
    assertEquals(compactionThreshold, result.size());

    // Now add deletes to memstore and then flush it.  That will put us over
    // the compaction threshold of 3 store files.  Compacting these store files
    // should result in a compacted store file that has no references to the
    // deleted row.
    r.delete(delete, null, true);

    // Make sure that we have only deleted family2 from secondRowBytes
    result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100), null);
    assertEquals(expectedResultsAfterDelete, result.size());
    // but we still have firstrow
    result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100), null);
    assertEquals(compactionThreshold, result.size());

    r.flushcache();
    // should not change anything.
    // Let us check again

    // Make sure that we have only deleted family2 from secondRowBytes
    result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100), null);
    assertEquals(expectedResultsAfterDelete, result.size());
    // but we still have firstrow
    result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100), null);
    assertEquals(compactionThreshold, result.size());

    // do a compaction
    Store store2 = this.r.stores.get(fam2);
    int numFiles1 = store2.getStorefiles().size();
    assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
    store2.compactRecentForTesting(compactionThreshold);   // = 3
    int numFiles2 = store2.getStorefiles().size();
    // Check that we did compact
    assertTrue("Number of store files should go down", numFiles1 > numFiles2);
    // Check that it was a minor compaction.
    assertTrue("Was not supposed to be a major compaction", numFiles2 > 1);

    // Make sure that we have only deleted family2 from secondRowBytes
    result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100), null);
    assertEquals(expectedResultsAfterDelete, result.size());
    // but we still have firstrow
    result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100), null);
    assertEquals(compactionThreshold, result.size());
  }

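  /**
   * Scan every store file of COLUMN_FAMILY_TEXT and assert on the number of cells
   * found for the first row (STARTROW) and the second row (secondRowBytes).
   */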
  private void verifyCounts(int countRow1, int countRow2) throws Exception {
    int count1 = 0;
    int count2 = 0;
    for (StoreFile f : this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles()) {
      HFileScanner scanner = f.getReader().getScanner(false, false);
      scanner.seekTo();
      do {
        byte [] row = scanner.getKeyValue().getRow();
        if (Bytes.equals(row, STARTROW)) {
          count1++;
        } else if (Bytes.equals(row, secondRowBytes)) {
          count2++;
        }
      } while (scanner.next());
    }
    assertEquals(countRow1, count1);
    assertEquals(countRow2, count2);
  }

  /**
   * Verify that you can stop a long-running compaction
   * (used during RS shutdown)
   * @throws Exception
   */
  public void testInterruptCompaction() throws Exception {
    assertEquals(0, count());

    // lower the check interval (bytes written between close/abort checks) for this test
    int origWI = Store.closeCheckInterval;
    Store.closeCheckInterval = 10*1000; // 10 KB

    try {
      // Create a couple of store files w/ 15KB (over the 10KB interval)
      int jmax = (int) Math.ceil(15.0/compactionThreshold);
      byte [] pad = new byte[1000]; // 1 KB chunk
      for (int i = 0; i < compactionThreshold; i++) {
        HRegionIncommon loader = new HRegionIncommon(r);
        Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
        p.setWriteToWAL(false);
        for (int j = 0; j < jmax; j++) {
          p.add(COLUMN_FAMILY, Bytes.toBytes(j), pad);
        }
        addContent(loader, Bytes.toString(COLUMN_FAMILY));
        loader.put(p);
        loader.flushcache();
      }

      HRegion spyR = spy(r);
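      // When compaction prep runs on the spied region, disable writes first so the
      // in-progress compaction notices the region is no longer writable and aborts.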
      doAnswer(new Answer<Object>() {
        public Object answer(InvocationOnMock invocation) throws Throwable {
          r.writestate.writesEnabled = false;
          return invocation.callRealMethod();
        }
      }).when(spyR).doRegionCompactionPrep();

      // force a minor compaction, but not before requesting a stop
      spyR.compactStores();

      // ensure that the compaction stopped, all old files are intact,
      Store s = r.stores.get(COLUMN_FAMILY);
      assertEquals(compactionThreshold, s.getStorefilesCount());
      assertTrue(s.getStorefilesSize() > 15*1000);
      // and no new store files persisted past compactStores()
      FileStatus[] ls = FileSystem.get(conf).listStatus(r.getTmpDir());
      assertEquals(0, ls.length);

    } finally {
      // don't mess up future tests
      r.writestate.writesEnabled = true;
      Store.closeCheckInterval = origWI;

      // Delete all the rows we loaded, now that we are done with them.
      for (int i = 0; i < compactionThreshold; i++) {
        Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i)));
        byte [][] famAndQf = {COLUMN_FAMILY, null};
        delete.deleteFamily(famAndQf[0]);
        r.delete(delete, null, true);
      }
      r.flushcache();

      // Multiple versions are allowed for an entry, so the delete isn't enough.
      // Lower the TTL and let it expire to ensure that all our entries have been wiped.
      final int ttl = 1000;
      for (Store store : this.r.stores.values()) {
        Store.ScanInfo old = store.scanInfo;
        Store.ScanInfo si = new Store.ScanInfo(old.getFamily(),
            old.getMinVersions(), old.getMaxVersions(), ttl,
            old.getKeepDeletedCells(), 0, old.getComparator());
        store.scanInfo = si;
      }
      Thread.sleep(ttl);

      r.compactStores(true);
      assertEquals(0, count());
    }
  }

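  /** Count the total number of cells across all store files of the COLUMN_FAMILY_TEXT store. */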
  private int count() throws IOException {
    int count = 0;
    for (StoreFile f : this.r.stores.
        get(COLUMN_FAMILY_TEXT).getStorefiles()) {
      HFileScanner scanner = f.getReader().getScanner(false, false);
      if (!scanner.seekTo()) {
        continue;
      }
      do {
        count++;
      } while (scanner.next());
    }
    return count;
  }

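  /** Load some data into the region and flush it, so that a new store file is written. */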
  private void createStoreFile(final HRegion region) throws IOException {
    createStoreFile(region, Bytes.toString(COLUMN_FAMILY));
  }

  private void createStoreFile(final HRegion region, String family) throws IOException {
    HRegionIncommon loader = new HRegionIncommon(region);
    addContent(loader, family);
    loader.flushcache();
  }

  private void createSmallerStoreFile(final HRegion region) throws IOException {
    HRegionIncommon loader = new HRegionIncommon(region);
    addContent(loader, Bytes.toString(COLUMN_FAMILY), "bbb".getBytes(), null);
    loader.flushcache();
  }

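  /**
   * Write a corrupt compaction product over the compacted file and verify that
   * completeCompaction() fails, leaving the corrupt file in the 'tmp' directory
   * rather than moving it into the store directory.
   */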
  public void testCompactionWithCorruptResult() throws Exception {
    int nfiles = 10;
    for (int i = 0; i < nfiles; i++) {
      createStoreFile(r);
    }
    Store store = r.getStore(COLUMN_FAMILY);

    List<StoreFile> storeFiles = store.getStorefiles();
    long maxId = StoreFile.getMaxSequenceIdInList(storeFiles, true);
    Compactor tool = new Compactor(this.conf);

    StoreFile.Writer compactedFile = tool.compactForTesting(store, this.conf, storeFiles, false,
      maxId);

    // Now let's corrupt the compacted file.
    FileSystem fs = FileSystem.get(conf);
    Path origPath = compactedFile.getPath();
    Path homedir = store.getHomedir();
    Path dstPath = new Path(homedir, origPath.getName());
    FSDataOutputStream stream = fs.create(origPath, null, true, 512, (short) 3,
        (long) 1024,
        null);
    stream.writeChars("CORRUPT FILE!!!!");
    stream.close();

    try {
      store.completeCompaction(storeFiles, compactedFile);
    } catch (Exception e) {
      // The complete compaction should fail and the corrupt file should remain
      // in the 'tmp' directory.
      assert (fs.exists(origPath));
      assert (!fs.exists(dstPath));
      System.out.println("testCompactionWithCorruptResult Passed");
      return;
    }
    fail("testCompactionWithCorruptResult failed since no exception was " +
        "thrown while completing a corrupt file");
  }

  /**
   * Test for HBASE-5920 - a system-requested major compaction should be downgraded
   * when there are too many store files to compact at once.
   */
  public void testNonUserMajorCompactionRequest() throws Exception {
    Store store = r.getStore(COLUMN_FAMILY);
    createStoreFile(r);
    for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
      createStoreFile(r);
    }
    store.triggerMajorCompaction();

    CompactionRequest request = store.requestCompaction(Store.NO_PRIORITY, null);
    assertNotNull("Expected to receive a compaction request", request);
    assertEquals(
      "System-requested major compaction should not occur if there are too many store files",
      false,
      request.isMajor());
  }

  /**
   * Test for HBASE-5920 - a user-requested major compaction should always be honored,
   * even if there are too many store files.
   */
  public void testUserMajorCompactionRequest() throws IOException {
    Store store = r.getStore(COLUMN_FAMILY);
    createStoreFile(r);
    for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
      createStoreFile(r);
    }
    store.triggerMajorCompaction();
    CompactionRequest request = store.requestCompaction(Store.PRIORITY_USER, null);
    assertNotNull("Expected to receive a compaction request", request);
    assertEquals(
      "User-requested major compaction should always occur, even if there are too many store files",
      true,
      request.isMajor());
  }

  /**
   * Create a custom compaction request and be sure that we can track it through the queue, knowing
   * when the compaction is completed.
   */
  public void testTrackingCompactionRequest() throws Exception {
    // setup a compact/split thread on a mock server
    HRegionServer mockServer = Mockito.mock(HRegionServer.class);
    Mockito.when(mockServer.getConfiguration()).thenReturn(r.getConf());
    CompactSplitThread thread = new CompactSplitThread(mockServer);
    Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);
    // simple stub for the metrics - we ignore any updates in the test
    RegionServerMetrics mockMetrics = Mockito.mock(RegionServerMetrics.class);
    Mockito.when(mockServer.getMetrics()).thenReturn(mockMetrics);

    // setup a region/store with some files
    Store store = r.getStore(COLUMN_FAMILY);
    createStoreFile(r);
    for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
      createStoreFile(r);
    }

    CountDownLatch latch = new CountDownLatch(1);
    TrackableCompactionRequest request = new TrackableCompactionRequest(r, store, latch);
    thread.requestCompaction(r, store, "test custom compaction", Store.PRIORITY_USER, request);
    // wait for the latch to complete.
    latch.await();

    thread.interruptIfNecessary();
  }

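  /**
   * Submit a custom compaction request for every store in the region at once and verify,
   * via a shared latch, that all of them run to completion.
   */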
  public void testMultipleCustomCompactionRequests() throws Exception {
    // setup a compact/split thread on a mock server
    HRegionServer mockServer = Mockito.mock(HRegionServer.class);
    Mockito.when(mockServer.getConfiguration()).thenReturn(r.getConf());
    CompactSplitThread thread = new CompactSplitThread(mockServer);
    Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);
    // simple stub for the metrics - we ignore any updates in the test
    RegionServerMetrics mockMetrics = Mockito.mock(RegionServerMetrics.class);
    Mockito.when(mockServer.getMetrics()).thenReturn(mockMetrics);

    // setup a region/store with some files
    int numStores = r.getStores().size();
    List<CompactionRequest> requests = new ArrayList<CompactionRequest>(numStores);
    CountDownLatch latch = new CountDownLatch(numStores);
    // create some store files and setup requests for each store on which we want to do a
    // compaction
    for (Store store : r.getStores().values()) {
      createStoreFile(r, store.getColumnFamilyName());
      createStoreFile(r, store.getColumnFamilyName());
      createStoreFile(r, store.getColumnFamilyName());
      requests.add(new TrackableCompactionRequest(r, store, latch));
    }

    thread.requestCompaction(r, "test multiple custom compactions", Store.PRIORITY_USER,
      Collections.unmodifiableList(requests));

    // wait for the latch to complete.
    latch.await();

    thread.interruptIfNecessary();
  }

  /**
   * Simple {@link CompactionRequest} on which you can wait until the requested compaction finishes.
   */
  public static class TrackableCompactionRequest extends CompactionRequest {
    private CountDownLatch done;

    /**
     * Constructor for a custom compaction. Use the setXXX methods to update the state of the
     * compaction before it is used.
     */
    public TrackableCompactionRequest(HRegion region, Store store, CountDownLatch finished) {
      super(region, store, Store.PRIORITY_USER);
      this.done = finished;
    }

    @Override
    public void run() {
      super.run();
      this.done.countDown();
    }
  }

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}