1   /**
2    * Copyright 2007 The Apache Software Foundation
3    *
4    * Licensed to the Apache Software Foundation (ASF) under one
5    * or more contributor license agreements.  See the NOTICE file
6    * distributed with this work for additional information
7    * regarding copyright ownership.  The ASF licenses this file
8    * to you under the Apache License, Version 2.0 (the
9    * "License"); you may not use this file except in compliance
10   * with the License.  You may obtain a copy of the License at
11   *
12   *     http://www.apache.org/licenses/LICENSE-2.0
13   *
14   * Unless required by applicable law or agreed to in writing, software
15   * distributed under the License is distributed on an "AS IS" BASIS,
16   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17   * See the License for the specific language governing permissions and
18   * limitations under the License.
19   */
20  package org.apache.hadoop.hbase.regionserver;
21  
22  import java.io.IOException;
23  import java.nio.ByteBuffer;
24  import java.util.ArrayList;
25  import java.util.Arrays;
26  import java.util.Collections;
27  import java.util.Comparator;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.TreeSet;
31  import java.util.regex.Pattern;
32  
33  import org.apache.commons.logging.Log;
34  import org.apache.commons.logging.LogFactory;
35  import org.apache.hadoop.conf.Configuration;
36  import org.apache.hadoop.fs.FileStatus;
37  import org.apache.hadoop.fs.FileSystem;
38  import org.apache.hadoop.fs.Path;
39  import org.apache.hadoop.hbase.HBaseTestCase;
40  import org.apache.hadoop.hbase.HConstants;
41  import org.apache.hadoop.hbase.HRegionInfo;
42  import org.apache.hadoop.hbase.KeyValue;
43  import org.apache.hadoop.hbase.SmallTests;
44  import org.apache.hadoop.hbase.client.Scan;
45  import org.apache.hadoop.hbase.io.HFileLink;
46  import org.apache.hadoop.hbase.io.HalfStoreFileReader;
47  import org.apache.hadoop.hbase.io.Reference;
48  import org.apache.hadoop.hbase.io.Reference.Range;
49  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
50  import org.apache.hadoop.hbase.io.hfile.BlockCache;
51  import org.apache.hadoop.hbase.io.hfile.CacheConfig;
52  import org.apache.hadoop.hbase.io.hfile.CacheStats;
53  import org.apache.hadoop.hbase.io.hfile.HFile;
54  import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
55  import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
56  import org.apache.hadoop.hbase.io.hfile.HFileScanner;
57  import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
58  import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
59  import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
60  import org.apache.hadoop.hbase.util.BloomFilterFactory;
61  import org.apache.hadoop.hbase.util.Bytes;
62  import org.apache.hadoop.hbase.util.ChecksumType;
63  import org.apache.hadoop.hbase.util.FSUtils;
64  import org.junit.experimental.categories.Category;
65  import org.mockito.Mockito;
66  
67  import com.google.common.base.Joiner;
68  import com.google.common.collect.Iterables;
69  import com.google.common.collect.Lists;
70  
71  /**
72   * Test HStoreFile
73   */
74  @Category(SmallTests.class)
75  public class TestStoreFile extends HBaseTestCase {
  static final Log LOG = LogFactory.getLog(TestStoreFile.class);
  // Block-cache configuration shared by all tests; built from the inherited conf.
  private CacheConfig cacheConf =  new CacheConfig(conf);
  // Per-class root directory (as a String) under the test dir; set in setUp().
  private String ROOT_DIR;
  // Snapshot of schema metrics taken in setUp(); validated in tearDown().
  private Map<String, Long> startingMetrics;

  // Checksum settings used by writers that exercise checksummed output.
  private static final ChecksumType CKTYPE = ChecksumType.CRC32;
  private static final int CKBYTES = 512;
83  
  /**
   * Snapshots the schema metrics and points ROOT_DIR at a per-class
   * directory under the test dir before each test runs.
   */
  @Override
  public void setUp() throws Exception {
    super.setUp();
    startingMetrics = SchemaMetrics.getMetricsSnapshot();
    ROOT_DIR = new Path(this.testDir, "TestStoreFile").toString();
  }
90  
  /**
   * Verifies that schema metrics changed consistently relative to the
   * snapshot taken in setUp().
   */
  @Override
  public void tearDown() throws Exception {
    super.tearDown();
    SchemaMetrics.validateMetricChanges(startingMetrics);
  }
96  
97    /**
98     * Write a file and then assert that we can read from top and bottom halves
99     * using two HalfMapFiles.
100    * @throws Exception
101    */
102   public void testBasicHalfMapFile() throws Exception {
103     // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
104     Path outputDir = new Path(new Path(this.testDir, "7e0102"),
105         "familyname");
106     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
107         this.fs, 2 * 1024)
108             .withOutputDir(outputDir)
109             .build();
110     writeStoreFile(writer);
111     checkHalfHFile(new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
112         StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE));
113   }
114 
115   private void writeStoreFile(final StoreFile.Writer writer) throws IOException {
116     writeStoreFile(writer, Bytes.toBytes(getName()), Bytes.toBytes(getName()));
117   }
118 
  // Pre-computed split point: a row roughly halfway through the two-character
  // keyspace [FIRST_CHAR..LAST_CHAR] generated by writeStoreFile().
  byte[] SPLITKEY = new byte[] { (LAST_CHAR + FIRST_CHAR)/2, FIRST_CHAR};
121 
122   /*
123    * Writes HStoreKey and ImmutableBytes data to passed writer and
124    * then closes it.
125    * @param writer
126    * @throws IOException
127    */
128   public static void writeStoreFile(final StoreFile.Writer writer, byte[] fam, byte[] qualifier)
129   throws IOException {
130     long now = System.currentTimeMillis();
131     try {
132       for (char d = FIRST_CHAR; d <= LAST_CHAR; d++) {
133         for (char e = FIRST_CHAR; e <= LAST_CHAR; e++) {
134           byte[] b = new byte[] { (byte) d, (byte) e };
135           writer.append(new KeyValue(b, fam, qualifier, now, b));
136         }
137       }
138     } finally {
139       writer.close();
140     }
141   }
142 
143   /**
144    * Test that our mechanism of writing store files in one region to reference
145    * store files in other regions works.
146    * @throws IOException
147    */
148   public void testReference()
149   throws IOException {
150     // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
151     Path storedir = new Path(new Path(this.testDir, "7e0102"), "familyname");
152     // Make a store file and write data to it.
153     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
154         this.fs, 8 * 1024)
155             .withOutputDir(storedir)
156             .build();
157     writeStoreFile(writer);
158     StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
159         StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
160     StoreFile.Reader reader = hsf.createReader();
161     // Split on a row, not in middle of row.  Midkey returned by reader
162     // may be in middle of row.  Create new one with empty column and
163     // timestamp.
164     KeyValue kv = KeyValue.createKeyValueFromKey(reader.midkey());
165     byte [] midRow = kv.getRow();
166     kv = KeyValue.createKeyValueFromKey(reader.getLastKey());
167     byte [] finalRow = kv.getRow();
168     // Make a reference
169     Path refPath = StoreFile.split(fs, storedir, hsf, midRow, Range.top);
170     StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
171         StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
172     // Now confirm that I can read from the reference and that it only gets
173     // keys from top half of the file.
174     HFileScanner s = refHsf.createReader().getScanner(false, false);
175     for(boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) {
176       ByteBuffer bb = s.getKey();
177       kv = KeyValue.createKeyValueFromKey(bb);
178       if (first) {
179         assertTrue(Bytes.equals(kv.getRow(), midRow));
180         first = false;
181       }
182     }
183     assertTrue(Bytes.equals(kv.getRow(), finalRow));
184   }
185 
  /**
   * Verifies that a StoreFile opened through an HFileLink reads back every
   * row of the underlying hfile the link points to.
   */
  public void testHFileLink() throws IOException {
    final String columnFamily = "f";

    Configuration testConf = new Configuration(this.conf);
    FSUtils.setRootDir(testConf, this.testDir);

    HRegionInfo hri = new HRegionInfo(Bytes.toBytes("table-link"));
    // Store dir layout: <root>/<tablename>/<region>/<cf>
    Path storedir = new Path(new Path(this.testDir,
      new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);

    // Make a store file and write data to it.
    StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf,
         this.fs, 8 * 1024)
            .withOutputDir(storedir)
            .build();
    Path storeFilePath = writer.getPath();
    writeStoreFile(writer);
    // NOTE(review): writeStoreFile() already closes the writer in a finally
    // block; this second close() looks redundant — confirm Writer.close()
    // is idempotent.
    writer.close();

    // Create a link in a different region dir pointing at the file above.
    Path dstPath = new Path(this.testDir, new Path("test-region", columnFamily));
    HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
    Path linkFilePath = new Path(dstPath,
                  HFileLink.createHFileLinkName(hri, storeFilePath.getName()));

    // Try to open store file from link
    StoreFile hsf = new StoreFile(this.fs, linkFilePath, testConf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    assertTrue(hsf.isLink());

    // Now confirm that I can read from the link.
    // count starts at 1 because seekTo() positions on the first KV.
    int count = 1;
    HFileScanner s = hsf.createReader().getScanner(false, false);
    s.seekTo();
    while (s.next()) {
      count++;
    }
    // Expect every row writeStoreFile() generated (full two-char keyspace).
    assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
  }
224 
225   /**
226    * Validate that we can handle valid tables with '.', '_', and '-' chars.
227    */
228   public void testStoreFileNames() {
229     String[] legalHFileLink = { "MyTable_02=abc012-def345", "MyTable_02.300=abc012-def345",
230       "MyTable_02-400=abc012-def345", "MyTable_02-400.200=abc012-def345",
231       "MyTable_02=abc012-def345_SeqId_1_", "MyTable_02=abc012-def345_SeqId_20_" };
232     for (String name: legalHFileLink) {
233       assertTrue("should be a valid link: " + name, HFileLink.isHFileLink(name));
234       assertTrue("should be a valid StoreFile" + name, StoreFile.validateStoreFileName(name));
235       assertFalse("should not be a valid reference: " + name, StoreFile.isReference(name));
236 
237       String refName = name + ".6789";
238       assertTrue("should be a valid link reference: " + refName, StoreFile.isReference(refName));
239       assertTrue("should be a valid StoreFile" + refName, StoreFile.validateStoreFileName(refName));
240     }
241 
242     String[] illegalHFileLink = { ".MyTable_02=abc012-def345", "-MyTable_02.300=abc012-def345",
243       "MyTable_02-400=abc0_12-def345", "MyTable_02-400.200=abc012-def345...." };
244     for (String name: illegalHFileLink) {
245       assertFalse("should not be a valid link: " + name, HFileLink.isHFileLink(name));
246     }
247   }
248 
249   /**
250    * This test creates an hfile and then the dir structures and files to verify that references
251    * to hfilelinks (created by snapshot clones) can be properly interpreted.
252    */
253   public void testReferenceToHFileLink() throws IOException {
254     final String columnFamily = "f";
255 
256     Path rootDir = FSUtils.getRootDir(conf);
257 
258     String tablename = "_original-evil-name"; // adding legal table name chars to verify regex handles it.
259     HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tablename));
260     // store dir = <root>/<tablename>/<rgn>/<cf>
261     Path storedir = new Path(new Path(rootDir,
262       new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);
263 
264     // Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
265     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
266          this.fs, 8 * 1024)
267             .withOutputDir(storedir)
268             .build();
269     Path storeFilePath = writer.getPath();
270     writeStoreFile(writer);
271     writer.close();
272 
273     // create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
274     String target = "clone";
275     Path dstPath = new Path(rootDir, new Path(new Path(target, "7e0102"), columnFamily));
276     HFileLink.create(conf, this.fs, dstPath, hri, storeFilePath.getName());
277     Path linkFilePath = new Path(dstPath,
278                   HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
279 
280     // create splits of the link.
281     // <root>/clone/splitA/<cf>/<reftohfilelink>,
282     // <root>/clone/splitB/<cf>/<reftohfilelink>
283     Path splitDirA = new Path(new Path(rootDir,
284         new Path(target, "571A")), columnFamily);
285     Path splitDirB = new Path(new Path(rootDir,
286         new Path(target, "571B")), columnFamily);
287     StoreFile f = new StoreFile(fs, linkFilePath, conf, cacheConf, BloomType.NONE,
288         NoOpDataBlockEncoder.INSTANCE);
289     byte[] splitRow = SPLITKEY;
290     Path pathA = StoreFile.split(fs, splitDirA, f, splitRow, Range.top); // top
291     Path pathB = StoreFile.split(fs, splitDirB, f, splitRow, Range.bottom); // bottom
292 
293     // OK test the thing
294     FSUtils.logFileSystemState(fs, rootDir, LOG);
295 
296     // There is a case where a file with the hfilelink pattern is actually a daughter
297     // reference to a hfile link.  This code in StoreFile that handles this case.
298 
299     // Try to open store file from link
300     StoreFile hsfA = new StoreFile(this.fs, pathA,  conf, cacheConf,
301         StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
302 
303     // Now confirm that I can read from the ref to link
304     int count = 1;
305     HFileScanner s = hsfA.createReader().getScanner(false, false);
306     s.seekTo();
307     while (s.next()) {
308       count++;
309     }
310     assertTrue(count > 0); // read some rows here
311 
312     // Try to open store file from link
313     StoreFile hsfB = new StoreFile(this.fs, pathB,  conf, cacheConf,
314         StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
315 
316     // Now confirm that I can read from the ref to link
317     HFileScanner sB = hsfB.createReader().getScanner(false, false);
318     sB.seekTo();
319     
320     //count++ as seekTo() will advance the scanner
321     count++;
322     while (sB.next()) {
323       count++;
324     }
325 
326     // read the rest of the rows
327     assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
328   }
329 
  /*
   * Splits the passed file at its midkey into top and bottom halves and
   * asserts each half-reader only returns keys on its side of the split.
   * Then re-splits at midkeys outside the file's key range and asserts the
   * degenerate half is not created at all.
   * @param f store file to split; deleted on exit
   * @throws IOException
   */
  private void checkHalfHFile(final StoreFile f)
  throws IOException {
    byte [] midkey = f.createReader().midkey();
    KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
    byte [] midRow = midKV.getRow();
    // Create top split.
    Path topDir = Store.getStoreHomedir(this.testDir, "1",
      Bytes.toBytes(f.getPath().getParent().getName()));
    if (this.fs.exists(topDir)) {
      this.fs.delete(topDir, true);
    }
    Path topPath = StoreFile.split(this.fs, topDir, f, midRow, Range.top);
    // Create bottom split.
    Path bottomDir = Store.getStoreHomedir(this.testDir, "2",
      Bytes.toBytes(f.getPath().getParent().getName()));
    if (this.fs.exists(bottomDir)) {
      this.fs.delete(bottomDir, true);
    }
    Path bottomPath = StoreFile.split(this.fs, bottomDir,
      f, midRow, Range.bottom);
    // Make readers on top and bottom.
    StoreFile.Reader top =
        new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
            NoOpDataBlockEncoder.INSTANCE).createReader();
    StoreFile.Reader bottom = new StoreFile(this.fs, bottomPath,
        conf, cacheConf, BloomType.NONE,
        NoOpDataBlockEncoder.INSTANCE).createReader();
    ByteBuffer previous = null;
    LOG.info("Midkey: " + midKV.toString());
    ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midkey);
    try {
      // Now make two HalfMapFiles and assert they can read the full backing
      // file, one from the top and the other from the bottom.
      // Test bottom half first.
      // Now test reading from the top.
      boolean first = true;
      ByteBuffer key = null;
      HFileScanner topScanner = top.getScanner(false, false);
      while ((!topScanner.isSeeked() && topScanner.seekTo()) ||
          (topScanner.isSeeked() && topScanner.next())) {
        key = topScanner.getKey();

        // Every key served by the top half must sort >= the midkey.
        if (topScanner.getReader().getComparator().compare(key.array(),
          key.arrayOffset(), key.limit(), midkey, 0, midkey.length) < 0) {
          fail("key=" + Bytes.toStringBinary(key) + " < midkey=" +
              Bytes.toStringBinary(midkey));
        }
        if (first) {
          first = false;
          LOG.info("First in top: " + Bytes.toString(Bytes.toBytes(key)));
        }
      }
      LOG.info("Last in top: " + Bytes.toString(Bytes.toBytes(key)));

      first = true;
      HFileScanner bottomScanner = bottom.getScanner(false, false);
      while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
          bottomScanner.next()) {
        previous = bottomScanner.getKey();
        key = bottomScanner.getKey();
        if (first) {
          first = false;
          LOG.info("First in bottom: " +
            Bytes.toString(Bytes.toBytes(previous)));
        }
        // Every key served by the bottom half must sort before the midkey.
        assertTrue(key.compareTo(bbMidkeyBytes) < 0);
      }
      if (previous != null) {
        LOG.info("Last in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
      }
      // Remove references.
      this.fs.delete(topPath, false);
      this.fs.delete(bottomPath, false);

      // Next test using a midkey that does not exist in the file.
      // First, do a key that is < than first key. Ensure splits behave
      // properly.
      byte [] badmidkey = Bytes.toBytes("  .");
      topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
      bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
        Range.bottom);

      // Split key precedes every key, so no bottom half should be created.
      assertNull(bottomPath);

      top = new StoreFile(this.fs, topPath, conf, cacheConf,
          StoreFile.BloomType.NONE,
          NoOpDataBlockEncoder.INSTANCE).createReader();
      // Now read from the top.
      first = true;
      topScanner = top.getScanner(false, false);
      while ((!topScanner.isSeeked() && topScanner.seekTo()) ||
          topScanner.next()) {
        key = topScanner.getKey();
        assertTrue(topScanner.getReader().getComparator().compare(key.array(),
          key.arrayOffset(), key.limit(), badmidkey, 0, badmidkey.length) >= 0);
        if (first) {
          first = false;
          KeyValue keyKV = KeyValue.createKeyValueFromKey(key);
          LOG.info("First top when key < bottom: " + keyKV);
          // First generated row consists entirely of 'a' characters.
          String tmp = Bytes.toString(keyKV.getRow());
          for (int i = 0; i < tmp.length(); i++) {
            assertTrue(tmp.charAt(i) == 'a');
          }
        }
      }
      KeyValue keyKV = KeyValue.createKeyValueFromKey(key);
      LOG.info("Last top when key < bottom: " + keyKV);
      // Last generated row consists entirely of 'z' characters.
      String tmp = Bytes.toString(keyKV.getRow());
      for (int i = 0; i < tmp.length(); i++) {
        assertTrue(tmp.charAt(i) == 'z');
      }
      // Remove references.
      this.fs.delete(topPath, false);

      // Test when badkey is > than last key in file ('||' > 'zz').
      badmidkey = Bytes.toBytes("|||");
      topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
      bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
        Range.bottom);

      // Split key follows every key, so no top half should be created.
      assertNull(topPath);

      bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
          StoreFile.BloomType.NONE,
          NoOpDataBlockEncoder.INSTANCE).createReader();
      first = true;
      bottomScanner = bottom.getScanner(false, false);
      while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
          bottomScanner.next()) {
        key = bottomScanner.getKey();
        if (first) {
          first = false;
          keyKV = KeyValue.createKeyValueFromKey(key);
          LOG.info("First bottom when key > top: " + keyKV);
          tmp = Bytes.toString(keyKV.getRow());
          for (int i = 0; i < tmp.length(); i++) {
            assertTrue(tmp.charAt(i) == 'a');
          }
        }
      }
      keyKV = KeyValue.createKeyValueFromKey(key);
      LOG.info("Last bottom when key > top: " + keyKV);
      for (int i = 0; i < tmp.length(); i++) {
        assertTrue(Bytes.toString(keyKV.getRow()).charAt(i) == 'z');
      }
    } finally {
      if (top != null) {
        top.close(true); // evict since we are about to delete the file
      }
      if (bottom != null) {
        bottom.close(true); // evict since we are about to delete the file
      }
      fs.delete(f.getPath(), true);
    }
  }
485 
  // Row-key format: zero-padded 10-digit decimal, keeps lexical order == numeric order.
  private static final String localFormatter = "%010d";
487 
  /*
   * Writes the 1000 even-numbered rows of [0, 2000) through the passed
   * bloom-enabled writer, then probes all 2000 rows via the bloom filter:
   * present rows must never miss (no false negatives) and false positives
   * must stay within twice the configured error rate.
   * @param writer bloom-enabled store file writer; closed by this method
   * @param fs filesystem the file lives on; the file is deleted on success
   * @throws Exception
   */
  private void bloomWriteRead(StoreFile.Writer writer, FileSystem fs)
  throws Exception {
    float err = conf.getFloat(
        BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
    Path f = writer.getPath();
    long now = System.currentTimeMillis();
    // Only even i values are written, so odd rows should miss the bloom.
    for (int i = 0; i < 2000; i += 2) {
      String row = String.format(localFormatter, i);
      KeyValue kv = new KeyValue(row.getBytes(), "family".getBytes(),
        "col".getBytes(), now, "value".getBytes());
      writer.append(kv);
    }
    writer.close();

    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf,
        DataBlockEncoding.NONE);
    reader.loadFileInfo();
    reader.loadBloomfilter();
    StoreFileScanner scanner = reader.getStoreFileScanner(false, false);

    // check false positives rate
    int falsePos = 0;
    int falseNeg = 0;
    for (int i = 0; i < 2000; i++) {
      String row = String.format(localFormatter, i);
      TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
      columns.add("family:col".getBytes());

      // Single-row scan so shouldUseScanner consults the row bloom.
      Scan scan = new Scan(row.getBytes(),row.getBytes());
      scan.addColumn("family".getBytes(), "family:col".getBytes());
      boolean exists = scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE);
      if (i % 2 == 0) {
        if (!exists) falseNeg++;
      } else {
        if (exists) falsePos++;
      }
    }
    reader.close(true); // evict because we are about to delete the file
    fs.delete(f, true);
    assertEquals("False negatives: " + falseNeg, 0, falseNeg);
    // Allow 2x the configured error rate over 2000 probes before failing.
    int maxFalsePos = (int) (2 * 2000 * err);
    assertTrue("Too many false positives: " + falsePos + " (err=" + err
        + ", expected no more than " + maxFalsePos + ")",
        falsePos <= maxFalsePos);
  }
533 
534   public void testBloomFilter() throws Exception {
535     FileSystem fs = FileSystem.getLocal(conf);
536     conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
537         (float) 0.01);
538     conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
539 
540     // write the file
541     Path f = new Path(ROOT_DIR, getName());
542     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
543         StoreFile.DEFAULT_BLOCKSIZE_SMALL)
544             .withFilePath(f)
545             .withBloomType(StoreFile.BloomType.ROW)
546             .withMaxKeyCount(2000)
547             .withChecksumType(CKTYPE)
548             .withBytesPerChecksum(CKBYTES)
549             .build();
550     bloomWriteRead(writer, fs);
551   }
552 
  /**
   * Writes DeleteFamily markers for the even-numbered rows of [0, 2000) and
   * verifies the delete-family bloom filter: no false negatives, false
   * positives within twice the configured error rate, and an accurate
   * delete-family count.
   */
  public void testDeleteFamilyBloomFilter() throws Exception {
    FileSystem fs = FileSystem.getLocal(conf);
    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
        (float) 0.01);
    conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
    float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
        0);

    // write the file
    Path f = new Path(ROOT_DIR, getName());

    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
        fs, StoreFile.DEFAULT_BLOCKSIZE_SMALL)
            .withFilePath(f)
            .withMaxKeyCount(2000)
            .withChecksumType(CKTYPE)
            .withBytesPerChecksum(CKBYTES)
            .build();

    // add delete family
    long now = System.currentTimeMillis();
    // Only even i values get a marker, so odd rows should miss the bloom.
    for (int i = 0; i < 2000; i += 2) {
      String row = String.format(localFormatter, i);
      KeyValue kv = new KeyValue(row.getBytes(), "family".getBytes(),
          "col".getBytes(), now, KeyValue.Type.DeleteFamily, "value".getBytes());
      writer.append(kv);
    }
    writer.close();

    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf,
        DataBlockEncoding.NONE);
    reader.loadFileInfo();
    reader.loadBloomfilter();

    // check false positives rate
    int falsePos = 0;
    int falseNeg = 0;
    for (int i = 0; i < 2000; i++) {
      String row = String.format(localFormatter, i);
      byte[] rowKey = Bytes.toBytes(row);
      boolean exists = reader.passesDeleteFamilyBloomFilter(rowKey, 0,
          rowKey.length);
      if (i % 2 == 0) {
        if (!exists)
          falseNeg++;
      } else {
        if (exists)
          falsePos++;
      }
    }
    // 1000 even rows were written, each with one DeleteFamily marker.
    assertEquals(1000, reader.getDeleteFamilyCnt());
    reader.close(true); // evict because we are about to delete the file
    fs.delete(f, true);
    assertEquals("False negatives: " + falseNeg, 0, falseNeg);
    // Allow 2x the configured error rate over 2000 probes before failing.
    int maxFalsePos = (int) (2 * 2000 * err);
    assertTrue("Too many false positives: " + falsePos + " (err=" + err
        + ", expected no more than " + maxFalsePos, falsePos <= maxFalsePos);
  }
611 
612   /**
613    * Test for HBASE-8012
614    */
615   public void testReseek() throws Exception {
616     // write the file
617     Path f = new Path(ROOT_DIR, getName());
618 
619     // Make a store file and write data to it.
620     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
621          this.fs, 8 * 1024)
622             .withFilePath(f)
623             .build();
624 
625     writeStoreFile(writer);
626     writer.close();
627 
628     StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, DataBlockEncoding.NONE);
629 
630     // Now do reseek with empty KV to position to the beginning of the file
631 
632     KeyValue k = KeyValue.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY);
633     StoreFileScanner s = reader.getStoreFileScanner(false, false);
634     s.reseek(k);
635 
636     assertNotNull("Intial reseek should position at the beginning of the file", s.peek());
637   }
638 
  /**
   * Exercises ROWCOL and ROW bloom types over the same data set and checks
   * each type's false-negative (must be zero) and false-positive rates
   * against its expected error budget.
   */
  public void testBloomTypes() throws Exception {
    float err = (float) 0.01;
    FileSystem fs = FileSystem.getLocal(conf);
    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, err);
    conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);

    int rowCount = 50;
    int colCount = 10;
    int versions = 2;

    // run once using columns and once using rows
    StoreFile.BloomType[] bt =
      {StoreFile.BloomType.ROWCOL, StoreFile.BloomType.ROW};
    int[] expKeys    = {rowCount*colCount, rowCount};
    // below line deserves commentary.  it is expected bloom false positives
    //  column = rowCount*2*colCount inserts
    //  row-level = only rowCount*2 inserts, but failures will be magnified by
    //              2nd for loop for every column (2*colCount)
    float[] expErr   = {2*rowCount*colCount*err, 2*rowCount*2*colCount*err};

    for (int x : new int[]{0,1}) {
      // write the file
      Path f = new Path(ROOT_DIR, getName() + x);
      StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
          fs, StoreFile.DEFAULT_BLOCKSIZE_SMALL)
              .withFilePath(f)
              .withBloomType(bt[x])
              .withMaxKeyCount(expKeys[x])
              .withChecksumType(CKTYPE)
              .withBytesPerChecksum(CKBYTES)
              .build();

      long now = System.currentTimeMillis();
      // Only even row/column indices are written; odd ones probe for misses.
      for (int i = 0; i < rowCount*2; i += 2) { // rows
        for (int j = 0; j < colCount*2; j += 2) {   // column qualifiers
          String row = String.format(localFormatter, i);
          String col = String.format(localFormatter, j);
          for (int k= 0; k < versions; ++k) { // versions
            KeyValue kv = new KeyValue(row.getBytes(),
              "family".getBytes(), ("col" + col).getBytes(),
                now-k, Bytes.toBytes((long)-1));
            writer.append(kv);
          }
        }
      }
      writer.close();

      StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf,
          DataBlockEncoding.NONE);
      reader.loadFileInfo();
      reader.loadBloomfilter();
      StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
      assertEquals(expKeys[x], reader.generalBloomFilter.getKeyCount());

      // check false positives rate
      int falsePos = 0;
      int falseNeg = 0;
      for (int i = 0; i < rowCount*2; ++i) { // rows
        for (int j = 0; j < colCount*2; ++j) {   // column qualifiers
          String row = String.format(localFormatter, i);
          String col = String.format(localFormatter, j);
          TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
          columns.add(("col" + col).getBytes());

          Scan scan = new Scan(row.getBytes(),row.getBytes());
          scan.addColumn("family".getBytes(), ("col"+col).getBytes());
          boolean exists =
              scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE);
          boolean shouldRowExist = i % 2 == 0;
          boolean shouldColExist = j % 2 == 0;
          // A ROW bloom cannot distinguish columns, so any column on a
          // present row counts as "exists".
          shouldColExist = shouldColExist || bt[x] == StoreFile.BloomType.ROW;
          if (shouldRowExist && shouldColExist) {
            if (!exists) falseNeg++;
          } else {
            if (exists) falsePos++;
          }
        }
      }
      reader.close(true); // evict because we are about to delete the file
      fs.delete(f, true);
      System.out.println(bt[x].toString());
      System.out.println("  False negatives: " + falseNeg);
      System.out.println("  False positives: " + falsePos);
      assertEquals(0, falseNeg);
      assertTrue(falsePos < 2*expErr[x]);
    }
  }
726 
  /**
   * Bloom sizing edge cases (HFile format v1): a bloom is skipped when the
   * max-keys cap is too small for the key count, created for a very large
   * but representable key count, and skipped again when the required bit
   * array would exceed Integer.MAX_VALUE.
   */
  public void testBloomEdgeCases() throws Exception {
    float err = (float)0.005;
    FileSystem fs = FileSystem.getLocal(conf);
    Path f = new Path(ROOT_DIR, getName());
    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, err);
    conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
    conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_MAX_KEYS, 1000);

    // This test only runs for HFile format version 1.
    conf.setInt(HFile.FORMAT_VERSION_KEY, 1);

    // this should not create a bloom because the max keys is too small
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
            .withFilePath(f)
            .withBloomType(StoreFile.BloomType.ROW)
            .withMaxKeyCount(2000)
            .withChecksumType(CKTYPE)
            .withBytesPerChecksum(CKBYTES)
            .build();
    assertFalse(writer.hasGeneralBloom());
    writer.close();
    fs.delete(f, true);

    // Lift the cap so the next writers size blooms purely off key count.
    conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_MAX_KEYS,
        Integer.MAX_VALUE);

    // TODO: commented out because we run out of java heap space on trunk
    // the below config caused IllegalArgumentException in our production cluster
    // however, the resulting byteSize is < MAX_INT, so this should work properly
    writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
            .withFilePath(f)
            .withBloomType(StoreFile.BloomType.ROW)
            .withMaxKeyCount(27244696)
            .build();
    assertTrue(writer.hasGeneralBloom());
    bloomWriteRead(writer, fs);

    // this, however, is too large and should not create a bloom
    // because Java can't create a contiguous array > MAX_INT
    writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
            .withFilePath(f)
            .withBloomType(StoreFile.BloomType.ROW)
            .withMaxKeyCount(Integer.MAX_VALUE)
            .withChecksumType(CKTYPE)
            .withBytesPerChecksum(CKBYTES)
            .build();
    assertFalse(writer.hasGeneralBloom());
    writer.close();
    fs.delete(f, true);
  }
780 
781   public void testSeqIdComparator() {
782     assertOrdering(StoreFile.Comparators.SEQ_ID,
783         mockStoreFile(true, 1000, -1, "/foo/123"),
784         mockStoreFile(true, 1000, -1, "/foo/126"),
785         mockStoreFile(true, 2000, -1, "/foo/126"),
786         mockStoreFile(false, -1, 1, "/foo/1"),
787         mockStoreFile(false, -1, 3, "/foo/2"),
788         mockStoreFile(false, -1, 5, "/foo/2"),
789         mockStoreFile(false, -1, 5, "/foo/3"));
790   }
791 
792   /**
793    * Assert that the given comparator orders the given storefiles in the
794    * same way that they're passed.
795    */
796   private void assertOrdering(Comparator<StoreFile> comparator, StoreFile ... sfs) {
797     ArrayList<StoreFile> sorted = Lists.newArrayList(sfs);
798     Collections.shuffle(sorted);
799     Collections.sort(sorted, comparator);
800     LOG.debug("sfs: " + Joiner.on(",").join(sfs));
801     LOG.debug("sorted: " + Joiner.on(",").join(sorted));
802     assertTrue(Iterables.elementsEqual(Arrays.asList(sfs), sorted));
803   }
804 
805   /**
806    * Create a mock StoreFile with the given attributes.
807    */
808   private StoreFile mockStoreFile(boolean bulkLoad, long bulkTimestamp,
809       long seqId, String path) {
810     StoreFile mock = Mockito.mock(StoreFile.class);
811     Mockito.doReturn(bulkLoad).when(mock).isBulkLoadResult();
812     Mockito.doReturn(bulkTimestamp).when(mock).getBulkLoadTimestamp();
813     Mockito.doReturn(seqId).when(mock).getMaxSequenceId();
814     Mockito.doReturn(new Path(path)).when(mock).getPath();
815     String name = "mock storefile, bulkLoad=" + bulkLoad +
816       " bulkTimestamp=" + bulkTimestamp +
817       " seqId=" + seqId +
818       " path=" + path;
819     Mockito.doReturn(name).when(mock).toString();
820     return mock;
821   }
822 
823   /**
824    * Generate a list of KeyValues for testing based on given parameters
825    * @param timestamps
826    * @param numRows
827    * @param qualifier
828    * @param family
829    * @return
830    */
831   List<KeyValue> getKeyValueSet(long[] timestamps, int numRows,
832       byte[] qualifier, byte[] family) {
833     List<KeyValue> kvList = new ArrayList<KeyValue>();
834     for (int i=1;i<=numRows;i++) {
835       byte[] b = Bytes.toBytes(i) ;
836       LOG.info(Bytes.toString(b));
837       LOG.info(Bytes.toString(b));
838       for (long timestamp: timestamps)
839       {
840         kvList.add(new KeyValue(b, family, qualifier, timestamp, b));
841       }
842     }
843     return kvList;
844   }
845 
846   /**
847    * Test to ensure correctness when using StoreFile with multiple timestamps
848    * @throws IOException
849    */
850   public void testMultipleTimestamps() throws IOException {
851     byte[] family = Bytes.toBytes("familyname");
852     byte[] qualifier = Bytes.toBytes("qualifier");
853     int numRows = 10;
854     long[] timestamps = new long[] {20,10,5,1};
855     Scan scan = new Scan();
856 
857     // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
858     Path storedir = new Path(new Path(this.testDir, "7e0102"), "familyname");
859     Path dir = new Path(storedir, "1234567890");
860     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
861         this.fs, 8 * 1024)
862             .withOutputDir(dir)
863             .build();
864 
865     List<KeyValue> kvList = getKeyValueSet(timestamps,numRows,
866         family, qualifier);
867 
868     for (KeyValue kv : kvList) {
869       writer.append(kv);
870     }
871     writer.appendMetadata(0, false);
872     writer.close();
873 
874     StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
875         StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
876     StoreFile.Reader reader = hsf.createReader();
877     StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
878     TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
879     columns.add(qualifier);
880 
881     scan.setTimeRange(20, 100);
882     assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
883 
884     scan.setTimeRange(1, 2);
885     assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
886 
887     scan.setTimeRange(8, 10);
888     assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
889 
890     scan.setTimeRange(7, 50);
891     assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
892 
893     // This test relies on the timestamp range optimization
894     scan.setTimeRange(27, 50);
895     assertTrue(!scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
896   }
897 
  /**
   * End-to-end check of cache-on-write and evict-on-close accounting against
   * the shared block cache's hit/miss/eviction counters. The assertions are
   * strictly order-dependent: each phase asserts deltas relative to the
   * counters captured at the start and then advances the expected baselines.
   */
  public void testCacheOnWriteEvictOnClose() throws Exception {
    Configuration conf = this.conf;

    // Find a home for our files (regiondir ("7e0102") and familyname).
    Path baseDir = new Path(new Path(this.testDir, "7e0102"),"twoCOWEOC");

    // Grab the block cache and get the initial hit/miss counts
    BlockCache bc = new CacheConfig(conf).getBlockCache();
    assertNotNull(bc);
    CacheStats cs = bc.getStats();
    long startHit = cs.getHitCount();
    long startMiss = cs.getMissCount();
    long startEvicted = cs.getEvictedCount();

    // Let's write a StoreFile with three blocks, with cache on write off
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
    CacheConfig cacheConf = new CacheConfig(conf);
    Path pathCowOff = new Path(baseDir, "123456789");
    StoreFile.Writer writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    LOG.debug(hsf.getPath().toString());

    // Read this file, we should see 3 misses (blocks were not cached on write)
    StoreFile.Reader reader = hsf.createReader();
    reader.loadFileInfo();
    StoreFileScanner scanner = reader.getStoreFileScanner(true, true);
    scanner.seek(KeyValue.LOWESTKEY);
    while (scanner.next() != null);
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss + 3, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startMiss += 3;
    scanner.close();
    reader.close(cacheConf.shouldEvictOnClose());

    // Now write a StoreFile with three blocks, with cache on write on
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
    cacheConf = new CacheConfig(conf);
    Path pathCowOn = new Path(baseDir, "123456788");
    writer = writeStoreFile(conf, cacheConf, pathCowOn, 3);
    hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);

    // Read this file, we should see 3 hits (blocks were cached during write)
    reader = hsf.createReader();
    scanner = reader.getStoreFileScanner(true, true);
    scanner.seek(KeyValue.LOWESTKEY);
    while (scanner.next() != null);
    assertEquals(startHit + 3, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startHit += 3;
    scanner.close();
    reader.close(cacheConf.shouldEvictOnClose());

    // Let's read back the two files to ensure the blocks exactly match;
    // all 6 blocks should now come from cache (6 more hits asserted below).
    hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    StoreFile.Reader readerOne = hsf.createReader();
    readerOne.loadFileInfo();
    StoreFileScanner scannerOne = readerOne.getStoreFileScanner(true, true);
    scannerOne.seek(KeyValue.LOWESTKEY);
    hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    StoreFile.Reader readerTwo = hsf.createReader();
    readerTwo.loadFileInfo();
    StoreFileScanner scannerTwo = readerTwo.getStoreFileScanner(true, true);
    scannerTwo.seek(KeyValue.LOWESTKEY);
    KeyValue kv1 = null;
    KeyValue kv2 = null;
    // Walk both scanners in lockstep and compare each KV's key and value bytes.
    while ((kv1 = scannerOne.next()) != null) {
      kv2 = scannerTwo.next();
      assertTrue(kv1.equals(kv2));
      assertTrue(Bytes.compareTo(
          kv1.getBuffer(), kv1.getKeyOffset(), kv1.getKeyLength(),
          kv2.getBuffer(), kv2.getKeyOffset(), kv2.getKeyLength()) == 0);
      assertTrue(Bytes.compareTo(
          kv1.getBuffer(), kv1.getValueOffset(), kv1.getValueLength(),
          kv2.getBuffer(), kv2.getValueOffset(), kv2.getValueLength()) == 0);
    }
    // Both files have the same number of KVs, so scannerTwo must also be done.
    assertNull(scannerTwo.next());
    assertEquals(startHit + 6, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startHit += 6;
    scannerOne.close();
    readerOne.close(cacheConf.shouldEvictOnClose());
    scannerTwo.close();
    readerTwo.close(cacheConf.shouldEvictOnClose());

    // Let's close the first file with evict on close turned on
    conf.setBoolean("hbase.rs.evictblocksonclose", true);
    cacheConf = new CacheConfig(conf);
    hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    reader = hsf.createReader();
    reader.close(cacheConf.shouldEvictOnClose());

    // We should have 3 new evictions (the first file's 3 blocks)
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted + 3, cs.getEvictedCount());
    startEvicted += 3;

    // Let's close the second file with evict on close turned off
    conf.setBoolean("hbase.rs.evictblocksonclose", false);
    cacheConf = new CacheConfig(conf);
    hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    reader = hsf.createReader();
    reader.close(cacheConf.shouldEvictOnClose());

    // We expect no changes
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
  }
1016 
1017   private StoreFile.Writer writeStoreFile(Configuration conf,
1018       CacheConfig cacheConf, Path path, int numBlocks)
1019   throws IOException {
1020     // Let's put ~5 small KVs in each block, so let's make 5*numBlocks KVs
1021     int numKVs = 5 * numBlocks;
1022     List<KeyValue> kvs = new ArrayList<KeyValue>(numKVs);
1023     byte [] b = Bytes.toBytes("x");
1024     int totalSize = 0;
1025     for (int i=numKVs;i>0;i--) {
1026       KeyValue kv = new KeyValue(b, b, b, i, b);
1027       kvs.add(kv);
1028       // kv has memstoreTS 0, which takes 1 byte to store.
1029       totalSize += kv.getLength() + 1;
1030     }
1031     int blockSize = totalSize / numBlocks;
1032     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
1033         blockSize)
1034             .withFilePath(path)
1035             .withMaxKeyCount(2000)
1036             .withChecksumType(CKTYPE)
1037             .withBytesPerChecksum(CKBYTES)
1038             .build();
1039     // We'll write N-1 KVs to ensure we don't write an extra block
1040     kvs.remove(kvs.size()-1);
1041     for (KeyValue kv : kvs) {
1042       writer.append(kv);
1043     }
1044     writer.appendMetadata(0, false);
1045     writer.close();
1046     return writer;
1047   }
1048 
1049   /**
1050    * Check if data block encoding information is saved correctly in HFile's
1051    * file info.
1052    */
1053   public void testDataBlockEncodingMetaData() throws IOException {
1054     // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
1055     Path dir = new Path(new Path(this.testDir, "7e0102"), "familyname");
1056     Path path = new Path(dir, "1234567890");
1057 
1058     DataBlockEncoding dataBlockEncoderAlgo =
1059         DataBlockEncoding.FAST_DIFF;
1060     HFileDataBlockEncoder dataBlockEncoder =
1061         new HFileDataBlockEncoderImpl(
1062             dataBlockEncoderAlgo,
1063             dataBlockEncoderAlgo);
1064     cacheConf = new CacheConfig(conf);
1065     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
1066         HFile.DEFAULT_BLOCKSIZE)
1067             .withFilePath(path)
1068             .withDataBlockEncoder(dataBlockEncoder)
1069             .withMaxKeyCount(2000)
1070             .withChecksumType(CKTYPE)
1071             .withBytesPerChecksum(CKBYTES)
1072             .build();
1073     writer.close();
1074 
1075     StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
1076         cacheConf, BloomType.NONE, dataBlockEncoder);
1077     StoreFile.Reader reader = storeFile.createReader();
1078 
1079     Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
1080     byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
1081 
1082     assertEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
1083   }
1084 
  // NOTE(review): ResourceCheckerJUnitRule presumably verifies that the test
  // does not leak resources (e.g. threads or file handles) — confirm against
  // its implementation; not visible from this file.
  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
1088 }
1089