/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.io.hfile;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import java.util.Random;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

/**
 * Tests {@link HFile} cache-on-write functionality for the following block
 * types: data blocks, non-root index blocks, and Bloom filter blocks.
 */
@RunWith(Parameterized.class)
@Category(MediumTests.class)
public class TestCacheOnWrite {

  private static final Log LOG = LogFactory.getLog(TestCacheOnWrite.class);

  private static final HBaseTestingUtility TEST_UTIL =
      new HBaseTestingUtility();
  private Configuration conf;
  private CacheConfig cacheConf;
  private FileSystem fs;
  private Random rand = new Random(12983177L);
  private Path storeFilePath;
  private BlockCache blockCache;
  private String testDescription;

  private final CacheOnWriteType cowType;
  private final Compression.Algorithm compress;
  private final BlockEncoderTestType encoderType;
  private final HFileDataBlockEncoder encoder;

  private static final int DATA_BLOCK_SIZE = 2048;
  private static final int NUM_KV = 25000;
  private static final int INDEX_BLOCK_SIZE = 512;
  private static final int BLOOM_BLOCK_SIZE = 4096;
  private static final BloomType BLOOM_TYPE = BloomType.ROWCOL;
  private static final ChecksumType CKTYPE = ChecksumType.CRC32;
  private static final int CKBYTES = 512;

  /** The number of valid key types possible in a store file */
  private static final int NUM_VALID_KEY_TYPES =
      KeyValue.Type.values().length - 2;
  private enum CacheOnWriteType {
    DATA_BLOCKS(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
        BlockType.DATA, BlockType.ENCODED_DATA),
    BLOOM_BLOCKS(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
        BlockType.BLOOM_CHUNK),
    INDEX_BLOCKS(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
        BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX);

    private final String confKey;
    private final BlockType blockType1;
    private final BlockType blockType2;

    private CacheOnWriteType(String confKey, BlockType blockType) {
      this(confKey, blockType, blockType);
    }

    private CacheOnWriteType(String confKey, BlockType blockType1,
        BlockType blockType2) {
      this.blockType1 = blockType1;
      this.blockType2 = blockType2;
      this.confKey = confKey;
    }

    public boolean shouldBeCached(BlockType blockType) {
      return blockType == blockType1 || blockType == blockType2;
    }

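    /**
     * Enables cache-on-write for this block category only: sets this type's
     * configuration key to true and every other type's key to false.
     */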
    public void modifyConf(Configuration conf) {
      for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
        conf.setBoolean(cowType.confKey, cowType == this);
      }
    }

  }

  private static final DataBlockEncoding ENCODING_ALGO =
      DataBlockEncoding.PREFIX;

  /** Provides fancy names for three combinations of two booleans */
  private enum BlockEncoderTestType {
    NO_BLOCK_ENCODING(false, false),
    BLOCK_ENCODING_IN_CACHE_ONLY(false, true),
    BLOCK_ENCODING_EVERYWHERE(true, true);

    private final boolean encodeOnDisk;
    private final boolean encodeInCache;

    BlockEncoderTestType(boolean encodeOnDisk, boolean encodeInCache) {
      this.encodeOnDisk = encodeOnDisk;
      this.encodeInCache = encodeInCache;
    }

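    /**
     * Builds the data block encoder under test: {@link #ENCODING_ALGO} on
     * disk and/or in the block cache, depending on this combination's flags.
     */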
    public HFileDataBlockEncoder getEncoder() {
      return new HFileDataBlockEncoderImpl(
          encodeOnDisk ? ENCODING_ALGO : DataBlockEncoding.NONE,
          encodeInCache ? ENCODING_ALGO : DataBlockEncoding.NONE);
    }
  }

  public TestCacheOnWrite(CacheOnWriteType cowType,
      Compression.Algorithm compress, BlockEncoderTestType encoderType) {
    this.cowType = cowType;
    this.compress = compress;
    this.encoderType = encoderType;
    this.encoder = encoderType.getEncoder();
    testDescription = "[cacheOnWrite=" + cowType + ", compress=" + compress +
        ", encoderType=" + encoderType + "]";
    LOG.info(testDescription);
  }

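  /**
   * Generates every combination of cache-on-write block category, compression
   * algorithm, and block encoding mode that this test runs with.
   */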
  @Parameters
  public static Collection<Object[]> getParameters() {
    List<Object[]> cowTypes = new ArrayList<Object[]>();
    for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
      for (Compression.Algorithm compress :
           HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
        for (BlockEncoderTestType encoderType :
             BlockEncoderTestType.values()) {
          cowTypes.add(new Object[] { cowType, compress, encoderType });
        }
      }
    }
    return cowTypes;
  }

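  /**
   * Configures block sizes, the maximum supported HFile format version, and
   * the cache-on-write flag for the block category under test, then creates
   * the cache configuration and block cache used by the assertions below.
   */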
  @Before
  public void setUp() throws IOException {
    conf = TEST_UTIL.getConfiguration();
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
    conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
    conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
        BLOOM_BLOCK_SIZE);
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
        cowType.shouldBeCached(BlockType.DATA));
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
        cowType.shouldBeCached(BlockType.LEAF_INDEX));
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
        cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
    cowType.modifyConf(conf);
    fs = HFileSystem.get(conf);
    cacheConf = new CacheConfig(conf);
    blockCache = cacheConf.getBlockCache();
  }

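  /**
   * Refreshes the cache configuration and block cache references between
   * parameterized runs.
   */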
  @After
  public void tearDown() {
    cacheConf = new CacheConfig(conf);
    blockCache = cacheConf.getBlockCache();
  }

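  /**
   * Writes a store file with cache-on-write enabled for one block category,
   * then re-reads every block and verifies that exactly that category was
   * cached.
   */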
  @Test
  public void testStoreFileCacheOnWrite() throws IOException {
    writeStoreFile();
    readStoreFile();
  }

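  /**
   * Scans all blocks of the store file written by {@link #writeStoreFile()}
   * and checks each block's presence in the block cache against the
   * expectation for the current cache-on-write category.
   */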
  private void readStoreFile() throws IOException {
    HFileReaderV2 reader = (HFileReaderV2) HFile.createReaderWithEncoding(fs,
        storeFilePath, cacheConf, encoder.getEncodingInCache());
    LOG.info("HFile information: " + reader);
    final boolean cacheBlocks = false;
    final boolean pread = false;
    HFileScanner scanner = reader.getScanner(cacheBlocks, pread);
    assertTrue(testDescription, scanner.seekTo());

    long offset = 0;
    HFileBlock prevBlock = null;
    EnumMap<BlockType, Integer> blockCountByType =
        new EnumMap<BlockType, Integer>(BlockType.class);

    DataBlockEncoding encodingInCache =
        encoderType.getEncoder().getEncodingInCache();
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
          false, null);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
          offset, encodingInCache, block.getBlockType());
      boolean isCached = blockCache.getBlock(blockCacheKey, true, false) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
            "shouldBeCached: " + shouldBeCached + "\n" +
            "isCached: " + isCached + "\n" +
            "Test description: " + testDescription + "\n" +
            "block: " + block + "\n" +
            "encodingInCache: " + encodingInCache + "\n" +
            "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
      BlockType bt = block.getBlockType();
      Integer count = blockCountByType.get(bt);
      blockCountByType.put(bt, (count == null ? 0 : count) + 1);
    }

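    // The exact per-type block counts asserted below are stable because the
    // writer uses a fixed random seed, fixed block sizes, and a fixed number
    // of key-values, and block boundaries depend on uncompressed size only.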
    LOG.info("Block count by type: " + blockCountByType);
    String countByType = blockCountByType.toString();
    BlockType cachedDataBlockType =
        encoderType.encodeInCache ? BlockType.ENCODED_DATA : BlockType.DATA;
    assertEquals("{" + cachedDataBlockType
        + "=1379, LEAF_INDEX=173, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=24}",
        countByType);

    reader.close();
  }

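  /**
   * Returns a random valid key type: Put half of the time, otherwise any of
   * the valid key types (possibly Put again), but never the Minimum or
   * Maximum sentinel types.
   */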
  public static KeyValue.Type generateKeyType(Random rand) {
    if (rand.nextBoolean()) {
      // Let's make half of KVs puts.
      return KeyValue.Type.Put;
    } else {
      KeyValue.Type keyType =
          KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
      if (keyType == KeyValue.Type.Minimum
          || keyType == KeyValue.Type.Maximum) {
        throw new RuntimeException("Generated an invalid key type: " + keyType
            + ". Probably the layout of KeyValue.Type has changed.");
      }
      return keyType;
    }
  }

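  /**
   * Writes a store file of {@link #NUM_KV} randomly generated key-values,
   * using the compression, encoding, Bloom filter, and checksum settings of
   * the current parameter combination; cache-on-write behavior is driven by
   * the configuration prepared in {@link #setUp()}.
   */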
  public void writeStoreFile() throws IOException {
    Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(),
        "test_cache_on_write");
    StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs,
        DATA_BLOCK_SIZE)
            .withOutputDir(storeFileParentDir)
            .withCompression(compress)
            .withDataBlockEncoder(encoder)
            .withComparator(KeyValue.COMPARATOR)
            .withBloomType(BLOOM_TYPE)
            .withMaxKeyCount(NUM_KV)
            .withChecksumType(CKTYPE)
            .withBytesPerChecksum(CKBYTES)
            .build();

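    // Slice each random key into a fixed 32-byte row, a random-length column
    // family, and the remainder as the qualifier.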
    final int rowLen = 32;
    for (int i = 0; i < NUM_KV; ++i) {
      byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i);
      byte[] v = TestHFileWriterV2.randomValue(rand);
      int cfLen = rand.nextInt(k.length - rowLen + 1);
      KeyValue kv = new KeyValue(
          k, 0, rowLen,
          k, rowLen, cfLen,
          k, rowLen + cfLen, k.length - rowLen - cfLen,
          rand.nextLong(),
          generateKeyType(rand),
          v, 0, v.length);
      sfw.append(kv);
    }

    sfw.close();
    storeFilePath = sfw.getPath();
  }

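  /**
   * Flushes several store files, clears the block cache, runs a compaction,
   * and verifies that the compaction did not cache any DATA blocks.
   */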
  @Test
  public void testNotCachingDataBlocksDuringCompaction() throws IOException {
    // TODO: need to change this test if we add a cache size threshold for
    // compactions, or if we implement some other kind of intelligent logic for
    // deciding what blocks to cache-on-write on compaction.
    final String table = "CompactionCacheOnWrite";
    final String cf = "myCF";
    final byte[] cfBytes = Bytes.toBytes(cf);
    final int maxVersions = 3;
    HRegion region = TEST_UTIL.createTestRegion(table,
        new HColumnDescriptor(cf)
            .setCompressionType(compress)
            .setBloomFilterType(BLOOM_TYPE)
            .setMaxVersions(maxVersions)
            .setDataBlockEncoding(encoder.getEncodingInCache())
            .setEncodeOnDisk(encoder.getEncodingOnDisk() !=
                DataBlockEncoding.NONE)
    );
    int rowIdx = 0;
    long ts = EnvironmentEdgeManager.currentTimeMillis();
    for (int iFile = 0; iFile < 5; ++iFile) {
      for (int iRow = 0; iRow < 500; ++iRow) {
        String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" +
            iRow;
        Put p = new Put(Bytes.toBytes(rowStr));
        ++rowIdx;
        for (int iCol = 0; iCol < 10; ++iCol) {
          String qualStr = "col" + iCol;
          String valueStr = "value_" + rowStr + "_" + qualStr;
          for (int iTS = 0; iTS < 5; ++iTS) {
            p.add(cfBytes, Bytes.toBytes(qualStr), ts++,
                Bytes.toBytes(valueStr));
          }
        }
        region.put(p);
      }
      region.flushcache();
    }
    LruBlockCache blockCache =
        (LruBlockCache) new CacheConfig(conf).getBlockCache();
    blockCache.clearCache();
    assertEquals(0, blockCache.getBlockTypeCountsForTest().size());
    region.compactStores();
    LOG.debug("compactStores() returned");

    Map<BlockType, Integer> blockTypesInCache =
        blockCache.getBlockTypeCountsForTest();
    LOG.debug("Block types in cache: " + blockTypesInCache);
    assertNull(blockTypesInCache.get(BlockType.DATA));
    region.close();
    blockCache.shutdown();
  }

}