/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.RedundantKVGenerator;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

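/**
 * Tests {@link HFileDataBlockEncoderImpl} across the supported combinations
 * of on-disk and in-cache data block encodings, with and without memstore
 * timestamps.
 */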
@RunWith(Parameterized.class)
@Category(SmallTests.class)
public class TestHFileDataBlockEncoder {
  private Configuration conf;
  private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private HFileDataBlockEncoderImpl blockEncoder;
  private RedundantKVGenerator generator = new RedundantKVGenerator();
  private SchemaConfigured UNKNOWN_TABLE_AND_CF =
      SchemaConfigured.createUnknown();
  private boolean includesMemstoreTS;

  /**
   * Create a test for the given data block encoding configuration.
   * @param blockEncoder the encoding policy to test
   * @param includesMemstoreTS whether the key-values carry memstore timestamps
   */
  public TestHFileDataBlockEncoder(HFileDataBlockEncoderImpl blockEncoder,
      boolean includesMemstoreTS) {
    this.blockEncoder = blockEncoder;
    this.includesMemstoreTS = includesMemstoreTS;
    System.err.println("On-disk encoding: " + blockEncoder.getEncodingOnDisk()
        + ", in-cache encoding: " + blockEncoder.getEncodingInCache()
        + ", includesMemstoreTS: " + includesMemstoreTS);
  }

  /**
   * Preparation before JUnit test.
   */
  @Before
  public void setUp() {
    conf = TEST_UTIL.getConfiguration();
    SchemaMetrics.configureGlobally(conf);
  }

  /**
   * Cleanup after JUnit test.
   */
  @After
  public void tearDown() throws IOException {
    TEST_UTIL.cleanupTestDir();
  }

  /**
   * Test putting blocks into the cache and reading them back with different
   * encoding options.
   */
  @Test
  public void testEncodingWithCache() {
    HFileBlock block = getSampleHFileBlock();
    LruBlockCache blockCache =
        new LruBlockCache(8 * 1024 * 1024, 32 * 1024, TEST_UTIL.getConfiguration());
    HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(block, false);
    BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
    blockCache.cacheBlock(cacheKey, cacheBlock);

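    // The cache hands blocks back through the HeapSize interface; the
    // concrete type should still be HFileBlock.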
    HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
    assertTrue(heapSize instanceof HFileBlock);

    HFileBlock returnedBlock = (HFileBlock) heapSize;

    if (blockEncoder.getEncodingInCache() == DataBlockEncoding.NONE) {
      assertEquals(block.getBufferWithHeader(),
          returnedBlock.getBufferWithHeader());
    } else {
      // Print the encoder configuration to help debug a failing assertion.
      if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
        System.out.println(blockEncoder);
      }
      assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
    }
  }

  /**
   * Test writing to disk.
   */
  @Test
  public void testEncodingWritePath() {
    // Usually we would have just the block without a header here, but keep
    // the test simple.
    HFileBlock block = getSampleHFileBlock();
    Pair<ByteBuffer, BlockType> result =
        blockEncoder.beforeWriteToDisk(block.getBufferWithoutHeader(),
            includesMemstoreTS, HFileBlock.DUMMY_HEADER_WITH_CHECKSUM);

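    // Rebuild an HFileBlock around the (possibly encoded) buffer so that the
    // resulting block type and encoding id can be checked below.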
    int size = result.getFirst().limit() - HFileBlock.HEADER_SIZE_WITH_CHECKSUMS;
    HFileBlock blockOnDisk = new HFileBlock(result.getSecond(),
        size, size, -1, result.getFirst(), HFileBlock.FILL_HEADER, 0,
        includesMemstoreTS, block.getMinorVersion(),
        block.getBytesPerChecksum(), block.getChecksumType(),
        block.getOnDiskDataSizeWithHeader());

    if (blockEncoder.getEncodingOnDisk() != DataBlockEncoding.NONE) {
      assertEquals(BlockType.ENCODED_DATA, blockOnDisk.getBlockType());
      assertEquals(blockEncoder.getEncodingOnDisk().getId(),
          blockOnDisk.getDataBlockEncodingId());
    } else {
      assertEquals(BlockType.DATA, blockOnDisk.getBlockType());
    }
  }

  /**
   * Test converting blocks from disk to cache format.
   */
  @Test
  public void testEncodingReadPath() {
    HFileBlock origBlock = getSampleHFileBlock();
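    // Smoke test: the conversion must complete without throwing. The cached
    // form of the block is verified by testEncodingWithCache().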
    blockEncoder.diskToCacheFormat(origBlock, false);
  }

  private HFileBlock getSampleHFileBlock() {
    ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(
        generator.generateTestKeyValues(60), includesMemstoreTS);
    int size = keyValues.limit();
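    // Leave room for the block header at the front of the buffer; passing
    // FILL_HEADER below makes the HFileBlock constructor fill it in.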
    ByteBuffer buf = ByteBuffer.allocate(size + HFileBlock.HEADER_SIZE_WITH_CHECKSUMS);
    buf.position(HFileBlock.HEADER_SIZE_WITH_CHECKSUMS);
    keyValues.rewind();
    buf.put(keyValues);
    HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, buf,
        HFileBlock.FILL_HEADER, 0, includesMemstoreTS,
        HFileReaderV2.MAX_MINOR_VERSION, 0, ChecksumType.NULL.getCode(), 0);
    UNKNOWN_TABLE_AND_CF.passSchemaMetricsTo(b);
    return b;
  }

  /**
   * @return the data block encoding configurations to test: the same encoding
   *         on disk and in cache, or an encoding in cache only
   */
  @Parameters
  public static Collection<Object[]> getAllConfigurations() {
    List<Object[]> configurations = new ArrayList<Object[]>();

    for (DataBlockEncoding diskAlgo : DataBlockEncoding.values()) {
      for (DataBlockEncoding cacheAlgo : DataBlockEncoding.values()) {
        // We allow (1) the same encoding on disk and in cache, and
        // (2) some encoding in cache but no encoding on disk (for testing).
        if (diskAlgo != cacheAlgo && diskAlgo != DataBlockEncoding.NONE) {
          continue;
        }
        for (boolean includesMemstoreTS : new boolean[] {false, true}) {
          configurations.add(new Object[] {
              new HFileDataBlockEncoderImpl(diskAlgo, cacheAlgo),
              Boolean.valueOf(includesMemstoreTS)});
        }
      }
    }

    return configurations;
  }
}