/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Random;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileReaderV2;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

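/**
 * Tests {@link HFile} cache-on-write functionality for data blocks, non-root
 * index blocks, and Bloom filter blocks, as specified by the column family.
 */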
@RunWith(Parameterized.class)
@Category(MediumTests.class)
public class TestCacheOnWriteInSchema {

  private static final Log LOG = LogFactory.getLog(TestCacheOnWriteInSchema.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final String DIR =
      TEST_UTIL.getDataTestDir("TestCacheOnWriteInSchema").toString();
  private static final byte[] table = Bytes.toBytes("table");
  private static final byte[] family = Bytes.toBytes("family");
  private static final int NUM_KV = 25000;
  private static final Random rand = new Random(12983177L);

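  /** The number of valid key types in a store file: all except Minimum and Maximum. */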
  private static final int NUM_VALID_KEY_TYPES =
      KeyValue.Type.values().length - 2;

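  /** The block types that each schema flag under test is expected to cache. */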
  private enum CacheOnWriteType {
    DATA_BLOCKS(BlockType.DATA, BlockType.ENCODED_DATA),
    BLOOM_BLOCKS(BlockType.BLOOM_CHUNK),
    INDEX_BLOCKS(BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX);

    private final BlockType blockType1;
    private final BlockType blockType2;

    private CacheOnWriteType(BlockType blockType) {
      this(blockType, blockType);
    }

    private CacheOnWriteType(BlockType blockType1, BlockType blockType2) {
      this.blockType1 = blockType1;
      this.blockType2 = blockType2;
    }

    public boolean shouldBeCached(BlockType blockType) {
      return blockType == blockType1 || blockType == blockType2;
    }

    public void modifyFamilySchema(HColumnDescriptor family) {
      switch (this) {
      case DATA_BLOCKS:
        family.setCacheDataOnWrite(true);
        break;
      case BLOOM_BLOCKS:
        family.setCacheBloomsOnWrite(true);
        break;
      case INDEX_BLOCKS:
        family.setCacheIndexesOnWrite(true);
        break;
      }
    }
  }

  private final CacheOnWriteType cowType;
  private Configuration conf;
  private final String testDescription;
  private HRegion region;
  private HStore store;
  private HLog hlog;
  private FileSystem fs;

  public TestCacheOnWriteInSchema(CacheOnWriteType cowType) {
    this.cowType = cowType;
    testDescription = "[cacheOnWrite=" + cowType + "]";
    System.out.println(testDescription);
  }

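  /** Runs the test once per cache-on-write block type. */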
  @Parameters
  public static Collection<Object[]> getParameters() {
    List<Object[]> cowTypes = new ArrayList<Object[]>();
    for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
      cowTypes.add(new Object[] { cowType });
    }
    return cowTypes;
  }

  @Before
  public void setUp() throws IOException {
    conf = TEST_UTIL.getConfiguration();
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
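    // Disable the global cache-on-write defaults so that only the
    // column family schema settings under test can trigger caching.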
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, false);

    fs = HFileSystem.get(conf);

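    // Create the schema, with the cache-on-write flag under test enabled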
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setBloomFilterType(BloomType.ROWCOL);
    cowType.modifyFamilySchema(hcd);
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
    htd.addFamily(hcd);

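    // Create a store based on the schema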
    Path basedir = new Path(DIR);
    String logName = "logs";
    Path logdir = new Path(DIR, logName);
    fs.delete(logdir, true);

    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    hlog = HLogFactory.createHLog(fs, basedir, logName, conf);

    region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
    store = new HStore(region, hcd, conf);
  }

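  /** Closes the region and WAL and deletes the test dir, rethrowing the last cleanup failure. */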
  @After
  public void tearDown() throws IOException {
    IOException ex = null;
    try {
      region.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      hlog.closeAndDelete();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      fs.delete(new Path(DIR), true);
    } catch (IOException e) {
      LOG.error("Could not delete " + DIR, e);
      ex = e;
    }
    if (ex != null) {
      throw ex;
    }
  }

  @Test
  public void testCacheOnWriteInSchema() throws IOException {
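    // Write some random data into the store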
    StoreFile.Writer writer = store.createWriterInTmp(Integer.MAX_VALUE,
        HFile.DEFAULT_COMPRESSION_ALGORITHM, false, true);
    writeStoreFile(writer);
    writer.close();

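    // Verify the block types of interest were cached on write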
    readStoreFile(writer.getPath());
  }

  private void readStoreFile(Path path) throws IOException {
    CacheConfig cacheConf = store.getCacheConfig();
    BlockCache cache = cacheConf.getBlockCache();
    StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
        BloomType.ROWCOL, null);
    HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
    try {
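      // Open a scanner with (on read) caching disabled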
      HFileScanner scanner = reader.getScanner(false, false);
      assertTrue(testDescription, scanner.seekTo());

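      // Walk the file block by block, up to the load-on-open section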
      long offset = 0;
      HFileBlock prevBlock = null;
      while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
        long onDiskSize = -1;
        if (prevBlock != null) {
          onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
        }
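        // Flags: don't cache the block, use pread, this is not a compaction.
        // Also, pass null for expected block type to avoid checking it.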
        HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
            false, null);
        BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
            offset);
        boolean isCached = cache.getBlock(blockCacheKey, true, false) != null;
        boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
        if (shouldBeCached != isCached) {
          throw new AssertionError(
              "shouldBeCached: " + shouldBeCached + "\n" +
              "isCached: " + isCached + "\n" +
              "Test description: " + testDescription + "\n" +
              "block: " + block + "\n" +
              "blockCacheKey: " + blockCacheKey);
        }
        prevBlock = block;
        offset += block.getOnDiskSizeWithHeader();
      }
    } finally {
      reader.close();
    }
  }

  private static KeyValue.Type generateKeyType(Random rand) {
    if (rand.nextBoolean()) {
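      // Let's make half of KVs puts.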
      return KeyValue.Type.Put;
    } else {
      KeyValue.Type keyType =
          KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
      if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) {
        throw new RuntimeException("Generated an invalid key type: " + keyType
            + ". Probably the layout of KeyValue.Type has changed.");
      }
      return keyType;
    }
  }

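  /**
   * Writes NUM_KV KeyValues to the store file, splitting each random ordered
   * key into row, family, and qualifier parts.
   */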
  private void writeStoreFile(StoreFile.Writer writer) throws IOException {
    final int rowLen = 32;
    for (int i = 0; i < NUM_KV; ++i) {
      byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i);
      byte[] v = TestHFileWriterV2.randomValue(rand);
      int cfLen = rand.nextInt(k.length - rowLen + 1);
      KeyValue kv = new KeyValue(
          k, 0, rowLen,
          k, rowLen, cfLen,
          k, rowLen + cfLen, k.length - rowLen - cfLen,
          rand.nextLong(),
          generateKeyType(rand),
          v, 0, v.length);
      writer.append(kv);
    }
  }

}