/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Random;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileReaderV2;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
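
/**
 * Tests that the cache-on-write settings declared on a column family schema
 * (caching of data, Bloom filter, or index blocks at write time) are honored
 * when a store file is written, while the corresponding global
 * cache-on-write switches stay disabled.
 */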
@RunWith(Parameterized.class)
@Category(MediumTests.class)
public class TestCacheOnWriteInSchema {

  private static final Log LOG =
      LogFactory.getLog(TestCacheOnWriteInSchema.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final String DIR =
      TEST_UTIL.getDataTestDir("TestCacheOnWriteInSchema").toString();
  private static final byte[] table = Bytes.toBytes("table");
  private static final byte[] family = Bytes.toBytes("family");
  private static final int NUM_KV = 25000;
  private static final Random rand = new Random(12983177L);

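  /** Number of KeyValue types that may appear in a store file (all except Minimum and Maximum). */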
  private static final int NUM_VALID_KEY_TYPES =
      KeyValue.Type.values().length - 2;

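  /**
   * The cache-on-write setting under test. Each value enables caching of one
   * kind of block (data, Bloom chunk, or index) through the column family
   * descriptor and knows which block types it expects to find in the cache.
   */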
  private static enum CacheOnWriteType {
    DATA_BLOCKS(BlockType.DATA, BlockType.ENCODED_DATA),
    BLOOM_BLOCKS(BlockType.BLOOM_CHUNK),
    INDEX_BLOCKS(BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX);

    private final BlockType blockType1;
    private final BlockType blockType2;

    private CacheOnWriteType(BlockType blockType) {
      this(blockType, blockType);
    }

    private CacheOnWriteType(BlockType blockType1, BlockType blockType2) {
      this.blockType1 = blockType1;
      this.blockType2 = blockType2;
    }

    public boolean shouldBeCached(BlockType blockType) {
      return blockType == blockType1 || blockType == blockType2;
    }

    public void modifyFamilySchema(HColumnDescriptor family) {
      switch (this) {
      case DATA_BLOCKS:
        family.setCacheDataOnWrite(true);
        break;
      case BLOOM_BLOCKS:
        family.setCacheBloomsOnWrite(true);
        break;
      case INDEX_BLOCKS:
        family.setCacheIndexesOnWrite(true);
        break;
      }
    }
  }

  private final CacheOnWriteType cowType;
  private Configuration conf;
  private final String testDescription;
  private Store store;
  private FileSystem fs;

  public TestCacheOnWriteInSchema(CacheOnWriteType cowType) {
    this.cowType = cowType;
    testDescription = "[cacheOnWrite=" + cowType + "]";
    System.out.println(testDescription);
  }

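  /** Runs the test once for each {@link CacheOnWriteType}. */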
  @Parameters
  public static Collection<Object[]> getParameters() {
    List<Object[]> cowTypes = new ArrayList<Object[]>();
    for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
      cowTypes.add(new Object[] { cowType });
    }
    return cowTypes;
  }

  @Before
  public void setUp() throws IOException {
    conf = TEST_UTIL.getConfiguration();
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
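    // Disable the global cache-on-write switches; only the per-family schema
    // settings applied below should cause blocks to be cached on write.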
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, false);

    fs = HFileSystem.get(conf);

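    // Create the column family schema with the cache-on-write flag under test.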
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setBloomFilterType(BloomType.ROWCOL);
    cowType.modifyFamilySchema(hcd);
    HTableDescriptor htd = new HTableDescriptor(table);
    htd.addFamily(hcd);

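    // Create a region and a store backed by that schema.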
    Path basedir = new Path(DIR);
    Path logdir = new Path(DIR + "/logs");
    Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
    fs.delete(logdir, true);
    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
    HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
    HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
    store = new Store(basedir, region, hcd, fs, conf);
  }

  @After
  public void tearDown() {
    try {
      fs.delete(new Path(DIR), true);
    } catch (IOException e) {
      LOG.error("Could not delete " + DIR, e);
    }
  }

  @Test
  public void testCacheOnWriteInSchema() throws IOException {
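    // Write a store file with random KeyValues through the store's writer.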
    StoreFile.Writer writer = store.createWriterInTmp(Integer.MAX_VALUE,
        Compression.Algorithm.NONE, false);
    writeStoreFile(writer);
    writer.close();

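    // Read it back and verify that exactly the configured block types were cached.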
    readStoreFile(writer.getPath());
  }

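  /**
   * Reads the given store file with on-read caching disabled and asserts that
   * a block is in the block cache if and only if its type is one the schema
   * asked to cache on write.
   */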
  private void readStoreFile(Path path) throws IOException {
    CacheConfig cacheConf = store.getCacheConfig();
    BlockCache cache = cacheConf.getBlockCache();
    StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
        BloomType.ROWCOL, null);
    store.passSchemaMetricsTo(sf);
    HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
    try {
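      // Open a scanner with on-read block caching disabled so this read does
      // not populate the cache itself.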
      HFileScanner scanner = reader.getScanner(false, false);
      assertTrue(testDescription, scanner.seekTo());

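      // Walk every block up to the load-on-open section and compare its cache
      // state with what the schema requested.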
      long offset = 0;
      HFileBlock prevBlock = null;
      while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
        long onDiskSize = -1;
        if (prevBlock != null) {
          onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
        }
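        // Flags: do not cache the block on read, use pread, this is not a
        // compaction; pass null so the expected block type is not checked.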
        HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
            false, null);
        BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
            offset);
        boolean isCached = cache.getBlock(blockCacheKey, true, false) != null;
        boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
        if (shouldBeCached != isCached) {
          throw new AssertionError(
              "shouldBeCached: " + shouldBeCached + "\n" +
              "isCached: " + isCached + "\n" +
              "Test description: " + testDescription + "\n" +
              "block: " + block + "\n" +
              "blockCacheKey: " + blockCacheKey);
        }
        prevBlock = block;
        offset += block.getOnDiskSizeWithHeader();
      }
    } finally {
      reader.close();
    }
  }

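  /**
   * Picks a random KeyValue type: half of the generated cells are Puts, the
   * rest are drawn uniformly from the remaining valid types.
   */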
  private static KeyValue.Type generateKeyType(Random rand) {
    if (rand.nextBoolean()) {
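      // Use the most common key type (Put) for half of the cells.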
      return KeyValue.Type.Put;
    } else {
      KeyValue.Type keyType =
          KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
      if (keyType == KeyValue.Type.Minimum
          || keyType == KeyValue.Type.Maximum) {
        throw new RuntimeException("Generated an invalid key type: " + keyType
            + ". Probably the layout of KeyValue.Type has changed.");
      }
      return keyType;
    }
  }

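  /**
   * Appends NUM_KV random KeyValues, carving row, family, and qualifier bytes
   * out of each random ordered key.
   */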
  private void writeStoreFile(StoreFile.Writer writer) throws IOException {
    final int rowLen = 32;
    for (int i = 0; i < NUM_KV; ++i) {
      byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i);
      byte[] v = TestHFileWriterV2.randomValue(rand);
      int cfLen = rand.nextInt(k.length - rowLen + 1);
      KeyValue kv = new KeyValue(
          k, 0, rowLen,
          k, rowLen, cfLen,
          k, rowLen + cfLen, k.length - rowLen - cfLen,
          rand.nextLong(),
          generateKeyType(rand),
          v, 0, v.length);
      writer.append(kv);
    }
  }

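  /** Checks that the test does not leak resources such as threads. */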
  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
      new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}