/*
 * Copyright 2011 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Random;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.MultithreadedTestUtil;
import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.ChecksumType;

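/**
 * Utility methods for exercising {@link BlockCache} implementations.
 * A test for a concrete cache would typically construct the cache and hand it
 * to these helpers; for example (only a sketch, constructing the cache under
 * test is up to the caller):
 *
 * <pre>
 *   BlockCache cache = ...; // any BlockCache implementation under test
 *   CacheTestUtils.testCacheSimple(cache, 8 * 1024, 100);
 *   CacheTestUtils.testCacheMultiThreaded(cache, 8 * 1024, 10, 10000, 0.8);
 * </pre>
 */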
public class CacheTestUtils {

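  // Generated HFile blocks claim to include memstore timestamps; this only
  // affects how the test blocks are constructed, not the cache under test.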
  private static final boolean includesMemstoreTS = true;

  /**
   * Checks that the cache's reported heap size grows when a block is cached
   * and returns to its previous value when that same block is evicted.
   */
  public static void testHeapSizeChanges(final BlockCache toBeTested,
      final int blockSize) {
    HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
    long heapSize = ((HeapSize) toBeTested).heapSize();
    toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);

    /* When we cache something, the heap size should always increase. */
    assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());

    toBeTested.evictBlock(blocks[0].blockName);

    /* Post-eviction, the heap size should be back to its original value. */
    assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
  }
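
  /**
   * Hammers the cache from multiple threads: each thread pulls a distinct
   * pre-generated block from a shared queue, caches it, reads it back, and
   * evicts it. Fails if the hit ratio drops below {@code passingScore}.
   */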
  public static void testCacheMultiThreaded(final BlockCache toBeTested,
      final int blockSize, final int numThreads, final int numQueries,
      final double passingScore) throws Exception {

    Configuration conf = new Configuration();
    final MultithreadedTestUtil.TestContext ctx =
        new MultithreadedTestUtil.TestContext(conf);

    final AtomicInteger totalQueries = new AtomicInteger();
    final ConcurrentLinkedQueue<HFileBlockPair> blocksToTest =
        new ConcurrentLinkedQueue<HFileBlockPair>();
    final AtomicInteger hits = new AtomicInteger();
    final AtomicInteger miss = new AtomicInteger();

    HFileBlockPair[] blocks = generateHFileBlocks(blockSize, numQueries);
    blocksToTest.addAll(Arrays.asList(blocks));

    for (int i = 0; i < numThreads; i++) {
      TestThread t = new MultithreadedTestUtil.RepeatingTestThread(ctx) {
        @Override
        public void doAnAction() throws Exception {
          if (!blocksToTest.isEmpty()) {
            HFileBlockPair ourBlock = blocksToTest.poll();
            // If we run out of blocks to test, then we should stop the tests.
            if (ourBlock == null) {
              ctx.setStopFlag(true);
              return;
            }
            toBeTested.cacheBlock(ourBlock.blockName, ourBlock.block);
            Cacheable retrievedBlock = toBeTested.getBlock(ourBlock.blockName,
                false, false);
            if (retrievedBlock != null) {
              assertEquals(ourBlock.block, retrievedBlock);
              toBeTested.evictBlock(ourBlock.blockName);
              hits.incrementAndGet();
              assertNull(toBeTested.getBlock(ourBlock.blockName, false, false));
            } else {
              miss.incrementAndGet();
            }
            totalQueries.incrementAndGet();
          }
        }
      };
      t.setDaemon(true);
      ctx.addThread(t);
    }
    ctx.startThreads();
    while (!blocksToTest.isEmpty() && ctx.shouldRun()) {
      Thread.sleep(10);
    }
    ctx.stop();
    if (hits.get() / ((double) hits.get() + (double) miss.get())
        < passingScore) {
      fail("Too many misses. Hits: " + hits.get() + " Misses: " + miss.get());
    }
  }

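  /**
   * Single-threaded sanity check: caches a set of generated blocks, verifies
   * that whatever is still cached reads back intact, and checks that
   * re-caching an already-cached block is rejected.
   */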
  public static void testCacheSimple(BlockCache toBeTested, int blockSize,
      int numBlocks) throws Exception {

    HFileBlockPair[] blocks = generateHFileBlocks(blockSize, numBlocks);
    // Confirm empty
    for (HFileBlockPair block : blocks) {
      assertNull(toBeTested.getBlock(block.blockName, true, false));
    }

    // Add blocks
    for (HFileBlockPair block : blocks) {
      toBeTested.cacheBlock(block.blockName, block.block);
    }

    // Check that all blocks still cached contain the right information, or
    // have already been dropped: MapMaker-backed caches make no guarantees
    // about when they evict, so neither can we.
    for (HFileBlockPair block : blocks) {
      HFileBlock buf = (HFileBlock) toBeTested.getBlock(block.blockName, true, false);
      if (buf != null) {
        assertEquals(block.block, buf);
      }
    }

    // Re-add the same blocks; the cache should refuse to re-cache an
    // already-cached block, typically by throwing a RuntimeException.
    for (HFileBlockPair block : blocks) {
      try {
        if (toBeTested.getBlock(block.blockName, true, false) != null) {
          toBeTested.cacheBlock(block.blockName, block.block);
          fail("Cache should not allow re-caching a block");
        }
      } catch (RuntimeException re) {
        // expected
      }
    }
  }

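  /**
   * Hammers a single key from many threads at once: every thread repeatedly
   * reads the same cached block and verifies its contents, stressing
   * concurrent reads of one entry.
   */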
  public static void hammerSingleKey(final BlockCache toBeTested,
      int blockSize, int numThreads, int numQueries) throws Exception {
    final BlockCacheKey key = new BlockCacheKey("key", 0);
    final byte[] buf = new byte[5 * 1024];
    Arrays.fill(buf, (byte) 5);

    final ByteArrayCacheable bac = new ByteArrayCacheable(buf);
    Configuration conf = new Configuration();
    MultithreadedTestUtil.TestContext ctx =
        new MultithreadedTestUtil.TestContext(conf);

    final AtomicInteger totalQueries = new AtomicInteger();
    toBeTested.cacheBlock(key, bac);

    for (int i = 0; i < numThreads; i++) {
      TestThread t = new MultithreadedTestUtil.RepeatingTestThread(ctx) {
        @Override
        public void doAnAction() throws Exception {
          ByteArrayCacheable returned = (ByteArrayCacheable) toBeTested
              .getBlock(key, false, false);
          assertArrayEquals(buf, returned.buf);
          totalQueries.incrementAndGet();
        }
      };

      t.setDaemon(true);
      ctx.addThread(t);
    }

    ctx.startThreads();
    while (totalQueries.get() < numQueries && ctx.shouldRun()) {
      Thread.sleep(10);
    }
    ctx.stop();
  }

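  /**
   * Generates cache churn from many threads, each cycling through its own set
   * of keys, and asserts that at least one eviction happened.
   */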
  public static void hammerEviction(final BlockCache toBeTested, int blockSize,
      int numThreads, int numQueries) throws Exception {

    Configuration conf = new Configuration();
    MultithreadedTestUtil.TestContext ctx =
        new MultithreadedTestUtil.TestContext(conf);

    final AtomicInteger totalQueries = new AtomicInteger();

    for (int i = 0; i < numThreads; i++) {
      final int finalI = i;

      TestThread t = new MultithreadedTestUtil.RepeatingTestThread(ctx) {
        @Override
        public void doAnAction() throws Exception {
          for (int j = 0; j < 100; j++) {
            BlockCacheKey key = new BlockCacheKey("key_" + finalI + "_" + j, 0);
            // Give each cached block its own backing array; a shared array
            // would be overwritten underneath blocks already in the cache.
            // The fill value is deterministic per key, so content checks on
            // re-read remain stable.
            final byte[] buf = new byte[5 * 1024];
            Arrays.fill(buf, (byte) (finalI * j));
            final ByteArrayCacheable bac = new ByteArrayCacheable(buf);

            ByteArrayCacheable gotBack = (ByteArrayCacheable) toBeTested
                .getBlock(key, true, false);
            if (gotBack != null) {
              assertArrayEquals(gotBack.buf, bac.buf);
            } else {
              toBeTested.cacheBlock(key, bac);
            }
          }
          totalQueries.incrementAndGet();
        }
      };

      t.setDaemon(true);
      ctx.addThread(t);
    }

    ctx.startThreads();
    while (totalQueries.get() < numQueries && ctx.shouldRun()) {
      Thread.sleep(10);
    }
    ctx.stop();

    assertTrue(toBeTested.getStats().getEvictedCount() > 0);
  }

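  /**
   * A trivially small {@link Cacheable} wrapping a raw byte array, used to
   * exercise the cache without building real HFile blocks. The
   * {@code Thread.yield()} calls in the middle of serialization and
   * deserialization encourage interleaving, making concurrency bugs in caches
   * that serialize their entries more likely to surface.
   */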
  private static class ByteArrayCacheable implements Cacheable {

    final byte[] buf;

    public ByteArrayCacheable(byte[] buf) {
      this.buf = buf;
    }

    @Override
    public long heapSize() {
      // A rough estimate: a 4-byte length plus the payload itself.
      return 4 + buf.length;
    }

    @Override
    public int getSerializedLength() {
      return 4 + buf.length;
    }

    @Override
    public void serialize(ByteBuffer destination) {
      destination.putInt(buf.length);
      Thread.yield();
      destination.put(buf);
      destination.rewind();
    }

    @Override
    public CacheableDeserializer<Cacheable> getDeserializer() {
      return new CacheableDeserializer<Cacheable>() {

        @Override
        public Cacheable deserialize(ByteBuffer b) throws IOException {
          int len = b.getInt();
          Thread.yield();
          byte[] buf = new byte[len];
          b.get(buf);
          return new ByteArrayCacheable(buf);
        }
      };
    }

    @Override
    public BlockType getBlockType() {
      return BlockType.DATA;
    }

    @Override
    public SchemaMetrics getSchemaMetrics() {
      return SchemaMetrics.getUnknownInstanceForTest();
    }
  }

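  /**
   * Builds {@code numBlocks} random data blocks of {@code blockSize} bytes,
   * each paired with a unique cache key.
   */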
  private static HFileBlockPair[] generateHFileBlocks(int blockSize,
      int numBlocks) {
    HFileBlockPair[] returnedBlocks = new HFileBlockPair[numBlocks];
    Random rand = new Random();
    HashSet<String> usedStrings = new HashSet<String>();
    for (int i = 0; i < numBlocks; i++) {

      // The serialized size of the buffer needs to match blockSize, so we
      // declare our data size to be smaller than blockSize by the
      // serialization space required.
      ByteBuffer cachedBuffer = ByteBuffer.allocate(blockSize
          - HFileBlock.EXTRA_SERIALIZATION_SPACE);
      rand.nextBytes(cachedBuffer.array());
      cachedBuffer.rewind();
      int onDiskSizeWithoutHeader = blockSize
          - HFileBlock.EXTRA_SERIALIZATION_SPACE;
      int uncompressedSizeWithoutHeader = blockSize
          - HFileBlock.EXTRA_SERIALIZATION_SPACE;
      long prevBlockOffset = rand.nextLong();
      BlockType.DATA.write(cachedBuffer);
      cachedBuffer.putInt(onDiskSizeWithoutHeader);
      cachedBuffer.putInt(uncompressedSizeWithoutHeader);
      cachedBuffer.putLong(prevBlockOffset);
      cachedBuffer.rewind();

      HFileBlock generated = new HFileBlock(BlockType.DATA,
          onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
          prevBlockOffset, cachedBuffer, HFileBlock.DONT_FILL_HEADER,
          blockSize, includesMemstoreTS, HFileBlock.MINOR_VERSION_NO_CHECKSUM,
          0, ChecksumType.NULL.getCode(),
          onDiskSizeWithoutHeader + HFileBlock.HEADER_SIZE_WITH_CHECKSUMS);

      // Draw random keys until we find one that has not been used yet.
      String strKey;
      do {
        strKey = Long.toString(rand.nextLong());
      } while (!usedStrings.add(strKey));

      returnedBlocks[i] = new HFileBlockPair();
      returnedBlocks[i].blockName = new BlockCacheKey(strKey, 0);
      returnedBlocks[i].block = generated;
    }
    return returnedBlocks;
  }

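  /** A generated block together with the key it should be cached under. */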
  private static class HFileBlockPair {
    BlockCacheKey blockName;
    HFileBlock block;
  }
}