/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile.slab;

import java.nio.ByteBuffer;
import java.util.List;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.util.StringUtils;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;

/**
 * SingleSizeCache is a slab-allocated cache that caches elements up to a single
 * size. It uses a slab allocator (Slab.java) to divide a direct ByteBuffer
 * into evenly sized blocks. Any cached data will take up exactly one block. An
 * exception will be thrown if the cached data cannot fit into the blockSize of
 * this SingleSizeCache.
 *
 * Eviction and LRU ordering are taken care of by Guava's CacheBuilder, which
 * backs this cache with a size-bounded concurrent map.
 *
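 * <p>
 * A minimal usage sketch, for illustration only; the 64 KB block size, the
 * block count, and the {@code key}/{@code block} variables are assumptions
 * supplied by the caller (in practice a parent SlabCache drives this class):
 *
 * <pre>
 *   // Assumes: a BlockCacheKey key and a Cacheable block that serializes to
 *   // at most 64 KB. Passing null skips the SlabItemActionWatcher callbacks.
 *   SingleSizeCache cache = new SingleSizeCache(64 * 1024, 1000, null);
 *   cache.cacheBlock(key, block);
 *   Cacheable read = cache.getBlock(key, true, false, true);
 *   cache.shutdown();   // releases the off-heap slab
 * </pre>
 *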
 */
@InterfaceAudience.Private
public class SingleSizeCache implements BlockCache, HeapSize {
  private final Slab backingStore;
  private final ConcurrentMap<BlockCacheKey, CacheablePair> backingMap;
  private final int numBlocks;
  private final int blockSize;
  private final CacheStats stats;
  private final SlabItemActionWatcher actionWatcher;
  private final AtomicLong size;
  private final AtomicLong timeSinceLastAccess;
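  // Fixed heap overhead charged for a SingleSizeCache instance itself: an
  // estimate covering the object header, the int fields and the reference
  // fields above. The backing Slab's own footprint and per-entry overhead are
  // added separately through the size counter and heapSize().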
  public final static long CACHE_FIXED_OVERHEAD = ClassSize
      .align((2 * Bytes.SIZEOF_INT) + (5 * ClassSize.REFERENCE)
          + ClassSize.OBJECT);

  static final Log LOG = LogFactory.getLog(SingleSizeCache.class);

  /**
   * Constructor. Specify the size of the blocks, the number of blocks, and
   * the SlabCache this cache will be assigned to.
   *
   * @param blockSize the size of each block, in bytes
   * @param numBlocks the number of blocks of blockSize this cache will hold
   * @param master the SlabCache this SingleSizeCache is assigned to
   */
  public SingleSizeCache(int blockSize, int numBlocks,
      SlabItemActionWatcher master) {
    this.blockSize = blockSize;
    this.numBlocks = numBlocks;
    backingStore = new Slab(blockSize, numBlocks);
    this.stats = new CacheStats();
    this.actionWatcher = master;
    this.size = new AtomicLong(CACHE_FIXED_OVERHEAD + backingStore.heapSize());
    this.timeSinceLastAccess = new AtomicLong();

    // This evictionListener is called whenever the cache automatically
    // evicts something.
    RemovalListener<BlockCacheKey, CacheablePair> listener =
      new RemovalListener<BlockCacheKey, CacheablePair>() {
        @Override
        public void onRemoval(
            RemovalNotification<BlockCacheKey, CacheablePair> notification) {
          if (!notification.wasEvicted()) {
            // Only process removals by eviction, not by replacement or
            // explicit removal
            return;
          }
          CacheablePair value = notification.getValue();
          timeSinceLastAccess.set(System.nanoTime()
              - value.recentlyAccessed.get());
          stats.evict();
          doEviction(notification.getKey(), value);
        }
      };

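    // The map is capped at numBlocks - 1 entries, one fewer than the slab has
    // blocks. This presumably keeps one slab block free so that a new
    // cacheBlock() can allocate its buffer before the size-based eviction of
    // an older entry returns that entry's block to the slab.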
    backingMap = CacheBuilder.newBuilder()
        .maximumSize(numBlocks - 1)
        .removalListener(listener)
        .<BlockCacheKey, CacheablePair>build()
        .asMap();
  }

  @Override
  public void cacheBlock(BlockCacheKey blockName, Cacheable toBeCached) {
    ByteBuffer storedBlock;

    try {
      storedBlock = backingStore.alloc(toBeCached.getSerializedLength());
    } catch (InterruptedException e) {
      LOG.warn("SlabAllocator was interrupted while waiting for block to become available", e);
      return;
    }

    CacheablePair newEntry = new CacheablePair(toBeCached.getDeserializer(),
        storedBlock);
    toBeCached.serialize(storedBlock);

    synchronized (this) {
      CacheablePair alreadyCached = backingMap.putIfAbsent(blockName, newEntry);

      if (alreadyCached != null) {
        backingStore.free(storedBlock);
        throw new RuntimeException("already cached " + blockName);
      }
      if (actionWatcher != null) {
        actionWatcher.onInsertion(blockName, this);
      }
    }
    newEntry.recentlyAccessed.set(System.nanoTime());
    this.size.addAndGet(newEntry.heapSize());
  }

  @Override
  public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
      boolean updateCacheMetrics) {
    CacheablePair contentBlock = backingMap.get(key);
    if (contentBlock == null) {
      if (!repeat && updateCacheMetrics) stats.miss(caching);
      return null;
    }

    if (updateCacheMetrics) stats.hit(caching);
    // Synchronize on the block so we don't deserialize while doEviction() is
    // freeing the underlying buffer; a null serializedData inside the lock
    // means the block was evicted concurrently.
    try {
      contentBlock.recentlyAccessed.set(System.nanoTime());
      synchronized (contentBlock) {
        if (contentBlock.serializedData == null) {
          // concurrently evicted
          LOG.warn("Concurrent eviction of " + key);
          return null;
        }
        return contentBlock.deserializer
            .deserialize(contentBlock.serializedData.asReadOnlyBuffer());
      }
    } catch (Throwable t) {
      LOG.error("Deserializer threw an exception. This may indicate a bug.", t);
      return null;
    }
  }

  /**
   * Evicts the block with the given key.
   *
   * @param key the key of the entry we are going to evict
   * @return true if the block was found and evicted, false otherwise
   */
  public boolean evictBlock(BlockCacheKey key) {
    stats.evict();
    CacheablePair evictedBlock = backingMap.remove(key);

    if (evictedBlock != null) {
      doEviction(key, evictedBlock);
    }
    return evictedBlock != null;
  }

  private void doEviction(BlockCacheKey key, CacheablePair evictedBlock) {
    long evictedHeap = 0;
    synchronized (evictedBlock) {
      if (evictedBlock.serializedData == null) {
        // someone else already freed
        return;
      }
      evictedHeap = evictedBlock.heapSize();
      ByteBuffer bb = evictedBlock.serializedData;
      evictedBlock.serializedData = null;
      backingStore.free(bb);

      // We have to do this callback inside the synchronization here.
      // Otherwise we can have the following interleaving:
      // Thread A calls getBlock():
      // SlabCache directs call to this SingleSizeCache
      // It gets the CacheablePair object
      // Thread B runs eviction
      // doEviction() is called and sets serializedData = null, here.
      // Thread A sees the null serializedData, and returns null
      // Thread A calls cacheBlock on the same block, and gets
      // "already cached" since the block is still in backingStore

      if (actionWatcher != null) {
        actionWatcher.onEviction(key, this);
      }
    }
    stats.evicted();
    size.addAndGet(-1 * evictedHeap);
  }

  public void logStats() {

    long milliseconds = this.timeSinceLastAccess.get() / 1000000;

    LOG.info("For Slab of size " + this.blockSize + ": "
        + this.getOccupiedSize() / this.blockSize
        + " occupied, out of a capacity of " + this.numBlocks
        + " blocks. HeapSize is "
        + StringUtils.humanReadableInt(this.heapSize()) + " bytes, "
        + "churnTime=" + StringUtils.formatTime(milliseconds));

    LOG.info("Slab Stats: "
        + "accesses=" + stats.getRequestCount() + ", "
        + "hits=" + stats.getHitCount() + ", "
        + "hitRatio=" + (stats.getHitCount() == 0 ? "0" :
            StringUtils.formatPercent(stats.getHitRatio(), 2) + "%") + ", "
        + "cachingAccesses=" + stats.getRequestCachingCount() + ", "
        + "cachingHits=" + stats.getHitCachingCount() + ", "
        + "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? "0" :
            StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + "%") + ", "
        + "evictions=" + stats.getEvictionCount() + ", "
        + "evicted=" + stats.getEvictedCount() + ", "
        + "evictedPerRun=" + stats.evictedPerEviction());
  }

  public void shutdown() {
    backingStore.shutdown();
  }

  public long heapSize() {
    return this.size.get() + backingStore.heapSize();
  }

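  // The sizing accessors below are expressed in bytes: size() is the total
  // capacity of the slab (blockSize * numBlocks), while getFreeSize() and
  // getOccupiedSize() report the unused and used portions of that capacity.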
  public long size() {
    return (long) this.blockSize * (long) this.numBlocks;
  }

  public long getFreeSize() {
    return (long) backingStore.getBlocksRemaining() * (long) blockSize;
  }

  public long getOccupiedSize() {
    return (long) (numBlocks - backingStore.getBlocksRemaining()) * (long) blockSize;
  }

  public long getEvictedCount() {
    return stats.getEvictedCount();
  }

  public CacheStats getStats() {
    return this.stats;
  }

  @Override
  public long getBlockCount() {
    return numBlocks - backingStore.getBlocksRemaining();
  }

  /* Since it's off-heap, it doesn't matter whether the block is flagged in-memory or not */
  @Override
  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
    this.cacheBlock(cacheKey, buf);
  }

  /*
   * This is not called in normal operation, since evictions are handled at the
   * SlabCache layer; it is implemented in case this class is used as a
   * standalone cache.
   */
  @Override
  public int evictBlocksByHfileName(String hfileName) {
    int evictedCount = 0;
    for (BlockCacheKey e : backingMap.keySet()) {
      if (e.getHfileName().equals(hfileName) && this.evictBlock(e)) {
        ++evictedCount;
      }
    }
    return evictedCount;
  }

  @Override
  public long getCurrentSize() {
    return 0;
  }

  /*
   * Not implemented. Doing this from the off-heap cache would be extremely
   * costly: every cached object would have to be copied on-heap once.
   */
  @Override
  public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(
      Configuration conf) {
    throw new UnsupportedOperationException();
  }

  /* Just a pair class: holds the deserializer and the serialized data for a cached block */
  private static class CacheablePair implements HeapSize {
    final CacheableDeserializer<Cacheable> deserializer;
    ByteBuffer serializedData;
    AtomicLong recentlyAccessed;

    private CacheablePair(CacheableDeserializer<Cacheable> deserializer,
        ByteBuffer serializedData) {
      this.recentlyAccessed = new AtomicLong();
      this.deserializer = deserializer;
      this.serializedData = serializedData;
    }

    /*
     * The heap size of this pair is the object overhead, three references
     * (deserializer, serializedData, recentlyAccessed) and the AtomicLong; the
     * serialized data itself lives off-heap in the slab and is accounted for
     * by SingleSizeCache.
     */
    @Override
    public long heapSize() {
      return ClassSize.align(ClassSize.OBJECT + ClassSize.REFERENCE * 3
          + ClassSize.ATOMIC_LONG);
    }
  }
}