1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.io.hfile;
20  
21  import java.io.IOException;
22  import java.lang.ref.WeakReference;
23  import java.nio.ByteBuffer;
24  import java.util.ArrayList;
25  import java.util.Collections;
26  import java.util.EnumMap;
27  import java.util.HashMap;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.PriorityQueue;
31  import java.util.SortedSet;
32  import java.util.TreeSet;
33  import java.util.concurrent.ConcurrentHashMap;
34  import java.util.concurrent.Executors;
35  import java.util.concurrent.ScheduledExecutorService;
36  import java.util.concurrent.TimeUnit;
37  import java.util.concurrent.atomic.AtomicLong;
38  import java.util.concurrent.locks.ReentrantLock;
39  
40  import org.apache.commons.logging.Log;
41  import org.apache.commons.logging.LogFactory;
42  import org.apache.hadoop.classification.InterfaceAudience;
43  import org.apache.hadoop.conf.Configuration;
44  import org.apache.hadoop.fs.FileSystem;
45  import org.apache.hadoop.fs.Path;
46  import org.apache.hadoop.hbase.io.HeapSize;
47  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
48  import org.apache.hadoop.hbase.io.hfile.CachedBlock.BlockPriority;
49  import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
50  import org.apache.hadoop.hbase.util.Bytes;
51  import org.apache.hadoop.hbase.util.ClassSize;
52  import org.apache.hadoop.hbase.util.FSUtils;
53  import org.apache.hadoop.hbase.util.HasThread;
54  import org.apache.hadoop.hbase.util.Threads;
55  import org.apache.hadoop.util.StringUtils;
56  
57  import com.google.common.util.concurrent.ThreadFactoryBuilder;
58  
59  /**
60   * A block cache implementation that is memory-aware using {@link HeapSize},
61   * memory-bound using an LRU eviction algorithm, and concurrent: backed by a
62   * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving
63   * constant-time {@link #cacheBlock} and {@link #getBlock} operations.<p>
64   *
65   * Contains three levels of block priority to allow for
66   * scan-resistance and in-memory families.  A block is added with an inMemory
67   * flag if necessary; otherwise it is given single-access priority.  Once a
68   * block is accessed again, it is promoted to multiple-access priority.  This
69   * prevents scans from thrashing the cache by adding a least-frequently-used
70   * element to the eviction algorithm.<p>
71   *
72   * Each priority is given its own chunk of the total cache to ensure
73   * fairness during eviction.  Each priority will retain close to its maximum
74   * size, however, if any priority is not using its entire chunk the others
75   * are able to grow beyond their chunk size.<p>
76   *
77   * Instantiated at a minimum with the total size and average block size.
78   * All sizes are in bytes.  The block size is not especially important as this
79   * cache is fully dynamic in its sizing of blocks.  It is only used for
80   * pre-allocating data structures and in initial heap estimation of the map.<p>
81   *
82   * The detailed constructor defines the sizes for the three priorities (they
83   * should total to the maximum size defined).  It also sets the levels that
84   * trigger and control the eviction thread.<p>
85   *
86   * The acceptable size is the cache size level which triggers the eviction
87   * process to start.  It evicts enough blocks to get the size below the
88   * minimum size specified.<p>
89   *
90   * Eviction happens in a separate thread and involves a single full-scan
91   * of the map.  It determines how many bytes must be freed to reach the minimum
92   * size, and then while scanning collects the least-recently-used blocks from
93   * each of the three priorities (so up to three times the bytes to free may be
94   * considered in total).  It then uses the priority chunk sizes to evict fairly
95   * according to the relative sizes and usage.
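     *
     * An illustrative construction sketch (the sizes here are assumptions, not
     * recommendations), relying on the defaults defined below:
     * <pre>
     *   // With the default factors: acceptable = 0.99 * maxSize (eviction trigger),
     *   // min = 0.95 * maxSize (eviction target), and the single/multi/in-memory
     *   // chunks get 25%/50%/25% of the cache.
     *   LruBlockCache cache = new LruBlockCache(1024L * 1024 * 1024, 64 * 1024);
     * </pre>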
96   */
97  @InterfaceAudience.Private
98  public class LruBlockCache implements BlockCache, HeapSize {
99  
100   static final Log LOG = LogFactory.getLog(LruBlockCache.class);
101 
102   static final String LRU_MIN_FACTOR_CONFIG_NAME = "hbase.lru.blockcache.min.factor";
103   static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME = "hbase.lru.blockcache.acceptable.factor";
104 
105   /** Default Configuration Parameters */
106 
107   /** Backing Concurrent Map Configuration */
108   static final float DEFAULT_LOAD_FACTOR = 0.75f;
109   static final int DEFAULT_CONCURRENCY_LEVEL = 16;
110 
111   /** Eviction thresholds */
112   static final float DEFAULT_MIN_FACTOR = 0.95f;
113   static final float DEFAULT_ACCEPTABLE_FACTOR = 0.99f;
114 
115   /** Priority buckets */
116   static final float DEFAULT_SINGLE_FACTOR = 0.25f;
117   static final float DEFAULT_MULTI_FACTOR = 0.50f;
118   static final float DEFAULT_MEMORY_FACTOR = 0.25f;
119 
120   /** Statistics thread period, in seconds */
121   static final int statThreadPeriod = 60 * 5;
122 
123   /** Concurrent map (the cache) */
124   private final ConcurrentHashMap<BlockCacheKey,CachedBlock> map;
125 
126   /** Eviction lock (locked when eviction in process) */
127   private final ReentrantLock evictionLock = new ReentrantLock(true);
128 
129   /** Volatile boolean to track if we are in an eviction process or not */
130   private volatile boolean evictionInProgress = false;
131 
132   /** Eviction thread */
133   private final EvictionThread evictionThread;
134 
135   /** Statistics thread schedule pool (for heavy debugging, could remove) */
136   private final ScheduledExecutorService scheduleThreadPool =
137     Executors.newScheduledThreadPool(1,
138       new ThreadFactoryBuilder()
139         .setNameFormat("LruStats #%d")
140         .setDaemon(true)
141         .build());
142 
143   /** Current size of cache */
144   private final AtomicLong size;
145 
146   /** Current number of cached elements */
147   private final AtomicLong elements;
148 
149   /** Cache access count (sequential ID) */
150   private final AtomicLong count;
151 
152   /** Cache statistics */
153   private final CacheStats stats;
154 
155   /** Maximum allowable size of cache (block put if size > max, evict) */
156   private long maxSize;
157 
158   /** Approximate block size */
159   private long blockSize;
160 
161   /** Acceptable size of cache (no evictions if size < acceptable) */
162   private float acceptableFactor;
163 
164   /** Minimum threshold of cache (when evicting, evict until size < min) */
165   private float minFactor;
166 
167   /** Single access bucket size */
168   private float singleFactor;
169 
170   /** Multiple access bucket size */
171   private float multiFactor;
172 
173   /** In-memory bucket size */
174   private float memoryFactor;
175 
176   /** Overhead of the structure itself */
177   private long overhead;
178 
179   /** Where to send victims (blocks evicted from the cache) */
180   private BucketCache victimHandler = null;
181 
182   /**
183    * Default constructor.  Specify maximum size and expected average block
184    * size (approximation is fine).
185    *
186    * <p>All other factors will be calculated based on defaults specified in
187    * this class.
188    * @param maxSize maximum size of cache, in bytes
189    * @param blockSize approximate size of each block, in bytes
190    */
191   public LruBlockCache(long maxSize, long blockSize) {
192     this(maxSize, blockSize, true);
193   }
194 
195   /**
196    * Constructor used for testing.  Allows disabling of the eviction thread.
197    */
198   public LruBlockCache(long maxSize, long blockSize, boolean evictionThread) {
199     this(maxSize, blockSize, evictionThread,
200         (int)Math.ceil(1.2*maxSize/blockSize),
201         DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL,
202         DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR,
203         DEFAULT_SINGLE_FACTOR, DEFAULT_MULTI_FACTOR,
204         DEFAULT_MEMORY_FACTOR);
205   }
206 
207   public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) {
208     this(maxSize, blockSize, evictionThread,
209         (int)Math.ceil(1.2*maxSize/blockSize),
210         DEFAULT_LOAD_FACTOR,
211         DEFAULT_CONCURRENCY_LEVEL,
212         conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR),
213         conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR),
214         DEFAULT_SINGLE_FACTOR,
215         DEFAULT_MULTI_FACTOR,
216         DEFAULT_MEMORY_FACTOR);
217   }
218 
219   public LruBlockCache(long maxSize, long blockSize, Configuration conf) {
220     this(maxSize, blockSize, true, conf);
221   }
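
      // A minimal configuration sketch (not from this file): the eviction thresholds
      // can be overridden through the Configuration-based constructors using the two
      // config keys declared above.  HBaseConfiguration.create() and the example
      // values (0.90/0.95) are assumptions for illustration only.
      //
      //   Configuration conf = HBaseConfiguration.create();
      //   conf.setFloat(LRU_MIN_FACTOR_CONFIG_NAME, 0.90f);        // hbase.lru.blockcache.min.factor
      //   conf.setFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, 0.95f); // hbase.lru.blockcache.acceptable.factor
      //   LruBlockCache cache = new LruBlockCache(maxSize, blockSize, conf);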
222 
223   /**
224    * Configurable constructor.  Use this constructor if not using defaults.
225    * @param maxSize maximum size of this cache, in bytes
226    * @param blockSize expected average size of blocks, in bytes
227    * @param evictionThread whether to run evictions in a bg thread or not
228    * @param mapInitialSize initial size of backing ConcurrentHashMap
229    * @param mapLoadFactor initial load factor of backing ConcurrentHashMap
230    * @param mapConcurrencyLevel initial concurrency factor for backing CHM
231    * @param minFactor percentage of total size that eviction will evict until
232    * @param acceptableFactor percentage of total size that triggers eviction
233    * @param singleFactor percentage of total size for single-access blocks
234    * @param multiFactor percentage of total size for multiple-access blocks
235    * @param memoryFactor percentage of total size for in-memory blocks
236    */
237   public LruBlockCache(long maxSize, long blockSize, boolean evictionThread,
238       int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel,
239       float minFactor, float acceptableFactor,
240       float singleFactor, float multiFactor, float memoryFactor) {
241     if(singleFactor + multiFactor + memoryFactor != 1) {
242       throw new IllegalArgumentException("Single, multi, and memory factors " +
243           " should total 1.0");
244     }
245     if(minFactor >= acceptableFactor) {
246       throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor");
247     }
248     if(minFactor >= 1.0f || acceptableFactor >= 1.0f) {
249       throw new IllegalArgumentException("all factors must be < 1");
250     }
251     this.maxSize = maxSize;
252     this.blockSize = blockSize;
253     map = new ConcurrentHashMap<BlockCacheKey,CachedBlock>(mapInitialSize,
254         mapLoadFactor, mapConcurrencyLevel);
255     this.minFactor = minFactor;
256     this.acceptableFactor = acceptableFactor;
257     this.singleFactor = singleFactor;
258     this.multiFactor = multiFactor;
259     this.memoryFactor = memoryFactor;
260     this.stats = new CacheStats();
261     this.count = new AtomicLong(0);
262     this.elements = new AtomicLong(0);
263     this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel);
264     this.size = new AtomicLong(this.overhead);
265     if(evictionThread) {
266       this.evictionThread = new EvictionThread(this);
267       this.evictionThread.start(); // FindBugs SC_START_IN_CTOR
268     } else {
269       this.evictionThread = null;
270     }
271     this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this),
272         statThreadPeriod, statThreadPeriod, TimeUnit.SECONDS);
273   }
274 
275   public void setMaxSize(long maxSize) {
276     this.maxSize = maxSize;
277     if(this.size.get() > acceptableSize() && !evictionInProgress) {
278       runEviction();
279     }
280   }
281 
282   // BlockCache implementation
283 
284   /**
285    * Cache the block with the specified name and buffer.
286    * <p>
287    * It is assumed this will NOT be called on an already cached block. In rare cases (HBASE-8547)
288    * this can happen, for which we compare the buffer contents.
289    * @param cacheKey block's cache key
290    * @param buf block buffer
291    * @param inMemory if block is in-memory
292    */
293   @Override
294   public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
295     CachedBlock cb = map.get(cacheKey);
296     if(cb != null) {
297       // compare the contents, if they are not equal, we are in big trouble
298       if (compare(buf, cb.getBuffer()) != 0) {
299         throw new RuntimeException("Cached block contents differ, which should not have happened."
300           + "cacheKey:" + cacheKey);
301       }
302       String msg = "Cached an already cached block: " + cacheKey + " cb:" + cb.getCacheKey();
303       msg += ". This is harmless and can happen in rare cases (see HBASE-8547)";
304       LOG.warn(msg);
305       return;
306     }
307     cb = new CachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory);
308     long newSize = updateSizeMetrics(cb, false);
309     map.put(cacheKey, cb);
310     elements.incrementAndGet();
311     if(newSize > acceptableSize() && !evictionInProgress) {
312       runEviction();
313     }
314   }
315 
316   private int compare(Cacheable left, Cacheable right) {
317     ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength());
318     left.serialize(l);
319     ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength());
320     right.serialize(r);
321     return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(),
322       r.array(), r.arrayOffset(), r.limit());
323   }
324 
325   /**
326    * Cache the block with the specified name and buffer.
327    * <p>
328    * It is assumed this will NEVER be called on an already cached block.  If
329    * that is done, it is assumed that you are reinserting the same exact
330    * block due to a race condition and will update the buffer but not modify
331    * the size of the cache.
332    * @param cacheKey block's cache key
333    * @param buf block buffer
334    */
335   public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
336     cacheBlock(cacheKey, buf, false);
337   }
338 
339   /**
340    * Helper function that updates the local size counter and also updates any
341    * per-cf or per-blocktype metrics it can discern from the given
342    * {@link CachedBlock}.
343    *
344    * @param cb the block whose heap size is applied to the counter
345    * @param evict true if the block is being evicted, so its size is subtracted
346    */
347   protected long updateSizeMetrics(CachedBlock cb, boolean evict) {
348     long heapsize = cb.heapSize();
349     if (evict) {
350       heapsize *= -1;
351     }
352     return size.addAndGet(heapsize);
353   }
354 
355   /**
356    * Get the buffer of the block with the specified name.
357    * @param cacheKey block's cache key
358    * @param caching true if the caller caches blocks on cache misses
359    * @param repeat Whether this is a repeat lookup for the same block
360    *        (used to avoid double counting cache misses when doing double-check locking)
361    * @return buffer of specified cache key, or null if not in cache
362    * @see HFileReaderV2#readBlock(long, long, boolean, boolean, boolean, BlockType)
363    */
364   @Override
365   public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat) {
366     CachedBlock cb = map.get(cacheKey);
367     if(cb == null) {
368       if (!repeat) stats.miss(caching);
369       if (victimHandler != null)
370         return victimHandler.getBlock(cacheKey, caching, repeat);
371       return null;
372     }
373     stats.hit(caching);
374     cb.access(count.incrementAndGet());
375     return cb.getBuffer();
376   }
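
      // An illustrative round trip (sketch only; "key", "otherKey" and "block" are
      // assumed to be BlockCacheKeys and a Cacheable obtained elsewhere):
      //
      //   cache.cacheBlock(key, block);                           // cached at SINGLE priority
      //   Cacheable hit  = cache.getBlock(key, true, false);      // hit; block promoted to MULTI
      //   Cacheable miss = cache.getBlock(otherKey, true, false); // null (or victim cache result)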
377 
378   /**
379    * Whether the cache contains the block with the specified cacheKey.
380    * @param cacheKey the block's cache key
381    * @return true if the cache contains the block
382    */
383   public boolean containsBlock(BlockCacheKey cacheKey) {
384     return map.containsKey(cacheKey);
385   }
386 
387   @Override
388   public boolean evictBlock(BlockCacheKey cacheKey) {
389     CachedBlock cb = map.get(cacheKey);
390     if (cb == null) return false;
391     evictBlock(cb, false);
392     return true;
393   }
394 
395   /**
396    * Evicts all blocks for a specific HFile. This is an
397    * expensive operation implemented as a linear-time search through all blocks
398    * in the cache. Ideally this should be a search in a log-access-time map.
399    *
400    * <p>
401    * This is used for evict-on-close to remove all blocks of a specific HFile.
402    *
403    * @return the number of blocks evicted
404    */
405   @Override
406   public int evictBlocksByHfileName(String hfileName) {
407     int numEvicted = 0;
408     for (BlockCacheKey key : map.keySet()) {
409       if (key.getHfileName().equals(hfileName)) {
410         if (evictBlock(key))
411           ++numEvicted;
412       }
413     }
414     if (victimHandler != null) {
415       numEvicted += victimHandler.evictBlocksByHfileName(hfileName);
416     }
417     return numEvicted;
418   }
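
      // Illustrative evict-on-close call (assumes an LruBlockCache named "cache"):
      //
      //   int evicted = cache.evictBlocksByHfileName(hfileName);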
419 
420   /**
421    * Evict the block; if a victim handler exists and the block may be read
422    * again later, the block is handed to the victim handler.
423    * @param block the block to evict
424    * @param evictedByEvictionProcess true if the given block is evicted by
425    *          EvictionThread
426    * @return the heap size of the evicted block
427    */
428   protected long evictBlock(CachedBlock block, boolean evictedByEvictionProcess) {
429     map.remove(block.getCacheKey());
430     updateSizeMetrics(block, true);
431     elements.decrementAndGet();
432     stats.evicted();
433     if (evictedByEvictionProcess && victimHandler != null) {
434       boolean wait = getCurrentSize() < acceptableSize();
435       boolean inMemory = block.getPriority() == BlockPriority.MEMORY;
436       victimHandler.cacheBlockWithWait(block.getCacheKey(), block.getBuffer(),
437           inMemory, wait);
438     }
439     return block.heapSize();
440   }
441 
442   /**
443    * Multi-threaded call to run the eviction process.
444    */
445   private void runEviction() {
446     if(evictionThread == null) {
447       evict();
448     } else {
449       evictionThread.evict();
450     }
451   }
452 
453   /**
454    * Eviction method.
455    */
456   void evict() {
457 
458     // Ensure only one eviction at a time
459     if(!evictionLock.tryLock()) return;
460 
461     try {
462       evictionInProgress = true;
463       long currentSize = this.size.get();
464       long bytesToFree = currentSize - minSize();
465 
466       if (LOG.isTraceEnabled()) {
467         LOG.trace("Block cache LRU eviction started; Attempting to free " +
468           StringUtils.byteDesc(bytesToFree) + " of total=" +
469           StringUtils.byteDesc(currentSize));
470       }
471 
472       if(bytesToFree <= 0) return;
473 
474       // Instantiate priority buckets
475       BlockBucket bucketSingle = new BlockBucket(bytesToFree, blockSize,
476           singleSize());
477       BlockBucket bucketMulti = new BlockBucket(bytesToFree, blockSize,
478           multiSize());
479       BlockBucket bucketMemory = new BlockBucket(bytesToFree, blockSize,
480           memorySize());
481 
482       // Scan entire map putting into appropriate buckets
483       for(CachedBlock cachedBlock : map.values()) {
484         switch(cachedBlock.getPriority()) {
485           case SINGLE: {
486             bucketSingle.add(cachedBlock);
487             break;
488           }
489           case MULTI: {
490             bucketMulti.add(cachedBlock);
491             break;
492           }
493           case MEMORY: {
494             bucketMemory.add(cachedBlock);
495             break;
496           }
497         }
498       }
499 
500       PriorityQueue<BlockBucket> bucketQueue =
501         new PriorityQueue<BlockBucket>(3);
502 
503       bucketQueue.add(bucketSingle);
504       bucketQueue.add(bucketMulti);
505       bucketQueue.add(bucketMemory);
506 
507       int remainingBuckets = 3;
508       long bytesFreed = 0;
509 
510       BlockBucket bucket;
511       while((bucket = bucketQueue.poll()) != null) {
512         long overflow = bucket.overflow();
513         if(overflow > 0) {
514           long bucketBytesToFree = Math.min(overflow,
515             (bytesToFree - bytesFreed) / remainingBuckets);
516           bytesFreed += bucket.free(bucketBytesToFree);
517         }
518         remainingBuckets--;
519       }
520 
521       if (LOG.isTraceEnabled()) {
522         long single = bucketSingle.totalSize();
523         long multi = bucketMulti.totalSize();
524         long memory = bucketMemory.totalSize();
525         LOG.trace("Block cache LRU eviction completed; " +
526           "freed=" + StringUtils.byteDesc(bytesFreed) + ", " +
527           "total=" + StringUtils.byteDesc(this.size.get()) + ", " +
528           "single=" + StringUtils.byteDesc(single) + ", " +
529           "multi=" + StringUtils.byteDesc(multi) + ", " +
530           "memory=" + StringUtils.byteDesc(memory));
531       }
532     } finally {
533       stats.evict();
534       evictionInProgress = false;
535       evictionLock.unlock();
536     }
537   }
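
      // Worked example with assumed numbers: bytesToFree = 30 MB and bucket
      // overflows of memory = 0, single = 5 MB, multi = 40 MB.  Buckets are polled
      // smallest overflow first, and each may free at most its share of what is
      // still needed:
      //   memory: overflow <= 0, nothing freed, remainingBuckets -> 2
      //   single: min(5 MB, 30 MB / 2 = 15 MB) -> 5 MB freed, remainingBuckets -> 1
      //   multi:  min(40 MB, (30 - 5) MB / 1)  -> 25 MB freed, ~30 MB freed in total
      // (BlockBucket.free() evicts whole blocks, so the amount actually freed can
      // slightly exceed each target.)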
538 
539   /**
540    * Used to group blocks into priority buckets.  There will be a BlockBucket
541    * for each priority (single, multi, memory).  Once bucketed, the eviction
542    * algorithm takes the appropriate number of elements out of each according
543    * to configuration parameters and their relative sizes.
544    */
545   private class BlockBucket implements Comparable<BlockBucket> {
546 
547     private CachedBlockQueue queue;
548     private long totalSize = 0;
549     private long bucketSize;
550 
551     public BlockBucket(long bytesToFree, long blockSize, long bucketSize) {
552       this.bucketSize = bucketSize;
553       queue = new CachedBlockQueue(bytesToFree, blockSize);
554       totalSize = 0;
555     }
556 
557     public void add(CachedBlock block) {
558       totalSize += block.heapSize();
559       queue.add(block);
560     }
561 
562     public long free(long toFree) {
563       CachedBlock cb;
564       long freedBytes = 0;
565       while ((cb = queue.pollLast()) != null) {
566         freedBytes += evictBlock(cb, true);
567         if (freedBytes >= toFree) {
568           return freedBytes;
569         }
570       }
571       return freedBytes;
572     }
573 
574     public long overflow() {
575       return totalSize - bucketSize;
576     }
577 
578     public long totalSize() {
579       return totalSize;
580     }
581 
582     public int compareTo(BlockBucket that) {
583       if(this.overflow() == that.overflow()) return 0;
584       return this.overflow() > that.overflow() ? 1 : -1;
585     }
586 
587     @Override
588     public boolean equals(Object that) {
589       if (!(that instanceof BlockBucket)) {
590         return false;
591       }
592 
593       return compareTo((BlockBucket) that) == 0;
594     }
595 
596   }
597 
598   /**
599    * Get the maximum size of this cache.
600    * @return max size in bytes
601    */
602   public long getMaxSize() {
603     return this.maxSize;
604   }
605 
606   /**
607    * Get the current size of this cache.
608    * @return current size in bytes
609    */
610   public long getCurrentSize() {
611     return this.size.get();
612   }
613 
614   /**
615    * Get the amount of free space in this cache.
616    * @return free space in bytes
617    */
618   public long getFreeSize() {
619     return getMaxSize() - getCurrentSize();
620   }
621 
622   /**
623    * Get the size of this cache (number of cached blocks)
624    * @return number of cached blocks
625    */
626   public long size() {
627     return this.elements.get();
628   }
629 
630   @Override
631   public long getBlockCount() {
632     return this.elements.get();
633   }
634 
635   /**
636    * Get the number of eviction runs that have occurred
637    */
638   public long getEvictionCount() {
639     return this.stats.getEvictionCount();
640   }
641 
642   /**
643    * Get the number of blocks that have been evicted during the lifetime
644    * of this cache.
645    */
646   public long getEvictedCount() {
647     return this.stats.getEvictedCount();
648   }
649 
650   EvictionThread getEvictionThread() {
651     return this.evictionThread;
652   }
653 
654   /*
655    * Eviction thread.  Sits in waiting state until an eviction is triggered
656    * when the cache size grows above the acceptable level.<p>
657    *
658    * Thread is triggered into action by {@link LruBlockCache#runEviction()}
659    */
660   static class EvictionThread extends HasThread {
661     private WeakReference<LruBlockCache> cache;
662     private boolean go = true;
663     // flag set after enter the run method, used for test
664     private boolean enteringRun = false;
665 
666     public EvictionThread(LruBlockCache cache) {
667       super(Thread.currentThread().getName() + ".LruBlockCache.EvictionThread");
668       setDaemon(true);
669       this.cache = new WeakReference<LruBlockCache>(cache);
670     }
671 
672     @Override
673     public void run() {
674       enteringRun = true;
675       while (this.go) {
676         synchronized(this) {
677           try {
678             this.wait();
679           } catch(InterruptedException e) {}
680         }
681         LruBlockCache cache = this.cache.get();
682         if(cache == null) break;
683         cache.evict();
684       }
685     }
686 
687     public void evict() {
688       synchronized(this) {
689         this.notifyAll(); // FindBugs NN_NAKED_NOTIFY
690       }
691     }
692 
693     synchronized void shutdown() {
694       this.go = false;
695       this.notifyAll();
696     }
697 
698     /**
699      * Used for testing.
700      */
701     boolean isEnteringRun() {
702       return this.enteringRun;
703     }
704   }
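
      // The hand-off, in short: runEviction() calls evictionThread.evict(), whose
      // notifyAll() wakes the thread out of wait(); it then runs cache.evict() and
      // goes back to waiting.  shutdown() clears the go flag and notifies, so the
      // thread finishes its current pass through the loop and exits.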
705 
706   /*
707    * Statistics thread.  Periodically prints the cache statistics to the log.
708    */
709   static class StatisticsThread extends Thread {
710     LruBlockCache lru;
711 
712     public StatisticsThread(LruBlockCache lru) {
713       super("LruBlockCache.StatisticsThread");
714       setDaemon(true);
715       this.lru = lru;
716     }
717     @Override
718     public void run() {
719       lru.logStats();
720     }
721   }
722 
723   public void logStats() {
724     if (!LOG.isDebugEnabled()) return;
725     // Log size
726     long totalSize = heapSize();
727     long freeSize = maxSize - totalSize;
728     LruBlockCache.LOG.debug("Total=" + StringUtils.byteDesc(totalSize) + ", " +
729         "free=" + StringUtils.byteDesc(freeSize) + ", " +
730         "max=" + StringUtils.byteDesc(this.maxSize) + ", " +
731         "blocks=" + size() +", " +
732         "accesses=" + stats.getRequestCount() + ", " +
733         "hits=" + stats.getHitCount() + ", " +
734         "hitRatio=" +
735           (stats.getHitCount() == 0 ? "0" : StringUtils.formatPercent(stats.getHitRatio(), 2)) + ", " +
736         "cachingAccesses=" + stats.getRequestCachingCount() + ", " +
737         "cachingHits=" + stats.getHitCachingCount() + ", " +
738         "cachingHitsRatio=" +
739           (stats.getHitCachingCount() == 0 ? "0" : StringUtils.formatPercent(stats.getHitCachingRatio(), 2)) + ", " +
740         "evictions=" + stats.getEvictionCount() + ", " +
741         "evicted=" + stats.getEvictedCount() + ", " +
742         "evictedPerRun=" + stats.evictedPerEviction());
743   }
744 
745   /**
746    * Get counter statistics for this cache.
747    *
748    * <p>Includes: total accesses, hits, misses, evicted blocks, and runs
749    * of the eviction processes.
750    */
751   public CacheStats getStats() {
752     return this.stats;
753   }
754 
755   public final static long CACHE_FIXED_OVERHEAD = ClassSize.align(
756       (3 * Bytes.SIZEOF_LONG) + (9 * ClassSize.REFERENCE) +
757       (5 * Bytes.SIZEOF_FLOAT) + Bytes.SIZEOF_BOOLEAN
758       + ClassSize.OBJECT);
759 
760   // HeapSize implementation
761   public long heapSize() {
762     return getCurrentSize();
763   }
764 
765   public static long calculateOverhead(long maxSize, long blockSize, int concurrency){
766     // FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG
767     return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP +
768         ((long)Math.ceil(maxSize*1.2/blockSize)
769             * ClassSize.CONCURRENT_HASHMAP_ENTRY) +
770         ((long)concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
771   }
772 
773   @Override
774   public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(Configuration conf) throws IOException {
775 
776     Map<String, Path> sfMap = FSUtils.getTableStoreFilePathMap(
777         FileSystem.get(conf),
778         FSUtils.getRootDir(conf));
779 
780     // quirky, but it's a compound key and this is a shortcut taken instead of
781     // creating a class that would represent only a key.
782     Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs =
783       new HashMap<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary>();
784 
785     for (CachedBlock cb : map.values()) {
786       String sf = cb.getCacheKey().getHfileName();
787       Path path = sfMap.get(sf);
788       if ( path != null) {
789         BlockCacheColumnFamilySummary lookup =
790           BlockCacheColumnFamilySummary.createFromStoreFilePath(path);
791         BlockCacheColumnFamilySummary bcse = bcs.get(lookup);
792         if (bcse == null) {
793           bcse = BlockCacheColumnFamilySummary.create(lookup);
794           bcs.put(lookup,bcse);
795         }
796         bcse.incrementBlocks();
797         bcse.incrementHeapSize(cb.heapSize());
798       }
799     }
800     List<BlockCacheColumnFamilySummary> list =
801         new ArrayList<BlockCacheColumnFamilySummary>(bcs.values());
802     Collections.sort( list );
803     return list;
804   }
805 
806   // Simple calculators of sizes given factors and maxSize
807 
808   private long acceptableSize() {
809     return (long)Math.floor(this.maxSize * this.acceptableFactor);
810   }
811   private long minSize() {
812     return (long)Math.floor(this.maxSize * this.minFactor);
813   }
814   private long singleSize() {
815     return (long)Math.floor(this.maxSize * this.singleFactor * this.minFactor);
816   }
817   private long multiSize() {
818     return (long)Math.floor(this.maxSize * this.multiFactor * this.minFactor);
819   }
820   private long memorySize() {
821     return (long)Math.floor(this.maxSize * this.memoryFactor * this.minFactor);
822   }
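
      // Worked example with the defaults above and an assumed maxSize of 1 GB:
      //   acceptableSize() = 0.99 * 1024 MB ~ 1014 MB   (eviction trigger)
      //   minSize()        = 0.95 * 1024 MB ~  973 MB   (eviction target)
      //   singleSize()     = 0.25 * 0.95 * 1024 MB ~ 243 MB
      //   multiSize()      = 0.50 * 0.95 * 1024 MB ~ 486 MB
      //   memorySize()     = 0.25 * 0.95 * 1024 MB ~ 243 MB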
823 
824   public void shutdown() {
825     if (victimHandler != null)
826       victimHandler.shutdown();
827     this.scheduleThreadPool.shutdown();
828     for (int i = 0; i < 10; i++) {
829       if (!this.scheduleThreadPool.isShutdown()) Threads.sleep(10);
830     }
831     if (!this.scheduleThreadPool.isShutdown()) {
832       List<Runnable> runnables = this.scheduleThreadPool.shutdownNow();
833       LOG.debug("Still running " + runnables);
834     }
835     this.evictionThread.shutdown();
836   }
837 
838   /** Clears the cache. Used in tests. */
839   public void clearCache() {
840     map.clear();
841   }
842 
843   /**
844    * Used in testing. May be very inefficient.
845    * @return the set of cached file names
846    */
847   SortedSet<String> getCachedFileNamesForTest() {
848     SortedSet<String> fileNames = new TreeSet<String>();
849     for (BlockCacheKey cacheKey : map.keySet()) {
850       fileNames.add(cacheKey.getHfileName());
851     }
852     return fileNames;
853   }
854 
855   Map<BlockType, Integer> getBlockTypeCountsForTest() {
856     Map<BlockType, Integer> counts =
857         new EnumMap<BlockType, Integer>(BlockType.class);
858     for (CachedBlock cb : map.values()) {
859       BlockType blockType = ((HFileBlock) cb.getBuffer()).getBlockType();
860       Integer count = counts.get(blockType);
861       counts.put(blockType, (count == null ? 0 : count) + 1);
862     }
863     return counts;
864   }
865 
866   public Map<DataBlockEncoding, Integer> getEncodingCountsForTest() {
867     Map<DataBlockEncoding, Integer> counts =
868         new EnumMap<DataBlockEncoding, Integer>(DataBlockEncoding.class);
869     for (BlockCacheKey cacheKey : map.keySet()) {
870       DataBlockEncoding encoding = cacheKey.getDataBlockEncoding();
871       Integer count = counts.get(encoding);
872       counts.put(encoding, (count == null ? 0 : count) + 1);
873     }
874     return counts;
875   }
876 
877   public void setVictimCache(BucketCache handler) {
878     assert victimHandler == null;
879     victimHandler = handler;
880   }
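
      // Illustrative: chaining an L2 cache (the BucketCache instance is assumed to
      // be built elsewhere).  Once set, getBlock() misses fall through to the victim
      // cache, and blocks removed by the eviction thread are handed to it via
      // cacheBlockWithWait().
      //
      //   lruCache.setVictimCache(bucketCache);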
881 
882 }