
1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.io.hfile;
20  
21  import java.io.IOException;
22  import java.lang.ref.WeakReference;
23  import java.nio.ByteBuffer;
24  import java.util.ArrayList;
25  import java.util.Collections;
26  import java.util.EnumMap;
27  import java.util.HashMap;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.PriorityQueue;
31  import java.util.SortedSet;
32  import java.util.TreeSet;
33  import java.util.concurrent.ConcurrentHashMap;
34  import java.util.concurrent.Executors;
35  import java.util.concurrent.ScheduledExecutorService;
36  import java.util.concurrent.TimeUnit;
37  import java.util.concurrent.atomic.AtomicLong;
38  import java.util.concurrent.locks.ReentrantLock;
39  
40  import org.apache.commons.logging.Log;
41  import org.apache.commons.logging.LogFactory;
42  import org.apache.hadoop.classification.InterfaceAudience;
43  import org.apache.hadoop.conf.Configuration;
44  import org.apache.hadoop.fs.FileSystem;
45  import org.apache.hadoop.fs.Path;
46  import org.apache.hadoop.hbase.io.HeapSize;
47  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
48  import org.apache.hadoop.hbase.io.hfile.CachedBlock.BlockPriority;
49  import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
50  import org.apache.hadoop.hbase.util.Bytes;
51  import org.apache.hadoop.hbase.util.ClassSize;
52  import org.apache.hadoop.hbase.util.FSUtils;
53  import org.apache.hadoop.hbase.util.HasThread;
54  import org.apache.hadoop.hbase.util.Threads;
55  import org.apache.hadoop.util.StringUtils;
56  
57  import com.google.common.util.concurrent.ThreadFactoryBuilder;
58  
59  /**
60   * A block cache implementation that is memory-aware using {@link HeapSize},
61   * memory-bound using an LRU eviction algorithm, and concurrent: backed by a
62   * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving
63   * constant-time {@link #cacheBlock} and {@link #getBlock} operations.<p>
64   *
65   * Contains three levels of block priority to allow for
66   * scan-resistance and in-memory families.  A block is added with an inMemory
67   * flag if necessary; otherwise the block is given single-access priority.  Once
68   * a block is accessed again, it changes to multiple access.  This is used
69   * to prevent scans from thrashing the cache, adding a least-frequently-used
70   * element to the eviction algorithm.<p>
71   *
72   * Each priority is given its own chunk of the total cache to ensure
73   * fairness during eviction.  Each priority will retain close to its maximum
74   * size; however, if any priority is not using its entire chunk, the others
75   * are able to grow beyond their chunk size.<p>
76   *
77   * Instantiated at a minimum with the total size and average block size.
78   * All sizes are in bytes.  The block size is not especially important as this
79   * cache is fully dynamic in its sizing of blocks.  It is only used for
80   * pre-allocating data structures and in initial heap estimation of the map.<p>
81   *
82   * The detailed constructor defines the sizes for the three priorities (they
83   * should total to the maximum size defined).  It also sets the levels that
84   * trigger and control the eviction thread.<p>
85   *
86   * The acceptable size is the cache size level which triggers the eviction
87   * process to start.  It evicts enough blocks to get the size below the
88   * minimum size specified.<p>
89   *
90   * Eviction happens in a separate thread and involves a single full-scan
91   * of the map.  It determines how many bytes must be freed to reach the minimum
92   * size, and then while scanning determines the fewest least-recently-used
93   * blocks necessary from each of the three priorities (up to three times the
94   * bytes to free in total).  It then uses the priority chunk sizes to evict
95   * fairly according to the relative sizes and usage.
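 * <p>
 * Worked example (the 1 GB figure is illustrative; the factors are this class's
 * defaults): eviction starts once the cache grows past acceptableFactor * maxSize
 * = 0.99 GB and frees blocks until the size drops below minFactor * maxSize =
 * 0.95 GB.  Each priority's fair-share chunk is factor * minFactor * maxSize,
 * i.e. single = 0.25 * 0.95 GB, multi = 0.50 * 0.95 GB and memory =
 * 0.25 * 0.95 GB.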
96   */
97  @InterfaceAudience.Private
98  public class LruBlockCache implements BlockCache, HeapSize {
99  
100   static final Log LOG = LogFactory.getLog(LruBlockCache.class);
101 
102   static final String LRU_MIN_FACTOR_CONFIG_NAME = "hbase.lru.blockcache.min.factor";
103   static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME = "hbase.lru.blockcache.acceptable.factor";
104 
105   /** Default Configuration Parameters */
106 
107   /** Backing Concurrent Map Configuration */
108   static final float DEFAULT_LOAD_FACTOR = 0.75f;
109   static final int DEFAULT_CONCURRENCY_LEVEL = 16;
110 
111   /** Eviction thresholds */
112   static final float DEFAULT_MIN_FACTOR = 0.95f;
113   static final float DEFAULT_ACCEPTABLE_FACTOR = 0.99f;
114 
115   /** Priority buckets */
116   static final float DEFAULT_SINGLE_FACTOR = 0.25f;
117   static final float DEFAULT_MULTI_FACTOR = 0.50f;
118   static final float DEFAULT_MEMORY_FACTOR = 0.25f;
119 
120   /** Statistics thread */
121   static final int statThreadPeriod = 60 * 5;
122 
123   /** Concurrent map (the cache) */
124   private final ConcurrentHashMap<BlockCacheKey,CachedBlock> map;
125 
126   /** Eviction lock (locked when eviction in process) */
127   private final ReentrantLock evictionLock = new ReentrantLock(true);
128 
129   /** Volatile boolean to track if we are in an eviction process or not */
130   private volatile boolean evictionInProgress = false;
131 
132   /** Eviction thread */
133   private final EvictionThread evictionThread;
134 
135   /** Statistics thread schedule pool (for heavy debugging, could remove) */
136   private final ScheduledExecutorService scheduleThreadPool =
137     Executors.newScheduledThreadPool(1,
138       new ThreadFactoryBuilder()
139         .setNameFormat("LruStats #%d")
140         .setDaemon(true)
141         .build());
142 
143   /** Current size of cache */
144   private final AtomicLong size;
145 
146   /** Current number of cached elements */
147   private final AtomicLong elements;
148 
149   /** Cache access count (sequential ID) */
150   private final AtomicLong count;
151 
152   /** Cache statistics */
153   private final CacheStats stats;
154 
156   /** Maximum allowable size of cache (if a block put pushes the size past this, eviction runs) */
156   private long maxSize;
157 
158   /** Approximate block size */
159   private long blockSize;
160 
161   /** Acceptable size of cache (no evictions if size < acceptable) */
162   private float acceptableFactor;
163 
164   /** Minimum threshold of cache (when evicting, evict until size < min) */
165   private float minFactor;
166 
167   /** Single access bucket size */
168   private float singleFactor;
169 
170   /** Multiple access bucket size */
171   private float multiFactor;
172 
173   /** In-memory bucket size */
174   private float memoryFactor;
175 
176   /** Overhead of the structure itself */
177   private long overhead;
178 
179   /** Where to send victims (blocks evicted from the cache) */
180   private BucketCache victimHandler = null;
181 
182   /**
183    * Default constructor.  Specify maximum size and expected average block
184    * size (approximation is fine).
185    *
186    * <p>All other factors will be calculated based on defaults specified in
187    * this class.
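   * <p>Illustrative usage (the sizes below are arbitrary example values):
   * <pre>{@code
   * // 256 MB cache with an expected average block size of 64 KB
   * LruBlockCache cache = new LruBlockCache(256L * 1024 * 1024, 64 * 1024);
   * }</pre>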
188    * @param maxSize maximum size of cache, in bytes
189    * @param blockSize approximate size of each block, in bytes
190    */
191   public LruBlockCache(long maxSize, long blockSize) {
192     this(maxSize, blockSize, true);
193   }
194 
195   /**
196    * Constructor used for testing.  Allows disabling of the eviction thread.
197    */
198   public LruBlockCache(long maxSize, long blockSize, boolean evictionThread) {
199     this(maxSize, blockSize, evictionThread,
200         (int)Math.ceil(1.2*maxSize/blockSize),
201         DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL,
202         DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR,
203         DEFAULT_SINGLE_FACTOR, DEFAULT_MULTI_FACTOR,
204         DEFAULT_MEMORY_FACTOR);
205   }
206 
207   public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) {
208     this(maxSize, blockSize, evictionThread,
209         (int)Math.ceil(1.2*maxSize/blockSize),
210         DEFAULT_LOAD_FACTOR,
211         DEFAULT_CONCURRENCY_LEVEL,
212         conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR),
213         conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR),
214         DEFAULT_SINGLE_FACTOR,
215         DEFAULT_MULTI_FACTOR,
216         DEFAULT_MEMORY_FACTOR);
217   }
218 
219   public LruBlockCache(long maxSize, long blockSize, Configuration conf) {
220     this(maxSize, blockSize, true, conf);
221   }
222 
223   /**
224    * Configurable constructor.  Use this constructor if not using defaults.
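   * <p>Illustrative call (the values below are examples chosen to satisfy the
   * constructor checks, not tuning recommendations):
   * <pre>{@code
   * long maxSize = 512L * 1024 * 1024;                 // 512 MB
   * long blockSize = 64 * 1024;                        // 64 KB average block
   * LruBlockCache cache = new LruBlockCache(maxSize, blockSize, true,
   *     (int) Math.ceil(1.2 * maxSize / blockSize),    // map initial size
   *     0.75f, 16,                                     // map load factor, concurrency level
   *     0.95f, 0.99f,                                  // minFactor, acceptableFactor
   *     0.25f, 0.50f, 0.25f);                          // single, multi, memory factors
   * }</pre>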
225    * @param maxSize maximum size of this cache, in bytes
226    * @param blockSize expected average size of blocks, in bytes
227    * @param evictionThread whether to run evictions in a bg thread or not
228    * @param mapInitialSize initial size of backing ConcurrentHashMap
229    * @param mapLoadFactor initial load factor of backing ConcurrentHashMap
230    * @param mapConcurrencyLevel initial concurrency factor for backing CHM
231    * @param minFactor percentage of total size that eviction will evict until
232    * @param acceptableFactor percentage of total size that triggers eviction
233    * @param singleFactor percentage of total size for single-access blocks
234    * @param multiFactor percentage of total size for multiple-access blocks
235    * @param memoryFactor percentage of total size for in-memory blocks
236    */
237   public LruBlockCache(long maxSize, long blockSize, boolean evictionThread,
238       int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel,
239       float minFactor, float acceptableFactor,
240       float singleFactor, float multiFactor, float memoryFactor) {
241     if(singleFactor + multiFactor + memoryFactor != 1) {
242       throw new IllegalArgumentException("Single, multi, and memory factors" +
243           " should total 1.0");
244     }
245     if(minFactor >= acceptableFactor) {
246       throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor");
247     }
248     if(minFactor >= 1.0f || acceptableFactor >= 1.0f) {
249       throw new IllegalArgumentException("all factors must be < 1");
250     }
251     this.maxSize = maxSize;
252     this.blockSize = blockSize;
253     map = new ConcurrentHashMap<BlockCacheKey,CachedBlock>(mapInitialSize,
254         mapLoadFactor, mapConcurrencyLevel);
255     this.minFactor = minFactor;
256     this.acceptableFactor = acceptableFactor;
257     this.singleFactor = singleFactor;
258     this.multiFactor = multiFactor;
259     this.memoryFactor = memoryFactor;
260     this.stats = new CacheStats();
261     this.count = new AtomicLong(0);
262     this.elements = new AtomicLong(0);
263     this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel);
264     this.size = new AtomicLong(this.overhead);
265     if(evictionThread) {
266       this.evictionThread = new EvictionThread(this);
267       this.evictionThread.start(); // FindBugs SC_START_IN_CTOR
268     } else {
269       this.evictionThread = null;
270     }
271     this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this),
272         statThreadPeriod, statThreadPeriod, TimeUnit.SECONDS);
273   }
274 
275   public void setMaxSize(long maxSize) {
276     this.maxSize = maxSize;
277     if(this.size.get() > acceptableSize() && !evictionInProgress) {
278       runEviction();
279     }
280   }
281 
282   // BlockCache implementation
283 
284   /**
285    * Cache the block with the specified name and buffer.
286    * <p>
287    * It is assumed this will NOT be called on an already cached block. In rare cases (HBASE-8547)
288    * this can happen, in which case we compare the buffer contents.
289    * @param cacheKey block's cache key
290    * @param buf block buffer
291    * @param inMemory if block is in-memory
292    */
293   @Override
294   public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
295     CachedBlock cb = map.get(cacheKey);
296     if(cb != null) {
297       // compare the contents, if they are not equal, we are in big trouble
298       if (compare(buf, cb.getBuffer()) != 0) {
299         throw new RuntimeException("Cached block contents differ, which should not have happened."
300           + " cacheKey:" + cacheKey);
301       }
302       String msg = "Cached an already cached block: " + cacheKey + " cb:" + cb.getCacheKey();
303       msg += ". This is harmless and can happen in rare cases (see HBASE-8547)";
304       LOG.warn(msg);
305       return;
306     }
307     cb = new CachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory);
308     long newSize = updateSizeMetrics(cb, false);
309     map.put(cacheKey, cb);
310     elements.incrementAndGet();
311     if(newSize > acceptableSize() && !evictionInProgress) {
312       runEviction();
313     }
314   }
315 
316   private int compare(Cacheable left, Cacheable right) {
317     ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength());
318     left.serialize(l);
319     ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength());
320     right.serialize(r);
321     return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(),
322       r.array(), r.arrayOffset(), r.limit());
323   }
324 
325   /**
326    * Cache the block with the specified name and buffer.
327    * <p>
328    * It is assumed this will NEVER be called on an already cached block.  If
329    * that does happen (see HBASE-8547), the call delegates to
330    * {@link #cacheBlock(BlockCacheKey, Cacheable, boolean)}, which compares the
331    * buffer contents and ignores the duplicate insert.
332    * @param cacheKey block's cache key
333    * @param buf block buffer
334    */
335   public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
336     cacheBlock(cacheKey, buf, false);
337   }
338 
339   /**
340    * Helper function that updates the local size counter for the given
341    * {@link CachedBlock}: its heap size is added on insert and subtracted on
342    * eviction, and the new total cache size is returned.
343    *
344    * @param cb the block whose heap size is being accounted for
345    * @param evict true if the block is being evicted (its size is subtracted)
346    */
347   protected long updateSizeMetrics(CachedBlock cb, boolean evict) {
348     long heapsize = cb.heapSize();
349     if (evict) {
350       heapsize *= -1;
351     }
352     return size.addAndGet(heapsize);
353   }
354 
355   /**
356    * Get the buffer of the block with the specified name.
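   * <p>Illustrative read-through pattern (the file name and offset are
   * placeholders; a {@code BlockCacheKey(String, long)} constructor is assumed):
   * <pre>{@code
   * BlockCacheKey key = new BlockCacheKey("hfile-name", 0L);
   * Cacheable block = cache.getBlock(key, true, false);
   * if (block == null) {
   *   // Miss: the caller reads the block from the HFile and may add it back
   *   // with cacheBlock(key, blockReadFromDisk).
   * }
   * }</pre>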
357    * @param cacheKey block's cache key
358    * @param caching true if the caller caches blocks on cache misses
359    * @param repeat Whether this is a repeat lookup for the same block
360    *        (used to avoid double counting cache misses when doing double-check locking)
361    * @return buffer of specified cache key, or null if not in cache
362    * @see HFileReaderV2#readBlock(long, long, boolean, boolean, boolean, BlockType)
363    */
364   @Override
365   public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat) {
366     CachedBlock cb = map.get(cacheKey);
367     if(cb == null) {
368       if (!repeat) stats.miss(caching);
369       if (victimHandler != null)
370         return victimHandler.getBlock(cacheKey, caching, repeat);
371       return null;
372     }
373     stats.hit(caching);
374     cb.access(count.incrementAndGet());
375     return cb.getBuffer();
376   }
377 
378   /**
379    * Whether the cache contains a block with the specified cache key.
380    * @param cacheKey block's cache key
381    * @return true if the cache contains the block
382    */
383   public boolean containsBlock(BlockCacheKey cacheKey) {
384     return map.containsKey(cacheKey);
385   }
386 
387   @Override
388   public boolean evictBlock(BlockCacheKey cacheKey) {
389     CachedBlock cb = map.get(cacheKey);
390     if (cb == null) return false;
391     evictBlock(cb, false);
392     return true;
393   }
394 
395   /**
396    * Evicts all blocks for a specific HFile. This is an
397    * expensive operation implemented as a linear-time search through all blocks
398    * in the cache. Ideally this should be a search in a log-access-time map.
399    *
400    * <p>
401    * This is used for evict-on-close to remove all blocks of a specific HFile.
402    *
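   * <p>Illustrative call ({@code "hfile-name"} is a placeholder):
   * <pre>{@code
   * int evicted = cache.evictBlocksByHfileName("hfile-name");
   * }</pre>
   *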
403    * @return the number of blocks evicted
404    */
405   @Override
406   public int evictBlocksByHfileName(String hfileName) {
407     int numEvicted = 0;
408     for (BlockCacheKey key : map.keySet()) {
409       if (key.getHfileName().equals(hfileName)) {
410         if (evictBlock(key))
411           ++numEvicted;
412       }
413     }
414     if (victimHandler != null) {
415       numEvicted += victimHandler.evictBlocksByHfileName(hfileName);
416     }
417     return numEvicted;
418   }
419 
420   /**
421    * Evict the block; if a victim handler is set and the block may be read again
422    * later, the evicted block is handed to that victim cache.
423    * @param block the block to evict
424    * @param evictedByEvictionProcess true if the given block is evicted by
425    *          EvictionThread
426    * @return the heap size of evicted block
427    */
428   protected long evictBlock(CachedBlock block, boolean evictedByEvictionProcess) {
429     map.remove(block.getCacheKey());
430     updateSizeMetrics(block, true);
431     elements.decrementAndGet();
432     stats.evicted();
433     if (evictedByEvictionProcess && victimHandler != null) {
434       boolean wait = getCurrentSize() < acceptableSize();
435       boolean inMemory = block.getPriority() == BlockPriority.MEMORY;
436       victimHandler.cacheBlockWithWait(block.getCacheKey(), block.getBuffer(),
437           inMemory, wait);
438     }
439     return block.heapSize();
440   }
441 
442   /**
443    * Runs the eviction process; safe to call from multiple threads.
444    */
445   private void runEviction() {
446     if(evictionThread == null) {
447       evict();
448     } else {
449       evictionThread.evict();
450     }
451   }
452 
453   /**
454    * Eviction method.
455    */
456   void evict() {
457 
458     // Ensure only one eviction at a time
459     if(!evictionLock.tryLock()) return;
460 
461     try {
462       evictionInProgress = true;
463       long currentSize = this.size.get();
464       long bytesToFree = currentSize - minSize();
465 
466       if (LOG.isTraceEnabled()) {
467         LOG.trace("Block cache LRU eviction started; Attempting to free " +
468           StringUtils.byteDesc(bytesToFree) + " of total=" +
469           StringUtils.byteDesc(currentSize));
470       }
471 
472       if(bytesToFree <= 0) return;
473 
474       // Instantiate priority buckets
475       BlockBucket bucketSingle = new BlockBucket(bytesToFree, blockSize,
476           singleSize());
477       BlockBucket bucketMulti = new BlockBucket(bytesToFree, blockSize,
478           multiSize());
479       BlockBucket bucketMemory = new BlockBucket(bytesToFree, blockSize,
480           memorySize());
481 
482       // Scan entire map putting into appropriate buckets
483       for(CachedBlock cachedBlock : map.values()) {
484         switch(cachedBlock.getPriority()) {
485           case SINGLE: {
486             bucketSingle.add(cachedBlock);
487             break;
488           }
489           case MULTI: {
490             bucketMulti.add(cachedBlock);
491             break;
492           }
493           case MEMORY: {
494             bucketMemory.add(cachedBlock);
495             break;
496           }
497         }
498       }
499 
500       PriorityQueue<BlockBucket> bucketQueue =
501         new PriorityQueue<BlockBucket>(3);
502 
503       bucketQueue.add(bucketSingle);
504       bucketQueue.add(bucketMulti);
505       bucketQueue.add(bucketMemory);
506 
507       int remainingBuckets = 3;
508       long bytesFreed = 0;
509 
510       BlockBucket bucket;
511       while((bucket = bucketQueue.poll()) != null) {
512         long overflow = bucket.overflow();
513         if(overflow > 0) {
514           long bucketBytesToFree = Math.min(overflow,
515             (bytesToFree - bytesFreed) / remainingBuckets);
516           bytesFreed += bucket.free(bucketBytesToFree);
517         }
518         remainingBuckets--;
519       }
520 
521       if (LOG.isTraceEnabled()) {
522         long single = bucketSingle.totalSize();
523         long multi = bucketMulti.totalSize();
524         long memory = bucketMemory.totalSize();
525         LOG.trace("Block cache LRU eviction completed; " +
526           "freed=" + StringUtils.byteDesc(bytesFreed) + ", " +
527           "total=" + StringUtils.byteDesc(this.size.get()) + ", " +
528           "single=" + StringUtils.byteDesc(single) + ", " +
529           "multi=" + StringUtils.byteDesc(multi) + ", " +
530           "memory=" + StringUtils.byteDesc(memory));
531       }
532     } finally {
533       stats.evict();
534       evictionInProgress = false;
535       evictionLock.unlock();
536     }
537   }
538 
539   /**
540    * Used to group blocks into priority buckets.  There will be a BlockBucket
541    * for each priority (single, multi, memory).  Once bucketed, the eviction
542    * algorithm takes the appropriate number of elements out of each according
543    * to configuration parameters and their relative sizes.
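   * <p>
   * Worked example (illustrative numbers): suppose 60 MB must be freed and the
   * buckets exceed their chunks by single = -10 MB (under its chunk), multi =
   * 40 MB and memory = 50 MB.  Buckets are visited in ascending overflow order:
   * single is skipped, multi frees min(40, 60/2) = 30 MB, and memory frees the
   * remaining min(50, 30/1) = 30 MB.  Each bucket may overshoot slightly because
   * whole blocks are evicted.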
544    */
545   private class BlockBucket implements Comparable<BlockBucket> {
546 
547     private CachedBlockQueue queue;
548     private long totalSize = 0;
549     private long bucketSize;
550 
551     public BlockBucket(long bytesToFree, long blockSize, long bucketSize) {
552       this.bucketSize = bucketSize;
553       queue = new CachedBlockQueue(bytesToFree, blockSize);
554       totalSize = 0;
555     }
556 
557     public void add(CachedBlock block) {
558       totalSize += block.heapSize();
559       queue.add(block);
560     }
561 
562     public long free(long toFree) {
563       CachedBlock cb;
564       long freedBytes = 0;
565       while ((cb = queue.pollLast()) != null) {
566         freedBytes += evictBlock(cb, true);
567         if (freedBytes >= toFree) {
568           return freedBytes;
569         }
570       }
571       return freedBytes;
572     }
573 
574     public long overflow() {
575       return totalSize - bucketSize;
576     }
577 
578     public long totalSize() {
579       return totalSize;
580     }
581 
582     public int compareTo(BlockBucket that) {
583       if(this.overflow() == that.overflow()) return 0;
584       return this.overflow() > that.overflow() ? 1 : -1;
585     }
586 
587     @Override
588     public boolean equals(Object that) {
589       if (that == null || !(that instanceof BlockBucket)) {
590         return false;
591       }
592 
593       return compareTo((BlockBucket) that) == 0;
594     }
595 
596   }
597 
598   /**
599    * Get the maximum size of this cache.
600    * @return max size in bytes
601    */
602   public long getMaxSize() {
603     return this.maxSize;
604   }
605 
606   @Override
607   public long getCurrentSize() {
608     return this.size.get();
609   }
610 
611   @Override
612   public long getFreeSize() {
613     return getMaxSize() - getCurrentSize();
614   }
615 
616   @Override
617   public long size() {
618     return this.elements.get();
619   }
620 
621   @Override
622   public long getBlockCount() {
623     return this.elements.get();
624   }
625 
626   /**
627    * Get the number of eviction runs that have occurred
628    */
629   public long getEvictionCount() {
630     return this.stats.getEvictionCount();
631   }
632 
633   @Override
634   public long getEvictedCount() {
635     return this.stats.getEvictedCount();
636   }
637 
638   EvictionThread getEvictionThread() {
639     return this.evictionThread;
640   }
641 
642   /*
643    * Eviction thread.  Sits in waiting state until an eviction is triggered
644    * when the cache size grows above the acceptable level.<p>
645    *
646    * Thread is triggered into action by {@link LruBlockCache#runEviction()}
647    */
648   static class EvictionThread extends HasThread {
649     private WeakReference<LruBlockCache> cache;
650     private boolean go = true;
651     // flag set after entering the run method; used by tests
652     private boolean enteringRun = false;
653 
654     public EvictionThread(LruBlockCache cache) {
655       super(Thread.currentThread().getName() + ".LruBlockCache.EvictionThread");
656       setDaemon(true);
657       this.cache = new WeakReference<LruBlockCache>(cache);
658     }
659 
660     @Override
661     public void run() {
662       enteringRun = true;
663       while (this.go) {
664         synchronized(this) {
665           try {
666             this.wait();
667           } catch(InterruptedException e) {}
668         }
669         LruBlockCache cache = this.cache.get();
670         if(cache == null) break;
671         cache.evict();
672       }
673     }
674 
675     public void evict() {
676       synchronized(this) {
677         this.notifyAll(); // FindBugs NN_NAKED_NOTIFY
678       }
679     }
680 
681     synchronized void shutdown() {
682       this.go = false;
683       this.notifyAll();
684     }
685 
686     /**
687      * Used by tests.
688      */
689     boolean isEnteringRun() {
690       return this.enteringRun;
691     }
692   }
693 
694   /*
695    * Statistics thread.  Periodically prints the cache statistics to the log.
696    */
697   static class StatisticsThread extends Thread {
698     LruBlockCache lru;
699 
700     public StatisticsThread(LruBlockCache lru) {
701       super("LruBlockCache.StatisticsThread");
702       setDaemon(true);
703       this.lru = lru;
704     }
705     @Override
706     public void run() {
707       lru.logStats();
708     }
709   }
710 
711   public void logStats() {
712     if (!LOG.isDebugEnabled()) return;
713     // Log size
714     long totalSize = heapSize();
715     long freeSize = maxSize - totalSize;
716     LruBlockCache.LOG.debug("Total=" + StringUtils.byteDesc(totalSize) + ", " +
717         "free=" + StringUtils.byteDesc(freeSize) + ", " +
718         "max=" + StringUtils.byteDesc(this.maxSize) + ", " +
719         "blocks=" + size() +", " +
720         "accesses=" + stats.getRequestCount() + ", " +
721         "hits=" + stats.getHitCount() + ", " +
722         "hitRatio=" +
723           (stats.getHitCount() == 0 ? "0" : StringUtils.formatPercent(stats.getHitRatio(), 2)) + ", " +
724         "cachingAccesses=" + stats.getRequestCachingCount() + ", " +
725         "cachingHits=" + stats.getHitCachingCount() + ", " +
726         "cachingHitsRatio=" +
727           (stats.getHitCachingCount() == 0 ? "0" : StringUtils.formatPercent(stats.getHitCachingRatio(), 2)) + ", " +
728         "evictions=" + stats.getEvictionCount() + ", " +
729         "evicted=" + stats.getEvictedCount() + ", " +
730         "evictedPerRun=" + stats.evictedPerEviction());
731   }
732 
733   /**
734    * Get counter statistics for this cache.
735    *
736    * <p>Includes: total accesses, hits, misses, evicted blocks, and runs
737    * of the eviction process.
738    */
739   public CacheStats getStats() {
740     return this.stats;
741   }
742 
743   public final static long CACHE_FIXED_OVERHEAD = ClassSize.align(
744       (3 * Bytes.SIZEOF_LONG) + (9 * ClassSize.REFERENCE) +
745       (5 * Bytes.SIZEOF_FLOAT) + Bytes.SIZEOF_BOOLEAN
746       + ClassSize.OBJECT);
747 
748   // HeapSize implementation
749   public long heapSize() {
750     return getCurrentSize();
751   }
752 
753   public static long calculateOverhead(long maxSize, long blockSize, int concurrency){
754     // FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG
755     return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP +
756         ((long)Math.ceil(maxSize*1.2/blockSize)
757             * ClassSize.CONCURRENT_HASHMAP_ENTRY) +
758         ((long)concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
759   }
760 
761   @Override
762   public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(Configuration conf) throws IOException {
763 
764     Map<String, Path> sfMap = FSUtils.getTableStoreFilePathMap(
765         FileSystem.get(conf),
766         FSUtils.getRootDir(conf));
767 
768     // quirky, but it's a compound key and this is a shortcut taken instead of
769     // creating a class that would represent only a key.
770     Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs =
771       new HashMap<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary>();
772 
773     for (CachedBlock cb : map.values()) {
774       String sf = cb.getCacheKey().getHfileName();
775       Path path = sfMap.get(sf);
776       if ( path != null) {
777         BlockCacheColumnFamilySummary lookup =
778           BlockCacheColumnFamilySummary.createFromStoreFilePath(path);
779         BlockCacheColumnFamilySummary bcse = bcs.get(lookup);
780         if (bcse == null) {
781           bcse = BlockCacheColumnFamilySummary.create(lookup);
782           bcs.put(lookup,bcse);
783         }
784         bcse.incrementBlocks();
785         bcse.incrementHeapSize(cb.heapSize());
786       }
787     }
788     List<BlockCacheColumnFamilySummary> list =
789         new ArrayList<BlockCacheColumnFamilySummary>(bcs.values());
790     Collections.sort( list );
791     return list;
792   }
793 
794   // Simple calculators of sizes given factors and maxSize
795 
796   private long acceptableSize() {
797     return (long)Math.floor(this.maxSize * this.acceptableFactor);
798   }
799   private long minSize() {
800     return (long)Math.floor(this.maxSize * this.minFactor);
801   }
802   private long singleSize() {
803     return (long)Math.floor(this.maxSize * this.singleFactor * this.minFactor);
804   }
805   private long multiSize() {
806     return (long)Math.floor(this.maxSize * this.multiFactor * this.minFactor);
807   }
808   private long memorySize() {
809     return (long)Math.floor(this.maxSize * this.memoryFactor * this.minFactor);
810   }
811 
812   public void shutdown() {
813     if (victimHandler != null)
814       victimHandler.shutdown();
815     this.scheduleThreadPool.shutdown();
816     for (int i = 0; i < 10; i++) {
817       if (!this.scheduleThreadPool.isShutdown()) Threads.sleep(10);
818     }
819     if (!this.scheduleThreadPool.isShutdown()) {
820       List<Runnable> runnables = this.scheduleThreadPool.shutdownNow();
821       LOG.debug("Still running " + runnables);
822     }
823     this.evictionThread.shutdown();
824   }
825 
826   /** Clears the cache. Used in tests. */
827   public void clearCache() {
828     map.clear();
829   }
830 
831   /**
832    * Used in testing. May be very inefficient.
833    * @return the set of cached file names
834    */
835   SortedSet<String> getCachedFileNamesForTest() {
836     SortedSet<String> fileNames = new TreeSet<String>();
837     for (BlockCacheKey cacheKey : map.keySet()) {
838       fileNames.add(cacheKey.getHfileName());
839     }
840     return fileNames;
841   }
842 
843   Map<BlockType, Integer> getBlockTypeCountsForTest() {
844     Map<BlockType, Integer> counts =
845         new EnumMap<BlockType, Integer>(BlockType.class);
846     for (CachedBlock cb : map.values()) {
847       BlockType blockType = ((HFileBlock) cb.getBuffer()).getBlockType();
848       Integer count = counts.get(blockType);
849       counts.put(blockType, (count == null ? 0 : count) + 1);
850     }
851     return counts;
852   }
853 
854   public Map<DataBlockEncoding, Integer> getEncodingCountsForTest() {
855     Map<DataBlockEncoding, Integer> counts =
856         new EnumMap<DataBlockEncoding, Integer>(DataBlockEncoding.class);
857     for (BlockCacheKey cacheKey : map.keySet()) {
858       DataBlockEncoding encoding = cacheKey.getDataBlockEncoding();
859       Integer count = counts.get(encoding);
860       counts.put(encoding, (count == null ? 0 : count) + 1);
861     }
862     return counts;
863   }
864 
865   public void setVictimCache(BucketCache handler) {
866     assert victimHandler == null;
867     victimHandler = handler;
868   }
869 
870 }