/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.DataInput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.NoTagsKeyValue;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.IdLock;
import org.apache.hadoop.io.WritableUtils;
import org.cloudera.htrace.Trace;
import org.cloudera.htrace.TraceScope;

import com.google.common.annotations.VisibleForTesting;

/**
 * {@link HFile} reader for version 2.
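 *
 * <p>A minimal usage sketch, for orientation only. The factory call and the
 * {@code loadFileInfo()} step reflect the surrounding HFile API; treat the
 * exact argument list, and the {@code fs}/{@code path}/{@code cacheConf}/
 * {@code conf} variables, as assumptions for the example:
 * <pre>
 *   HFile.Reader reader = HFile.createReader(fs, path, cacheConf, conf);
 *   reader.loadFileInfo();
 *   HFileScanner scanner = reader.getScanner(true, false, false);
 *   if (scanner.seekTo()) {
 *     do {
 *       KeyValue kv = scanner.getKeyValue();
 *       // process kv ...
 *     } while (scanner.next());
 *   }
 *   reader.close();
 * </pre>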
 */
@InterfaceAudience.Private
public class HFileReaderV2 extends AbstractHFileReader {

  private static final Log LOG = LogFactory.getLog(HFileReaderV2.class);

  /** Minor versions in HFile V2 starting with this number have HBase checksums */
  public static final int MINOR_VERSION_WITH_CHECKSUM = 1;
  /** The HFile V2 minor version that does not support checksums */
  public static final int MINOR_VERSION_NO_CHECKSUM = 0;

  /** HFile minor version that introduced the protobuf-based file trailer */
  public static final int PBUF_TRAILER_MINOR_VERSION = 2;

  /**
   * The size of a (key length, value length) tuple that prefixes each entry in
   * a data block.
   */
  public static final int KEY_VALUE_LEN_SIZE = 2 * Bytes.SIZEOF_INT;
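
  // Layout of one entry in a data block, as consumed by ScannerV2's
  // readKeyValueLen() and blockSeek() below (the trailing memstore timestamp
  // is a vlong, present only when includesMemstoreTS is true):
  //
  //   [int keyLen][int valueLen][key bytes][value bytes][vlong memstoreTS?]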

  protected boolean includesMemstoreTS = false;
  protected boolean decodeMemstoreTS = false;

  protected boolean shouldIncludeMemstoreTS() {
    return includesMemstoreTS;
  }

  /** Filesystem-level block reader. */
  protected HFileBlock.FSReader fsBlockReader;

  /**
   * A "sparse lock" implementation that allows locking on a particular block,
   * identified by its offset. The purpose is to keep two clients from loading
   * the same block at once: all but one client wait for the winner, then get
   * the block from the cache.
   */
  private IdLock offsetLock = new IdLock();
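
  // How the sparse lock is used, condensed from readBlock() below:
  //
  //   IdLock.Entry lockEntry = offsetLock.getLockEntry(dataBlockOffset);
  //   try {
  //     // re-check the block cache; only on a true miss read the block from
  //     // the filesystem and populate the cache
  //   } finally {
  //     offsetLock.releaseLockEntry(lockEntry);
  //   }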

  /**
   * Blocks read from the load-on-open section, excluding data root index, meta
   * index, and file info.
   */
  private List<HFileBlock> loadOnOpenBlocks = new ArrayList<HFileBlock>();

  /** Minimum minor version supported by this HFile format */
  static final int MIN_MINOR_VERSION = 0;

  /** Maximum minor version supported by this HFile format */
  // We went to minor version 2 when we moved to protobuf-encoding the file
  // info and the trailer. Readers at this version can still read the
  // Writables-encoded version 1 variants.
  static final int MAX_MINOR_VERSION = 3;

  /** Minor versions starting with this number have a faked index key */
  static final int MINOR_VERSION_WITH_FAKED_KEY = 3;

  protected HFileContext hfileContext;

  /**
   * Opens an HFile. You must load the index before you can use it, by calling
   * {@link #loadFileInfo()}.
   *
   * @param path Path to the HFile.
   * @param trailer File trailer.
   * @param fsdis Input stream wrapper.
   * @param size Length of the stream.
   * @param cacheConf Cache configuration.
   * @param hfs The file system to read from.
   * @param conf The configuration to use.
   */
  public HFileReaderV2(final Path path, final FixedFileTrailer trailer,
      final FSDataInputStreamWrapper fsdis, final long size, final CacheConfig cacheConf,
      final HFileSystem hfs, final Configuration conf) throws IOException {
    super(path, trailer, size, cacheConf, hfs, conf);
    this.conf = conf;
    trailer.expectMajorVersion(getMajorVersion());
    validateMinorVersion(path, trailer.getMinorVersion());
    this.hfileContext = createHFileContext(fsdis, fileSize, hfs, path, trailer);
    HFileBlock.FSReaderV2 fsBlockReaderV2 = new HFileBlock.FSReaderV2(fsdis, fileSize, hfs, path,
        hfileContext);
    this.fsBlockReader = fsBlockReaderV2; // upcast

    // Comparator class name is stored in the trailer in version 2.
    comparator = trailer.createComparator();
    dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
        trailer.getNumDataIndexLevels(), this);
    metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
        KeyValue.RAW_COMPARATOR, 1);

    // Parse load-on-open data.

    HFileBlock.BlockIterator blockIter = fsBlockReaderV2.blockRange(
        trailer.getLoadOnOpenDataOffset(),
        fileSize - trailer.getTrailerSize());

    // Data index. We also read statistics about the block index written after
    // the root level.
    dataBlockIndexReader.readMultiLevelIndexRoot(
        blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
        trailer.getDataIndexCount());

    // Meta index.
    metaBlockIndexReader.readRootIndex(
        blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
        trailer.getMetaIndexCount());

    // File info
    fileInfo = new FileInfo();
    fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
    lastKey = fileInfo.get(FileInfo.LASTKEY);
    avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
    avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
    byte[] keyValueFormatVersion =
        fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION);
    includesMemstoreTS = keyValueFormatVersion != null &&
        Bytes.toInt(keyValueFormatVersion) ==
            HFileWriterV2.KEY_VALUE_VER_WITH_MEMSTORE;
    fsBlockReaderV2.setIncludesMemstoreTS(includesMemstoreTS);
    if (includesMemstoreTS) {
      decodeMemstoreTS = Bytes.toLong(fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY)) > 0;
    }

    // Read data block encoding algorithm name from file info.
    dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo);
    fsBlockReaderV2.setDataBlockEncoder(dataBlockEncoder);

    // Store all other load-on-open blocks for further consumption.
    HFileBlock b;
    while ((b = blockIter.nextBlock()) != null) {
      loadOnOpenBlocks.add(b);
    }

    // Prefetch file blocks upon open if requested
    if (cacheConf.shouldPrefetchOnOpen()) {
      PrefetchExecutor.request(path, new Runnable() {
        public void run() {
          long offset = 0;
          long end = 0;
          try {
            end = getTrailer().getLoadOnOpenDataOffset();
            HFileBlock prevBlock = null;
            if (LOG.isTraceEnabled()) {
              LOG.trace("Prefetch start " + getPathOffsetEndStr(path, offset, end));
            }
            while (offset < end) {
              if (Thread.interrupted()) {
                break;
              }
              long onDiskSize = -1;
              if (prevBlock != null) {
                onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
              }
              HFileBlock block = readBlock(offset, onDiskSize, true, false, false, false, null);
              prevBlock = block;
              offset += block.getOnDiskSizeWithHeader();
            }
          } catch (IOException e) {
            // IOExceptions are probably due to region closes (relocation, etc.)
            if (LOG.isTraceEnabled()) {
              LOG.trace("Prefetch " + getPathOffsetEndStr(path, offset, end), e);
            }
          } catch (NullPointerException e) {
            LOG.warn("Stream moved/closed or prefetch cancelled? " +
                getPathOffsetEndStr(path, offset, end), e);
          } catch (Exception e) {
            // Other exceptions are interesting
            LOG.warn("Prefetch " + getPathOffsetEndStr(path, offset, end), e);
          } finally {
            PrefetchExecutor.complete(path);
          }
        }
      });
    }
  }

  protected HFileContext createHFileContext(FSDataInputStreamWrapper fsdis, long fileSize,
      HFileSystem hfs, Path path, FixedFileTrailer trailer) throws IOException {
    return new HFileContextBuilder()
      .withIncludesMvcc(this.includesMemstoreTS)
      .withCompression(this.compressAlgo)
      .withHBaseCheckSum(trailer.getMinorVersion() >= MINOR_VERSION_WITH_CHECKSUM)
      .build();
  }

  private static String getPathOffsetEndStr(final Path path, final long offset, final long end) {
    return "path=" + path.toString() + ", offset=" + offset + ", end=" + end;
  }

  /**
   * Create a Scanner on this file. No seeks or reads are done on creation. Call
   * {@link HFileScanner#seekTo(byte[])} to position the scanner and start the
   * read. There is nothing to clean up in a Scanner. Letting go of your
   * references to the scanner is sufficient.
   *
   * @param cacheBlocks True if we should cache blocks read in by this scanner.
   * @param pread Use positional read rather than seek+read if true (pread is
   *          better for random reads, seek+read is better for scanning).
   * @param isCompaction is scanner being used for a compaction?
   * @return Scanner on this file.
   */
  @Override
  public HFileScanner getScanner(boolean cacheBlocks, final boolean pread,
      final boolean isCompaction) {
    if (dataBlockEncoder.useEncodedScanner()) {
      return new EncodedScannerV2(this, cacheBlocks, pread, isCompaction,
          hfileContext);
    }

    return new ScannerV2(this, cacheBlocks, pread, isCompaction);
  }

  /**
   * @param metaBlockName name of the meta block to read
   * @param cacheBlock Add block to cache, if found
   * @return block wrapped in a ByteBuffer, with header skipped
   * @throws IOException
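   *
   * <p>Example (a sketch; {@code "MYMETA"} is a hypothetical meta block name,
   * meaningful only if the writer stored a meta block under that name):
   * <pre>
   *   ByteBuffer meta = reader.getMetaBlock("MYMETA", false);
   *   if (meta != null) {
   *     // buffer starts at the block payload; the header is already skipped
   *   }
   * </pre>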
   */
  @Override
  public ByteBuffer getMetaBlock(String metaBlockName, boolean cacheBlock)
      throws IOException {
    if (trailer.getMetaIndexCount() == 0) {
      return null; // there are no meta blocks
    }
    if (metaBlockIndexReader == null) {
      throw new IOException("Meta index not loaded");
    }

    byte[] mbname = Bytes.toBytes(metaBlockName);
    int block = metaBlockIndexReader.rootBlockContainingKey(mbname, 0,
        mbname.length);
    if (block == -1) {
      return null;
    }
    long blockSize = metaBlockIndexReader.getRootBlockDataSize(block);

    // Per meta key from any given file, synchronize reads for said block. This
    // is OK to do for meta blocks because the meta block index is always
    // single-level.
    synchronized (metaBlockIndexReader.getRootBlockKey(block)) {
      // Check cache for block. If found return.
      long metaBlockOffset = metaBlockIndexReader.getRootBlockOffset(block);
      BlockCacheKey cacheKey = new BlockCacheKey(name, metaBlockOffset,
          DataBlockEncoding.NONE, BlockType.META);

      cacheBlock &= cacheConf.shouldCacheDataOnRead();
      if (cacheConf.isBlockCacheEnabled()) {
        HFileBlock cachedBlock =
          (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock, false, true);
        if (cachedBlock != null) {
          assert cachedBlock.isUnpacked() : "Packed block leak.";
          // Return a distinct 'shallow copy' of the block,
          // so pos does not get messed by the scanner
          return cachedBlock.getBufferWithoutHeader();
        }
        // Cache Miss, please load.
      }

      HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset,
          blockSize, -1, true).unpack(hfileContext, fsBlockReader);

      // Cache the block
      if (cacheBlock) {
        cacheConf.getBlockCache().cacheBlock(cacheKey, metaBlock,
            cacheConf.isInMemory());
      }

      return metaBlock.getBufferWithoutHeader();
    }
  }

  /**
   * Read in a file block.
   * @param dataBlockOffset offset to read.
   * @param onDiskBlockSize size of the block
   * @param cacheBlock whether the block should be added to the block cache
   * @param pread Use positional read instead of seek+read (positional is
   *          better for random reads whereas seek+read is better for
   *          scanning).
   * @param isCompaction is this block being read as part of a compaction
   * @param updateCacheMetrics whether to update cache hit/miss metrics
   * @param expectedBlockType the block type we are expecting to read with this
   *          read operation, or null to read whatever block type is available
   *          and avoid checking (that might reduce caching efficiency of
   *          encoded data blocks)
   * @return Block wrapped in a ByteBuffer.
   * @throws IOException
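   *
   * <p>Cache interaction, in outline (a summary of the implementation below,
   * not an additional contract):
   * <pre>
   *   1. probe the block cache without a lock; on a hit, return the block
   *   2. on a miss, take the per-offset IdLock and probe the cache again
   *   3. on a true miss, read from the filesystem, unpack, optionally cache
   * </pre>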
   */
  @Override
  public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize,
      final boolean cacheBlock, boolean pread, final boolean isCompaction,
      final boolean updateCacheMetrics, BlockType expectedBlockType)
      throws IOException {
    if (dataBlockIndexReader == null) {
      throw new IOException("Block index not loaded");
    }
    long trailerOffset = trailer.getLoadOnOpenDataOffset();
    if (dataBlockOffset < 0 || dataBlockOffset >= trailerOffset) {
      throw new IOException("Requested block is out of range: " + dataBlockOffset +
        ", lastDataBlockOffset: " + trailer.getLastDataBlockOffset() +
        ", trailer.getLoadOnOpenDataOffset: " + trailerOffset);
    }
    // For any given block from any given file, synchronize reads for said
    // block.
    // Without a cache, this synchronizing is needless overhead, but really
    // the other choice is to duplicate work (which the cache would prevent you
    // from doing).

    BlockCacheKey cacheKey =
        new BlockCacheKey(name, dataBlockOffset,
            dataBlockEncoder.getDataBlockEncoding(),
            expectedBlockType);

    boolean useLock = false;
    IdLock.Entry lockEntry = null;
    TraceScope traceScope = Trace.startSpan("HFileReaderV2.readBlock");
    try {
      while (true) {
        // Check cache for block. If found return.
        if (cacheConf.shouldReadBlockFromCache(expectedBlockType)) {
          if (useLock) {
            lockEntry = offsetLock.getLockEntry(dataBlockOffset);
          }
          // Try and get the block from the block cache. If the useLock variable is true then this
          // is the second time through the loop and it should not be counted as a block cache miss.
          HFileBlock cachedBlock = (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
            cacheBlock, useLock, updateCacheMetrics);
          if (cachedBlock != null) {
            if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) {
              cachedBlock = cachedBlock.unpack(hfileContext, fsBlockReader);
            }
            if (Trace.isTracing()) {
              traceScope.getSpan().addTimelineAnnotation("blockCacheHit");
            }
            assert cachedBlock.isUnpacked() : "Packed block leak.";
            if (cachedBlock.getBlockType().isData()) {
              HFile.dataBlockReadCnt.incrementAndGet();

              // Validate encoding type for data blocks. We include encoding
              // type in the cache key, and we expect it to match on a cache hit.
              if (cachedBlock.getDataBlockEncoding() != dataBlockEncoder.getDataBlockEncoding()) {
                throw new IOException("Cached block under key " + cacheKey + " "
                  + "has wrong encoding: " + cachedBlock.getDataBlockEncoding() + " (expected: "
                  + dataBlockEncoder.getDataBlockEncoding() + ")");
              }
            }
            return cachedBlock;
          }
          if (!useLock && cacheBlock && cacheConf.shouldLockOnCacheMiss(expectedBlockType)) {
            // check cache again with lock
            useLock = true;
            continue;
          }
          // Carry on, please load.
        }

        if (Trace.isTracing()) {
          traceScope.getSpan().addTimelineAnnotation("blockCacheMiss");
        }
        // Load block from filesystem.
        HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, -1,
            pread);
        validateBlockType(hfileBlock, expectedBlockType);
        HFileBlock unpacked = hfileBlock.unpack(hfileContext, fsBlockReader);
        BlockType.BlockCategory category = hfileBlock.getBlockType().getCategory();

        // Cache the block if necessary
        if (cacheBlock && cacheConf.shouldCacheBlockOnRead(category)) {
          cacheConf.getBlockCache().cacheBlock(cacheKey,
            cacheConf.shouldCacheCompressed(category) ? hfileBlock : unpacked,
            cacheConf.isInMemory());
        }

        if (updateCacheMetrics && hfileBlock.getBlockType().isData()) {
          HFile.dataBlockReadCnt.incrementAndGet();
        }

        return unpacked;
      }
    } finally {
      traceScope.close();
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }

  @Override
  public boolean hasMVCCInfo() {
    return includesMemstoreTS && decodeMemstoreTS;
  }

  /**
   * Compares the actual type of a block retrieved from cache or disk with its
   * expected type and throws an exception in case of a mismatch. An expected
   * block type of {@link BlockType#DATA} is considered to match the actual
   * block type {@link BlockType#ENCODED_DATA} as well.
   * @param block a block retrieved from cache or disk
   * @param expectedBlockType the expected block type, or null to skip the
   *          check
   */
  private void validateBlockType(HFileBlock block,
      BlockType expectedBlockType) throws IOException {
    if (expectedBlockType == null) {
      return;
    }
    BlockType actualBlockType = block.getBlockType();
    if (actualBlockType == BlockType.ENCODED_DATA &&
        expectedBlockType == BlockType.DATA) {
      // We consider DATA to match ENCODED_DATA for the purpose of this
      // verification.
      return;
    }
    if (actualBlockType != expectedBlockType) {
      throw new IOException("Expected block type " + expectedBlockType + ", " +
          "but got " + actualBlockType + ": " + block);
    }
  }

  /**
   * @return Last key in the file. May be null if file has no entries. Note that
   *         this is not the last row key, but rather the byte form of the last
   *         KeyValue.
   */
  @Override
  public byte[] getLastKey() {
    return dataBlockIndexReader.isEmpty() ? null : lastKey;
  }

  /**
   * @return Midkey for this file. We work with block boundaries only so
   *         returned midkey is an approximation only.
   * @throws IOException
   */
  @Override
  public byte[] midkey() throws IOException {
    return dataBlockIndexReader.midkey();
  }

  @Override
  public void close() throws IOException {
    close(cacheConf.shouldEvictOnClose());
  }

  public void close(boolean evictOnClose) throws IOException {
    PrefetchExecutor.cancel(path);
    if (evictOnClose && cacheConf.isBlockCacheEnabled()) {
      int numEvicted = cacheConf.getBlockCache().evictBlocksByHfileName(name);
      if (LOG.isTraceEnabled()) {
        LOG.trace("On close, file=" + name + " evicted=" + numEvicted
          + " block(s)");
      }
    }
    fsBlockReader.closeStreams();
  }

  /** For testing */
  @Override
  HFileBlock.FSReader getUncachedBlockReader() {
    return fsBlockReader;
  }

  protected abstract static class AbstractScannerV2
      extends AbstractHFileReader.Scanner {
    protected HFileBlock block;

    @Override
    public byte[] getNextIndexedKey() {
      return nextIndexedKey;
    }

    /**
     * Keeps track of the indexed key of the next data block. If this is
     * HConstants.NO_NEXT_INDEXED_KEY, the current data block is the last data
     * block. If this is null, the next indexed key has not been loaded yet.
     */
    protected byte[] nextIndexedKey;

    public AbstractScannerV2(HFileReaderV2 r, boolean cacheBlocks,
        final boolean pread, final boolean isCompaction) {
      super(r, cacheBlocks, pread, isCompaction);
    }

    /**
     * An internal API function. Seek to the given key, optionally rewinding to
     * the first key of the block before doing the seek.
     *
     * @param key key byte array
     * @param offset key offset in the key byte array
     * @param length key length
     * @param rewind whether to rewind to the first key of the block before
     *        doing the seek. If this is false, we are assuming we never go
     *        back, otherwise the result is undefined.
     * @return -1 if the key is earlier than the first key of the file,
     *         0 if we are at the given key, 1 if we are past the given key,
     *         -2 if the key is earlier than the first key of the file while
     *         using a faked index key
     * @throws IOException
     */
    protected int seekTo(byte[] key, int offset, int length, boolean rewind)
        throws IOException {
      HFileBlockIndex.BlockIndexReader indexReader =
          reader.getDataBlockIndexReader();
      BlockWithScanInfo blockWithScanInfo =
        indexReader.loadDataBlockWithScanInfo(key, offset, length, block,
            cacheBlocks, pread, isCompaction);
      if (blockWithScanInfo == null || blockWithScanInfo.getHFileBlock() == null) {
        // This happens, e.g., if the key falls before the beginning of the file.
        return -1;
      }
      return loadBlockAndSeekToKey(blockWithScanInfo.getHFileBlock(),
          blockWithScanInfo.getNextIndexedKey(), rewind, key, offset, length, false);
    }

    protected abstract ByteBuffer getFirstKeyInBlock(HFileBlock curBlock);

    protected abstract int loadBlockAndSeekToKey(HFileBlock seekToBlock, byte[] nextIndexedKey,
        boolean rewind, byte[] key, int offset, int length, boolean seekBefore)
        throws IOException;

    @Override
    public int seekTo(byte[] key, int offset, int length) throws IOException {
      // Always rewind to the first key of the block, because the given key
      // might be before or after the current key.
      return seekTo(key, offset, length, true);
    }

    @Override
    public int reseekTo(byte[] key, int offset, int length) throws IOException {
      int compared;
      if (isSeeked()) {
        compared = compareKey(reader.getComparator(), key, offset, length);
        if (compared < 1) {
          // If the required key is less than or equal to current key, then
          // don't do anything.
          return compared;
        } else {
          if (this.nextIndexedKey != null &&
              (this.nextIndexedKey == HConstants.NO_NEXT_INDEXED_KEY ||
               reader.getComparator().compareFlatKey(key, offset, length,
                   nextIndexedKey, 0, nextIndexedKey.length) < 0)) {
            // The reader shall continue to scan the current data block instead of querying the
            // block index as long as it knows the target key is strictly smaller than
            // the next indexed key or the current data block is the last data block.
            return loadBlockAndSeekToKey(this.block, this.nextIndexedKey,
                false, key, offset, length, false);
          }
        }
      }
      // Don't rewind on a reseek operation, because reseek implies that we are
      // always going forward in the file.
      return seekTo(key, offset, length, false);
    }
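
    // The seek protocol in brief (a sketch): use seekTo() for the initial or
    // an arbitrary repositioning (it rewinds to the start of the target
    // block), and reseekTo() only when moving forward, since reseek never
    // rewinds:
    //
    //   scanner.seekTo(k1, 0, k1.length);
    //   scanner.reseekTo(k2, 0, k2.length); // valid only if k2 sorts >= k1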

    @Override
    public boolean seekBefore(byte[] key, int offset, int length)
        throws IOException {
      HFileBlock seekToBlock =
          reader.getDataBlockIndexReader().seekToDataBlock(key, offset, length,
              block, cacheBlocks, pread, isCompaction);
      if (seekToBlock == null) {
        return false;
      }
      ByteBuffer firstKey = getFirstKeyInBlock(seekToBlock);

      if (reader.getComparator().compareFlatKey(firstKey.array(),
          firstKey.arrayOffset(), firstKey.limit(), key, offset, length) >= 0) {
        long previousBlockOffset = seekToBlock.getPrevBlockOffset();
        // The key we are interested in
        if (previousBlockOffset == -1) {
          // we have a 'problem', the key we want is the first of the file.
          return false;
        }

        // It is important that we compute and pass onDiskSize to the block
        // reader so that it does not have to read the header separately to
        // figure out the size.  Currently, we do not have a way to do this
        // correctly in the general case however.
        // TODO: See https://issues.apache.org/jira/browse/HBASE-14576
        int prevBlockSize = -1;
        seekToBlock = reader.readBlock(previousBlockOffset,
            prevBlockSize, cacheBlocks,
            pread, isCompaction, true, BlockType.DATA);
        // TODO shortcut: seek forward in this block to the last key of the
        // block.
      }
      byte[] firstKeyInCurrentBlock = Bytes.getBytes(firstKey);
      loadBlockAndSeekToKey(seekToBlock, firstKeyInCurrentBlock, true, key, offset, length, true);
      return true;
    }

    /**
     * Scans blocks in the "scanned" section of the {@link HFile} until the next
     * data block is found.
     *
     * @return the next block, or null if there are no more data blocks
     * @throws IOException
     */
    protected HFileBlock readNextDataBlock() throws IOException {
      long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset();
      if (block == null) {
        return null;
      }

      HFileBlock curBlock = block;

      do {
        if (curBlock.getOffset() >= lastDataBlockOffset) {
          return null;
        }

        if (curBlock.getOffset() < 0) {
          throw new IOException("Invalid block file offset: " + block);
        }

        // We are reading the next block without block type validation, because
        // it might turn out to be a non-data block.
        curBlock = reader.readBlock(curBlock.getOffset()
            + curBlock.getOnDiskSizeWithHeader(),
            curBlock.getNextBlockOnDiskSizeWithHeader(), cacheBlocks, pread,
            isCompaction, true, null);
      } while (!curBlock.getBlockType().isData());

      return curBlock;
    }

    /**
     * Compare the given key against the current key.
     * @param comparator the comparator to use
     * @param key key byte array
     * @param offset key offset in the byte array
     * @param length key length
     * @return -1 if the passed key is smaller than the current key, 0 if equal,
     *         and 1 if greater
     */
    public abstract int compareKey(KVComparator comparator, byte[] key, int offset,
        int length);
  }

  /**
   * Implementation of {@link HFileScanner} interface.
   */
  protected static class ScannerV2 extends AbstractScannerV2 {
    private HFileReaderV2 reader;

    public ScannerV2(HFileReaderV2 r, boolean cacheBlocks,
        final boolean pread, final boolean isCompaction) {
      super(r, cacheBlocks, pread, isCompaction);
      this.reader = r;
    }

    @Override
    public KeyValue getKeyValue() {
      if (!isSeeked()) {
        return null;
      }

      // HFile V2 does not support tags.
      return formNoTagsKeyValue();
    }

    protected KeyValue formNoTagsKeyValue() {
      KeyValue ret = new NoTagsKeyValue(blockBuffer.array(), blockBuffer.arrayOffset()
          + blockBuffer.position(), getCellBufSize());
      if (this.reader.shouldIncludeMemstoreTS()) {
        ret.setMvccVersion(currMemstoreTS);
      }
      return ret;
    }

    protected int getCellBufSize() {
      return KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen;
    }

    @Override
    public ByteBuffer getKey() {
      assertSeeked();
      return ByteBuffer.wrap(
          blockBuffer.array(),
          blockBuffer.arrayOffset() + blockBuffer.position()
              + KEY_VALUE_LEN_SIZE, currKeyLen).slice();
    }

    @Override
    public int compareKey(KVComparator comparator, byte[] key, int offset, int length) {
      return comparator.compareFlatKey(key, offset, length, blockBuffer.array(),
          blockBuffer.arrayOffset() + blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen);
    }

    @Override
    public ByteBuffer getValue() {
      assertSeeked();
      return ByteBuffer.wrap(
          blockBuffer.array(),
          blockBuffer.arrayOffset() + blockBuffer.position()
              + KEY_VALUE_LEN_SIZE + currKeyLen, currValueLen).slice();
    }

    protected void setNonSeekedState() {
      block = null;
      blockBuffer = null;
      currKeyLen = 0;
      currValueLen = 0;
      currMemstoreTS = 0;
      currMemstoreTSLen = 0;
    }

    /**
     * Go to the next key/value in the block section. Loads the next block if
     * necessary. If successful, {@link #getKey()} and {@link #getValue()} can
     * be called.
     *
     * @return true if successfully navigated to the next key/value
     */
    @Override
    public boolean next() throws IOException {
      assertSeeked();

      try {
        blockBuffer.position(getNextCellStartPosition());
      } catch (IllegalArgumentException e) {
        LOG.error("Current pos = " + blockBuffer.position()
            + "; currKeyLen = " + currKeyLen + "; currValLen = "
            + currValueLen + "; block limit = " + blockBuffer.limit()
            + "; HFile name = " + reader.getName()
            + "; currBlock currBlockOffset = " + block.getOffset());
        throw e;
      }

      if (blockBuffer.remaining() <= 0) {
        long lastDataBlockOffset =
            reader.getTrailer().getLastDataBlockOffset();

        if (block.getOffset() >= lastDataBlockOffset) {
          setNonSeekedState();
          return false;
        }

        // read the next block
        HFileBlock nextBlock = readNextDataBlock();
        if (nextBlock == null) {
          setNonSeekedState();
          return false;
        }

        updateCurrBlock(nextBlock);
        return true;
      }

      // We are still in the same block.
      readKeyValueLen();
      return true;
    }

    protected int getNextCellStartPosition() {
      return blockBuffer.position() + KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen
          + currMemstoreTSLen;
    }

    /**
     * Positions this scanner at the start of the file.
     *
     * @return false if empty file; i.e. a call to next would return false and
     *         the current key and value are undefined.
     * @throws IOException
     */
    @Override
    public boolean seekTo() throws IOException {
      if (reader == null) {
        return false;
      }

      if (reader.getTrailer().getEntryCount() == 0) {
        // No data blocks.
        return false;
      }

      long firstDataBlockOffset =
          reader.getTrailer().getFirstDataBlockOffset();
      if (block != null && block.getOffset() == firstDataBlockOffset) {
        blockBuffer.rewind();
        readKeyValueLen();
        return true;
      }

      block = reader.readBlock(firstDataBlockOffset, -1, cacheBlocks, pread,
          isCompaction, true, BlockType.DATA);
      if (block.getOffset() < 0) {
        throw new IOException("Invalid block offset: " + block.getOffset());
      }
      updateCurrBlock(block);
      return true;
    }

    @Override
    protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, byte[] nextIndexedKey,
        boolean rewind, byte[] key, int offset, int length, boolean seekBefore)
        throws IOException {
      if (block == null || block.getOffset() != seekToBlock.getOffset()) {
        updateCurrBlock(seekToBlock);
      } else if (rewind) {
        blockBuffer.rewind();
      }

      // Update the nextIndexedKey
      this.nextIndexedKey = nextIndexedKey;
      return blockSeek(key, offset, length, seekBefore);
    }

    /**
     * Updates the current block to be the given {@link HFileBlock}. Seeks to
     * the first key/value pair.
     *
     * @param newBlock the block to make current
     */
    protected void updateCurrBlock(HFileBlock newBlock) {
      block = newBlock;

      // sanity check
      if (block.getBlockType() != BlockType.DATA) {
        throw new IllegalStateException("ScannerV2 works only on data " +
            "blocks, got " + block.getBlockType() + "; " +
            "fileName=" + reader.name + ", " +
            "dataBlockEncoder=" + reader.dataBlockEncoder + ", " +
            "isCompaction=" + isCompaction);
      }

      blockBuffer = block.getBufferWithoutHeader();
      readKeyValueLen();
      blockFetches++;

      // Reset the next indexed key
      this.nextIndexedKey = null;
    }

    protected void readKeyValueLen() {
      blockBuffer.mark();
      currKeyLen = blockBuffer.getInt();
      currValueLen = blockBuffer.getInt();
      ByteBufferUtils.skip(blockBuffer, currKeyLen + currValueLen);
      readMvccVersion();
      if (currKeyLen < 0 || currValueLen < 0
          || currKeyLen > blockBuffer.limit()
          || currValueLen > blockBuffer.limit()) {
        throw new IllegalStateException("Invalid currKeyLen " + currKeyLen
            + " or currValueLen " + currValueLen + ". Block offset: "
            + block.getOffset() + ", block length: " + blockBuffer.limit()
            + ", position: " + blockBuffer.position() + " (without header).");
      }
      blockBuffer.reset();
    }

    protected void readMvccVersion() {
      if (this.reader.shouldIncludeMemstoreTS()) {
        if (this.reader.decodeMemstoreTS) {
          currMemstoreTS = Bytes.readAsVLong(blockBuffer.array(), blockBuffer.arrayOffset()
              + blockBuffer.position());
          currMemstoreTSLen = WritableUtils.getVIntSize(currMemstoreTS);
        } else {
          currMemstoreTS = 0;
          currMemstoreTSLen = 1;
        }
      }
    }

    /**
     * Within a loaded block, seek looking for the last key that is smaller
     * than, or equal to, the key we are interested in.
     *
     * A note on the seekBefore: if you have seekBefore = true, AND the first
     * key in the block = key, then you'll get thrown exceptions. The caller has
     * to check for that case and load the previous block as appropriate.
     *
     * @param key the key to find
     * @param seekBefore find the key before the given key in case of exact
     *          match.
     * @return 0 in case of an exact key match, 1 in case of an inexact match,
     *         -2 in case of an inexact match where, furthermore, the input key
     *         is less than the first key of the current block (e.g. when using
     *         a faked index key)
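     *
     * <p>Return-code handling, in outline (a summary of how callers such as
     * loadBlockAndSeekToKey() interpret the result):
     * <pre>
     *   0   exact match; the scanner is positioned at the key
     *   1   inexact match; the scanner is at the last key smaller than the
     *       requested one
     *   HConstants.INDEX_KEY_MAGIC   the key is smaller than the first
     *       (faked) key of this block
     * </pre>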
     */
    protected int blockSeek(byte[] key, int offset, int length,
        boolean seekBefore) {
      int klen, vlen;
      long memstoreTS = 0;
      int memstoreTSLen = 0;
      int lastKeyValueSize = -1;
      do {
        blockBuffer.mark();
        klen = blockBuffer.getInt();
        vlen = blockBuffer.getInt();
        blockBuffer.reset();
        if (this.reader.shouldIncludeMemstoreTS()) {
          if (this.reader.decodeMemstoreTS) {
            int memstoreTSOffset = blockBuffer.arrayOffset() + blockBuffer.position()
                + KEY_VALUE_LEN_SIZE + klen + vlen;
            memstoreTS = Bytes.readAsVLong(blockBuffer.array(), memstoreTSOffset);
            memstoreTSLen = WritableUtils.getVIntSize(memstoreTS);
          } else {
            memstoreTS = 0;
            memstoreTSLen = 1;
          }
        }

        int keyOffset = blockBuffer.arrayOffset() + blockBuffer.position()
            + KEY_VALUE_LEN_SIZE;
        int comp = reader.getComparator().compareFlatKey(key, offset, length,
            blockBuffer.array(), keyOffset, klen);

        if (comp == 0) {
          if (seekBefore) {
            if (lastKeyValueSize < 0) {
              throw new IllegalStateException("blockSeek with seekBefore "
                  + "at the first key of the block: key="
                  + Bytes.toStringBinary(key) + ", blockOffset="
                  + block.getOffset() + ", onDiskSize="
                  + block.getOnDiskSizeWithHeader());
            }
            blockBuffer.position(blockBuffer.position() - lastKeyValueSize);
            readKeyValueLen();
            return 1; // non exact match.
          }
          currKeyLen = klen;
          currValueLen = vlen;
          if (this.reader.shouldIncludeMemstoreTS()) {
            currMemstoreTS = memstoreTS;
            currMemstoreTSLen = memstoreTSLen;
          }
          return 0; // indicate exact match
        } else if (comp < 0) {
          if (lastKeyValueSize > 0) {
            blockBuffer.position(blockBuffer.position() - lastKeyValueSize);
          }
          readKeyValueLen();
          if (lastKeyValueSize == -1 && blockBuffer.position() == 0
              && this.reader.trailer.getMinorVersion() >= MINOR_VERSION_WITH_FAKED_KEY) {
            return HConstants.INDEX_KEY_MAGIC;
          }
          return 1;
        }

        // The size of this key/value tuple, including key/value length fields.
        lastKeyValueSize = klen + vlen + memstoreTSLen + KEY_VALUE_LEN_SIZE;
        blockBuffer.position(blockBuffer.position() + lastKeyValueSize);
      } while (blockBuffer.remaining() > 0);

      // Seek to the last key we successfully read. This will happen if this is
      // the last key/value pair in the file, in which case the following call
      // to next() has to return false.
      blockBuffer.position(blockBuffer.position() - lastKeyValueSize);
      readKeyValueLen();
      return 1; // didn't exactly find it.
    }

    @Override
    protected ByteBuffer getFirstKeyInBlock(HFileBlock curBlock) {
      ByteBuffer buffer = curBlock.getBufferWithoutHeader();
      // It is safe to manipulate this buffer because we own the buffer object.
      buffer.rewind();
      int klen = buffer.getInt();
      buffer.getInt(); // skip the value length
      ByteBuffer keyBuff = buffer.slice();
      keyBuff.limit(klen);
      keyBuff.rewind();
      return keyBuff;
    }

    @Override
    public String getKeyString() {
      return Bytes.toStringBinary(blockBuffer.array(),
          blockBuffer.arrayOffset() + blockBuffer.position()
              + KEY_VALUE_LEN_SIZE, currKeyLen);
    }

    @Override
    public String getValueString() {
      return Bytes.toString(blockBuffer.array(), blockBuffer.arrayOffset()
          + blockBuffer.position() + KEY_VALUE_LEN_SIZE + currKeyLen,
          currValueLen);
    }
  }

  /**
   * ScannerV2 that operates on encoded data blocks.
   */
  protected static class EncodedScannerV2 extends AbstractScannerV2 {
    private final HFileBlockDecodingContext decodingCtx;
    private final DataBlockEncoder.EncodedSeeker seeker;
    private final DataBlockEncoder dataBlockEncoder;
    protected final HFileContext meta;

    public EncodedScannerV2(HFileReaderV2 reader, boolean cacheBlocks,
        boolean pread, boolean isCompaction, HFileContext meta) {
      super(reader, cacheBlocks, pread, isCompaction);
      DataBlockEncoding encoding = reader.dataBlockEncoder.getDataBlockEncoding();
      dataBlockEncoder = encoding.getEncoder();
      decodingCtx = dataBlockEncoder.newDataBlockDecodingContext(meta);
      seeker = dataBlockEncoder.createSeeker(
        reader.getComparator(), decodingCtx);
      this.meta = meta;
    }

    @Override
    public boolean isSeeked() {
      return this.block != null;
    }

    /**
     * Updates the current block to be the given {@link HFileBlock}. Seeks to
     * the first key/value pair.
     *
     * @param newBlock the block to make current
     * @throws CorruptHFileException
     */
    private void updateCurrentBlock(HFileBlock newBlock) throws CorruptHFileException {
      block = newBlock;

      // sanity checks
      if (block.getBlockType() != BlockType.ENCODED_DATA) {
        throw new IllegalStateException(
            "EncodedScanner works only on encoded data blocks");
      }
      short dataBlockEncoderId = block.getDataBlockEncodingId();
      if (!DataBlockEncoding.isCorrectEncoder(dataBlockEncoder, dataBlockEncoderId)) {
        String encoderCls = dataBlockEncoder.getClass().getName();
        throw new CorruptHFileException("Encoder " + encoderCls
          + " doesn't support data block encoding "
          + DataBlockEncoding.getNameFromId(dataBlockEncoderId));
      }

      seeker.setCurrentBuffer(getEncodedBuffer(newBlock));
      blockFetches++;

      // Reset the next indexed key
      this.nextIndexedKey = null;
    }

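    // Encoded block layout, as sliced by getEncodedBuffer() below:
    //
    //   [block header][short encoding id][encoded key/values ...]
    //
    // The returned buffer skips the header and the 2-byte encoding id
    // (DataBlockEncoding.ID_SIZE), exposing only the encoded key/values.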
    private ByteBuffer getEncodedBuffer(HFileBlock newBlock) {
      ByteBuffer origBlock = newBlock.getBufferReadOnly();
      ByteBuffer encodedBlock = ByteBuffer.wrap(origBlock.array(),
          origBlock.arrayOffset() + newBlock.headerSize() +
          DataBlockEncoding.ID_SIZE,
          newBlock.getUncompressedSizeWithoutHeader() -
          DataBlockEncoding.ID_SIZE).slice();
      return encodedBlock;
    }

    @Override
    public boolean seekTo() throws IOException {
      if (reader == null) {
        return false;
      }

      if (reader.getTrailer().getEntryCount() == 0) {
        // No data blocks.
        return false;
      }

      long firstDataBlockOffset =
          reader.getTrailer().getFirstDataBlockOffset();
      if (block != null && block.getOffset() == firstDataBlockOffset) {
        seeker.rewind();
        return true;
      }

      block = reader.readBlock(firstDataBlockOffset, -1, cacheBlocks, pread,
          isCompaction, true, BlockType.DATA);
      if (block.getOffset() < 0) {
        throw new IOException("Invalid block offset: " + block.getOffset());
      }
      updateCurrentBlock(block);
      return true;
    }

    @Override
    public boolean next() throws IOException {
      boolean isValid = seeker.next();
      if (!isValid) {
        block = readNextDataBlock();
        isValid = block != null;
        if (isValid) {
          updateCurrentBlock(block);
        }
      }
      return isValid;
    }

    @Override
    public ByteBuffer getKey() {
      assertValidSeek();
      return seeker.getKeyDeepCopy();
    }

    @Override
    public int compareKey(KVComparator comparator, byte[] key, int offset, int length) {
      return seeker.compareKey(comparator, key, offset, length);
    }

    @Override
    public ByteBuffer getValue() {
      assertValidSeek();
      return seeker.getValueShallowCopy();
    }

    @Override
    public KeyValue getKeyValue() {
      if (block == null) {
        return null;
      }
      return seeker.getKeyValue();
    }

    @Override
    public String getKeyString() {
      ByteBuffer keyBuffer = getKey();
      return Bytes.toStringBinary(keyBuffer.array(),
          keyBuffer.arrayOffset(), keyBuffer.limit());
    }

    @Override
    public String getValueString() {
      ByteBuffer valueBuffer = getValue();
      return Bytes.toStringBinary(valueBuffer.array(),
          valueBuffer.arrayOffset(), valueBuffer.limit());
    }

    private void assertValidSeek() {
      if (block == null) {
        throw new NotSeekedException();
      }
    }

    @Override
    protected ByteBuffer getFirstKeyInBlock(HFileBlock curBlock) {
      return dataBlockEncoder.getFirstKeyInBlock(getEncodedBuffer(curBlock));
    }

    @Override
    protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, byte[] nextIndexedKey,
        boolean rewind, byte[] key, int offset, int length, boolean seekBefore)
        throws IOException {
      if (block == null || block.getOffset() != seekToBlock.getOffset()) {
        updateCurrentBlock(seekToBlock);
      } else if (rewind) {
        seeker.rewind();
      }
      this.nextIndexedKey = nextIndexedKey;
      return seeker.seekToKeyInBlock(key, offset, length, seekBefore);
    }
  }

  /**
   * Returns a buffer with the Bloom filter metadata. The caller takes
   * ownership of the buffer.
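   *
   * <p>Typical consumption, sketched under the assumption that the caller
   * goes through the usual BloomFilterFactory path:
   * <pre>
   *   DataInput meta = reader.getGeneralBloomFilterMetadata();
   *   if (meta != null) {
   *     BloomFilter bloom = BloomFilterFactory.createFromMeta(meta, reader);
   *   }
   * </pre>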
   */
  @Override
  public DataInput getGeneralBloomFilterMetadata() throws IOException {
    return this.getBloomFilterMetadata(BlockType.GENERAL_BLOOM_META);
  }

  @Override
  public DataInput getDeleteBloomFilterMetadata() throws IOException {
    return this.getBloomFilterMetadata(BlockType.DELETE_FAMILY_BLOOM_META);
  }

  private DataInput getBloomFilterMetadata(BlockType blockType)
      throws IOException {
    if (blockType != BlockType.GENERAL_BLOOM_META &&
        blockType != BlockType.DELETE_FAMILY_BLOOM_META) {
      throw new RuntimeException("Block Type: " + blockType.toString() +
          " is not supported");
    }

    for (HFileBlock b : loadOnOpenBlocks) {
      if (b.getBlockType() == blockType) {
        return b.getByteStream();
      }
    }
    return null;
  }

  @Override
  public boolean isFileInfoLoaded() {
    return true; // We load file info in constructor in version 2.
  }

  /**
   * Validates that the minor version is within acceptable limits.
   * Otherwise throws a RuntimeException.
   */
  private void validateMinorVersion(Path path, int minorVersion) {
    if (minorVersion < MIN_MINOR_VERSION ||
        minorVersion > MAX_MINOR_VERSION) {
      String msg = "Minor version for path " + path +
                   " is expected to be between " +
                   MIN_MINOR_VERSION + " and " + MAX_MINOR_VERSION +
                   " but is found to be " + minorVersion;
      LOG.error(msg);
      throw new RuntimeException(msg);
    }
  }

  @Override
  public int getMajorVersion() {
    return 2;
  }

  @Override
  public HFileContext getFileContext() {
    return hfileContext;
  }

  /**
   * Returns false if block prefetching was requested for this file and has
   * not completed, true otherwise.
   */
  @VisibleForTesting
  boolean prefetchComplete() {
    return PrefetchExecutor.isCompleted(path);
  }
}