/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Controls what kind of data block encoding is used. If data block encoding is
 * not set or the given block is not a data block (encoded or not), methods
 * should just return the unmodified block.
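 *
 * <p>Illustrative sketch only (the {@code encoder} variable and its surroundings
 * are assumptions, not part of this file): an implementation exposes separate
 * on-disk and in-cache encodings, which callers consult roughly like this:
 * <pre>
 *   DataBlockEncoding onDisk = encoder.getEncodingOnDisk();
 *   DataBlockEncoding inCache = encoder.getEncodingInCache();
 *   boolean useEncoded = encoder.useEncodedScanner(isCompaction);
 * </pre>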
 */
@InterfaceAudience.Private
public interface HFileDataBlockEncoder {
  /** Type of encoding used for data blocks in HFile. Stored in file info. */
  byte[] DATA_BLOCK_ENCODING = Bytes.toBytes("DATA_BLOCK_ENCODING");

  /**
   * Converts a block from the on-disk format to the in-cache format. Called in
   * the following cases:
   * <ul>
   * <li>After an encoded or unencoded data block is read from disk, but before
   * it is put into the cache.</li>
   * <li>To convert brand-new blocks to the in-cache format when doing
   * cache-on-write.</li>
   * </ul>
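   *
   * <p>Illustrative sketch only (the reader-side variables are assumptions, not
   * part of this interface):
   * <pre>
   *   HFileBlock diskBlock = ...;   // data block as just read from the HFile
   *   HFileBlock cacheBlock = encoder.diskToCacheFormat(diskBlock, isCompaction);
   *   // cacheBlock is the version that should be handed to the block cache
   * </pre>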
   * @param block a block in an on-disk format (read from HFile or freshly
   *          generated).
   * @param isCompaction whether we are in a compaction
   * @return a non-null block encoded according to the current settings
   */
  public HFileBlock diskToCacheFormat(HFileBlock block,
      boolean isCompaction);

  /**
   * Should be called before an encoded or unencoded data block is written to
   * disk.
   * @param in KeyValues laid out next to each other
   * @param includesMemstoreTS whether memstore timestamps are included with the
   *          KeyValues
   * @param encodingResult the encoding context that receives the encoded result
   * @param blockType block type
   * @throws IOException
   */
  public void beforeWriteToDisk(
      ByteBuffer in, boolean includesMemstoreTS,
      HFileBlockEncodingContext encodingResult,
      BlockType blockType) throws IOException;

  /**
   * Decides whether we should use a scanner over encoded blocks.
   * @param isCompaction whether we are in a compaction.
   * @return whether to use an encoded scanner.
   */
  public boolean useEncodedScanner(boolean isCompaction);

  /**
   * Saves metadata in the HFile that will be written to disk.
   * @param writer writer for a given HFile
   * @exception IOException on disk problems
   */
  public void saveMetadata(HFile.Writer writer)
      throws IOException;

  /** @return the on-disk data block encoding */
  public DataBlockEncoding getEncodingOnDisk();

  /** @return the preferred in-cache data block encoding for normal reads */
  public DataBlockEncoding getEncodingInCache();

  /**
   * @return the effective in-cache data block encoding, taking into account
   *         whether we are doing a compaction.
   */
  public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction);

  /**
   * Creates an encoder-specific encoding context for writing. The encoding
   * context should also perform compression if compressionAlgorithm is valid.
   *
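   * <p>Illustrative sketch only (writer-side usage; the variables are assumptions,
   * not part of this interface):
   * <pre>
   *   HFileBlockEncodingContext ctx =
   *       encoder.newOnDiskDataBlockEncodingContext(compressionAlgorithm, headerBytes);
   *   encoder.beforeWriteToDisk(keyValueBuffer, includesMemstoreTS, ctx, BlockType.DATA);
   * </pre>
   *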
   * @param compressionAlgorithm compression algorithm
   * @param headerBytes header bytes
   * @return a new {@link HFileBlockEncodingContext} object
   */
  public HFileBlockEncodingContext newOnDiskDataBlockEncodingContext(
      Algorithm compressionAlgorithm, byte[] headerBytes);

  /**
   * Creates an encoder-specific decoding context for reading. The decoding
   * context should also perform decompression if compressionAlgorithm is valid.
   *
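   * <p>Illustrative sketch only (reader-side usage; the variables are assumptions,
   * not part of this interface):
   * <pre>
   *   HFileBlockDecodingContext decodingCtx =
   *       encoder.newOnDiskDataBlockDecodingContext(compressionAlgorithm);
   *   // the context is then handed to the block reader, which uses it to
   *   // decompress and decode blocks as they are read
   * </pre>
   *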
   * @param compressionAlgorithm compression algorithm
   * @return a new {@link HFileBlockDecodingContext} object
   */
  public HFileBlockDecodingContext newOnDiskDataBlockDecodingContext(
      Algorithm compressionAlgorithm);

}