/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.NavigableSet;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

/**
 * Interface for objects that hold a column family in a Region. It's a memstore and a set of zero
 * or more StoreFiles, which stretch backwards over time.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface Store extends HeapSize, StoreConfigInformation {

  /* The default priority for user-specified compaction requests.
   * The user gets top priority unless we have blocking compactions. (Pri <= 0)
   */
  public static final int PRIORITY_USER = 1;
  public static final int NO_PRIORITY = Integer.MIN_VALUE;

  // General Accessors
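  /**
   * @return the comparator used for ordering KeyValues in this store
   */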
  public KeyValue.KVComparator getComparator();

  /**
   * @return the store files currently held by this store
   */
  public Collection<StoreFile> getStorefiles();

  /**
   * Close all the readers. We don't need to worry about subsequent requests because the HRegion
   * holds a write lock that will prevent any more reads or writes.
   * @return the {@link StoreFile StoreFiles} that were previously being used.
   * @throws IOException on failure
   */
  public Collection<StoreFile> close() throws IOException;

  /**
   * Return a scanner for both the memstore and the HStore files. Assumes we are not in a
   * compaction.
   * @param scan Scan to apply when scanning the stores
   * @param targetCols columns to scan
   * @return a scanner over the current key values
   * @throws IOException on failure
   */
  public KeyValueScanner getScanner(Scan scan, final NavigableSet<byte[]> targetCols)
      throws IOException;

  /**
   * Get all scanners with no filtering based on TTL (that happens further down
   * the line).
   * @param cacheBlocks whether to cache the blocks that are read
   * @param isGet whether this is a point lookup (Get)
   * @param isCompaction whether the scanners are intended for a compaction
   * @param matcher the query matcher to apply, if any
   * @param startRow the first row to scan
   * @param stopRow the row at which to stop scanning
   * @return all scanners for this store
   * @throws IOException on failure
   */
  public List<KeyValueScanner> getScanners(boolean cacheBlocks,
      boolean isGet, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow,
      byte[] stopRow) throws IOException;

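  /**
   * @return the scan metadata (such as TTL and number of versions) configured for this store
   */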
  public ScanInfo getScanInfo();

  /**
   * Adds or replaces the specified KeyValues.
   * <p>
   * For each KeyValue specified, if a cell with the same row, family, and qualifier exists in
   * MemStore, it will be replaced. Otherwise, it will just be inserted into MemStore.
   * <p>
   * This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic
   * across all of them.
   * @param cells the cells to add or replace
   * @param readpoint readpoint below which we can safely remove duplicate KVs
   * @return memstore size delta
   * @throws IOException on failure
   */
  public long upsert(Iterable<? extends Cell> cells, long readpoint) throws IOException;

  /**
   * Adds a value to the memstore
   * @param kv the KeyValue to add
   * @return memstore size delta
   */
  public long add(KeyValue kv);

  /**
   * Removes a kv from the memstore. The KeyValue is removed only if its key and memstoreTS match
   * the key and memstoreTS of the kv parameter.
   * @param kv the KeyValue to remove
   */
  public void rollback(final KeyValue kv);

  /**
   * Find the key that matches <i>row</i> exactly, or the one that immediately precedes it.
   * WARNING: Only use this method on a table where writes occur with strictly increasing
   * timestamps. This method assumes this pattern of writes in order to make it reasonably
   * performant. Our search also depends on the axiom that deletes apply to cells in the
   * container that follows, whether a memstore snapshot or a storefile, not to the current
   * container: i.e. we will see deletes before we come across the cells they delete. The
   * presumption is that the memstore#kvset is processed before memstore#snapshot, and so on.
   * @param row The row key of the targeted row.
   * @return Found keyvalue or null if none found.
   * @throws IOException on failure
   */
  public KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException;

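  /**
   * @return the filesystem that this store's files live on
   */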
  public FileSystem getFileSystem();

  /**
   * @param maxKeyCount estimated maximum number of keys the new file will hold
   * @param compression Compression algorithm to use
   * @param isCompaction whether we are creating a new file in a compaction
   * @param includeMVCCReadpoint whether we should write out the MVCC readpoint
   * @return Writer for a new StoreFile in the tmp dir.
   * @throws IOException on failure
   */
  public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression,
      boolean isCompaction, boolean includeMVCCReadpoint) throws IOException;

  // Compaction oriented methods

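  /**
   * @param compactionSize total size of the files selected for compaction, in bytes
   * @return whether a compaction of this size should be throttled
   */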
  public boolean throttleCompaction(long compactionSize);

  /**
   * Getter for the CompactionProgress object.
   * @return CompactionProgress object; can be null
   */
  public CompactionProgress getCompactionProgress();

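  /**
   * Requests a compaction of this store.
   * @return a compaction context holding the selected files, or null if no compaction is needed
   * @throws IOException on failure
   */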
  public CompactionContext requestCompaction() throws IOException;

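  /**
   * Requests a compaction of this store at the given priority.
   * @param priority the priority of the request; user requests use PRIORITY_USER
   * @param baseRequest an existing compaction request to extend; may be null
   * @return a compaction context holding the selected files, or null if no compaction is needed
   * @throws IOException on failure
   */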
  public CompactionContext requestCompaction(int priority, CompactionRequest baseRequest)
      throws IOException;

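  /**
   * Called when a compaction obtained from requestCompaction will not be run, so that the files
   * it selected can be released.
   * @param compaction the compaction to cancel
   */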
  public void cancelRequestedCompaction(CompactionContext compaction);

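  /**
   * Runs the given compaction, previously obtained from requestCompaction.
   * @param compaction the compaction to run
   * @return the store files produced by the compaction
   * @throws IOException on failure
   */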
  public List<StoreFile> compact(CompactionContext compaction) throws IOException;

  /**
   * @return true if we should run a major compaction.
   */
  public boolean isMajorCompaction() throws IOException;

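  /**
   * Forces the next compaction of this store to be a major compaction.
   */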
  public void triggerMajorCompaction();

  /**
   * See if there are too many store files in this store
   * @return true if number of store files is greater than the number defined in minFilesToCompact
   */
  public boolean needsCompaction();

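  /**
   * @return the priority for compacting this store; lower values indicate greater urgency
   */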
  public int getCompactPriority();

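  /**
   * @param cacheFlushId sequence id associated with this cache flush
   * @return a StoreFlusher used to flush this store's memstore
   */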
  public StoreFlusher getStoreFlusher(long cacheFlushId);

  // Split oriented methods

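  /**
   * @return whether this store can be split, e.g. false while it still holds reference files
   */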
  public boolean canSplit();

  /**
   * Determines if Store should be split
   * @return the row key to split on if the store should be split, null otherwise.
   */
  public byte[] getSplitPoint();

  // Bulk Load methods

  /**
   * This throws a WrongRegionException if the HFile does not fit in this region, or an
   * InvalidHFileException if the HFile is not valid.
   * @param srcPath path of the HFile to validate
   * @throws IOException on failure
   */
  public void assertBulkLoadHFileOk(Path srcPath) throws IOException;

  /**
   * This method should only be called from HRegion. It is assumed that the ranges of values in
   * the HFile fit within the store's assigned region. (assertBulkLoadHFileOk checks this)
   * @param srcPathStr path of the HFile to load
   * @param sequenceId sequence Id associated with the HFile
   * @throws IOException on failure
   */
  public void bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException;

  // General accessors into the state of the store
  // TODO abstract some of this out into a metrics class

  /**
   * @return <tt>true</tt> if the store has any underlying reference files to older HFiles
   */
  public boolean hasReferences();

  /**
   * @return The size of this store's memstore, in bytes
   */
  public long getMemStoreSize();

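  /**
   * @return the column family descriptor for this store
   */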
  public HColumnDescriptor getFamily();

  /**
   * @return The maximum memstoreTS in all store files.
   */
  public long getMaxMemstoreTS();

  /**
   * @return the data block encoder
   */
  public HFileDataBlockEncoder getDataBlockEncoder();

  /** @return aggregate size of the store files used in the last compaction, in bytes */
  public long getLastCompactSize();

  /** @return aggregate size of this store, in bytes */
  public long getSize();

  /**
   * @return Count of store files
   */
  public int getStorefilesCount();

  /**
   * @return The size of the store files, in bytes, uncompressed.
   */
  public long getStoreSizeUncompressed();

  /**
   * @return The size of the store files, in bytes.
   */
  public long getStorefilesSize();

  /**
   * @return The size of the store file indexes, in bytes.
   */
  public long getStorefilesIndexSize();

  /**
   * Returns the total size of all index blocks in the data block indexes, including the root
   * level, intermediate levels, and the leaf level for multi-level indexes, or just the root
   * level for single-level indexes.
   * @return the total size of block indexes in the store
   */
  public long getTotalStaticIndexSize();

  /**
   * Returns the total byte size of all Bloom filter bit arrays. For compound Bloom filters even
   * the Bloom blocks currently not loaded into the block cache are counted.
   * @return the total size of all Bloom filters in the store
   */
  public long getTotalStaticBloomSize();

  // Test-helper methods

  /**
   * Used for tests.
   * @return cache configuration for this Store.
   */
  public CacheConfig getCacheConfig();

  /**
   * @return the parent region info hosting this store
   */
  public HRegionInfo getRegionInfo();

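  /**
   * @return the coprocessor host of the region that hosts this store
   */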
  public RegionCoprocessorHost getCoprocessorHost();

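  /**
   * @return whether writes to this store are currently enabled
   */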
  public boolean areWritesEnabled();

  /**
   * @return The smallest mvcc readPoint across all the scanners in this region. Writes older
   * than this readPoint are included in every read operation.
   */
  public long getSmallestReadPoint();

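  /**
   * @return the name of this store's column family
   */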
  public String getColumnFamilyName();

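  /**
   * @return the name of the table this store belongs to
   */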
  public String getTableName();

  /**
   * @param o Observer who wants to know about changes in set of Readers
   */
  public void addChangedReaderObserver(ChangedReadersObserver o);

  /**
   * @param o Observer no longer interested in changes in set of Readers.
   */
  public void deleteChangedReaderObserver(ChangedReadersObserver o);

  /**
   * @return Whether this store has too many store files.
   */
  public boolean hasTooManyStoreFiles();
}