/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.NavigableSet;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController;

/**
 * Interface for objects that hold a column family in a Region. It is a memstore and a set of zero
 * or more StoreFiles, which stretch backwards over time.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface Store extends HeapSize, StoreConfigInformation {

  /*
   * The default priority for user-specified compaction requests.
   * The user gets top priority unless we have blocking compactions. (Pri <= 0)
   */
  int PRIORITY_USER = 1;
  int NO_PRIORITY = Integer.MIN_VALUE;

  // General Accessors
  KeyValue.KVComparator getComparator();

  Collection<StoreFile> getStorefiles();

  /**
   * Close all the readers. We don't need to worry about subsequent requests because the HRegion
   * holds a write lock that will prevent any more reads or writes.
   * @return the {@link StoreFile StoreFiles} that were previously being used.
   * @throws IOException on failure
   */
  Collection<StoreFile> close() throws IOException;

  /**
   * Return a scanner for both the memstore and the HStore files. Assumes we are not in a
   * compaction.
   * @param scan Scan to apply when scanning the stores
   * @param targetCols columns to scan
   * @param readPt the MVCC read point to use for the scan
   * @return a scanner over the current key values
   * @throws IOException on failure
   */
  KeyValueScanner getScanner(Scan scan, final NavigableSet<byte[]> targetCols, long readPt)
      throws IOException;
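  /*
   * A minimal usage sketch for getScanner, not part of the interface contract. The "store",
   * "family", and "readPt" names below are hypothetical, and it is an assumption for
   * illustration that the returned scanner is already positioned at the start of the Scan:
   *
   *   Scan scan = new Scan();
   *   scan.addFamily(family);                        // restrict to this store's family
   *   NavigableSet<byte[]> cols = null;              // null means all columns
   *   KeyValueScanner scanner = store.getScanner(scan, cols, readPt);
   *   try {
   *     for (Cell cell = scanner.next(); cell != null; cell = scanner.next()) {
   *       // consume cells merged from the memstore and the store files
   *     }
   *   } finally {
   *     scanner.close();
   *   }
   */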

  /**
   * Get all scanners with no filtering based on TTL (that happens further down the line).
   * @param cacheBlocks whether to cache the blocks that are read
   * @param isGet whether this is a get rather than a scan
   * @param usePread whether to use pread rather than streaming reads
   * @param isCompaction whether the scanners are for a compaction
   * @param matcher the query matcher, if any
   * @param startRow the start row
   * @param stopRow the stop row
   * @param readPt the MVCC read point to use
   * @return all scanners for this store
   */
  List<KeyValueScanner> getScanners(
    boolean cacheBlocks,
    boolean isGet,
    boolean usePread,
    boolean isCompaction,
    ScanQueryMatcher matcher,
    byte[] startRow,
    byte[] stopRow,
    long readPt
  ) throws IOException;

  ScanInfo getScanInfo();

  /**
   * Adds or replaces the specified KeyValues.
   * <p>
   * For each KeyValue specified, if a cell with the same row, family, and qualifier exists in
   * MemStore, it will be replaced. Otherwise, it will just be inserted into MemStore.
   * <p>
   * This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic
   * across all of them.
   * @param cells the cells to add or replace
   * @param readpoint readpoint below which we can safely remove duplicate KVs
   * @return memstore size delta
   * @throws IOException on failure
   */
  long upsert(Iterable<Cell> cells, long readpoint) throws IOException;

  /**
   * Adds a value to the memstore.
   * @param kv the KeyValue to add
   * @return memstore size delta
   */
  long add(KeyValue kv);

  /**
   * When was the last edit done in the memstore?
   */
  long timeOfOldestEdit();

  /**
   * Removes a kv from the memstore. The KeyValue is removed only if its key and memstoreTS match
   * the key and memstoreTS value of the kv parameter.
   * @param kv the KeyValue to roll back
   */
  void rollback(final KeyValue kv);

  /**
   * Find the key that matches <i>row</i> exactly, or the one that immediately precedes it.
   * WARNING: Only use this method on a table where writes occur with strictly increasing
   * timestamps. This method assumes this pattern of writes in order to make it reasonably
   * performant. Our search also depends on the axiom that deletes are for cells in the container
   * that follows, whether a memstore snapshot or a storefile, not for the current container;
   * i.e. we'll see deletes before we come across cells we are to delete. The presumption is that
   * the memstore#kvset is processed before memstore#snapshot and so on.
   * @param row The row key of the targeted row.
   * @return Found keyvalue or null if none found.
   * @throws IOException on failure
   */
  KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException;

  FileSystem getFileSystem();

  /*
   * @param maxKeyCount estimated maximum number of keys to be written
   * @param compression Compression algorithm to use
   * @param isCompaction whether we are creating a new file in a compaction
   * @param includeMVCCReadpoint whether we should write out the MVCC readpoint
   * @param includesTags whether the new file will contain tags
   * @return Writer for a new StoreFile in the tmp dir.
   */
  StoreFile.Writer createWriterInTmp(
    long maxKeyCount,
    Compression.Algorithm compression,
    boolean isCompaction,
    boolean includeMVCCReadpoint,
    boolean includesTags
  ) throws IOException;
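  /*
   * A minimal sketch of how a flush might use createWriterInTmp. The "store", "cellCount", and
   * "cells" names are hypothetical, and the real flush and compaction paths do considerably
   * more bookkeeping than this:
   *
   *   StoreFile.Writer writer = store.createWriterInTmp(
   *       cellCount,                        // estimated number of keys to write
   *       Compression.Algorithm.NONE,       // or the family's configured algorithm
   *       false,                            // not a compaction
   *       true,                             // write out the MVCC readpoint
   *       false);                           // no tags
   *   try {
   *     for (KeyValue kv : cells) {
   *       writer.append(kv);                // cells must be appended in sorted order
   *     }
   *   } finally {
   *     writer.close();
   *   }
   *   // The file is then moved out of the tmp dir and made live by the store/region.
   */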

  // Compaction oriented methods

  boolean throttleCompaction(long compactionSize);

  /**
   * Getter for the CompactionProgress object.
   * @return CompactionProgress object; can be null
   */
  CompactionProgress getCompactionProgress();

  CompactionContext requestCompaction() throws IOException;

  CompactionContext requestCompaction(int priority, CompactionRequest baseRequest)
      throws IOException;

  void cancelRequestedCompaction(CompactionContext compaction);

  List<StoreFile> compact(CompactionContext compaction,
      CompactionThroughputController throughputController) throws IOException;

  /**
   * @return true if we should run a major compaction.
   */
  boolean isMajorCompaction() throws IOException;

  void triggerMajorCompaction();

  /**
   * See if there are too many store files in this store.
   * @return true if the number of store files is greater than the number defined in
   *         minFilesToCompact
   */
  boolean needsCompaction();

  int getCompactPriority();

  StoreFlushContext createFlushContext(long cacheFlushId);

  /**
   * Call to complete a compaction. It is for the case where we find in the WAL a compaction that
   * was not finished. We could find one when recovering a WAL after a regionserver crash. See
   * HBASE-2331.
   * @param compaction the descriptor of the compaction to complete
   */
  void completeCompactionMarker(CompactionDescriptor compaction)
      throws IOException;
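  /*
   * A minimal sketch of the request/execute compaction cycle using the methods above. This is a
   * sketch under the assumption that the caller drives the whole cycle itself; the "store" and
   * "throughputController" names are hypothetical:
   *
   *   CompactionContext compaction = store.requestCompaction();
   *   if (compaction != null) {             // null when no files were selected
   *     try {
   *       List<StoreFile> newFiles = store.compact(compaction, throughputController);
   *       // newFiles replace the compacted-away store files
   *     } catch (IOException e) {
   *       store.cancelRequestedCompaction(compaction);  // release the selected files
   *       throw e;
   *     }
   *   }
   */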

  // Split oriented methods

  boolean canSplit();

  /**
   * Determines if the Store should be split.
   * @return byte[] if the store should be split, null otherwise.
   */
  byte[] getSplitPoint();

  // Bulk Load methods

  /**
   * This throws a WrongRegionException if the HFile does not fit in this region, or an
   * InvalidHFileException if the HFile is not valid.
   */
  void assertBulkLoadHFileOk(Path srcPath) throws IOException;

  /**
   * This method should only be called from HRegion. It is assumed that the ranges of values in
   * the HFile fit within the store's assigned region. (assertBulkLoadHFileOk checks this)
   *
   * @param srcPathStr path of the HFile to load
   * @param sequenceId sequence Id associated with the HFile
   */
  void bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException;

  // General accessors into the state of the store
  // TODO abstract some of this out into a metrics class

  /**
   * @return <tt>true</tt> if the store has any underlying reference files to older HFiles
   */
  boolean hasReferences();

  /**
   * @return The size of this store's memstore, in bytes
   */
  long getMemStoreSize();

  /**
   * @return The amount of memory we could flush from this memstore; usually this is equal to
   * {@link #getMemStoreSize()} unless we are carrying snapshots, and then it will be the size of
   * outstanding snapshots.
   */
  long getFlushableSize();

  HColumnDescriptor getFamily();

  /**
   * @return The maximum memstoreTS in all store files.
   */
  long getMaxMemstoreTS();

  /**
   * @return the data block encoder
   */
  HFileDataBlockEncoder getDataBlockEncoder();

  /** @return aggregate size of all HStores used in the last compaction */
  long getLastCompactSize();

  /** @return aggregate size of HStore */
  long getSize();

  /**
   * @return Count of store files
   */
  int getStorefilesCount();

  /**
   * @return The size of the store files, in bytes, uncompressed.
   */
  long getStoreSizeUncompressed();

  /**
   * @return The size of the store files, in bytes.
   */
  long getStorefilesSize();

  /**
   * @return The size of the store file indexes, in bytes.
   */
  long getStorefilesIndexSize();

  /**
   * Returns the total size of all index blocks in the data block indexes, including the root
   * level, intermediate levels, and the leaf level for multi-level indexes, or just the root
   * level for single-level indexes.
   * @return the total size of block indexes in the store
   */
  long getTotalStaticIndexSize();

  /**
   * Returns the total byte size of all Bloom filter bit arrays. For compound Bloom filters, even
   * the Bloom blocks currently not loaded into the block cache are counted.
   * @return the total size of all Bloom filters in the store
   */
  long getTotalStaticBloomSize();

  // Test-helper methods

  /**
   * Used for tests.
   * @return cache configuration for this Store.
   */
  CacheConfig getCacheConfig();

  /**
   * @return the parent region info hosting this store
   */
  HRegionInfo getRegionInfo();

  RegionCoprocessorHost getCoprocessorHost();

  boolean areWritesEnabled();

  /**
   * @return The smallest mvcc readPoint across all the scanners in this region. Writes older
   * than this readPoint are included in every read operation.
   */
  long getSmallestReadPoint();

  String getColumnFamilyName();

  TableName getTableName();

  /**
   * @return The number of cells flushed to disk
   */
  long getFlushedCellsCount();

  /**
   * @return The total size of data flushed to disk, in bytes
   */
  long getFlushedCellsSize();

  /**
   * @return The number of cells processed during minor compactions
   */
  long getCompactedCellsCount();

  /**
   * @return The total amount of data processed during minor compactions, in bytes
   */
  long getCompactedCellsSize();

  /**
   * @return The number of cells processed during major compactions
   */
  long getMajorCompactedCellsCount();

  /**
   * @return The total amount of data processed during major compactions, in bytes
   */
  long getMajorCompactedCellsSize();

  /*
   * @param o Observer who wants to know about changes in the set of Readers
   */
  void addChangedReaderObserver(ChangedReadersObserver o);

  /*
   * @param o Observer no longer interested in changes in the set of Readers.
   */
  void deleteChangedReaderObserver(ChangedReadersObserver o);

  /**
   * @return Whether this store has too many store files.
   */
  boolean hasTooManyStoreFiles();
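  /*
   * A worked example of the pressure formula documented on getCompactionPressure() below. The
   * numbers are hypothetical: with minFilesToCompact = 3 and blockingFileCount = 10, a store
   * with 7 files has pressure (7 - 3) / (10 - 3) = 4 / 7, roughly 0.57. At 3 or fewer files the
   * pressure is 0.0, and at 10 or more files it reaches or exceeds 1.0, i.e. too many store
   * files.
   */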

  /**
   * This value can represent the degree of emergency of compaction for this store. It should be
   * greater than or equal to 0.0, and any value greater than 1.0 means we have too many store
   * files.
   * <ul>
   * <li>if getStorefilesCount &lt;= getMinFilesToCompact, return 0.0</li>
   * <li>return (getStorefilesCount - getMinFilesToCompact) / (blockingFileCount -
   * getMinFilesToCompact)</li>
   * </ul>
   * <p>
   * For striped stores, we should calculate this value by the files in each stripe separately
   * and return the maximum value.
   * <p>
   * It is similar to {@link #getCompactPriority()} except that it is more suitable to use in a
   * linear formula.
   */
  double getCompactionPressure();
}