/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.regionserver.wal;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.UUID;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.exceptions.FailedLogCloseException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.classification.InterfaceAudience;


/**
 * The Write Ahead Log (WAL) interface for a region server. Edits are appended
 * here before being applied to the memstore so that they can be replayed after
 * a crash.
 */
@InterfaceAudience.Private
public interface HLog {
  public static final Log LOG = LogFactory.getLog(HLog.class);

  public static final byte[] METAFAMILY = Bytes.toBytes("METAFAMILY");
  static final byte[] METAROW = Bytes.toBytes("METAROW");

  /** File Extension used while splitting an HLog into regions (HBASE-2312) */
  public static final String SPLITTING_EXT = "-splitting";
  public static final boolean SPLIT_SKIP_ERRORS_DEFAULT = false;
  /** The META region's HLog filename extension */
  public static final String META_HLOG_FILE_EXTN = ".meta";

  static final Pattern EDITFILES_NAME_PATTERN = Pattern.compile("-?[0-9]+");
  public static final String RECOVERED_LOG_TMPFILE_SUFFIX = ".temp";

  /** Reads entries back out of a written log file. */
  public interface Reader {
    void init(FileSystem fs, Path path, Configuration c) throws IOException;

    void close() throws IOException;

    Entry next() throws IOException;

    Entry next(Entry reuse) throws IOException;

    void seek(long pos) throws IOException;

    long getPosition() throws IOException;

    void reset() throws IOException;
  }

  /** Writes entries into a log file. */
  public interface Writer {
    void init(FileSystem fs, Path path, Configuration c) throws IOException;

    void close() throws IOException;

    void sync() throws IOException;

    void append(Entry entry) throws IOException;

    long getLength() throws IOException;
  }

  /**
   * Utility class that lets us keep track of the edit with its key. Only used
   * when splitting logs.
   */
  public static class Entry implements Writable {
    private WALEdit edit;
    private HLogKey key;

    public Entry() {
      edit = new WALEdit();
      key = new HLogKey();
    }

    /**
     * Constructor for both params
     *
     * @param key
     *          log's key
     * @param edit
     *          log's edit
     */
    public Entry(HLogKey key, WALEdit edit) {
      this.key = key;
      this.edit = edit;
    }

    /**
     * Gets the edit
     *
     * @return edit
     */
    public WALEdit getEdit() {
      return edit;
    }

    /**
     * Gets the key
     *
     * @return key
     */
    public HLogKey getKey() {
      return key;
    }

    /**
     * Set compression context for this entry.
     *
     * @param compressionContext
     *          Compression context
     */
    public void setCompressionContext(CompressionContext compressionContext) {
      edit.setCompressionContext(compressionContext);
      key.setCompressionContext(compressionContext);
    }

    @Override
    public String toString() {
      return this.key + "=" + this.edit;
    }

    @Override
    public void write(DataOutput dataOutput) throws IOException {
      this.key.write(dataOutput);
      this.edit.write(dataOutput);
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException {
      this.key.readFields(dataInput);
      this.edit.readFields(dataInput);
    }
  }

  /**
   * registers WALActionsListener
   *
   * @param listener
   */
  public void registerWALActionsListener(final WALActionsListener listener);

  /**
   * unregisters WALActionsListener
   *
   * @param listener
   */
  public boolean unregisterWALActionsListener(final WALActionsListener listener);

  /**
   * @return Current state of the monotonically increasing file id.
   */
  public long getFilenum();

  /**
   * Called by HRegionServer when it opens a new region to ensure that log
   * sequence numbers are always greater than the latest sequence number of the
   * region being brought on-line.
   *
   * @param newvalue
   *          We'll set log edit/sequence number to this value if it is greater
   *          than the current value.
   */
  public void setSequenceNumber(final long newvalue);

  /**
   * @return log sequence number
   */
  public long getSequenceNumber();

  /**
   * Roll the log writer. That is, start writing log messages to a new file.
   *
   * <p>
   * The implementation is synchronized in order to make sure there's one rollWriter
   * running at any given time.
   *
   * @return If lots of logs, flush the returned regions so next time through we
   *         can clean logs. Returns null if nothing to flush. Names are actual
   *         region names as returned by {@link HRegionInfo#getEncodedName()}
   * @throws org.apache.hadoop.hbase.exceptions.FailedLogCloseException
   * @throws IOException
   */
  public byte[][] rollWriter() throws FailedLogCloseException, IOException;

  /**
   * Roll the log writer. That is, start writing log messages to a new file.
   *
   * <p>
   * The implementation is synchronized in order to make sure there's one rollWriter
   * running at any given time.
   *
   * @param force
   *          If true, force creation of a new writer even if no entries have
   *          been written to the current writer
   * @return If lots of logs, flush the returned regions so next time through we
   *         can clean logs. Returns null if nothing to flush. Names are actual
   *         region names as returned by {@link HRegionInfo#getEncodedName()}
   * @throws org.apache.hadoop.hbase.exceptions.FailedLogCloseException
   * @throws IOException
   */
  public byte[][] rollWriter(boolean force) throws FailedLogCloseException,
      IOException;

  /**
   * Shut down the log.
   *
   * @throws IOException
   */
  public void close() throws IOException;

  /**
   * Shut down the log and delete the log directory
   *
   * @throws IOException
   */
  public void closeAndDelete() throws IOException;

  /**
   * Append an entry to the log.
   *
   * @param regionInfo
   * @param logKey
   * @param logEdit
   * @param htd
   * @param doSync
   *          shall we sync after writing the transaction
   * @return The txid of this transaction
   * @throws IOException
   */
  public long append(HRegionInfo regionInfo, HLogKey logKey, WALEdit logEdit,
      HTableDescriptor htd, boolean doSync) throws IOException;

  /**
   * Only used in tests.
   *
   * @param info
   * @param tableName
   * @param edits
   * @param now
   * @param htd
   * @throws IOException
   */
  public void append(HRegionInfo info, byte[] tableName, WALEdit edits,
      final long now, HTableDescriptor htd) throws IOException;

  /**
   * Append a set of edits to the log. Log edits are keyed by (encoded)
   * regionName, rowname, and log-sequence-id. The HLog is not flushed after
   * this transaction is written to the log.
   *
   * @param info
   * @param tableName
   * @param edits
   * @param clusterId
   *          The originating clusterId for this edit (for replication)
   * @param now
   * @return txid of this transaction
   * @throws IOException
   */
  public long appendNoSync(HRegionInfo info, byte[] tableName, WALEdit edits,
      UUID clusterId, final long now, HTableDescriptor htd) throws IOException;

  /**
   * Append a set of edits to the log. Log edits are keyed by (encoded)
   * regionName, rowname, and log-sequence-id. The HLog is flushed after this
   * transaction is written to the log.
   *
   * @param info
   * @param tableName
   * @param edits
   * @param clusterId
   *          The originating clusterId for this edit (for replication)
   * @param now
   * @param htd
   * @return txid of this transaction
   * @throws IOException
   */
  public long append(HRegionInfo info, byte[] tableName, WALEdit edits,
      UUID clusterId, final long now, HTableDescriptor htd) throws IOException;

  public void hsync() throws IOException;

  public void hflush() throws IOException;

  public void sync() throws IOException;

  public void sync(long txid) throws IOException;

  /**
   * Obtain a log sequence number.
   */
  public long obtainSeqNum();

  /**
   * WAL keeps track of the sequence numbers that were not yet flushed from memstores
   * in order to be able to do cleanup. This method tells WAL that some region is about
   * to flush memstore.
   *
   * We stash the oldest seqNum for the region, and let the next edit inserted in this
   * region be recorded in {@link #append(HRegionInfo, byte[], WALEdit, long, HTableDescriptor)}
   * as new oldest seqnum. In case of flush being aborted, we put the stashed value back;
   * in case of flush succeeding, the seqNum of that first edit after start becomes the
   * valid oldest seqNum for this region.
   *
   * @return current seqNum, to pass on to flushers (who will put it into the metadata of
   *         the resulting file as an upper-bound seqNum for that file), or NULL if flush
   *         should not be started.
   */
  public Long startCacheFlush(final byte[] encodedRegionName);

  /**
   * Complete the cache flush.
   * @param encodedRegionName Encoded region name.
   */
  public void completeCacheFlush(final byte[] encodedRegionName);

  /**
   * Abort a cache flush. Call if the flush fails. Note that the only recovery
   * for an aborted flush currently is a restart of the regionserver so the
   * snapshot content dropped by the failure gets restored to the memstore.
   * @param encodedRegionName Encoded region name.
   */
  public void abortCacheFlush(byte[] encodedRegionName);

  /**
   * @return Coprocessor host.
   */
  public WALCoprocessorHost getCoprocessorHost();

  /**
   * Get LowReplication-Roller status
   *
   * @return lowReplicationRollEnabled
   */
  public boolean isLowReplicationRollEnabled();

  /** Gets the earliest sequence number in the memstore for this particular region.
   * This can serve as best-effort "recent" WAL number for this region.
   * @param encodedRegionName The region to get the number for.
   * @return The number if present, HConstants.NO_SEQNUM if absent.
   */
  public long getEarliestMemstoreSeqNum(byte[] encodedRegionName);
}