1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.regionserver.wal;
20  
21  import java.io.FileNotFoundException;
22  import java.io.IOException;
23  import java.io.OutputStream;
24  import java.lang.reflect.InvocationTargetException;
25  import java.lang.reflect.Method;
26  import java.net.URLEncoder;
27  import java.util.ArrayList;
28  import java.util.Arrays;
29  import java.util.Collections;
30  import java.util.LinkedList;
31  import java.util.List;
32  import java.util.Map;
33  import java.util.SortedMap;
34  import java.util.TreeMap;
35  import java.util.TreeSet;
36  import java.util.UUID;
37  import java.util.concurrent.ConcurrentSkipListMap;
38  import java.util.concurrent.CopyOnWriteArrayList;
39  import java.util.concurrent.atomic.AtomicBoolean;
40  import java.util.concurrent.atomic.AtomicInteger;
41  import java.util.concurrent.atomic.AtomicLong;
42  
43  import org.apache.commons.logging.Log;
44  import org.apache.commons.logging.LogFactory;
45  import org.apache.hadoop.classification.InterfaceAudience;
46  import org.apache.hadoop.conf.Configuration;
47  import org.apache.hadoop.fs.FSDataOutputStream;
48  import org.apache.hadoop.fs.FileStatus;
49  import org.apache.hadoop.fs.FileSystem;
50  import org.apache.hadoop.fs.Path;
51  import org.apache.hadoop.fs.Syncable;
52  import org.apache.hadoop.hbase.HBaseConfiguration;
53  import org.apache.hadoop.hbase.HConstants;
54  import org.apache.hadoop.hbase.HRegionInfo;
55  import org.apache.hadoop.hbase.HTableDescriptor;
56  import org.apache.hadoop.hbase.KeyValue;
57  import org.apache.hadoop.hbase.TableName;
58  import org.apache.hadoop.hbase.util.Bytes;
59  import org.apache.hadoop.hbase.util.ClassSize;
60  import org.apache.hadoop.hbase.util.DrainBarrier;
61  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
62  import org.apache.hadoop.hbase.util.FSUtils;
63  import org.apache.hadoop.hbase.util.HasThread;
64  import org.apache.hadoop.hbase.util.Threads;
65  import org.apache.hadoop.util.StringUtils;
66  import org.cloudera.htrace.Trace;
67  import org.cloudera.htrace.TraceScope;
68  
69  /**
70   * HLog stores all the edits to the HStore.  It's the hbase write-ahead-log
71   * implementation.
72   *
73   * It performs logfile-rolling, so external callers are not aware that the
74   * underlying file is being rolled.
75   *
76   * <p>
77   * There is one HLog per RegionServer.  All edits for all Regions carried by
78   * a particular RegionServer are entered first in the HLog.
79   *
80   * <p>
81   * Each HRegion is identified by a unique long integer. HRegions do
82   * not need to declare themselves before using the HLog; they simply include
83   * their HRegion-id in the <code>append</code> or
84   * <code>completeCacheFlush</code> calls.
85   *
86   * <p>
87   * An HLog consists of multiple on-disk files, which have a chronological order.
88   * As data is flushed to other (better) on-disk structures, the log becomes
89   * obsolete. We can destroy all the log messages for a given HRegion-id up to
90   * the most-recent CACHEFLUSH message from that HRegion.
91   *
92   * <p>
93   * It's only practical to delete entire files. Thus, we delete an entire on-disk
94   * file F when all of the messages in F have a log-sequence-id that's older
95   * (smaller) than the most-recent CACHEFLUSH message for every HRegion that has
96   * a message in F.
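     * For example, if a log file F only holds edits with sequence ids up to 150 for
     * regions A and B, then F can be archived once both A and B have flushed their
     * memstores past sequence id 150.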
97   *
98   * <p>
99   * Synchronized methods can never execute in parallel. However, between the
100  * start of a cache flush and the completion point, appends are allowed but log
101  * rolling is not. To prevent log rolling taking place during this period, a
102  * separate reentrant lock is used.
103  *
104  * <p>To read an HLog, call {@link HLogFactory#createReader(org.apache.hadoop.fs.FileSystem,
105  * org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration)}.
106  *
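     * <p>A minimal read sketch building on that factory call (the path and
     * variable names here are illustrative):
     * <pre>
     * HLog.Reader reader = HLogFactory.createReader(fs, hlogPath, conf);
     * try {
     *   HLog.Entry entry;
     *   while ((entry = reader.next()) != null) {
     *     // entry.getKey() holds region, table and sequence id;
     *     // entry.getEdit() holds the KeyValues
     *   }
     * } finally {
     *   reader.close();
     * }
     * </pre>
     *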
107  */
108 @InterfaceAudience.Private
109 class FSHLog implements HLog, Syncable {
110   static final Log LOG = LogFactory.getLog(FSHLog.class);
111 
112   private final FileSystem fs;
113   private final Path rootDir;
114   private final Path dir;
115   private final Configuration conf;
116   // Listeners that are called on WAL events.
117   private List<WALActionsListener> listeners =
118     new CopyOnWriteArrayList<WALActionsListener>();
119   private final long optionalFlushInterval;
120   private final long blocksize;
121   private final String prefix;
122   private final AtomicLong unflushedEntries = new AtomicLong(0);
123   private volatile long syncedTillHere = 0;
124   private long lastDeferredTxid;
125   private final Path oldLogDir;
126   private volatile boolean logRollRunning;
127 
128   private WALCoprocessorHost coprocessorHost;
129 
130   private FSDataOutputStream hdfs_out; // FSDataOutputStream associated with the current writer
131   // Minimum tolerable replica count; if the actual count falls below this,
132   // rollWriter will be triggered
133   private int minTolerableReplication;
134   private Method getNumCurrentReplicas; // refers to DFSOutputStream.getNumCurrentReplicas
135   final static Object [] NO_ARGS = new Object []{};
136 
137   /** The barrier used to ensure that close() waits for all log rolls and flushes to finish. */
138   private DrainBarrier closeBarrier = new DrainBarrier();
139 
140   /**
141    * Current log file.
142    */
143   Writer writer;
144 
145   /**
146    * Map of all log files but the current one.
147    */
148   final SortedMap<Long, Path> outputfiles =
149     Collections.synchronizedSortedMap(new TreeMap<Long, Path>());
150 
151 
152   /**
153    * This lock synchronizes all operations on oldestUnflushedSeqNums and oldestFlushingSeqNums,
154    * with the exception of append's putIfAbsent into oldestUnflushedSeqNums.
155    * We only use these to find the lower-bound seqNum, or to find regions with old seqNums to
156    * force flush, so a slightly stale value is acceptable. */
157   private final Object oldestSeqNumsLock = new Object();
158 
159   /**
160    * This lock makes sure only one log roll runs at the same time. Should not be taken while
161    * any other lock is held. We don't just use synchronized because that results in bogus and
162    * tedious findbugs warnings when it thinks synchronized controls writer thread safety. */
163   private final Object rollWriterLock = new Object();
164 
165   /**
166    * Map of encoded region names to the sequence id of the oldest edit still in their memstore.
167    */
168   private final ConcurrentSkipListMap<byte [], Long> oldestUnflushedSeqNums =
169     new ConcurrentSkipListMap<byte [], Long>(Bytes.BYTES_COMPARATOR);
170   /**
171    * Map of encoded region names to the sequence id of the oldest edit in their memstore;
172    * contains the regions that are currently flushing. That way we can store two numbers: one for
173    * the flushing and one for the non-flushing (oldestUnflushedSeqNums) memstore of the same region.
174    */
175   private final Map<byte[], Long> oldestFlushingSeqNums =
176     new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
177 
178   private volatile boolean closed = false;
179 
180   private final AtomicLong logSeqNum = new AtomicLong(0);
181 
182   private boolean forMeta = false;
183 
184   // The timestamp (in ms) when the log file was created.
185   private volatile long filenum = -1;
186 
187   // Number of transactions in the current HLog.
188   private final AtomicInteger numEntries = new AtomicInteger(0);
189 
190   // If the live datanode count is lower than the default replica count,
191   // rollWriter will be triggered on each sync (so rolls would be requested
192   // repeatedly within a short time). This counter is a workaround to slow
193   // down the roll frequency triggered by checkLowReplication().
194   private AtomicInteger consecutiveLogRolls = new AtomicInteger(0);
195   private final int lowReplicationRollLimit;
196 
197   // If consecutiveLogRolls is larger than lowReplicationRollLimit,
198   // then disable the rolling in checkLowReplication().
199   // Enable it if the replications recover.
200   private volatile boolean lowReplicationRollEnabled = true;
201 
202   // If the log grows larger than this size, roll it. This is typically 0.95
203   // times the size of the default HDFS block size.
204   private final long logrollsize;
205 
206   /** size of current log */
207   private long curLogSize = 0;
208 
209   /**
210    * The total size of the rolled hlog files
211    */
212   private AtomicLong totalLogSize = new AtomicLong(0);
213 
214   // We synchronize on updateLock to prevent updates and to prevent a log roll
215   // during an update
216   // locked during appends
217   private final Object updateLock = new Object();
218   private final Object flushLock = new Object();
219 
220   private final boolean enabled;
221 
222   /*
223    * If there are more than this many logs, force a flush of the oldest region so
224    * its oldest edit goes to disk.  If we keep too many and then crash, replay will
225    * take forever.  Keep the number of logs tidy.
226    */
227   private final int maxLogs;
228 
229   /**
230    * Thread that handles optional sync'ing
231    */
232   private final LogSyncer logSyncer;
233 
234   /** Number of log close errors tolerated before we abort */
235   private final int closeErrorsTolerated;
236 
237   private final AtomicInteger closeErrorCount = new AtomicInteger();
238   private final MetricsWAL metrics;
239 
240   /**
241    * Constructor.
242    *
243    * @param fs filesystem handle
244    * @param root path for stored and archived hlogs
245    * @param logDir dir where hlogs are stored
246    * @param conf configuration to use
247    * @throws IOException
248    */
249   public FSHLog(final FileSystem fs, final Path root, final String logDir,
250                 final Configuration conf)
251   throws IOException {
252     this(fs, root, logDir, HConstants.HREGION_OLDLOGDIR_NAME,
253         conf, null, true, null, false);
254   }
255 
256   /**
257    * Constructor.
258    *
259    * @param fs filesystem handle
260    * @param root path for stored and archived hlogs
261    * @param logDir dir where hlogs are stored
262    * @param oldLogDir dir where hlogs are archived
263    * @param conf configuration to use
264    * @throws IOException
265    */
266   public FSHLog(final FileSystem fs, final Path root, final String logDir,
267                 final String oldLogDir, final Configuration conf)
268   throws IOException {
269     this(fs, root, logDir, oldLogDir,
270         conf, null, true, null, false);
271   }
272 
273   /**
274    * Create an edit log at the given <code>dir</code> location.
275    *
276    * You should never have to load an existing log. If there is a log at
277    * startup, it should have already been processed and deleted by the time the
278    * HLog object is started up.
279    *
280    * @param fs filesystem handle
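       * <p>A minimal construction sketch (the log directory name and prefix here are
       * illustrative values, not requirements):
       * <pre>
       * FileSystem fs = FileSystem.get(conf);
       * Path rootDir = FSUtils.getRootDir(conf);
       * HLog hlog = new FSHLog(fs, rootDir, "logs-for-serverX", conf, null, "serverX,60020");
       * </pre>
       *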
281    * @param root path for stored and archived hlogs
282    * @param logDir dir where hlogs are stored
283    * @param conf configuration to use
284    * @param listeners Listeners on WAL events. Listeners passed here will
285    * be registered before we do anything else; e.g. before the
286    * constructor calls {@link #rollWriter()}.
287    * @param prefix should always be hostname and port in distributed env and
288    *        it will be URL encoded before being used.
289    *        If prefix is null, "hlog" will be used
290    * @throws IOException
291    */
292   public FSHLog(final FileSystem fs, final Path root, final String logDir,
293       final Configuration conf, final List<WALActionsListener> listeners,
294       final String prefix) throws IOException {
295     this(fs, root, logDir, HConstants.HREGION_OLDLOGDIR_NAME,
296         conf, listeners, true, prefix, false);
297   }
298 
299   /**
300    * Create an edit log at the given <code>dir</code> location.
301    *
302    * You should never have to load an existing log. If there is a log at
303    * startup, it should have already been processed and deleted by the time the
304    * HLog object is started up.
305    *
306    * @param fs filesystem handle
307    * @param root path for stored and archived hlogs
308    * @param logDir dir where hlogs are stored
309    * @param oldLogDir dir where hlogs are archived
310    * @param conf configuration to use
311    * @param listeners Listeners on WAL events. Listeners passed here will
312    * be registered before we do anything else; e.g. before the
313    * constructor calls {@link #rollWriter()}.
314    * @param failIfLogDirExists If true IOException will be thrown if dir already exists.
315    * @param prefix should always be hostname and port in distributed env and
316    *        it will be URL encoded before being used.
317    *        If prefix is null, "hlog" will be used
318    * @param forMeta if this hlog is meant for meta updates
319    * @throws IOException
320    */
321   public FSHLog(final FileSystem fs, final Path root, final String logDir,
322       final String oldLogDir, final Configuration conf,
323       final List<WALActionsListener> listeners,
324       final boolean failIfLogDirExists, final String prefix, boolean forMeta)
325   throws IOException {
326     super();
327     this.fs = fs;
328     this.rootDir = root;
329     this.dir = new Path(this.rootDir, logDir);
330     this.oldLogDir = new Path(this.rootDir, oldLogDir);
331     this.forMeta = forMeta;
332     this.conf = conf;
333 
334     if (listeners != null) {
335       for (WALActionsListener i: listeners) {
336         registerWALActionsListener(i);
337       }
338     }
339 
340     this.blocksize = this.conf.getLong("hbase.regionserver.hlog.blocksize",
341         FSUtils.getDefaultBlockSize(this.fs, this.dir));
342     // Roll at 95% of block size.
343     float multi = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.95f);
344     this.logrollsize = (long)(this.blocksize * multi);
345     this.optionalFlushInterval =
346       conf.getLong("hbase.regionserver.optionallogflushinterval", 1 * 1000);
347 
348     this.maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
349     this.minTolerableReplication = conf.getInt(
350         "hbase.regionserver.hlog.tolerable.lowreplication",
351         FSUtils.getDefaultReplication(fs, this.dir));
352     this.lowReplicationRollLimit = conf.getInt(
353         "hbase.regionserver.hlog.lowreplication.rolllimit", 5);
354     this.enabled = conf.getBoolean("hbase.regionserver.hlog.enabled", true);
355     this.closeErrorsTolerated = conf.getInt(
356         "hbase.regionserver.logroll.errors.tolerated", 0);
357 
358     this.logSyncer = new LogSyncer(this.optionalFlushInterval);
359 
360     LOG.info("WAL/HLog configuration: blocksize=" +
361       StringUtils.byteDesc(this.blocksize) +
362       ", rollsize=" + StringUtils.byteDesc(this.logrollsize) +
363       ", enabled=" + this.enabled +
364       ", optionallogflushinterval=" + this.optionalFlushInterval + "ms");
365     // If prefix is null||empty then just name it hlog
366     this.prefix = prefix == null || prefix.isEmpty() ?
367         "hlog" : URLEncoder.encode(prefix, "UTF8");
368 
369     boolean dirExists = false;
370     if (failIfLogDirExists && (dirExists = this.fs.exists(dir))) {
371       throw new IOException("Target HLog directory already exists: " + dir);
372     }
373     if (!dirExists && !fs.mkdirs(dir)) {
374       throw new IOException("Unable to mkdir " + dir);
375     }
376 
377     if (!fs.exists(this.oldLogDir)) {
378       if (!fs.mkdirs(this.oldLogDir)) {
379         throw new IOException("Unable to mkdir " + this.oldLogDir);
380       }
381     }
382     // rollWriter sets this.hdfs_out if it can.
383     rollWriter();
384 
385     // handle the reflection necessary to call getNumCurrentReplicas()
386     this.getNumCurrentReplicas = getGetNumCurrentReplicas(this.hdfs_out);
387 
388     // When optionalFlushInterval is set as 0, don't start a thread for deferred log sync.
389     if (this.optionalFlushInterval > 0) {
390       Threads.setDaemonThreadRunning(logSyncer.getThread(), Thread.currentThread().getName()
391           + ".logSyncer");
392     } else {
393       LOG.info("hbase.regionserver.optionallogflushinterval is set as "
394           + this.optionalFlushInterval + ". Deferred log syncing won't work. "
395           + "Any Mutation, marked to be deferred synced, will be flushed immediately.");
396     }
397     coprocessorHost = new WALCoprocessorHost(this, conf);
398 
399     this.metrics = new MetricsWAL();
400   }
401 
402   /**
403    * Find the 'getNumCurrentReplicas' method on the passed <code>os</code> stream.
404    * @return Method or null.
405    */
406   private Method getGetNumCurrentReplicas(final FSDataOutputStream os) {
407     Method m = null;
408     if (os != null) {
409       Class<? extends OutputStream> wrappedStreamClass = os.getWrappedStream()
410           .getClass();
411       try {
412         m = wrappedStreamClass.getDeclaredMethod("getNumCurrentReplicas",
413             new Class<?>[] {});
414         m.setAccessible(true);
415       } catch (NoSuchMethodException e) {
416         LOG.info("FileSystem's output stream doesn't support"
417             + " getNumCurrentReplicas; --HDFS-826 not available; fsOut="
418             + wrappedStreamClass.getName());
419       } catch (SecurityException e) {
420         LOG.info("Doesn't have access to getNumCurrentReplicas on "
421             + "FileSystem's output stream --HDFS-826 not available; fsOut="
422             + wrappedStreamClass.getName(), e);
423         m = null; // could happen on setAccessible()
424       }
425     }
426     if (m != null) {
427       if (LOG.isTraceEnabled()) LOG.trace("Using getNumCurrentReplicas--HDFS-826");
428     }
429     return m;
430   }
431 
432   @Override
433   public void registerWALActionsListener(final WALActionsListener listener) {
434     this.listeners.add(listener);
435   }
436 
437   @Override
438   public boolean unregisterWALActionsListener(final WALActionsListener listener) {
439     return this.listeners.remove(listener);
440   }
441 
442   @Override
443   public long getFilenum() {
444     return this.filenum;
445   }
446 
447   @Override
448   public void setSequenceNumber(final long newvalue) {
449     for (long id = this.logSeqNum.get(); id < newvalue &&
450         !this.logSeqNum.compareAndSet(id, newvalue); id = this.logSeqNum.get()) {
451       // This could spin on occasion but better the occasional spin than locking
452       // every increment of sequence number.
453       LOG.debug("Changed sequenceid from " + id + " to " + newvalue);
454     }
455   }
456 
457   @Override
458   public long getSequenceNumber() {
459     return logSeqNum.get();
460   }
461 
462   /**
463    * Method used internally to this class and for tests only.
464    * @return The wrapped stream our writer is using; it's not the
465    * writer's 'out' FSDataOutputStream but the stream that this 'out' wraps
466    * (In HDFS it's an instance of DFSDataOutputStream).
467    *
468    * usage: see TestLogRolling.java
469    */
470   OutputStream getOutputStream() {
471     return this.hdfs_out.getWrappedStream();
472   }
473 
474   @Override
475   public byte [][] rollWriter() throws FailedLogCloseException, IOException {
476     return rollWriter(false);
477   }
478 
479   @Override
480   public byte [][] rollWriter(boolean force)
481       throws FailedLogCloseException, IOException {
482     synchronized (rollWriterLock) {
483       // Return if nothing to flush.
484       if (!force && this.writer != null && this.numEntries.get() <= 0) {
485         return null;
486       }
487       byte [][] regionsToFlush = null;
488       if (closed) {
489         LOG.debug("HLog closed. Skipping rolling of writer");
490         return null;
491       }
492       try {
493         this.logRollRunning = true;
494         if (!closeBarrier.beginOp()) {
495           LOG.debug("HLog closing. Skipping rolling of writer");
496           return regionsToFlush;
497         }
498         // Do all the preparation outside of the updateLock to block
499         // the incoming writes as little as possible
500         long currentFilenum = this.filenum;
501         Path oldPath = null;
502         if (currentFilenum > 0) {
503           // computeFilename will take care of the meta hlog filename
504           oldPath = computeFilename(currentFilenum);
505         }
506         this.filenum = System.currentTimeMillis();
507         Path newPath = computeFilename();
508         while (fs.exists(newPath)) {
509           this.filenum++;
510           newPath = computeFilename();
511         }
512 
513         // Tell our listeners that a new log is about to be created
514         if (!this.listeners.isEmpty()) {
515           for (WALActionsListener i : this.listeners) {
516             i.preLogRoll(oldPath, newPath);
517           }
518         }
519         FSHLog.Writer nextWriter = this.createWriterInstance(fs, newPath, conf);
520         // Can we get at the dfsclient outputstream?
521         FSDataOutputStream nextHdfsOut = null;
522         if (nextWriter instanceof ProtobufLogWriter) {
523           nextHdfsOut = ((ProtobufLogWriter)nextWriter).getStream();
524           // perform the costly sync before we get the lock to roll writers.
525           try {
526             nextWriter.sync();
527           } catch (IOException e) {
528             // optimization failed, no need to abort here.
529             LOG.warn("pre-sync failed", e);
530           }
531         }
532 
533         Path oldFile = null;
534         int oldNumEntries = 0;
535         synchronized (updateLock) {
536           // Clean up current writer.
537           oldNumEntries = this.numEntries.get();
538           oldFile = cleanupCurrentWriter(currentFilenum);
539           this.writer = nextWriter;
540           this.hdfs_out = nextHdfsOut;
541           this.numEntries.set(0);
542         }
543         if (oldFile == null) LOG.info("New WAL " + FSUtils.getPath(newPath));
544         else {
545           long oldFileLen = this.fs.getFileStatus(oldFile).getLen();
546           this.totalLogSize.addAndGet(oldFileLen);
547           LOG.info("Rolled WAL " + FSUtils.getPath(oldFile) + " with entries="
548               + oldNumEntries + ", filesize="
549               + StringUtils.humanReadableInt(oldFileLen) + "; new WAL "
550               + FSUtils.getPath(newPath));
551         }
552 
553         // Tell our listeners that a new log was created
554         if (!this.listeners.isEmpty()) {
555           for (WALActionsListener i : this.listeners) {
556             i.postLogRoll(oldPath, newPath);
557           }
558         }
559 
560         // Can we delete any of the old log files?
561         if (getNumRolledLogFiles() > 0) {
562           cleanOldLogs();
563           regionsToFlush = getRegionsToForceFlush();
564         }
565       } finally {
566         this.logRollRunning = false;
567         closeBarrier.endOp();
568       }
569       return regionsToFlush;
570     }
571   }
572 
573   /**
574    * This method allows subclasses to inject different writers without having to
575    * extend other methods like rollWriter().
576    *
577    * @param fs
578    * @param path
579    * @param conf
580    * @return Writer instance
581    * @throws IOException
582    */
583   protected Writer createWriterInstance(final FileSystem fs, final Path path,
584       final Configuration conf) throws IOException {
585     if (forMeta) {
586       //TODO: set a higher replication for the hlog files (HBASE-6773)
587     }
588     return HLogFactory.createWALWriter(fs, path, conf);
589   }
590 
591   /*
592    * Clean up old commit logs: archive every log file whose last sequence
593    * number is smaller than the oldest outstanding (unflushed) edit's
594    * sequence number.  The regions to force-flush when there are too many
595    * logs are computed separately by getRegionsToForceFlush().
596    * @throws IOException
597    */
598   private void cleanOldLogs() throws IOException {
599     long oldestOutstandingSeqNum = Long.MAX_VALUE;
600     synchronized (oldestSeqNumsLock) {
601       Long oldestFlushing = (oldestFlushingSeqNums.size() > 0)
602         ? Collections.min(oldestFlushingSeqNums.values()) : Long.MAX_VALUE;
603       Long oldestUnflushed = (oldestUnflushedSeqNums.size() > 0)
604         ? Collections.min(oldestUnflushedSeqNums.values()) : Long.MAX_VALUE;
605       oldestOutstandingSeqNum = Math.min(oldestFlushing, oldestUnflushed);
606     }
607 
608     // Get the set of all log files whose last sequence number is smaller than
609     // the oldest edit's sequence number.
610     TreeSet<Long> sequenceNumbers = new TreeSet<Long>(this.outputfiles.headMap(
611         oldestOutstandingSeqNum).keySet());
612     // Now remove old log files (if any)
613     if (LOG.isDebugEnabled()) {
614       if (sequenceNumbers.size() > 0) {
615         LOG.debug("Found " + sequenceNumbers.size() + " hlogs to remove" +
616           " out of total " + this.outputfiles.size() + ";" +
617           " oldest outstanding sequenceid is " + oldestOutstandingSeqNum);
618       }
619     }
620     for (Long seq : sequenceNumbers) {
621       Path p = this.outputfiles.remove(seq);
622       if (p != null) this.totalLogSize.addAndGet(-this.fs.getFileStatus(p).getLen());
623       archiveLogFile(p, seq);
624     }
625   }
626 
627   /**
628    * Return regions whose recorded edit sequence number is less than or equal to a given sequence number.
629    * Static due to some old unit test.
630    * @param walSeqNum The sequence number to compare with.
631    * @param regionsToSeqNums Encoded region names to sequence ids
632    * @return All regions whose seqNum <= walSeqNum. Null if no regions found.
633    */
634   static byte[][] findMemstoresWithEditsEqualOrOlderThan(
635       final long walSeqNum, final Map<byte[], Long> regionsToSeqNums) {
636     List<byte[]> regions = null;
637     for (Map.Entry<byte[], Long> e : regionsToSeqNums.entrySet()) {
638       if (e.getValue().longValue() <= walSeqNum) {
639         if (regions == null) regions = new ArrayList<byte[]>();
640         regions.add(e.getKey());
641       }
642     }
643     return regions == null ? null : regions
644         .toArray(new byte[][] { HConstants.EMPTY_BYTE_ARRAY });
645   }
646 
647   private byte[][] getRegionsToForceFlush() throws IOException {
648     // If too many log files, figure which regions we need to flush.
649     // Array is an array of encoded region names.
650     byte [][] regions = null;
651     int logCount = getNumRolledLogFiles();
652     if (logCount > this.maxLogs && logCount > 0) {
653       // This is an array of encoded region names.
654       synchronized (oldestSeqNumsLock) {
655         regions = findMemstoresWithEditsEqualOrOlderThan(this.outputfiles.firstKey(),
656           this.oldestUnflushedSeqNums);
657       }
658       if (regions != null) {
659         StringBuilder sb = new StringBuilder();
660         for (int i = 0; i < regions.length; i++) {
661           if (i > 0) sb.append(", ");
662           sb.append(Bytes.toStringBinary(regions[i]));
663         }
664         LOG.info("Too many hlogs: logs=" + logCount + ", maxlogs=" +
665            this.maxLogs + "; forcing flush of " + regions.length + " region(s): " +
666            sb.toString());
667       }
668     }
669     return regions;
670   }
671 
672   /*
673    * Cleans up the current writer, closing it and adding its path to outputfiles.
674    * Presumes we're operating inside an updateLock scope.
675    * @return Path to current writer or null if none.
676    * @throws IOException
677    */
678   Path cleanupCurrentWriter(final long currentfilenum) throws IOException {
679     Path oldFile = null;
680     if (this.writer != null) {
681       // Close the current writer, get a new one.
682       try {
683         // Wait till all current transactions are written to the hlog.
684         // No new transactions can occur because we have the updatelock.
685         if (this.unflushedEntries.get() != this.syncedTillHere) {
686           LOG.debug("cleanupCurrentWriter " +
687                    " waiting for transactions to get synced " +
688                    " total " + this.unflushedEntries.get() +
689                    " synced till here " + syncedTillHere);
690           sync();
691         }
692         this.writer.close();
693         this.writer = null;
694         closeErrorCount.set(0);
695       } catch (IOException e) {
696         LOG.error("Failed close of HLog writer", e);
697         int errors = closeErrorCount.incrementAndGet();
698         if (errors <= closeErrorsTolerated && !hasDeferredEntries()) {
699           LOG.warn("Riding over HLog close failure! error count="+errors);
700         } else {
701           if (hasDeferredEntries()) {
702             LOG.error("Aborting due to unflushed edits in HLog");
703           }
704           // Failed close of log file.  Means we're losing edits.  For now,
705           // shut ourselves down to minimize loss.  Alternative is to try and
706           // keep going.  See HBASE-930.
707           FailedLogCloseException flce =
708             new FailedLogCloseException("#" + currentfilenum);
709           flce.initCause(e);
710           throw flce;
711         }
712       }
713       if (currentfilenum >= 0) {
714         oldFile = computeFilename(currentfilenum);
715         this.outputfiles.put(Long.valueOf(this.logSeqNum.get()), oldFile);
716       }
717     }
718     return oldFile;
719   }
720 
721   private void archiveLogFile(final Path p, final Long seqno) throws IOException {
722     Path newPath = getHLogArchivePath(this.oldLogDir, p);
723     LOG.info("moving old hlog file " + FSUtils.getPath(p) +
724       " whose highest sequenceid is " + seqno + " to " +
725       FSUtils.getPath(newPath));
726 
727     // Tell our listeners that a log is going to be archived.
728     if (!this.listeners.isEmpty()) {
729       for (WALActionsListener i : this.listeners) {
730         i.preLogArchive(p, newPath);
731       }
732     }
733     if (!FSUtils.renameAndSetModifyTime(this.fs, p, newPath)) {
734       throw new IOException("Unable to rename " + p + " to " + newPath);
735     }
736     // Tell our listeners that a log has been archived.
737     if (!this.listeners.isEmpty()) {
738       for (WALActionsListener i : this.listeners) {
739         i.postLogArchive(p, newPath);
740       }
741     }
742   }
743 
744   /**
745    * This is a convenience method that computes a new filename
746    * using the current HLog file-number.
747    * @return Path
748    */
749   protected Path computeFilename() {
750     return computeFilename(this.filenum);
751   }
752 
753   /**
754    * This is a convenience method that computes a new filename with a given
755    * file-number.
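       * For example, with prefix <code>hlog</code> and filenum <code>1380000000000</code>,
       * the file name is <code>hlog.1380000000000</code> (with {@link HLog#META_HLOG_FILE_EXTN}
       * appended when this HLog is for meta edits).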
756    * @param filenum to use
757    * @return Path
758    */
759   protected Path computeFilename(long filenum) {
760     if (filenum < 0) {
761       throw new RuntimeException("hlog file number can't be < 0");
762     }
763     String child = prefix + "." + filenum;
764     if (forMeta) {
765       child += HLog.META_HLOG_FILE_EXTN;
766     }
767     return new Path(dir, child);
768   }
769 
770   @Override
771   public void closeAndDelete() throws IOException {
772     close();
773     if (!fs.exists(this.dir)) return;
774     FileStatus[] files = fs.listStatus(this.dir);
775     if (files != null) {
776       for(FileStatus file : files) {
777 
778         Path p = getHLogArchivePath(this.oldLogDir, file.getPath());
779         // Tell our listeners that a log is going to be archived.
780         if (!this.listeners.isEmpty()) {
781           for (WALActionsListener i : this.listeners) {
782             i.preLogArchive(file.getPath(), p);
783           }
784         }
785 
786         if (!FSUtils.renameAndSetModifyTime(fs, file.getPath(), p)) {
787           throw new IOException("Unable to rename " + file.getPath() + " to " + p);
788         }
789         // Tell our listeners that a log was archived.
790         if (!this.listeners.isEmpty()) {
791           for (WALActionsListener i : this.listeners) {
792             i.postLogArchive(file.getPath(), p);
793           }
794         }
795       }
796       LOG.debug("Moved " + files.length + " WAL file(s) to " + FSUtils.getPath(this.oldLogDir));
797     }
798     if (!fs.delete(dir, true)) {
799       LOG.info("Unable to delete " + dir);
800     }
801   }
802 
803   @Override
804   public void close() throws IOException {
805     if (this.closed) {
806       return;
807     }
808     // When optionalFlushInterval is 0, the logSyncer is not started as a Thread.
809     if (this.optionalFlushInterval > 0) {
810       try {
811         logSyncer.close();
812         // Make sure we synced everything
813         logSyncer.join(this.optionalFlushInterval * 2);
814       } catch (InterruptedException e) {
815         LOG.error("Exception while waiting for syncer thread to die", e);
816         Thread.currentThread().interrupt();
817       }
818     }
819     try {
820       // Prevent all further flushing and rolling.
821       closeBarrier.stopAndDrainOps();
822     } catch (InterruptedException e) {
823       LOG.error("Exception while waiting for cache flushes and log rolls", e);
824       Thread.currentThread().interrupt();
825     }
826 
827     // Tell our listeners that the log is closing
828     if (!this.listeners.isEmpty()) {
829       for (WALActionsListener i : this.listeners) {
830         i.logCloseRequested();
831       }
832     }
833     synchronized (updateLock) {
834       this.closed = true;
835       if (LOG.isDebugEnabled()) {
836         LOG.debug("Closing WAL writer in " + this.dir.toString());
837       }
838       if (this.writer != null) {
839         this.writer.close();
840         this.writer = null;
841       }
842     }
843   }
844 
845   /**
846    * @param now
847    * @param encodedRegionName Encoded name of the region as returned by
848    * <code>HRegionInfo#getEncodedNameAsBytes()</code>.
849    * @param tableName
850    * @param clusterIds that have consumed the change
851    * @return New log key.
852    */
853   protected HLogKey makeKey(byte[] encodedRegionName, TableName tableName, long seqnum,
854       long now, List<UUID> clusterIds) {
855     return new HLogKey(encodedRegionName, tableName, seqnum, now, clusterIds);
856   }
857 
858   @Override
859   public void append(HRegionInfo info, TableName tableName, WALEdit edits,
860     final long now, HTableDescriptor htd)
861   throws IOException {
862     append(info, tableName, edits, now, htd, true);
863   }
864 
865   @Override
866   public void append(HRegionInfo info, TableName tableName, WALEdit edits,
867     final long now, HTableDescriptor htd, boolean isInMemstore) throws IOException {
868     append(info, tableName, edits, new ArrayList<UUID>(), now, htd, true, isInMemstore);
869   }
870 
871   /**
872    * Append a set of edits to the log. Log edits are keyed by (encoded)
873    * regionName, rowname, and log-sequence-id.
874    *
875    * Later, if we sort by these keys, we obtain all the relevant edits for a
876    * given key-range of the HRegion (TODO). Any edits that do not have a
877    * matching COMPLETE_CACHEFLUSH message can be discarded.
878    *
879    * <p>
880    * Logs cannot be restarted once closed, or once the HLog process dies. Each
881    * time the HLog starts, it must create a new log. This means that other
882    * systems should process the log appropriately upon each startup (and prior
883    * to initializing HLog).
884    *
885    * synchronized prevents appends during the completion of a cache flush or for
886    * the duration of a log roll.
887    *
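       * <p>A caller-side sketch of the non-blocking path (variable names are
       * illustrative); the returned transaction id is later handed to {@link #sync(long)}:
       * <pre>
       * long txid = hlog.appendNoSync(regionInfo, tableName, edits, clusterIds, now, htd);
       * // ... possibly batch further appends ...
       * hlog.sync(txid);  // blocks until all edits up to txid are flushed and sync'd
       * </pre>
       *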
888    * @param info
889    * @param tableName
890    * @param edits
891    * @param clusterIds that have consumed the change (for replication)
892    * @param now
893    * @param doSync shall we sync?
894    * @return txid of this transaction
895    * @throws IOException
896    */
897   @SuppressWarnings("deprecation")
898   private long append(HRegionInfo info, TableName tableName, WALEdit edits, List<UUID> clusterIds,
899       final long now, HTableDescriptor htd, boolean doSync, boolean isInMemstore)
900     throws IOException {
901       if (edits.isEmpty()) return this.unflushedEntries.get();
902       if (this.closed) {
903         throw new IOException("Cannot append; log is closed");
904       }
905       TraceScope traceScope = Trace.startSpan("FSHlog.append");
906       try {
907         long txid = 0;
908         synchronized (this.updateLock) {
909           long seqNum = obtainSeqNum();
910           // The 'oldestUnflushedSeqNums' map holds the sequence number of the oldest
911           // write for each region (i.e. the first edit added to the particular
912           // memstore).  When the cache is flushed, the entry for the
913           // region being flushed is removed if the sequence number of the flush
914           // is greater than or equal to the value in the map.
915           // Use encoded name.  It's shorter, guaranteed unique and a subset of the
916           // actual name.
917           byte [] encodedRegionName = info.getEncodedNameAsBytes();
918           if (isInMemstore) this.oldestUnflushedSeqNums.putIfAbsent(encodedRegionName, seqNum);
919           HLogKey logKey = makeKey(encodedRegionName, tableName, seqNum, now, clusterIds);
920           doWrite(info, logKey, edits, htd);
921           this.numEntries.incrementAndGet();
922           txid = this.unflushedEntries.incrementAndGet();
923           if (htd.isDeferredLogFlush()) {
924             lastDeferredTxid = txid;
925           }
926         }
927         // Sync if catalog region, and if not then check if that table supports
928         // deferred log flushing
929         if (doSync &&
930             (info.isMetaRegion() ||
931             !htd.isDeferredLogFlush())) {
932           // sync txn to file system
933           this.sync(txid);
934         }
935         return txid;
936       } finally {
937         traceScope.close();
938       }
939     }
940 
941   @Override
942   public long appendNoSync(HRegionInfo info, TableName tableName, WALEdit edits,
943       List<UUID> clusterIds, final long now, HTableDescriptor htd)
944     throws IOException {
945     return append(info, tableName, edits, clusterIds, now, htd, false, true);
946   }
947 
948   /**
949    * This class is responsible for holding the HLog's appended Entry list
950    * and for syncing the entries on a configurable interval.
951    *
952    * Deferred log flushing works by piggybacking on this process: the appended
953    * Entry is simply not sync'd right away. It can also be sync'd by
954    * non-deferred entries appended outside of this thread.
955    */
956   class LogSyncer extends HasThread {
957 
958     private final long optionalFlushInterval;
959 
960     private final AtomicBoolean closeLogSyncer = new AtomicBoolean(false);
961 
962     // List of pending writes to the HLog. These correspond to transactions
963     // that have not yet returned to the client. We keep them cached here
964     // instead of writing them to HDFS piecemeal, because the HDFS write
965     // method is pretty heavyweight as far as locking is concerned. The
966     // goal is to increase the batchsize for writing-to-hdfs as well as
967     // sync-to-hdfs, so that we can get better system throughput.
968     private List<Entry> pendingWrites = new LinkedList<Entry>();
969 
970     LogSyncer(long optionalFlushInterval) {
971       this.optionalFlushInterval = optionalFlushInterval;
972     }
973 
974     @Override
975     public void run() {
976       try {
977         // awaiting with a timeout doesn't always
978         // throw exceptions on interrupt
979         while(!this.isInterrupted() && !closeLogSyncer.get()) {
980 
981           try {
982             if (unflushedEntries.get() <= syncedTillHere) {
983               synchronized (closeLogSyncer) {
984                 closeLogSyncer.wait(this.optionalFlushInterval);
985               }
986             }
987             // Calling sync since we waited or had unflushed entries.
988             // Entries appended but not sync'd are taken care of here AKA
989             // deferred log flush
990             sync();
991           } catch (IOException e) {
992             LOG.error("Error while syncing, requesting close of hlog ", e);
993             requestLogRoll();
994             Threads.sleep(this.optionalFlushInterval);
995           }
996         }
997       } catch (InterruptedException e) {
998         LOG.debug(getName() + " interrupted while waiting for sync requests");
999       } finally {
1000         LOG.info(getName() + " exiting");
1001       }
1002     }
1003 
1004     // appends new writes to the pendingWrites. It is better to keep it in
1005     // our own queue rather than writing it to the HDFS output stream because
1006     // HDFSOutputStream.writeChunk is not lightweight at all.
1007     synchronized void append(Entry e) throws IOException {
1008       pendingWrites.add(e);
1009     }
1010 
1011     // Returns all currently pending writes. New writes
1012     // will accumulate in a new list.
1013     synchronized List<Entry> getPendingWrites() {
1014       List<Entry> save = this.pendingWrites;
1015       this.pendingWrites = new LinkedList<Entry>();
1016       return save;
1017     }
1018 
1019     // writes out pending entries to the HLog
1020     void hlogFlush(Writer writer, List<Entry> pending) throws IOException {
1021       if (pending == null) return;
1022 
1023       // write out all accumulated Entries to hdfs.
1024       for (Entry e : pending) {
1025         writer.append(e);
1026       }
1027     }
1028 
1029     void close() {
1030       synchronized (closeLogSyncer) {
1031         closeLogSyncer.set(true);
1032         closeLogSyncer.notifyAll();
1033       }
1034     }
1035   }
1036 
1037   // sync all known transactions
1038   private void syncer() throws IOException {
1039     syncer(this.unflushedEntries.get()); // sync all pending items
1040   }
1041 
1042   // sync all transactions up to the specified txid
1043   private void syncer(long txid) throws IOException {
1044     // if the transaction that we are interested in is already
1045     // synced, then return immediately.
1046     if (txid <= this.syncedTillHere) {
1047       return;
1048     }
1049     Writer tempWriter;
1050     synchronized (this.updateLock) {
1051       if (this.closed) return;
1052       // Guaranteed non-null.
1053       // Note that parallel sync can close tempWriter.
1054       // The current method of dealing with this is to catch exceptions.
1055       // See HBASE-4387, HBASE-5623, HBASE-7329.
1056       tempWriter = this.writer;
1057     }
1058     try {
1059       long doneUpto;
1060       long now = EnvironmentEdgeManager.currentTimeMillis();
1061       // First flush all the pending writes to HDFS. Then
1062       // issue the sync to HDFS. If sync is successful, then update
1063       // syncedTillHere to indicate that transactions till this
1064       // number have been successfully synced.
1065       IOException ioe = null;
1066       List<Entry> pending = null;
1067       synchronized (flushLock) {
1068         if (txid <= this.syncedTillHere) {
1069           return;
1070         }
1071         doneUpto = this.unflushedEntries.get();
1072         pending = logSyncer.getPendingWrites();
1073         try {
1074           logSyncer.hlogFlush(tempWriter, pending);
1075           postAppend(pending);
1076         } catch(IOException io) {
1077           ioe = io;
1078           LOG.error("syncer encountered error, will retry. txid=" + txid, ioe);
1079         }
1080       }
1081       if (ioe != null && pending != null) {
1082         synchronized (this.updateLock) {
1083           synchronized (flushLock) {
1084             // HBASE-4387, HBASE-5623, retry with updateLock held
1085             tempWriter = this.writer;
1086             logSyncer.hlogFlush(tempWriter, pending);
1087             postAppend(pending);
1088           }
1089         }
1090       }
1091       // another thread might have sync'ed; avoid double-sync'ing
1092       if (txid <= this.syncedTillHere) {
1093         return;
1094       }
1095       try {
1096         if (tempWriter != null) {
1097           tempWriter.sync();
1098           postSync();
1099         }
1100       } catch(IOException ex) {
1101         synchronized (this.updateLock) {
1102           // HBASE-4387, HBASE-5623, retry with updateLock held
1103           // TODO: we don't actually need to do it for concurrent close - what is the point
1104           //       of syncing new unrelated writer? Keep behavior for now.
1105           tempWriter = this.writer;
1106           if (tempWriter != null) {
1107             tempWriter.sync();
1108             postSync();
1109           }
1110         }
1111       }
1112       this.syncedTillHere = Math.max(this.syncedTillHere, doneUpto);
1113 
1114       this.metrics.finishSync(EnvironmentEdgeManager.currentTimeMillis() - now);
1115       // TODO: preserving the old behavior for now, but this check is strange. It's not
1116       //       protected by any locks here, so for all we know rolling locks might start
1117       //       as soon as we enter the "if". Is this best-effort optimization check?
1118       if (!this.logRollRunning) {
1119         checkLowReplication();
1120         try {
1121           curLogSize = tempWriter.getLength();
1122           if (curLogSize > this.logrollsize) {
1123             requestLogRoll();
1124           }
1125         } catch (IOException x) {
1126           LOG.debug("Log roll failed and will be retried. (This is not an error)");
1127         }
1128       }
1129     } catch (IOException e) {
1130       LOG.fatal("Could not sync. Requesting roll of hlog", e);
1131       requestLogRoll();
1132       throw e;
1133     }
1134   }
1135 
1136   @Override
1137   public void postSync() {}
1138 
1139   @Override
1140   public void postAppend(List<Entry> entries) {}
1141 
1142   private void checkLowReplication() {
1143     // if the number of replicas in HDFS has fallen below the configured
1144     // value, then roll logs.
1145     try {
1146       int numCurrentReplicas = getLogReplication();
1147       if (numCurrentReplicas != 0
1148           && numCurrentReplicas < this.minTolerableReplication) {
1149         if (this.lowReplicationRollEnabled) {
1150           if (this.consecutiveLogRolls.get() < this.lowReplicationRollLimit) {
1151             LOG.warn("HDFS pipeline error detected. " + "Found "
1152                 + numCurrentReplicas + " replicas but expecting no less than "
1153                 + this.minTolerableReplication + " replicas. "
1154                 + " Requesting close of hlog.");
1155             requestLogRoll();
1156             // If rollWriter is requested, increase consecutiveLogRolls. Once it
1157             // is larger than lowReplicationRollLimit, disable the
1158             // LowReplication-Roller
1159             this.consecutiveLogRolls.getAndIncrement();
1160           } else {
1161             LOG.warn("Too many consecutive RollWriter requests, it's a sign that "
1162                 + "the total number of live datanodes is lower than the tolerable replicas.");
1163             this.consecutiveLogRolls.set(0);
1164             this.lowReplicationRollEnabled = false;
1165           }
1166         }
1167       } else if (numCurrentReplicas >= this.minTolerableReplication) {
1168 
1169         if (!this.lowReplicationRollEnabled) {
1170           // A new writer's log replica count is always the default value,
1171           // so we should not re-enable the LowReplication-Roller yet. If numEntries
1172           // is lower than or equal to 1, we consider it a new writer.
1173           if (this.numEntries.get() <= 1) {
1174             return;
1175           }
1176           // Once the live datanode number and the replicas return to normal,
1177           // enable the LowReplication-Roller.
1178           this.lowReplicationRollEnabled = true;
1179           LOG.info("LowReplication-Roller was enabled.");
1180         }
1181       }
1182     } catch (Exception e) {
1183       LOG.warn("Unable to invoke DFSOutputStream.getNumCurrentReplicas: " + e +
1184           "; still proceeding ahead...");
1185     }
1186   }
1187 
1188   /**
1189    * This method gets the datanode replication count for the current HLog.
1190    *
1191    * If the pipeline isn't started yet or is empty, you will get the default
1192    * replication factor.  Therefore, if this function returns 0, it means you
1193    * are not properly running with the HDFS-826 patch.
1194    * @throws InvocationTargetException
1195    * @throws IllegalAccessException
1196    * @throws IllegalArgumentException
1197    *
1198    * @throws Exception
1199    */
1200   int getLogReplication()
1201   throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
1202     if (this.getNumCurrentReplicas != null && this.hdfs_out != null) {
1203       Object repl = this.getNumCurrentReplicas.invoke(getOutputStream(), NO_ARGS);
1204       if (repl instanceof Integer) {
1205         return ((Integer)repl).intValue();
1206       }
1207     }
1208     return 0;
1209   }
1210 
1211   boolean canGetCurReplicas() {
1212     return this.getNumCurrentReplicas != null;
1213   }
1214 
1215   @Override
1216   public void hsync() throws IOException {
1217     syncer();
1218   }
1219 
1220   @Override
1221   public void hflush() throws IOException {
1222     syncer();
1223   }
1224 
1225   @Override
1226   public void sync() throws IOException {
1227     syncer();
1228   }
1229 
1230   @Override
1231   public void sync(long txid) throws IOException {
1232     syncer(txid);
1233   }
1234 
1235   private void requestLogRoll() {
1236     if (!this.listeners.isEmpty()) {
1237       for (WALActionsListener i: this.listeners) {
1238         i.logRollRequested();
1239       }
1240     }
1241   }
1242 
1243   // Note: 'info' is passed through to the WAL coprocessor pre/post write hooks below.
1244   protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit,
1245       HTableDescriptor htd)
1246   throws IOException {
1247     if (!this.enabled) {
1248       return;
1249     }
1250     if (!this.listeners.isEmpty()) {
1251       for (WALActionsListener i: this.listeners) {
1252         i.visitLogEntryBeforeWrite(htd, logKey, logEdit);
1253       }
1254     }
1255     try {
1256       long now = EnvironmentEdgeManager.currentTimeMillis();
1257       // coprocessor hook:
1258       if (!coprocessorHost.preWALWrite(info, logKey, logEdit)) {
1259         if (logEdit.isReplay()) {
1260           // set replication scope null so that this won't be replicated
1261           logKey.setScopes(null);
1262         }
1263         // write to our buffer for the Hlog file.
1264         logSyncer.append(new FSHLog.Entry(logKey, logEdit));
1265       }
1266       long took = EnvironmentEdgeManager.currentTimeMillis() - now;
1267       coprocessorHost.postWALWrite(info, logKey, logEdit);
1268       long len = 0;
1269       for (KeyValue kv : logEdit.getKeyValues()) {
1270         len += kv.getLength();
1271       }
1272       this.metrics.finishAppend(took, len);
1273     } catch (IOException e) {
1274       LOG.fatal("Could not append. Requesting close of hlog", e);
1275       requestLogRoll();
1276       throw e;
1277     }
1278   }
1279 
1280 
1281   /** @return How many items have been added to the log */
1282   int getNumEntries() {
1283     return numEntries.get();
1284   }
1285 
1286   @Override
1287   public long obtainSeqNum() {
1288     return this.logSeqNum.incrementAndGet();
1289   }
1290 
1291   /** @return the number of rolled log files */
1292   public int getNumRolledLogFiles() {
1293     return outputfiles.size();
1294   }
1295 
1296   /** @return the number of log files in use */
1297   @Override
1298   public int getNumLogFiles() {
1299     // +1 for the current log in use
1300     return getNumRolledLogFiles() + 1;
1301   }
1302 
1303   /** @return the size of log files in use */
1304   @Override
1305   public long getLogFileSize() {
1306     return totalLogSize.get() + curLogSize;
1307   }
1308 
1309   @Override
1310   public Long startCacheFlush(final byte[] encodedRegionName) {
1311     Long oldRegionSeqNum = null;
1312     if (!closeBarrier.beginOp()) {
1313       return null;
1314     }
1315     synchronized (oldestSeqNumsLock) {
1316       oldRegionSeqNum = this.oldestUnflushedSeqNums.remove(encodedRegionName);
1317       if (oldRegionSeqNum != null) {
1318         Long oldValue = this.oldestFlushingSeqNums.put(encodedRegionName, oldRegionSeqNum);
1319         assert oldValue == null : "Flushing map not cleaned up for "
1320           + Bytes.toString(encodedRegionName);
1321       }
1322     }
1323     if (oldRegionSeqNum == null) {
1324       // TODO: if we have no oldRegionSeqNum, and WAL is not disabled, presumably either
1325       //       the region is already flushing (which would make this call invalid), or there
1326       //       were no appends after last flush, so why are we starting flush? Maybe we should
1327       //       assert not null, and switch to "long" everywhere. Less rigorous, but safer,
1328       //       alternative is telling the caller to stop. For now preserve old logic.
1329       LOG.warn("Couldn't find oldest seqNum for the region we are about to flush: ["
1330         + Bytes.toString(encodedRegionName) + "]");
1331     }
1332     return obtainSeqNum();
1333   }
1334 
1335   @Override
1336   public void completeCacheFlush(final byte [] encodedRegionName)
1337   {
1338     synchronized (oldestSeqNumsLock) {
1339       this.oldestFlushingSeqNums.remove(encodedRegionName);
1340     }
1341     closeBarrier.endOp();
1342   }
1343 
1344   @Override
1345   public void abortCacheFlush(byte[] encodedRegionName) {
1346     Long currentSeqNum = null, seqNumBeforeFlushStarts = null;
1347     synchronized (oldestSeqNumsLock) {
1348       seqNumBeforeFlushStarts = this.oldestFlushingSeqNums.remove(encodedRegionName);
1349       if (seqNumBeforeFlushStarts != null) {
1350         currentSeqNum =
1351           this.oldestUnflushedSeqNums.put(encodedRegionName, seqNumBeforeFlushStarts);
1352       }
1353     }
1354     closeBarrier.endOp();
1355     if ((currentSeqNum != null)
1356         && (currentSeqNum.longValue() <= seqNumBeforeFlushStarts.longValue())) {
1357       String errorStr = "Region " + Bytes.toString(encodedRegionName) +
1358           " acquired edits out of order; current memstore seq=" + currentSeqNum
1359           + ", previous oldest unflushed id=" + seqNumBeforeFlushStarts;
1360       LOG.error(errorStr);
1361       assert false : errorStr;
1362       Runtime.getRuntime().halt(1);
1363     }
1364   }
1365 
1366   @Override
1367   public boolean isLowReplicationRollEnabled() {
1368     return lowReplicationRollEnabled;
1369   }
1370 
1371   /**
1372    * Get the directory we are making logs in.
1373    *
1374    * @return dir
1375    */
1376   protected Path getDir() {
1377     return dir;
1378   }
1379 
1380   static Path getHLogArchivePath(Path oldLogDir, Path p) {
1381     return new Path(oldLogDir, p.getName());
1382   }
1383 
1384   static String formatRecoveredEditsFileName(final long seqid) {
1385     return String.format("%019d", seqid);
1386   }
1387 
1388   public static final long FIXED_OVERHEAD = ClassSize.align(
1389     ClassSize.OBJECT + (5 * ClassSize.REFERENCE) +
1390     ClassSize.ATOMIC_INTEGER + Bytes.SIZEOF_INT + (3 * Bytes.SIZEOF_LONG));
1391 
1392   private static void usage() {
1393     System.err.println("Usage: HLog <ARGS>");
1394     System.err.println("Arguments:");
1395     System.err.println(" --dump  Dump a textual representation of one or more passed files");
1396     System.err.println("         For example: HLog --dump hdfs://example.com:9000/hbase/.logs/MACHINE/LOGFILE");
1397     System.err.println(" --split Split the passed directory of WAL logs");
1398     System.err.println("         For example: HLog --split hdfs://example.com:9000/hbase/.logs/DIR");
1399   }
1400 
1401   private static void split(final Configuration conf, final Path p)
1402   throws IOException {
1403     FileSystem fs = FileSystem.get(conf);
1404     if (!fs.exists(p)) {
1405       throw new FileNotFoundException(p.toString());
1406     }
1407     if (!fs.getFileStatus(p).isDir()) {
1408       throw new IOException(p + " is not a directory");
1409     }
1410 
1411     final Path baseDir = FSUtils.getRootDir(conf);
1412     final Path oldLogDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
1413     HLogSplitter.split(baseDir, p, oldLogDir, fs, conf);
1414   }
1415 
1416   @Override
1417   public WALCoprocessorHost getCoprocessorHost() {
1418     return coprocessorHost;
1419   }
1420 
1421   /** @return whether there are deferred-flush entries not yet synced; exposed for tests */
1422   boolean hasDeferredEntries() {
1423     return lastDeferredTxid > syncedTillHere;
1424   }
1425 
1426   @Override
1427   public long getEarliestMemstoreSeqNum(byte[] encodedRegionName) {
1428     Long result = oldestUnflushedSeqNums.get(encodedRegionName);
1429     return result == null ? HConstants.NO_SEQNUM : result.longValue();
1430   }
1431 
1432   /**
1433    * Pass one or more log file names and it will either dump out a text version
1434    * on <code>stdout</code> or split the specified log files.
1435    *
1436    * @param args
1437    * @throws IOException
1438    */
1439   public static void main(String[] args) throws IOException {
1440     if (args.length < 2) {
1441       usage();
1442       System.exit(-1);
1443     }
1444     // either dump using the HLogPrettyPrinter or split, depending on args
1445     if (args[0].compareTo("--dump") == 0) {
1446       HLogPrettyPrinter.run(Arrays.copyOfRange(args, 1, args.length));
1447     } else if (args[0].compareTo("--split") == 0) {
1448       Configuration conf = HBaseConfiguration.create();
1449       for (int i = 1; i < args.length; i++) {
1450         try {
1451           Path logPath = new Path(args[i]);
1452           FSUtils.setFsDefault(conf, logPath);
1453           split(conf, logPath);
1454         } catch (Throwable t) {
1455           t.printStackTrace(System.err);
1456           System.exit(-1);
1457         }
1458       }
1459     } else {
1460       usage();
1461       System.exit(-1);
1462     }
1463   }
1464 }