
1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase.regionserver.wal;
19  
20  import java.io.FileNotFoundException;
21  import java.io.IOException;
22  import java.io.InterruptedIOException;
23  import java.io.OutputStream;
24  import java.lang.reflect.InvocationTargetException;
25  import java.lang.reflect.Method;
26  import java.net.URLEncoder;
27  import java.util.ArrayList;
28  import java.util.Arrays;
29  import java.util.Comparator;
30  import java.util.HashMap;
31  import java.util.List;
32  import java.util.Map;
33  import java.util.NavigableMap;
34  import java.util.TreeMap;
35  import java.util.UUID;
36  import java.util.concurrent.BlockingQueue;
37  import java.util.concurrent.ConcurrentHashMap;
38  import java.util.concurrent.ConcurrentSkipListMap;
39  import java.util.concurrent.CopyOnWriteArrayList;
40  import java.util.concurrent.CountDownLatch;
41  import java.util.concurrent.ExecutionException;
42  import java.util.concurrent.ExecutorService;
43  import java.util.concurrent.Executors;
44  import java.util.concurrent.LinkedBlockingQueue;
45  import java.util.concurrent.TimeUnit;
46  import java.util.concurrent.atomic.AtomicBoolean;
47  import java.util.concurrent.atomic.AtomicInteger;
48  import java.util.concurrent.atomic.AtomicLong;
49  import java.util.concurrent.locks.ReentrantLock;
50  
51  import org.apache.commons.logging.Log;
52  import org.apache.commons.logging.LogFactory;
53  import org.apache.hadoop.hbase.classification.InterfaceAudience;
54  import org.apache.hadoop.conf.Configuration;
55  import org.apache.hadoop.fs.FSDataOutputStream;
56  import org.apache.hadoop.fs.FileStatus;
57  import org.apache.hadoop.fs.FileSystem;
58  import org.apache.hadoop.fs.Path;
59  import org.apache.hadoop.fs.PathFilter;
60  import org.apache.hadoop.hbase.Cell;
61  import org.apache.hadoop.hbase.CellUtil;
62  import org.apache.hadoop.hbase.HBaseConfiguration;
63  import org.apache.hadoop.hbase.HConstants;
64  import org.apache.hadoop.hbase.HRegionInfo;
65  import org.apache.hadoop.hbase.HTableDescriptor;
66  import org.apache.hadoop.hbase.KeyValue;
67  import org.apache.hadoop.hbase.TableName;
68  import static org.apache.hadoop.hbase.wal.DefaultWALProvider.WAL_FILE_NAME_DELIMITER;
69  import org.apache.hadoop.hbase.wal.DefaultWALProvider;
70  import org.apache.hadoop.hbase.wal.WAL;
71  import org.apache.hadoop.hbase.wal.WAL.Entry;
72  import org.apache.hadoop.hbase.wal.WALFactory;
73  import org.apache.hadoop.hbase.wal.WALKey;
74  import org.apache.hadoop.hbase.wal.WALPrettyPrinter;
75  import org.apache.hadoop.hbase.wal.WALProvider.Writer;
76  import org.apache.hadoop.hbase.wal.WALSplitter;
77  import org.apache.hadoop.hbase.util.Bytes;
78  import org.apache.hadoop.hbase.util.ClassSize;
79  import org.apache.hadoop.hbase.util.DrainBarrier;
80  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
81  import org.apache.hadoop.hbase.util.FSUtils;
82  import org.apache.hadoop.hbase.util.HasThread;
83  import org.apache.hadoop.hbase.util.Threads;
84  import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
85  import org.apache.hadoop.util.StringUtils;
86  import org.apache.htrace.NullScope;
87  import org.apache.htrace.Span;
88  import org.apache.htrace.Trace;
89  import org.apache.htrace.TraceScope;
90  
91  import com.google.common.annotations.VisibleForTesting;
92  import com.lmax.disruptor.BlockingWaitStrategy;
93  import com.lmax.disruptor.EventHandler;
94  import com.lmax.disruptor.ExceptionHandler;
95  import com.lmax.disruptor.LifecycleAware;
96  import com.lmax.disruptor.TimeoutException;
97  import com.lmax.disruptor.dsl.Disruptor;
98  import com.lmax.disruptor.dsl.ProducerType;
99  
100 /**
101  * Implementation of {@link WAL} to go against {@link FileSystem}; i.e. keep WALs in HDFS.
102  * Only one WAL is ever being written at a time.  When a WAL hits a configured maximum size,
103  * it is rolled.  This is done internal to the implementation.
104  *
105  * <p>As data is flushed from the MemStore to other on-disk structures (files sorted by
106  * key, hfiles), a WAL becomes obsolete. We can let go of all the log edits/entries for a given
107  * HRegion-sequence id.  A bunch of work in the below is done keeping account of these region
108  * sequence ids -- what is flushed out to hfiles, and what is yet in WAL and in memory only.
109  *
110  * <p>It is only practical to delete entire files. Thus, we delete an entire on-disk file
111  * <code>F</code> when all of the edits in <code>F</code> have a log-sequence-id that's older
112  * (smaller) than the most-recent flush.
113  *
114  * <p>To read a WAL, call {@link WALFactory#createReader(org.apache.hadoop.fs.FileSystem,
115  * org.apache.hadoop.fs.Path)}.
116  */
117 @InterfaceAudience.Private
118 public class FSHLog implements WAL {
119   // IMPLEMENTATION NOTES:
120   //
121   // At the core is a ring buffer.  Our ring buffer is the LMAX Disruptor.  It tries to
122   // minimize synchronizations and volatile writes when multiple contending threads, as is the case
123   // here, are appending and syncing on a single WAL.  The Disruptor is configured to handle multiple
124   // producers but it has one consumer only (the producers in HBase are IPC Handlers calling append
125   // and then sync).  The single consumer/writer pulls the appends and syncs off the ring buffer.
126   // When a handler calls sync, it is given back a future. The producer 'blocks' on the future so
127   // it does not return until the sync completes.  The future is passed over the ring buffer from
128   // the producer/handler to the consumer thread where it does its best to batch up the producer
129   // syncs so one WAL sync actually spans multiple producer sync invocations.  How well the
130   // batching works depends on the write rate; i.e. we tend to batch more in times of
131   // high writes/syncs.
132   //
133   // Calls to append now also wait until the append has been done on the consumer side of the
134   // disruptor.  We used to not wait but it makes the implementation easier to grok if we have
135   // the region edit/sequence id after the append returns.
136   // 
137   // TODO: Handlers need to coordinate appending AND syncing.  Can we have the threads contend
138   // once only?  Probably hard given syncs take way longer than an append.
139   //
140   // The consumer thread passes the syncs off to multiple syncing threads in a round robin fashion
141   // to ensure we keep up back-to-back FS sync calls (FS sync calls are the long pole in writing the
142   // WAL).  The consumer thread passes the futures to the sync threads, which complete
143   // the futures when the sync is done.
144   //
145   // The 'sequence' in the below is the sequence of the append/sync on the ringbuffer.  It
146   // acts as a sort-of transaction id.  It is always incrementing.
147   //
148   // The RingBufferEventHandler class hosts the ring buffer consuming code.  The threads that
149   // do the actual FS sync are implementations of SyncRunner.  SafePointZigZagLatch is a
150   // synchronization class used to halt the consumer at a safe point --  just after all outstanding
151   // syncs and appends have completed -- so the log roller can swap the WAL out under it.
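      //
      // A rough sketch of the flow just described, for orientation only (names refer to the
      // classes in this file):
      //   handler append(...)        -> publishes an FSWALEntry on the ring buffer, returns a txid
      //   handler sync()             -> publishes a SyncFuture on the ring buffer and blocks on it
      //   RingBufferEventHandler     -> single consumer; writes appends, hands batched SyncFutures
      //                                 to a SyncRunner in round-robin fashion
      //   SyncRunner                 -> runs the filesystem sync, then completes the SyncFutures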
152 
153   static final Log LOG = LogFactory.getLog(FSHLog.class);
154 
155   private static final int DEFAULT_SLOW_SYNC_TIME_MS = 100; // in ms
156   
157   /**
158    * The nexus at which all incoming handlers meet.  Does appends and syncs with an ordering.
159    * Appends and syncs are each put on the ring which means handlers need to
160    * smash up against the ring twice (can we make it once only? ... maybe not since time to append
161    * is so different from time to sync and sometimes we don't want to sync or we want to async
162    * the sync).  The ring is where we make sure of our ordering and it is also where we do
163    * batching up of handler sync calls.
164    */
165   private final Disruptor<RingBufferTruck> disruptor;
166 
167   /**
168    * An ExecutorService that runs the disruptor append event handler (the RingBufferEventHandler).
169    */
170   private final ExecutorService appendExecutor;
171 
172   /**
173    * This fellow is run by the above appendExecutor service but it is all about batching up appends
174    * and syncs; it may shut down without cleaning out the last few appends or syncs.  To guard
175    * against this, keep a reference to this handler and do an explicit close on the way out to make
176    * sure everything is flushed out before we exit.
177    */
178   private final RingBufferEventHandler ringBufferEventHandler;
179 
180   /**
181    * Map of {@link SyncFuture}s keyed by Handler objects.  Used so we reuse SyncFutures.
182    * TODO: Reuse FSWALEntry's rather than create them anew each time as we do SyncFutures here.
183    * TODO: Add an FSWALEntry and SyncFuture as thread locals on handlers rather than have them
184    * get them from this Map?
185    */
186   private final Map<Thread, SyncFuture> syncFuturesByHandler;
187 
188   /**
189    * The highest known outstanding unsync'd WALEdit sequence number where sequence number is the
190    * ring buffer sequence.  Maintained by the ring buffer consumer.
191    */
192   private volatile long highestUnsyncedSequence = -1;
193 
194   /**
195    * Updated to the ring buffer sequence of the last successful sync call.  This can be less than
196    * {@link #highestUnsyncedSequence} for the case where an append has happened but its sync has
197    * not yet come in.  Maintained by the syncing threads.
198    */
199   private final AtomicLong highestSyncedSequence = new AtomicLong(0);
200 
201   /**
202    * file system instance
203    */
204   protected final FileSystem fs;
205 
206   /**
207    * WAL directory, where all WAL files would be placed.
208    */
209   private final Path fullPathLogDir;
210   /**
211    * dir path where old logs are kept.
212    */
213   private final Path fullPathArchiveDir;
214 
215   /**
216    * Matches just those wal files that belong to this wal instance.
217    */
218   private final PathFilter ourFiles;
219 
220   /**
221    * Prefix of a WAL file, usually the region server name it is hosted on.
222    */
223   private final String logFilePrefix;
224 
225   /**
226    * Suffix included on generated wal file names 
227    */
228   private final String logFileSuffix;
229 
230   /**
231    * Prefix used when checking for wal membership.
232    */
233   private final String prefixPathStr;
234 
235   private final WALCoprocessorHost coprocessorHost;
236 
237   /**
238    * conf object
239    */
240   protected final Configuration conf;
241   /** Listeners that are called on WAL events. */
242   private final List<WALActionsListener> listeners = new CopyOnWriteArrayList<WALActionsListener>();
243 
244   @Override
245   public void registerWALActionsListener(final WALActionsListener listener) {
246     this.listeners.add(listener);
247   }
248   
249   @Override
250   public boolean unregisterWALActionsListener(final WALActionsListener listener) {
251     return this.listeners.remove(listener);
252   }
253 
254   @Override
255   public WALCoprocessorHost getCoprocessorHost() {
256     return coprocessorHost;
257   }
258   /**
259    * FSDataOutputStream associated with the current SequenceFile.writer
260    */
261   private FSDataOutputStream hdfs_out;
262 
263   // All about log rolling if not enough replicas outstanding.
264 
265   // Minimum tolerable replicas, if the actual value is lower than it, rollWriter will be triggered
266   private final int minTolerableReplication;
267 
268   // DFSOutputStream.getNumCurrentReplicas method instance gotten via reflection.
269   private final Method getNumCurrentReplicas;
270   private final Method getPipeLine; // refers to DFSOutputStream.getPipeLine
271   private final int slowSyncNs;
272 
273   private final static Object [] NO_ARGS = new Object []{};
274 
275   // If the live datanode count is lower than the default replica count,
276   // rollWriter will be triggered by each sync (so rolls would be requested
277   // one after another in a short time). This counter is used as a workaround to slow
278   // down the roll frequency triggered by checkLowReplication().
279   private final AtomicInteger consecutiveLogRolls = new AtomicInteger(0);
280 
281   private final int lowReplicationRollLimit;
282 
283   // If consecutiveLogRolls is larger than lowReplicationRollLimit,
284   // then disable the rolling in checkLowReplication().
285   // Re-enable it once replication recovers.
286   private volatile boolean lowReplicationRollEnabled = true;
287 
288   /**
289    * Current log file.
290    */
291   volatile Writer writer;
292 
293   /** The barrier used to ensure that close() waits for all log rolls and flushes to finish. */
294   private final DrainBarrier closeBarrier = new DrainBarrier();
295 
296   /**
297    * This lock makes sure only one log roll runs at a time. Should not be taken while any other
298    * lock is held. We don't just use synchronized because that results in bogus and tedious
299    * findbugs warnings when it thinks synchronized controls writer thread safety.  It is held when
300    * we are actually rolling the log.  It is checked when we are looking to see if we should roll
301    * the log or not.
302    */
303   private final ReentrantLock rollWriterLock = new ReentrantLock(true);
304 
305   private volatile boolean closed = false;
306   private final AtomicBoolean shutdown = new AtomicBoolean(false);
307 
308   // The timestamp (in ms) when the log file was created.
309   private final AtomicLong filenum = new AtomicLong(-1);
310 
311   // Number of transactions in the current Wal.
312   private final AtomicInteger numEntries = new AtomicInteger(0);
313 
314   // If > this size, roll the log.
315   private final long logrollsize;
316 
317   /**
318    * The total size of wal
319    */
320   private AtomicLong totalLogSize = new AtomicLong(0);
321 
322   /*
323    * If more than this many logs, force a flush of the oldest region so its oldest edit
324    * goes to disk.  If too many and we crash, then replay will take forever.
325    * Keep the number of logs tidy.
326    */
327   private final int maxLogs;
328 
329   /** Number of log close errors tolerated before we abort */
330   private final int closeErrorsTolerated;
331 
332   private final AtomicInteger closeErrorCount = new AtomicInteger();
333 
334   // Region sequence id accounting across flushes and for knowing when we can GC a WAL.  These
335   // sequence id numbers are by region and unrelated to the ring buffer sequence number accounting
336   // done above in failedSequence, highest sequence, etc.
337   /**
338    * This lock ties all operations on the lowestFlushingRegionSequenceIds and
339    * oldestUnflushedRegionSequenceIds Maps with the exception of append's putIfAbsent call into
340    * oldestUnflushedRegionSequenceIds. We use these Maps to find the lower-bound region sequence
341    * id, or to find regions with old sequence ids to force flush; we are interested in old stuff,
342    * not the new additions (TODO: IS THIS SAFE?  CHECK!).
343    */
344   private final Object regionSequenceIdLock = new Object();
345 
346   /**
347    * Map of encoded region names to their OLDEST -- i.e. their first, the longest-lived --
348    * sequence id in memstore. Note that this sequence id is the region sequence id.  This is not
349    * related to the id we use above for {@link #highestSyncedSequence} and
350    * {@link #highestUnsyncedSequence} which is the sequence from the disruptor ring buffer.
351    */
352   private final ConcurrentSkipListMap<byte [], Long> oldestUnflushedRegionSequenceIds =
353     new ConcurrentSkipListMap<byte [], Long>(Bytes.BYTES_COMPARATOR);
354 
355   /**
356    * Map of encoded region names to their lowest or OLDEST sequence/edit id in memstore currently
357    * being flushed out to hfiles. Entries are moved here from
358    * {@link #oldestUnflushedRegionSequenceIds} while the lock {@link #regionSequenceIdLock} is held
359    * (so movement between the Maps is atomic). This is not related to the id we use above for
360    * {@link #highestSyncedSequence} and {@link #highestUnsyncedSequence} which is the sequence from
361    * the disruptor ring buffer, an internal detail.
362    */
363   private final Map<byte[], Long> lowestFlushingRegionSequenceIds =
364     new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
365 
366  /**
367   * Map of region encoded names to the latest region sequence id.  Updated on each append of
368   * WALEdits to the WAL. We create one map for each WAL file at the time it is rolled.
369   * <p>When deciding whether to archive a WAL file, we compare the sequence IDs in this map to
370   * {@link #lowestFlushingRegionSequenceIds} and {@link #oldestUnflushedRegionSequenceIds}.
371   * See {@link FSHLog#areAllRegionsFlushed(Map, Map, Map)} for more info.
372   * <p>
373   * This map uses byte[] as the key, and uses reference equality. It works in our use case as we
374   * use {@link HRegionInfo#getEncodedNameAsBytes()} as keys. For a given region, it always returns
375   * the same array.
376   */
377   private Map<byte[], Long> highestRegionSequenceIds = new HashMap<byte[], Long>();
378 
379   /**
380    * WAL Comparator; it compares the timestamp (log filenum), present in the log file name.
381    * Throws an IllegalArgumentException if used to compare paths from different wals.
382    */
383   public final Comparator<Path> LOG_NAME_COMPARATOR = new Comparator<Path>() {
384     @Override
385     public int compare(Path o1, Path o2) {
386       long t1 = getFileNumFromFileName(o1);
387       long t2 = getFileNumFromFileName(o2);
388       if (t1 == t2) return 0;
389       return (t1 > t2) ? 1 : -1;
390     }
391   };
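      // Because the file number embedded in the name is the creation timestamp, this orders WAL
      // paths oldest-first; e.g. (illustrative names) ".../wal.1400000000100" sorts before
      // ".../wal.1400000000200".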
392 
393   /**
394    * Map of wal log file to the latest sequence ids of all regions it has entries for.
395    * The map is sorted by the log file creation timestamp (contained in the log file name).
396    */
397   private NavigableMap<Path, Map<byte[], Long>> byWalRegionSequenceIds =
398     new ConcurrentSkipListMap<Path, Map<byte[], Long>>(LOG_NAME_COMPARATOR);
399 
400   /**
401    * Exception handler to pass to the disruptor ring buffer.  Same as the native implementation
402    * only it logs using our logger instead of the java native logger.
403    */
404   static class RingBufferExceptionHandler implements ExceptionHandler {
405     @Override
406     public void handleEventException(Throwable ex, long sequence, Object event) {
407       LOG.error("Sequence=" + sequence + ", event=" + event, ex);
408       throw new RuntimeException(ex);
409     }
410 
411     @Override
412     public void handleOnStartException(Throwable ex) {
413       LOG.error(ex);
414       throw new RuntimeException(ex);
415     }
416 
417     @Override
418     public void handleOnShutdownException(Throwable ex) {
419       LOG.error(ex);
420       throw new RuntimeException(ex);
421     }
422   }
423 
424   /**
425    * Constructor.
426    *
427    * @param fs filesystem handle
428    * @param root path for stored and archived wals
429    * @param logDir dir where wals are stored
430    * @param conf configuration to use
431    * @throws IOException
432    */
433   public FSHLog(final FileSystem fs, final Path root, final String logDir, final Configuration conf)
434       throws IOException {
435     this(fs, root, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null);
436   }
437 
438   /**
439    * Create an edit log at the given <code>dir</code> location.
440    *
441    * You should never have to load an existing log. If there is a log at
442    * startup, it should have already been processed and deleted by the time the
443    * WAL object is started up.
444    *
445    * @param fs filesystem handle
446    * @param rootDir path to where logs and oldlogs are kept
447    * @param logDir dir where wals are stored
448    * @param archiveDir dir where wals are archived
449    * @param conf configuration to use
450    * @param listeners Listeners on WAL events. Listeners passed here will
451    * be registered before we do anything else; e.g. before the
452    * constructor calls {@link #rollWriter()}.
453    * @param failIfWALExists If true IOException will be thrown if files related to this wal
454    *        already exist.
455    * @param prefix should always be the hostname and port in a distributed env and
456    *        it will be URL encoded before being used.
457    *        If prefix is null, "wal" will be used
458    * @param suffix will be url encoded. null is treated as empty. non-empty must start with
459    *        {@link DefaultWALProvider#WAL_FILE_NAME_DELIMITER}
460    * @throws IOException
461    */
462   public FSHLog(final FileSystem fs, final Path rootDir, final String logDir,
463       final String archiveDir, final Configuration conf,
464       final List<WALActionsListener> listeners,
465       final boolean failIfWALExists, final String prefix, final String suffix)
466       throws IOException {
467     this.fs = fs;
468     this.fullPathLogDir = new Path(rootDir, logDir);
469     this.fullPathArchiveDir = new Path(rootDir, archiveDir);
470     this.conf = conf;
471 
472     if (!fs.exists(fullPathLogDir) && !fs.mkdirs(fullPathLogDir)) {
473       throw new IOException("Unable to mkdir " + fullPathLogDir);
474     }
475 
476     if (!fs.exists(this.fullPathArchiveDir)) {
477       if (!fs.mkdirs(this.fullPathArchiveDir)) {
478         throw new IOException("Unable to mkdir " + this.fullPathArchiveDir);
479       }
480     }
481 
482     // If prefix is null||empty then just name it wal
483     this.logFilePrefix =
484       prefix == null || prefix.isEmpty() ? "wal" : URLEncoder.encode(prefix, "UTF8");
485     // we only correctly differentiate suffixes when numeric ones start with '.'
486     if (suffix != null && !(suffix.isEmpty()) && !(suffix.startsWith(WAL_FILE_NAME_DELIMITER))) {
487       throw new IllegalArgumentException("wal suffix must start with '" + WAL_FILE_NAME_DELIMITER +
488           "' but instead was '" + suffix + "'");
489     }
490     this.logFileSuffix = (suffix == null) ? "" : URLEncoder.encode(suffix, "UTF8");
491     this.prefixPathStr = new Path(fullPathLogDir,
492         logFilePrefix + WAL_FILE_NAME_DELIMITER).toString();
493 
494     this.ourFiles = new PathFilter() {
495       @Override
496       public boolean accept(final Path fileName) {
497         // The path should start with dir/<prefix> and end with our suffix
498         final String fileNameString = fileName.toString();
499         if (!fileNameString.startsWith(prefixPathStr)) {
500           return false;
501         }
502         if (logFileSuffix.isEmpty()) {
503           // in the case of the null suffix, we need to ensure the filename ends with a timestamp.
504           return org.apache.commons.lang.StringUtils.isNumeric(
505               fileNameString.substring(prefixPathStr.length()));
506         } else if (!fileNameString.endsWith(logFileSuffix)) {
507           return false;
508         }
509         return true;
510       }
511     };
512 
513     if (failIfWALExists) {
514       final FileStatus[] walFiles = FSUtils.listStatus(fs, fullPathLogDir, ourFiles);
515       if (null != walFiles && 0 != walFiles.length) {
516         throw new IOException("Target WAL already exists within directory " + fullPathLogDir);
517       }
518     }
519 
520     // Register listeners.  TODO: Should this exist anymore?  We have CPs?
521     if (listeners != null) {
522       for (WALActionsListener i: listeners) {
523         registerWALActionsListener(i);
524       }
525     }
526     this.coprocessorHost = new WALCoprocessorHost(this, conf);
527 
528     // Get size to roll log at. Roll at 95% of HDFS block size so we avoid crossing HDFS blocks
529     // (it costs a little x'ing blocks)
530     final long blocksize = this.conf.getLong("hbase.regionserver.hlog.blocksize",
531         FSUtils.getDefaultBlockSize(this.fs, this.fullPathLogDir));
532     this.logrollsize =
533       (long)(blocksize * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.95f));
534 
535     this.maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
536     this.minTolerableReplication = conf.getInt( "hbase.regionserver.hlog.tolerable.lowreplication",
537         FSUtils.getDefaultReplication(fs, this.fullPathLogDir));
538     this.lowReplicationRollLimit =
539       conf.getInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
540     this.closeErrorsTolerated = conf.getInt("hbase.regionserver.logroll.errors.tolerated", 0);
541     int maxHandlersCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, 200);
542 
543     LOG.info("WAL configuration: blocksize=" + StringUtils.byteDesc(blocksize) +
544       ", rollsize=" + StringUtils.byteDesc(this.logrollsize) +
545       ", prefix=" + this.logFilePrefix + ", suffix=" + logFileSuffix + ", logDir=" +
546       this.fullPathLogDir + ", archiveDir=" + this.fullPathArchiveDir);
547 
548     // rollWriter sets this.hdfs_out if it can.
549     rollWriter();
550 
551     this.slowSyncNs =
552         1000000 * conf.getInt("hbase.regionserver.hlog.slowsync.ms",
553           DEFAULT_SLOW_SYNC_TIME_MS);
554     // handle the reflection necessary to call getNumCurrentReplicas(). TODO: Replace with
555     // HdfsDataOutputStream#getCurrentBlockReplication() and go without reflection.
556     this.getNumCurrentReplicas = getGetNumCurrentReplicas(this.hdfs_out);
557     this.getPipeLine = getGetPipeline(this.hdfs_out);
558 
559     // This is the 'writer' -- a single threaded executor.  This single thread 'consumes' what is
560     // put on the ring buffer.
561     String hostingThreadName = Thread.currentThread().getName();
562     this.appendExecutor = Executors.
563       newSingleThreadExecutor(Threads.getNamedThreadFactory(hostingThreadName + ".append"));
564     // Preallocate objects to use on the ring buffer.  The way that appends and syncs work, we will
565     // be stuck and make no progress if the buffer is filled with appends only and there is no
566     // sync. If no sync, then the handlers will be outstanding just waiting on sync completion
567     // before they return.
568     final int preallocatedEventCount =
569       this.conf.getInt("hbase.regionserver.wal.disruptor.event.count", 1024 * 16);
570     // Using BlockingWaitStrategy.  Stuff that is going on here takes so long it makes no sense
571     // spinning as other strategies do.
572     this.disruptor =
573       new Disruptor<RingBufferTruck>(RingBufferTruck.EVENT_FACTORY, preallocatedEventCount,
574         this.appendExecutor, ProducerType.MULTI, new BlockingWaitStrategy());
575     // Advance the ring buffer sequence so that it starts from 1 instead of 0,
576     // because SyncFuture.NOT_DONE = 0.
577     this.disruptor.getRingBuffer().next();
578     this.ringBufferEventHandler =
579       new RingBufferEventHandler(conf.getInt("hbase.regionserver.hlog.syncer.count", 5),
580         maxHandlersCount);
581     this.disruptor.handleExceptionsWith(new RingBufferExceptionHandler());
582     this.disruptor.handleEventsWith(new RingBufferEventHandler [] {this.ringBufferEventHandler});
583     // Presize our map of SyncFutures by handler objects.
584     this.syncFuturesByHandler = new ConcurrentHashMap<Thread, SyncFuture>(maxHandlersCount);
585     // Starting up threads in constructor is a no no; Interface should have an init call.
586     this.disruptor.start();
587   }
588 
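      // A minimal construction sketch (illustrative values only; "WALs"/"oldWALs" and the
      // server-name prefix are examples, nothing in this constructor mandates them):
      //   FSHLog wal = new FSHLog(fs, rootDir, "WALs", "oldWALs", conf, null, true,
      //       "host,16020,1400000000000", null);
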
589   /**
590    * Get the backing files associated with this WAL.
591    * @return may be null if there are no files.
592    */
593   protected FileStatus[] getFiles() throws IOException {
594     return FSUtils.listStatus(fs, fullPathLogDir, ourFiles);
595   }
596 
597   /**
598    * Currently, we need to expose the writer's OutputStream to tests so that they can manipulate
599    * the default behavior (such as setting the maxRecoveryErrorCount value; see
600    * {@link TestWALReplay#testReplayEditsWrittenIntoWAL()}). This is done using reflection on the
601    * underlying HDFS OutputStream.
602    * NOTE: This could be removed once Hadoop1 support is removed.
603    * @return null if underlying stream is not ready.
604    */
605   @VisibleForTesting
606   OutputStream getOutputStream() {
607     return this.hdfs_out.getWrappedStream();
608   }
609 
610   @Override
611   public byte [][] rollWriter() throws FailedLogCloseException, IOException {
612     return rollWriter(false);
613   }
614 
615   /**
616    * retrieve the next path to use for writing.
617    * Increments the internal filenum.
618    */
619   private Path getNewPath() throws IOException {
620     this.filenum.set(System.currentTimeMillis());
621     Path newPath = getCurrentFileName();
622     while (fs.exists(newPath)) {
623       this.filenum.incrementAndGet();
624       newPath = getCurrentFileName();
625     }
626     return newPath;
627   }
628 
629   Path getOldPath() {
630     long currentFilenum = this.filenum.get();
631     Path oldPath = null;
632     if (currentFilenum > 0) {
633       // computeFilename will take care of meta wal filename
634       oldPath = computeFilename(currentFilenum);
635     } // I presume if currentFilenum is <= 0, this is the first file and null for oldPath is fine?
636     return oldPath;
637   }
638 
639   /**
640    * Tell listeners about pre log roll.
641    * @throws IOException 
642    */
643   private void tellListenersAboutPreLogRoll(final Path oldPath, final Path newPath)
644   throws IOException {
645     if (!this.listeners.isEmpty()) {
646       for (WALActionsListener i : this.listeners) {
647         i.preLogRoll(oldPath, newPath);
648       }
649     }
650   }
651 
652   /**
653    * Tell listeners about post log roll.
654    * @throws IOException 
655    */
656   private void tellListenersAboutPostLogRoll(final Path oldPath, final Path newPath)
657   throws IOException {
658     if (!this.listeners.isEmpty()) {
659       for (WALActionsListener i : this.listeners) {
660         i.postLogRoll(oldPath, newPath);
661       }
662     }
663   }
664 
665   /**
666    * Run a sync after opening to set up the pipeline.
667    * @param nextWriter
669    */
670   private void preemptiveSync(final ProtobufLogWriter nextWriter) {
671     long startTimeNanos = System.nanoTime();
672     try {
673       nextWriter.sync();
674       postSync(System.nanoTime() - startTimeNanos, 0);
675     } catch (IOException e) {
676       // optimization failed, no need to abort here.
677       LOG.warn("pre-sync failed but an optimization so keep going", e);
678     }
679   }
680 
681   @Override
682   public byte [][] rollWriter(boolean force) throws FailedLogCloseException, IOException {
683     rollWriterLock.lock();
684     try {
685       // Return if nothing to flush.
686       if (!force && (this.writer != null && this.numEntries.get() <= 0)) return null;
687       byte [][] regionsToFlush = null;
688       if (this.closed) {
689         LOG.debug("WAL closed. Skipping rolling of writer");
690         return regionsToFlush;
691       }
692       if (!closeBarrier.beginOp()) {
693         LOG.debug("WAL closing. Skipping rolling of writer");
694         return regionsToFlush;
695       }
696       TraceScope scope = Trace.startSpan("FSHLog.rollWriter");
697       try {
698         Path oldPath = getOldPath();
699         Path newPath = getNewPath();
700         // Any exception from here on is catastrophic, non-recoverable so we currently abort.
701         Writer nextWriter = this.createWriterInstance(newPath);
702         FSDataOutputStream nextHdfsOut = null;
703         if (nextWriter instanceof ProtobufLogWriter) {
704           nextHdfsOut = ((ProtobufLogWriter)nextWriter).getStream();
705           // If a ProtobufLogWriter, go ahead and try and sync to force setup of pipeline.
706           // If this fails, we just keep going.... it is an optimization, not the end of the world.
707           preemptiveSync((ProtobufLogWriter)nextWriter);
708         }
709         tellListenersAboutPreLogRoll(oldPath, newPath);
710         // NewPath could be equal to oldPath if replaceWriter fails.
711         newPath = replaceWriter(oldPath, newPath, nextWriter, nextHdfsOut);
712         tellListenersAboutPostLogRoll(oldPath, newPath);
713         // Can we delete any of the old log files?
714         if (getNumRolledLogFiles() > 0) {
715           cleanOldLogs();
716           regionsToFlush = findRegionsToForceFlush();
717         }
718       } finally {
719         closeBarrier.endOp();
720         assert scope == NullScope.INSTANCE || !scope.isDetached();
721         scope.close();
722       }
723       return regionsToFlush;
724     } finally {
725       rollWriterLock.unlock();
726     }
727   }
728 
729   /**
730    * This method allows subclasses to inject different writers without having to
731    * extend other methods like rollWriter().
732    *
733    * @return Writer instance
734    */
735   protected Writer createWriterInstance(final Path path) throws IOException {
736     return DefaultWALProvider.createWriter(conf, fs, path, false);
737   }
738 
739   /**
740    * Archive old logs that could be archived: a log is eligible for archiving if all its WALEdits
741    * have been flushed to hfiles.
742    * <p>
743    * For each log file, it compares its region-to-sequenceId map
744    * ({@link FSHLog#highestRegionSequenceIds}) with corresponding region entries in
745    * {@link FSHLog#lowestFlushingRegionSequenceIds} and
746    * {@link FSHLog#oldestUnflushedRegionSequenceIds}. If all the regions in the map are flushed
747    * past their value, then the wal is eligible for archiving.
748    * @throws IOException
749    */
750   private void cleanOldLogs() throws IOException {
751     Map<byte[], Long> oldestFlushingSeqNumsLocal = null;
752     Map<byte[], Long> oldestUnflushedSeqNumsLocal = null;
753     List<Path> logsToArchive = new ArrayList<Path>();
754     // make a local copy so as to avoid locking when we iterate over these maps.
755     synchronized (regionSequenceIdLock) {
756       oldestFlushingSeqNumsLocal = new HashMap<byte[], Long>(this.lowestFlushingRegionSequenceIds);
757       oldestUnflushedSeqNumsLocal =
758         new HashMap<byte[], Long>(this.oldestUnflushedRegionSequenceIds);
759     }
760     for (Map.Entry<Path, Map<byte[], Long>> e : byWalRegionSequenceIds.entrySet()) {
761       // iterate over the log file.
762       Path log = e.getKey();
763       Map<byte[], Long> sequenceNums = e.getValue();
764       // iterate over the map for this log file, and tell whether it should be archived or not.
765       if (areAllRegionsFlushed(sequenceNums, oldestFlushingSeqNumsLocal,
766           oldestUnflushedSeqNumsLocal)) {
767         logsToArchive.add(log);
768         LOG.debug("WAL file ready for archiving " + log);
769       }
770     }
771     for (Path p : logsToArchive) {
772       this.totalLogSize.addAndGet(-this.fs.getFileStatus(p).getLen());
773       archiveLogFile(p);
774       this.byWalRegionSequenceIds.remove(p);
775     }
776   }
777 
778   /**
779    * Takes a region:sequenceId map for a WAL file, and checks whether the file can be archived.
780    * It compares the region entries present in the passed sequenceNums map with the local copy of
781    * {@link #oldestUnflushedRegionSequenceIds} and {@link #lowestFlushingRegionSequenceIds}. If,
782    * for all regions, the value is less than the minimum of values present in the
783    * oldestFlushing/UnflushedSeqNums, then the wal file is eligible for archiving.
784    * @param sequenceNums for a WAL, at the time when it was rolled.
785    * @param oldestFlushingMap
786    * @param oldestUnflushedMap
787    * @return true if wal is eligible for archiving, false otherwise.
788    */
789    static boolean areAllRegionsFlushed(Map<byte[], Long> sequenceNums,
790       Map<byte[], Long> oldestFlushingMap, Map<byte[], Long> oldestUnflushedMap) {
791     for (Map.Entry<byte[], Long> regionSeqIdEntry : sequenceNums.entrySet()) {
792       // find region entries in the flushing/unflushed map. If there is no entry, it means
793       // a region doesn't have any unflushed entry.
794       long oldestFlushing = oldestFlushingMap.containsKey(regionSeqIdEntry.getKey()) ?
795           oldestFlushingMap.get(regionSeqIdEntry.getKey()) : Long.MAX_VALUE;
796       long oldestUnFlushed = oldestUnflushedMap.containsKey(regionSeqIdEntry.getKey()) ?
797           oldestUnflushedMap.get(regionSeqIdEntry.getKey()) : Long.MAX_VALUE;
798           // do a minimum to be sure to contain oldest sequence Id
799       long minSeqNum = Math.min(oldestFlushing, oldestUnFlushed);
800       if (minSeqNum <= regionSeqIdEntry.getValue()) return false;// can't archive
801     }
802     return true;
803   }
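      // Worked example (illustrative numbers): if a rolled WAL's map holds {regionA=5}, regionA is
      // not mid-flush (oldestFlushing defaults to Long.MAX_VALUE) and its oldest unflushed edit is
      // now sequence id 7, then min(MAX_VALUE, 7) = 7 > 5 and the file can be archived; were the
      // oldest unflushed id still 5, the file would have to be kept.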
804 
805   /**
806    * Iterates over the given map of regions, and compares their sequence numbers with corresponding
807    * entries in {@link #oldestUnflushedRegionSequenceIds}. If the sequence number is greater or
808    * equal, the region is eligible to flush, otherwise, there is no benefit to flush (from the
809    * perspective of passed regionsSequenceNums map), because the region has already flushed the
810    * entries present in the WAL file for which this method is called (typically, the oldest
811    * wal file).
812    * @param regionsSequenceNums
813    * @return regions which should be flushed (whose sequence numbers are larger than their
814    * corresponding un-flushed entries).
815    */
816   private byte[][] findEligibleMemstoresToFlush(Map<byte[], Long> regionsSequenceNums) {
817     List<byte[]> regionsToFlush = null;
818     // Keeping the old behavior of iterating unflushedSeqNums under oldestSeqNumsLock.
819     synchronized (regionSequenceIdLock) {
820       for (Map.Entry<byte[], Long> e : regionsSequenceNums.entrySet()) {
821         Long unFlushedVal = this.oldestUnflushedRegionSequenceIds.get(e.getKey());
822         if (unFlushedVal != null && unFlushedVal <= e.getValue()) {
823           if (regionsToFlush == null) regionsToFlush = new ArrayList<byte[]>();
824           regionsToFlush.add(e.getKey());
825         }
826       }
827     }
828     return regionsToFlush == null ? null : regionsToFlush
829         .toArray(new byte[][] { HConstants.EMPTY_BYTE_ARRAY });
830   }
831 
832   /**
833    * If the number of un-archived WAL files is greater than maximum allowed, it checks
834    * the first (oldest) WAL file, and returns the regions which should be flushed so that it could
835    * be archived.
836    * @return regions to flush in order to archive oldest wal file.
837    * @throws IOException
838    */
839   byte[][] findRegionsToForceFlush() throws IOException {
840     byte [][] regions = null;
841     int logCount = getNumRolledLogFiles();
842     if (logCount > this.maxLogs && logCount > 0) {
843       Map.Entry<Path, Map<byte[], Long>> firstWALEntry =
844         this.byWalRegionSequenceIds.firstEntry();
845       regions = findEligibleMemstoresToFlush(firstWALEntry.getValue());
846     }
847     if (regions != null) {
848       StringBuilder sb = new StringBuilder();
849       for (int i = 0; i < regions.length; i++) {
850         if (i > 0) sb.append(", ");
851         sb.append(Bytes.toStringBinary(regions[i]));
852       }
853       LOG.info("Too many wals: logs=" + logCount + ", maxlogs=" +
854          this.maxLogs + "; forcing flush of " + regions.length + " region(s): " +
855          sb.toString());
856     }
857     return regions;
858   }
859 
860   /**
861    * Cleans up current writer closing it and then puts in place the passed in
862    * <code>nextWriter</code>.
863    *
864    * In the case of creating a new WAL, oldPath will be null.
865    *
866    * In the case of rolling over from one file to the next, none of the params will be null.
867    *
868    * In the case of closing out this FSHLog with no further use, newPath, nextWriter, and
869    * nextHdfsOut will be null.
870    *
871    * @param oldPath may be null
872    * @param newPath may be null
873    * @param nextWriter may be null
874    * @param nextHdfsOut may be null
875    * @return the passed in <code>newPath</code>
876    * @throws IOException if there is a problem flushing or closing the underlying FS
877    */
878   Path replaceWriter(final Path oldPath, final Path newPath, Writer nextWriter,
879       final FSDataOutputStream nextHdfsOut)
880   throws IOException {
881     // Ask the ring buffer writer to pause at a safe point.  Once we do this, the writer
882     // thread will eventually pause. An error hereafter needs to release the writer thread
883     // regardless -- hence the finally block below.  Note, this method is called from the FSHLog
884     // constructor BEFORE the ring buffer is set running so it is null on first time through
885     // here; allow for that.
886     SyncFuture syncFuture = null;
887     SafePointZigZagLatch zigzagLatch = (this.ringBufferEventHandler == null)?
888       null: this.ringBufferEventHandler.attainSafePoint();
889     TraceScope scope = Trace.startSpan("FSHLog.replaceWriter");
890     try {
891       // Wait on the safe point to be achieved.  Send in a sync in case nothing has hit the
892       // ring buffer between the above notification of writer that we want it to go to
893       // 'safe point' and then here where we are waiting on it to attain safe point.  Use
894       // 'sendSync' instead of 'sync' because we do not want this thread to block waiting on it
895       // to come back.  Cleanup this syncFuture down below after we are ready to run again.
896       try {
897         if (zigzagLatch != null) {
898           Trace.addTimelineAnnotation("awaiting safepoint");
899           syncFuture = zigzagLatch.waitSafePoint(publishSyncOnRingBuffer());
900         }
901       } catch (FailedSyncBeforeLogCloseException e) {
902         if (isUnflushedEntries()) throw e;
903         // Else, let it pass through to the close.
904         LOG.warn("Failed last sync but no outstanding unsync edits so falling through to close; " +
905           e.getMessage());
906       }
907 
908       // It is at the safe point.  Swap out writer from under the blocked writer thread.
909       // TODO: This close is inline with the critical section.  Should it happen in the background?
910       try {
911         if (this.writer != null) {
912           Trace.addTimelineAnnotation("closing writer");
913           this.writer.close();
914           Trace.addTimelineAnnotation("writer closed");
915         }
916         this.closeErrorCount.set(0);
917       } catch (IOException ioe) {
918         int errors = closeErrorCount.incrementAndGet();
919         if (!isUnflushedEntries() && (errors <= this.closeErrorsTolerated)) {
920           LOG.warn("Riding over failed WAL close of " + oldPath + ", cause=\"" +
921             ioe.getMessage() + "\", errors=" + errors +
922             "; THIS FILE WAS NOT CLOSED BUT ALL EDITS SYNCED SO SHOULD BE OK");
923         } else {
924           throw ioe;
925         }
926       }
927       this.writer = nextWriter;
928       this.hdfs_out = nextHdfsOut;
929       int oldNumEntries = this.numEntries.get();
930       this.numEntries.set(0);
931       final String newPathString = (null == newPath ? null : FSUtils.getPath(newPath));
932       if (oldPath != null) {
933         this.byWalRegionSequenceIds.put(oldPath, this.highestRegionSequenceIds);
934         this.highestRegionSequenceIds = new HashMap<byte[], Long>();
935         long oldFileLen = this.fs.getFileStatus(oldPath).getLen();
936         this.totalLogSize.addAndGet(oldFileLen);
937         LOG.info("Rolled WAL " + FSUtils.getPath(oldPath) + " with entries=" + oldNumEntries +
938           ", filesize=" + StringUtils.byteDesc(oldFileLen) + "; new WAL " +
939           newPathString);
940       } else {
941         LOG.info("New WAL " + newPathString);
942       }
943     } catch (InterruptedException ie) {
944       // Perpetuate the interrupt
945       Thread.currentThread().interrupt();
946     } catch (IOException e) {
947       long count = getUnflushedEntriesCount();
948       LOG.error("Failed close of WAL writer " + oldPath + ", unflushedEntries=" + count, e);
949       throw new FailedLogCloseException(oldPath + ", unflushedEntries=" + count, e);
950     } finally {
951       try {
952         // Let the writer thread go regardless, whether error or not.
953         if (zigzagLatch != null) {
954           zigzagLatch.releaseSafePoint();
955           // It will be null if we failed our wait on safe point above.
956           if (syncFuture != null) blockOnSync(syncFuture);
957         }
958       } finally {
959         scope.close();
960       }
961     }
962     return newPath;
963   }
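      // Roll sequence in brief: ask the ring buffer consumer for a safe point, wait for it while
      // publishing one last sync, close the old writer, swap in nextWriter/nextHdfsOut, then
      // release the safe point so the consumer can resume.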
964 
965   long getUnflushedEntriesCount() {
966     long highestSynced = this.highestSyncedSequence.get();
967     return highestSynced > this.highestUnsyncedSequence?
968       0: this.highestUnsyncedSequence - highestSynced;
969   }
970 
971   boolean isUnflushedEntries() {
972     return getUnflushedEntriesCount() > 0;
973   }
974 
975   /*
976    * only public so WALSplitter can use.
977    * @return archived location of a WAL file with the given path p
978    */
979   public static Path getWALArchivePath(Path archiveDir, Path p) {
980     return new Path(archiveDir, p.getName());
981   }
982 
983   private void archiveLogFile(final Path p) throws IOException {
984     Path newPath = getWALArchivePath(this.fullPathArchiveDir, p);
985     // Tell our listeners that a log is going to be archived.
986     if (!this.listeners.isEmpty()) {
987       for (WALActionsListener i : this.listeners) {
988         i.preLogArchive(p, newPath);
989       }
990     }
991     LOG.info("Archiving " + p + " to " + newPath);
992     if (!FSUtils.renameAndSetModifyTime(this.fs, p, newPath)) {
993       throw new IOException("Unable to rename " + p + " to " + newPath);
994     }
995     // Tell our listeners that a log has been archived.
996     if (!this.listeners.isEmpty()) {
997       for (WALActionsListener i : this.listeners) {
998         i.postLogArchive(p, newPath);
999       }
1000     }
1001   }
1002 
1003   /**
1004    * This is a convenience method that computes a new filename with a given
1005    * file-number.
1006    * @param filenum to use
1007    * @return Path
1008    */
1009   protected Path computeFilename(final long filenum) {
1010     if (filenum < 0) {
1011       throw new RuntimeException("wal file number can't be < 0");
1012     }
1013     String child = logFilePrefix + WAL_FILE_NAME_DELIMITER + filenum + logFileSuffix;
1014     return new Path(fullPathLogDir, child);
1015   }
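       // Example (assuming the default '.' WAL_FILE_NAME_DELIMITER and an empty suffix):
       //   computeFilename(1400000000123L) -> <fullPathLogDir>/<logFilePrefix>.1400000000123
       // getFileNumFromFileName() below parses the trailing timestamp back out of such a name.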
1016 
1017   /**
1018    * This is a convenience method that computes a new filename
1019    * using the current WAL file-number
1020    * @return Path
1021    */
1022   public Path getCurrentFileName() {
1023     return computeFilename(this.filenum.get());
1024   }
1025 
1026   @Override
1027   public String toString() {
1028     return "FSHLog " + logFilePrefix + ":" + logFileSuffix + "(num " + filenum + ")";
1029   }
1030 
1031   /**
1032    * A log file has a creation timestamp (in ms) in its file name ({@link #filenum}).
1033    * This helper method returns the creation timestamp from a given log file.
1034    * It extracts the timestamp assuming the filename is created with the
1035    * {@link #computeFilename(long filenum)} method.
1036    * @param fileName
1037    * @return timestamp, as in the log file name.
1038    */
1039   protected long getFileNumFromFileName(Path fileName) {
1040     if (fileName == null) throw new IllegalArgumentException("file name can't be null");
1041     if (!ourFiles.accept(fileName)) {
1042       throw new IllegalArgumentException("The log file " + fileName +
1043           " doesn't belong to this wal. (" + toString() + ")");
1044     }
1045     final String fileNameString = fileName.toString();
1046     String chompedPath = fileNameString.substring(prefixPathStr.length(),
1047         (fileNameString.length() - logFileSuffix.length()));
1048     return Long.parseLong(chompedPath);
1049   }
1050 
1051   @Override
1052   public void close() throws IOException {
1053     shutdown();
1054     final FileStatus[] files = getFiles();
1055     if (null != files && 0 != files.length) {
1056       for (FileStatus file : files) {
1057         Path p = getWALArchivePath(this.fullPathArchiveDir, file.getPath());
1058         // Tell our listeners that a log is going to be archived.
1059         if (!this.listeners.isEmpty()) {
1060           for (WALActionsListener i : this.listeners) {
1061             i.preLogArchive(file.getPath(), p);
1062           }
1063         }
1064 
1065         if (!FSUtils.renameAndSetModifyTime(fs, file.getPath(), p)) {
1066           throw new IOException("Unable to rename " + file.getPath() + " to " + p);
1067         }
1068         // Tell our listeners that a log was archived.
1069         if (!this.listeners.isEmpty()) {
1070           for (WALActionsListener i : this.listeners) {
1071             i.postLogArchive(file.getPath(), p);
1072           }
1073         }
1074       }
1075       LOG.debug("Moved " + files.length + " WAL file(s) to " +
1076         FSUtils.getPath(this.fullPathArchiveDir));
1077     }
1078     LOG.info("Closed WAL: " + toString() );
1079   }
1080 
1081   @Override
1082   public void shutdown() throws IOException {
1083     if (shutdown.compareAndSet(false, true)) {
1084       try {
1085         // Prevent all further flushing and rolling.
1086         closeBarrier.stopAndDrainOps();
1087       } catch (InterruptedException e) {
1088         LOG.error("Exception while waiting for cache flushes and log rolls", e);
1089         Thread.currentThread().interrupt();
1090       }
1091 
1092       // Shutdown the disruptor.  Will stop after all entries have been processed.  Make sure we
1093       // have stopped incoming appends before calling this else it will not shutdown.  We are
1094       // conservative below, waiting a long time and, if shutdown has not completed by then, halting.
1095       if (this.disruptor != null) {
1096         long timeoutms = conf.getLong("hbase.wal.disruptor.shutdown.timeout.ms", 60000);
1097         try {
1098           this.disruptor.shutdown(timeoutms, TimeUnit.MILLISECONDS);
1099         } catch (TimeoutException e) {
1100           LOG.warn("Timed out bringing down disruptor after " + timeoutms + "ms; forcing halt " +
1101             "(It is a problem if this is NOT an ABORT! -- DATALOSS!!!!)");
1102           this.disruptor.halt();
1103           this.disruptor.shutdown();
1104         }
1105       }
1106       // With disruptor down, this is safe to let go.
1107       if (this.appendExecutor !=  null) this.appendExecutor.shutdown();
1108 
1109       // Tell our listeners that the log is closing
1110       if (!this.listeners.isEmpty()) {
1111         for (WALActionsListener i : this.listeners) {
1112           i.logCloseRequested();
1113         }
1114       }
1115       this.closed = true;
1116       if (LOG.isDebugEnabled()) {
1117         LOG.debug("Closing WAL writer in " + FSUtils.getPath(fullPathLogDir));
1118       }
1119       if (this.writer != null) {
1120         this.writer.close();
1121         this.writer = null;
1122       }
1123     }
1124   }
1125 
1126   /**
1127    * @param now
1128    * @param encodedRegionName Encoded name of the region as returned by
1129    * <code>HRegionInfo#getEncodedNameAsBytes()</code>.
1130    * @param tableName
1131    * @param clusterIds that have consumed the change
1132    * @return New log key.
1133    */
1134   protected WALKey makeKey(byte[] encodedRegionName, TableName tableName, long seqnum,
1135       long now, List<UUID> clusterIds, long nonceGroup, long nonce) {
1136     // we use HLogKey here instead of WALKey directly to support legacy coprocessors.
1137     return new HLogKey(encodedRegionName, tableName, seqnum, now, clusterIds, nonceGroup, nonce);
1138   }
1139   
1140   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH_EXCEPTION",
1141       justification="Will never be null")
1142   @Override
1143   public long append(final HTableDescriptor htd, final HRegionInfo hri, final WALKey key,
1144       final WALEdit edits, final AtomicLong sequenceId, final boolean inMemstore, 
1145       final List<Cell> memstoreCells) throws IOException {
1146     if (this.closed) throw new IOException("Cannot append; log is closed");
1147     // Make a trace scope for the append.  It is closed on other side of the ring buffer by the
1148     // single consuming thread.  Don't have to worry about it.
1149     TraceScope scope = Trace.startSpan("FSHLog.append");
1150 
1151     // This is crazy how much it takes to make an edit.  Do we need all this stuff!!!!????  We need
1152     // all this to make a key and then below to append the edit, we need to carry htd, info,
1153     // etc. all over the ring buffer.
1154     FSWALEntry entry = null;
1155     long sequence = this.disruptor.getRingBuffer().next();
1156     try {
1157       RingBufferTruck truck = this.disruptor.getRingBuffer().get(sequence);
1158       // Construction of FSWALEntry sets a latch.  The latch is thrown just after we stamp the
1159       // edit with its edit/sequence id.  The below entry.getRegionSequenceId will wait on the
1160       // latch to be thrown.  TODO: reuse FSWALEntry as we do SyncFuture rather create per append.
1161       entry = new FSWALEntry(sequence, key, edits, sequenceId, inMemstore, htd, hri, memstoreCells);
1162       truck.loadPayload(entry, scope.detach());
1163     } finally {
1164       this.disruptor.getRingBuffer().publish(sequence);
1165     }
1166     return sequence;
1167   }
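       // Note: the returned ring buffer sequence doubles as the transaction id; callers that need
       // durability typically follow this append with a sync on that txid (WAL#sync(long)), which
       // blocks until a SyncRunner completes the matching SyncFuture.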
1168 
1169   /**
1170    * Thread that runs the hdfs sync call. This call takes a while to complete.  This is the longest
1171    * pole adding edits to the WAL and this must complete to be sure all edits persisted.  We run
1172    * multiple threads sync'ng rather than one that just syncs in series so we have better
1173    * latencies; otherwise, an edit that arrived just after a sync started, might have to wait
1174    * almost the length of two sync invocations before it is marked done.
1175    * <p>When the sync completes, it marks all the passed in futures done.  On the other end of the
1176    * sync future is a blocked thread, usually a regionserver Handler.  There may be more than one
1177    * future passed in the case where a few threads arrive at about the same time and all invoke
1178    * 'sync'.  In this case we'll batch up the invocations and run one filesystem sync only for a
1179    * batch of Handler sync invocations.  Do not confuse these Handler SyncFutures with the futures
1180    * an ExecutorService returns when you call submit. We have no use for these in this model. These
1181    * SyncFutures are 'artificial', something to hold the Handler until the filesystem sync
1182    * completes.
1183    */
1184   private class SyncRunner extends HasThread {
1185     private volatile long sequence;
1186     private final BlockingQueue<SyncFuture> syncFutures;
1187  
1188     /**
1189      * UPDATE! 
1190      * @param syncs the batch of calls to sync that arrived as this thread was starting; when done,
1191      * we will put the result of the actual hdfs sync call as the result.
1192      * @param sequence The sequence number on the ring buffer when this thread was set running.
1193      * If this actual writer sync completes then all appends up to this point have been
1194      * flushed/synced/pushed to datanodes.  If we fail, then the passed in <code>syncs</code>
1195      * futures will return the exception to their clients; some of the edits may have made it out
1196      * to data nodes but we will report all that were part of this session as failed.
1197      */
1198     SyncRunner(final String name, final int maxHandlersCount) {
1199       super(name);
1200       // LinkedBlockingQueue because of
1201       // http://www.javacodegeeks.com/2010/09/java-best-practices-queue-battle-and.html
1202       // Could use other blockingqueues here or concurrent queues.
1203       //
1204       // We could let the capacity be 'open' but bound it so we get alerted in pathological case
1205       // where we cannot sync and we have a bunch of threads all backed up waiting on their syncs
1206       // to come in.  LinkedBlockingQueue actually shrinks when you remove elements so Q should
1207       // stay neat and tidy in usual case.  Let the max size be three times the maximum handlers.
1208       // The passed in maxHandlersCount is the count of user-level handlers, which is most of
1209       // what we put up, but HBase has other handlers running too -- opening region handlers which
1210       // want to write the meta table when successful (i.e. sync), closing handlers -- etc.  These
1211       // are usually much fewer in number than the user-space handlers so Q-size should be user
1212       // handlers plus some space for these other handlers.  Let's multiply by 3 for good measure.
1213       this.syncFutures = new LinkedBlockingQueue<SyncFuture>(maxHandlersCount * 3);
1214     }
1215 
1216     void offer(final long sequence, final SyncFuture [] syncFutures, final int syncFutureCount) {
1217       // Set sequence first because the add to the queue will wake the thread if sleeping.
1218       this.sequence = sequence;
1219       this.syncFutures.addAll(Arrays.asList(syncFutures).subList(0, syncFutureCount));
1220     }
1221 
1222     /**
1223      * Release the passed <code>syncFuture</code>.
1224      * @param syncFuture The future to mark done.
1225      * @param currentSequence The sequence to mark the future done with.
1226      * @param t Error to set on the future, if any; may be null.
1227      * @return Always 1, the count of SyncFutures released by this call.
1228      */
1229     private int releaseSyncFuture(final SyncFuture syncFuture, final long currentSequence,
1230         final Throwable t) {
1231       if (!syncFuture.done(currentSequence, t)) throw new IllegalStateException();
1232       // This function releases one sync future only.
1233       return 1;
1234     }
1235  
1236     /**
1237      * Release all SyncFutures whose sequence is <= <code>currentSequence</code>.
1238      * @param currentSequence Futures with a ring buffer sequence at or below this are released.
1239      * @param t May be non-null if we are processing SyncFutures because an exception was thrown.
1240      * @return Count of SyncFutures we let go.
1241      */
1242     private int releaseSyncFutures(final long currentSequence, final Throwable t) {
1243       int syncCount = 0;
1244       for (SyncFuture syncFuture; (syncFuture = this.syncFutures.peek()) != null;) {
1245         if (syncFuture.getRingBufferSequence() > currentSequence) break;
1246         releaseSyncFuture(syncFuture, currentSequence, t);
1247         if (!this.syncFutures.remove(syncFuture)) {
1248           throw new IllegalStateException(syncFuture.toString());
1249         }
1250         syncCount++;
1251       }
1252       return syncCount;
1253     }
1254 
1255     /**
1256      * @param sequence The sequence we ran the filesystem sync against.
1257      * @return Current highest synced sequence.
1258      */
1259     private long updateHighestSyncedSequence(long sequence) {
1260       long currentHighestSyncedSequence;
1261       // Set the highestSyncedSequence IFF our current sequence id is the 'highest'.
1262       do {
1263         currentHighestSyncedSequence = highestSyncedSequence.get();
1264         if (currentHighestSyncedSequence >= sequence) {
1265           // Set the sync number to current highwater mark; might be able to let go more
1266           // queued sync futures
1267           sequence = currentHighestSyncedSequence;
1268           break;
1269         }
1270       } while (!highestSyncedSequence.compareAndSet(currentHighestSyncedSequence, sequence));
1271       return sequence;
1272     }
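
    // Worked example: if highestSyncedSequence is already 17 and we just synced through sequence
    // 15, the CAS loop above leaves the mark at 17 and returns 17, so the caller may release any
    // queued SyncFutures at or below 17.  If we synced through 20, the mark advances to 20
    // (retrying the CAS if another SyncRunner raced us) and 20 is returned.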
1273 
1274     public void run() {
1275       long currentSequence;
1276       while (!isInterrupted()) {
1277         int syncCount = 0;
1278         SyncFuture takeSyncFuture;
1279         try {
1280           while (true) {
1281             // We have to process what we 'take' from the queue
1282             takeSyncFuture = this.syncFutures.take();
1283             currentSequence = this.sequence;
1284             long syncFutureSequence = takeSyncFuture.getRingBufferSequence();
1285             if (syncFutureSequence > currentSequence) {
1286               throw new IllegalStateException("currentSequence=" + currentSequence +
1287                 ", syncFutureSequence=" + syncFutureSequence);
1288             }
1289             // See if we can process any syncfutures BEFORE we go sync.
1290             long currentHighestSyncedSequence = highestSyncedSequence.get();
1291             if (currentSequence < currentHighestSyncedSequence) {
1292               syncCount += releaseSyncFuture(takeSyncFuture, currentHighestSyncedSequence, null);
1293               // Done with the 'take'.  Go around again and do a new 'take'.
1294               continue;
1295             }
1296             break;
1297           }
1298           // I got something.  Let's run.  Save off current sequence number in case it changes
1299           // while we run.
1300           TraceScope scope = Trace.continueSpan(takeSyncFuture.getSpan());
1301           long start = System.nanoTime();
1302           Throwable t = null;
1303           try {
1304             Trace.addTimelineAnnotation("syncing writer");
1305             writer.sync();
1306             Trace.addTimelineAnnotation("writer synced");
1307             currentSequence = updateHighestSyncedSequence(currentSequence);
1308           } catch (IOException e) {
1309             LOG.error("Error syncing, requesting close of wal", e);
1310             t = e;
1311           } catch (Exception e) {
1312             LOG.warn("UNEXPECTED", e);
1313             t = e;
1314           } finally {
1315             // reattach the span to the future before releasing.
1316             takeSyncFuture.setSpan(scope.detach());
1317             // First release what we 'took' from the queue.
1318             syncCount += releaseSyncFuture(takeSyncFuture, currentSequence, t);
1319             // Can we release other syncs?
1320             syncCount += releaseSyncFutures(currentSequence, t);
1321             if (t != null) {
1322               requestLogRoll();
1323             } else checkLogRoll();
1324           }
1325           postSync(System.nanoTime() - start, syncCount);
1326         } catch (InterruptedException e) {
1327           // Presume legit interrupt.
1328           Thread.currentThread().interrupt();
1329         } catch (Throwable t) {
1330           LOG.warn("UNEXPECTED, continuing", t);
1331         }
1332       }
1333     }
1334   }
1335 
1336   /**
1337    * Schedule a log roll if needed.
1338    */
1339   void checkLogRoll() {
1340     // Will return immediately if we are in the middle of a WAL log roll currently.
1341     if (!rollWriterLock.tryLock()) return;
1342     boolean lowReplication;
1343     try {
1344       lowReplication = checkLowReplication();
1345     } finally {
1346       rollWriterLock.unlock();
1347     }
1348     try {
1349       if (lowReplication || (writer != null && writer.getLength() > logrollsize)) {
1350         requestLogRoll(lowReplication);
1351       }
1352     } catch (IOException e) {
1353       LOG.warn("Writer.getLength() failed; continuing", e);
1354     }
1355   }
1356 
1357   /*
1358    * @return true if number of replicas for the WAL is lower than threshold
1359    */
1360   private boolean checkLowReplication() {
1361     boolean logRollNeeded = false;
1362     // if the number of replicas in HDFS has fallen below the configured
1363     // value, then roll logs.
1364     try {
1365       int numCurrentReplicas = getLogReplication();
1366       if (numCurrentReplicas != 0 && numCurrentReplicas < this.minTolerableReplication) {
1367         if (this.lowReplicationRollEnabled) {
1368           if (this.consecutiveLogRolls.get() < this.lowReplicationRollLimit) {
1369             LOG.warn("HDFS pipeline error detected. " + "Found "
1370                 + numCurrentReplicas + " replicas but expecting no less than "
1371                 + this.minTolerableReplication + " replicas. "
1372                 + " Requesting close of wal. current pipeline: "
1373                 + Arrays.toString(getPipeLine()));
1374             logRollNeeded = true;
1375             // If rollWriter is requested, increase consecutiveLogRolls. Once it
1376             // is larger than lowReplicationRollLimit, disable the
1377             // LowReplication-Roller
1378             this.consecutiveLogRolls.getAndIncrement();
1379           } else {
1380             LOG.warn("Too many consecutive RollWriter requests, it's a sign of "
1381                 + "the total number of live datanodes is lower than the tolerable replicas.");
1382             this.consecutiveLogRolls.set(0);
1383             this.lowReplicationRollEnabled = false;
1384           }
1385         }
1386       } else if (numCurrentReplicas >= this.minTolerableReplication) {
1387         if (!this.lowReplicationRollEnabled) {
1388           // A new writer's replica count is always the default value,
1389           // so we should not enable the LowReplication-Roller. If numEntries
1390           // is lower than or equal to 1, we consider it a new writer.
1391           if (this.numEntries.get() <= 1) {
1392             return logRollNeeded;
1393           }
1394           // Once the live datanode number and the replicas return to normal,
1395           // enable the LowReplication-Roller.
1396           this.lowReplicationRollEnabled = true;
1397           LOG.info("LowReplication-Roller was enabled.");
1398         }
1399       }
1400     } catch (Exception e) {
1401       LOG.warn("Unable to invoke DFSOutputStream.getNumCurrentReplicas" + e +
1402         " still proceeding ahead...");
1403     }
1404     return logRollNeeded;
1405   }
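
  // Worked example (the numbers are illustrative; both limits come from configuration): with
  // minTolerableReplication=3 and lowReplicationRollLimit=5, a pipeline that drops to 2 replicas
  // requests a roll and bumps consecutiveLogRolls.  After 5 consecutive low-replication rolls the
  // roller disables itself; once the replica count is healthy again and the new writer has taken
  // more than one append, lowReplicationRollEnabled flips back on.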
1406 
1407   private SyncFuture publishSyncOnRingBuffer() {
1408     return publishSyncOnRingBuffer(null);
1409   }
1410 
1411   private SyncFuture publishSyncOnRingBuffer(Span span) {
1412     long sequence = this.disruptor.getRingBuffer().next();
1413     SyncFuture syncFuture = getSyncFuture(sequence, span);
1414     try {
1415       RingBufferTruck truck = this.disruptor.getRingBuffer().get(sequence);
1416       truck.loadPayload(syncFuture);
1417     } finally {
1418       this.disruptor.getRingBuffer().publish(sequence);
1419     }
1420     return syncFuture;
1421   }
1422 
1423   // Sync all known transactions
1424   private Span publishSyncThenBlockOnCompletion(Span span) throws IOException {
1425     return blockOnSync(publishSyncOnRingBuffer(span));
1426   }
1427 
1428   private Span blockOnSync(final SyncFuture syncFuture) throws IOException {
1429     // Now we have published the ringbuffer, halt the current thread until we get an answer back.
1430     try {
1431       syncFuture.get();
1432       return syncFuture.getSpan();
1433     } catch (InterruptedException ie) {
1434       LOG.warn("Interrupted", ie);
1435       throw convertInterruptedExceptionToIOException(ie);
1436     } catch (ExecutionException e) {
1437       throw ensureIOException(e.getCause());
1438     }
1439   }
1440 
1441   private IOException convertInterruptedExceptionToIOException(final InterruptedException ie) {
1442     Thread.currentThread().interrupt();
1443     IOException ioe = new InterruptedIOException();
1444     ioe.initCause(ie);
1445     return ioe;
1446   }
1447 
1448   private SyncFuture getSyncFuture(final long sequence, Span span) {
1449     SyncFuture syncFuture = this.syncFuturesByHandler.get(Thread.currentThread());
1450     if (syncFuture == null) {
1451       syncFuture = new SyncFuture();
1452       this.syncFuturesByHandler.put(Thread.currentThread(), syncFuture);
1453     }
1454     return syncFuture.reset(sequence, span);
1455   }
1456 
1457   private void postSync(final long timeInNanos, final int handlerSyncs) {
1458     if (timeInNanos > this.slowSyncNs) {
1459       String msg =
1460           new StringBuilder().append("Slow sync cost: ")
1461               .append(timeInNanos / 1000000).append(" ms, current pipeline: ")
1462               .append(Arrays.toString(getPipeLine())).toString();
1463       Trace.addTimelineAnnotation(msg);
1464       LOG.info(msg);
1465     }
1466     if (!listeners.isEmpty()) {
1467       for (WALActionsListener listener : listeners) {
1468         listener.postSync(timeInNanos, handlerSyncs);
1469       }
1470     }
1471   }
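
  // Worked example: timeInNanos is compared against slowSyncNs in nanoseconds; a sync that takes
  // 250ms against a threshold equivalent to 100ms (illustrative value) logs
  // "Slow sync cost: 250 ms" (250,000,000 / 1,000,000) plus the current pipeline.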
1472 
1473   private long postAppend(final Entry e, final long elapsedTime) {
1474     long len = 0;
1475     if (!listeners.isEmpty()) {
1476       for (Cell cell : e.getEdit().getCells()) {
1477         len += CellUtil.estimatedSerializedSizeOf(cell);
1478       }
1479       for (WALActionsListener listener : listeners) {
1480         listener.postAppend(len, elapsedTime);
1481       }
1482     }
1483     return len;
1484   }
1485 
1486   /**
1487    * Find the 'getNumCurrentReplicas' on the passed <code>os</code> stream.
1488    * This is used for getting current replicas of a file being written.
1489    * @return Method or null.
1490    */
1491   private Method getGetNumCurrentReplicas(final FSDataOutputStream os) {
1492     // TODO: Remove all this and use the now publicly available
1493     // HdfsDataOutputStream#getCurrentBlockReplication()
1494     Method m = null;
1495     if (os != null) {
1496       Class<? extends OutputStream> wrappedStreamClass = os.getWrappedStream().getClass();
1497       try {
1498         m = wrappedStreamClass.getDeclaredMethod("getNumCurrentReplicas", new Class<?>[] {});
1499         m.setAccessible(true);
1500       } catch (NoSuchMethodException e) {
1501         LOG.info("FileSystem's output stream doesn't support getNumCurrentReplicas; " +
1502          "HDFS-826 not available; fsOut=" + wrappedStreamClass.getName());
1503       } catch (SecurityException e) {
1504         LOG.info("No access to getNumCurrentReplicas on FileSystems's output stream; HDFS-826 " +
1505           "not available; fsOut=" + wrappedStreamClass.getName(), e);
1506         m = null; // could happen on setAccessible()
1507       }
1508     }
1509     if (m != null) {
1510       if (LOG.isTraceEnabled()) LOG.trace("Using getNumCurrentReplicas");
1511     }
1512     return m;
1513   }
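
  // As the TODO above notes, the reflection can go away on Hadoop versions where
  // HdfsDataOutputStream#getCurrentBlockReplication() is publicly available.  A rough sketch of
  // that alternative, assuming such a Hadoop version (not how this class currently does it):
  //
  //   if (os instanceof org.apache.hadoop.hdfs.client.HdfsDataOutputStream) {
  //     return ((org.apache.hadoop.hdfs.client.HdfsDataOutputStream) os).getCurrentBlockReplication();
  //   }
  //   return 0; // unknown; same fallback as the reflective path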
1514 
1515   /**
1516    * This method gets the datanode replication count for the current WAL.
1517    *
1518    * If the pipeline isn't started yet or is empty, you will get the default
1519    * replication factor.  Therefore, if this function returns 0, it means you
1520    * are not properly running with the HDFS-826 patch.
1521    * @throws InvocationTargetException
1522    * @throws IllegalAccessException
1523    * @throws IllegalArgumentException
1526    */
1527   @VisibleForTesting
1528   int getLogReplication()
1529   throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
1530     final OutputStream stream = getOutputStream();
1531     if (this.getNumCurrentReplicas != null && stream != null) {
1532       Object repl = this.getNumCurrentReplicas.invoke(stream, NO_ARGS);
1533       if (repl instanceof Integer) {
1534         return ((Integer)repl).intValue();
1535       }
1536     }
1537     return 0;
1538   }
1539 
1540   @Override
1541   public void sync() throws IOException {
1542     TraceScope scope = Trace.startSpan("FSHLog.sync");
1543     try {
1544       scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach()));
1545     } finally {
1546       assert scope == NullScope.INSTANCE || !scope.isDetached();
1547       scope.close();
1548     }
1549   }
1550 
1551   @Override
1552   public void sync(long txid) throws IOException {
1553     if (this.highestSyncedSequence.get() >= txid){
1554       // Already sync'd.
1555       return;
1556     }
1557     TraceScope scope = Trace.startSpan("FSHLog.sync");
1558     try {
1559       scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach()));
1560     } finally {
1561       assert scope == NullScope.INSTANCE || !scope.isDetached();
1562       scope.close();
1563     }
1564   }
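
  // Worked example: if a handler calls sync(42) when highestSyncedSequence is already 50, the
  // edits it cares about were covered by an earlier filesystem sync and we return immediately;
  // otherwise we publish a sync request on the ring buffer and block until it completes.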
1565 
1566   // public only until class moves to o.a.h.h.wal
1567   public void requestLogRoll() {
1568     requestLogRoll(false);
1569   }
1570 
1571   private void requestLogRoll(boolean tooFewReplicas) {
1572     if (!this.listeners.isEmpty()) {
1573       for (WALActionsListener i: this.listeners) {
1574         i.logRollRequested(tooFewReplicas);
1575       }
1576     }
1577   }
1578 
1579   // public only until class moves to o.a.h.h.wal
1580   /** @return the number of rolled log files */
1581   public int getNumRolledLogFiles() {
1582     return byWalRegionSequenceIds.size();
1583   }
1584 
1585   // public only until class moves to o.a.h.h.wal
1586   /** @return the number of log files in use */
1587   public int getNumLogFiles() {
1588     // +1 for current use log
1589     return getNumRolledLogFiles() + 1;
1590   }
1591   
1592   // public only until class moves to o.a.h.h.wal
1593   /** @return the size of log files in use */
1594   public long getLogFileSize() {
1595     return this.totalLogSize.get();
1596   }
1597   
1598   @Override
1599   public boolean startCacheFlush(final byte[] encodedRegionName) {
1600     Long oldRegionSeqNum = null;
1601     if (!closeBarrier.beginOp()) {
1602       LOG.info("Flush will not be started for " + Bytes.toString(encodedRegionName) +
1603         " - because the server is closing.");
1604       return false;
1605     }
1606     synchronized (regionSequenceIdLock) {
1607       oldRegionSeqNum = this.oldestUnflushedRegionSequenceIds.remove(encodedRegionName);
1608       if (oldRegionSeqNum != null) {
1609         Long oldValue =
1610           this.lowestFlushingRegionSequenceIds.put(encodedRegionName, oldRegionSeqNum);
1611         assert oldValue ==
1612           null : "Flushing map not cleaned up for " + Bytes.toString(encodedRegionName);
1613       }
1614     }
1615     if (oldRegionSeqNum == null) {
1616       // TODO: if we have no oldRegionSeqNum, and WAL is not disabled, presumably either
1617       //       the region is already flushing (which would make this call invalid), or there
1618       //       were no appends after last flush, so why are we starting flush? Maybe we should
1619       //       assert not null, and switch to "long" everywhere. Less rigorous, but safer,
1620       //       alternative is telling the caller to stop. For now preserve old logic.
1621       LOG.warn("Couldn't find oldest seqNum for the region we are about to flush: ["
1622         + Bytes.toString(encodedRegionName) + "]");
1623     }
1624     return true;
1625   }
1626 
1627   @Override
1628   public void completeCacheFlush(final byte [] encodedRegionName) {
1629     synchronized (regionSequenceIdLock) {
1630       this.lowestFlushingRegionSequenceIds.remove(encodedRegionName);
1631     }
1632     closeBarrier.endOp();
1633   }
1634 
1635   @Override
1636   public void abortCacheFlush(byte[] encodedRegionName) {
1637     Long currentSeqNum = null, seqNumBeforeFlushStarts = null;
1638     synchronized (regionSequenceIdLock) {
1639       seqNumBeforeFlushStarts = this.lowestFlushingRegionSequenceIds.remove(encodedRegionName);
1640       if (seqNumBeforeFlushStarts != null) {
1641         currentSeqNum =
1642           this.oldestUnflushedRegionSequenceIds.put(encodedRegionName, seqNumBeforeFlushStarts);
1643       }
1644     }
1645     closeBarrier.endOp();
1646     if ((currentSeqNum != null)
1647         && (currentSeqNum.longValue() <= seqNumBeforeFlushStarts.longValue())) {
1648       String errorStr = "Region " + Bytes.toString(encodedRegionName) +
1649           "acquired edits out of order current memstore seq=" + currentSeqNum
1650           + ", previous oldest unflushed id=" + seqNumBeforeFlushStarts;
1651       LOG.error(errorStr);
1652       assert false : errorStr;
1653       Runtime.getRuntime().halt(1);
1654     }
1655   }
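
  // Worked example of the flush accounting in the three methods above: suppose region R's oldest
  // unflushed edit has sequence id 7.  startCacheFlush moves the 7 from
  // oldestUnflushedRegionSequenceIds into lowestFlushingRegionSequenceIds; a successful flush then
  // calls completeCacheFlush, which drops it; a failed flush calls abortCacheFlush, which moves
  // the 7 back so those edits are still accounted as unflushed.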
1656 
1657   @VisibleForTesting
1658   boolean isLowReplicationRollEnabled() {
1659       return lowReplicationRollEnabled;
1660   }
1661 
1662   public static final long FIXED_OVERHEAD = ClassSize.align(
1663     ClassSize.OBJECT + (5 * ClassSize.REFERENCE) +
1664     ClassSize.ATOMIC_INTEGER + Bytes.SIZEOF_INT + (3 * Bytes.SIZEOF_LONG));
1665 
1666   private static void split(final Configuration conf, final Path p)
1667   throws IOException {
1668     FileSystem fs = FileSystem.get(conf);
1669     if (!fs.exists(p)) {
1670       throw new FileNotFoundException(p.toString());
1671     }
1672     if (!fs.getFileStatus(p).isDirectory()) {
1673       throw new IOException(p + " is not a directory");
1674     }
1675 
1676     final Path baseDir = FSUtils.getRootDir(conf);
1677     final Path archiveDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
1678     WALSplitter.split(baseDir, p, archiveDir, fs, conf, WALFactory.getInstance(conf));
1679   }
1680 
1681 
1682   @Override
1683   public long getEarliestMemstoreSeqNum(byte[] encodedRegionName) {
1684     Long result = oldestUnflushedRegionSequenceIds.get(encodedRegionName);
1685     return result == null ? HConstants.NO_SEQNUM : result.longValue();
1686   }
1687 
1688   /**
1689    * This class is used to coordinate two threads, holding one thread at a
1690    * 'safe point' while the orchestrating thread does some work that requires the first thread
1691    * paused: e.g. holding the WAL writer while its WAL is swapped out from under it by another
1692    * thread.
1693    * 
1694    * <p>Thread A signals Thread B to hold when it gets to a 'safe point'.  Thread A waits until
1695    * Thread B gets there. When the 'safe point' has been attained, Thread B signals Thread A.
1696    * Thread B then holds at the 'safe point'.  Thread A on notification that Thread B is paused,
1697    * goes ahead and does the work it needs to do while Thread B is holding.  When Thread A is done,
1698    * it flags B and then Thread A and Thread B continue along on their merry way.  Pausing and
1699    * signalling 'zigzags' between the two participating threads.  We use two latches -- one the
1700    * inverse of the other -- pausing and signaling when states are achieved.
1701    * 
1702    * <p>To start up the drama, Thread A creates an instance of this class each time it would do
1703    * this zigzag dance and passes it to Thread B (these classes use Latches so it is one shot
1704    * only). Thread B notices the new instance (via reading a volatile reference or however) and it
1705    * starts to work toward the 'safe point'.  Thread A calls {@link #waitSafePoint()} when it
1706    * cannot proceed until the Thread B 'safe point' is attained. Thread A will be held inside in
1707    * {@link #waitSafePoint()} until Thread B reaches the 'safe point'.  Once there, Thread B
1708    * frees Thread A by calling {@link #safePointAttained()}.  Thread A now knows Thread B
1709    * is at the 'safe point' and that it is holding there (When Thread B calls
1710    * {@link #safePointAttained()} it blocks here until Thread A calls {@link #releaseSafePoint()}).
1711    * Thread A proceeds to do what it needs to do while Thread B is paused.  When finished,
1712    * it lets Thread B loose by calling {@link #releaseSafePoint()} and away go both Threads again.
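   *
   * <p>An illustrative sequence, using the methods declared below (not exact code):
   * <pre>
   *   // Thread A (orchestrator)            // Thread B (e.g. the ring buffer appender)
   *   SafePointZigZagLatch latch = ...;     // B picks up the new latch instance
   *   latch.waitSafePoint(syncFuture);      latch.safePointAttained();  // B parks here
   *   // A now does its work (e.g. swaps the WAL writer) while B is held...
   *   latch.releaseSafePoint();             // B resumes from safePointAttained()
   * </pre>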
1713    */
1714   static class SafePointZigZagLatch {
1715     /**
1716      * Count down this latch when safe point attained.
1717      */
1718     private volatile CountDownLatch safePointAttainedLatch = new CountDownLatch(1);
1719     /**
1720      * Latch to wait on.  Will be released when we can proceed.
1721      */
1722     private volatile CountDownLatch safePointReleasedLatch = new CountDownLatch(1);
1723  
1724     /**
1725      * For Thread A to call when it is ready to wait on the 'safe point' to be attained.
1726      * Thread A will be held in here until Thread B calls {@link #safePointAttained()}
1727      * @throws InterruptedException
1728      * @throws ExecutionException
1729      * @param syncFuture We need this as a barometer on outstanding syncs.  If it comes home with
1730      * an exception, then something is up w/ our syncing.
1731      * @return The passed <code>syncFuture</code>
1732      * @throws FailedSyncBeforeLogCloseException 
1733      */
1734     SyncFuture waitSafePoint(final SyncFuture syncFuture)
1735     throws InterruptedException, FailedSyncBeforeLogCloseException {
1736       while (true) {
1737         if (this.safePointAttainedLatch.await(1, TimeUnit.NANOSECONDS)) break;
1738         if (syncFuture.isThrowable()) {
1739           throw new FailedSyncBeforeLogCloseException(syncFuture.getThrowable());
1740         }
1741       }
1742       return syncFuture;
1743     }
1744  
1745     /**
1746      * Called by Thread B when it attains the 'safe point'.  In this method, Thread B signals
1747      * Thread A it can proceed. Thread B will be held in here until {@link #releaseSafePoint()}
1748      * is called by Thread A.
1749      * @throws InterruptedException
1750      */
1751     void safePointAttained() throws InterruptedException {
1752       this.safePointAttainedLatch.countDown();
1753       this.safePointReleasedLatch.await();
1754     }
1755 
1756     /**
1757      * Called by Thread A when it is done with the work it needs to do while Thread B is
1758      * halted.  This will release the Thread B held in a call to {@link #safePointAttained()}
1759      */
1760     void releaseSafePoint() {
1761       this.safePointReleasedLatch.countDown();
1762     }
1763 
1764     /**
1765      * @return True if this is a 'cocked', fresh instance, and not one that has already fired.
1766      */
1767     boolean isCocked() {
1768       return this.safePointAttainedLatch.getCount() > 0 &&
1769         this.safePointReleasedLatch.getCount() > 0;
1770     }
1771   }
1772 
1773   /**
1774    * Handler that is run by the disruptor ringbuffer consumer. Consumer is a SINGLE
1775    * 'writer/appender' thread.  Appends edits and starts up sync runs.  Tries its best to batch up
1776    * syncs.  There is no discernible benefit to batching appends so we just append as they come in
1777    * because it simplifies the below implementation.  See metrics for batching effectiveness
1778    * (In measurement, at 100 concurrent handlers writing 1k, we are batching > 10 appends and 10
1779    * handler sync invocations for every actual dfsclient sync call; at 10 concurrent handlers,
1780    * YMMV).
1781    * <p>Herein, we have an array into which we store the sync futures as they come in.  When we
1782    * have a 'batch', we'll then pass what we have collected to a SyncRunner thread to do the
1783    * filesystem sync.  When it completes, it will then call
1784    * {@link SyncFuture#done(long, Throwable)} on each of SyncFutures in the batch to release
1785    * blocked Handler threads.
1786    * <p>I've tried various approaches to make latencies low while keeping throughput high.
1787    * I've tried keeping a single Queue of SyncFutures in this class, appending to its tail as the
1788    * syncs come in and having sync runner threads poll off the head to 'finish' completed
1789    * SyncFutures.  I've tried a linked list, and various queues from concurrent utils, whether
1790    * LinkedBlockingQueue or ArrayBlockingQueue, etc.  The more points of synchronization, the
1791    * more 'work' (according to 'perf stats') that has to be done; small increases in stall
1792    * percentages seem to have a big impact on throughput/latencies.  The below model where we have
1793    * an array into which we stash the syncs and then hand them off to the sync thread seemed like
1794    * a decent compromise.  See HBASE-8755 for more detail.
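   * <p>In outline (see {@link #onEvent(RingBufferTruck, long, boolean)} below for the real thing):
   * <pre>
   *   // appends are written through immediately as they come off the ring buffer...
   *   append(truck.unloadFSWALEntryPayload());
   *   // ...while sync requests are stashed until the ring buffer batch ends...
   *   syncFutures[syncFuturesCount++] = truck.unloadSyncFuturePayload();
   *   // ...and then the whole stash is handed to one SyncRunner for a single filesystem sync.
   *   syncRunners[index].offer(sequence, syncFutures, syncFuturesCount);
   * </pre>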
1795    */
1796   class RingBufferEventHandler implements EventHandler<RingBufferTruck>, LifecycleAware {
1797     private final SyncRunner [] syncRunners;
1798     private final SyncFuture [] syncFutures;
1799     // Had 'interesting' issues when this was non-volatile.  On occasion, we'd not pass all
1800     // syncFutures to the next sync'ing thread.
1801     private volatile int syncFuturesCount = 0;
1802     private volatile SafePointZigZagLatch zigzagLatch;
1803     /**
1804      * Object to block on while waiting on safe point.
1805      */
1806     private final Object safePointWaiter = new Object();
1807     private volatile boolean shutdown = false;
1808 
1809     /**
1810      * Which syncrunner to use next.
1811      */
1812     private int syncRunnerIndex;
1813 
1814     RingBufferEventHandler(final int syncRunnerCount, final int maxHandlersCount) {
1815       this.syncFutures = new SyncFuture[maxHandlersCount];
1816       this.syncRunners = new SyncRunner[syncRunnerCount];
1817       for (int i = 0; i < syncRunnerCount; i++) {
1818         this.syncRunners[i] = new SyncRunner("sync." + i, maxHandlersCount);
1819       }
1820     }
1821 
1822     private void cleanupOutstandingSyncsOnException(final long sequence, final Exception e) {
1823       for (int i = 0; i < this.syncFuturesCount; i++) this.syncFutures[i].done(sequence, e);
1824       this.syncFuturesCount = 0;
1825     }
1826 
1827     @Override
1828     // We can set endOfBatch in the below method if we are at the end of our this.syncFutures array
1829     public void onEvent(final RingBufferTruck truck, final long sequence, boolean endOfBatch)
1830     throws Exception {
1831       // Appends and syncs are coming in order off the ringbuffer.  We depend on this fact.  We'll
1832       // add appends to dfsclient as they come in.  Batching appends doesn't give any significant
1833       // benefit on measurement.  Handler sync calls we will batch up.
1834 
1835       try {
1836         if (truck.hasSyncFuturePayload()) {
1837           this.syncFutures[this.syncFuturesCount++] = truck.unloadSyncFuturePayload();
1838           // Force flush of syncs if we are carrying a full complement of syncFutures.
1839           if (this.syncFuturesCount == this.syncFutures.length) endOfBatch = true;
1840         } else if (truck.hasFSWALEntryPayload()) {
1841           TraceScope scope = Trace.continueSpan(truck.unloadSpanPayload());
1842           try {
1843             append(truck.unloadFSWALEntryPayload());
1844           } catch (Exception e) {
1845             // If append fails, presume any pending syncs will fail too; let all waiting handlers
1846             // know of the exception.
1847             cleanupOutstandingSyncsOnException(sequence, e);
1848             // Return to keep processing.
1849             return;
1850           } finally {
1851             assert scope == NullScope.INSTANCE || !scope.isDetached();
1852             scope.close(); // append scope is complete
1853           }
1854         } else {
1855           // The truck carries neither a sync nor an append payload.  Fail all up to this!!!
1856           cleanupOutstandingSyncsOnException(sequence,
1857             new IllegalStateException("Neither append nor sync"));
1858           // Return to keep processing.
1859           return;
1860         }
1861 
1862         // TODO: Check size and if big go ahead and call a sync if we have enough data.
1863 
1864         // If not a batch, return to consume more events from the ring buffer before proceeding;
1865         // we want to get up a batch of syncs and appends before we go do a filesystem sync.
1866         if (!endOfBatch || this.syncFuturesCount <= 0) return;
1867 
1868         // Now we have a batch.
1869 
1870         if (LOG.isTraceEnabled()) {
1871           LOG.trace("Sequence=" + sequence + ", syncCount=" + this.syncFuturesCount);
1872         }
1873 
1874         // Below expects that the offer 'transfers' responsibility for the outstanding syncs to the
1875         // syncRunner. We should never get an exception in here. HBASE-11145 was because queue
1876         // was sized exactly to the count of user handlers but we could have more if we factor in
1877         // meta handlers doing opens and closes.
1878         int index = Math.abs(this.syncRunnerIndex++) % this.syncRunners.length;
1879         try {
1880           this.syncRunners[index].offer(sequence, this.syncFutures, this.syncFuturesCount);
1881         } catch (Exception e) {
1882           cleanupOutstandingSyncsOnException(sequence, e);
1883           throw e;
1884         }
1885         attainSafePoint(sequence);
1886         this.syncFuturesCount = 0;
1887       } catch (Throwable t) {
1888         LOG.error("UNEXPECTED!!! syncFutures.length=" + this.syncFutures.length, t);
1889       }
1890     }
1891 
1892     SafePointZigZagLatch attainSafePoint() {
1893       this.zigzagLatch = new SafePointZigZagLatch();
1894       return this.zigzagLatch;
1895     }
1896 
1897     /**
1898      * Check if we should attain safe point.  If so, go there and then wait till signalled before
1899      * we proceed.
1900      */
1901     private void attainSafePoint(final long currentSequence) {
1902       if (this.zigzagLatch == null || !this.zigzagLatch.isCocked()) return;
1903       // If here, another thread is waiting on us to get to safe point.  Don't leave it hanging.
1904       try {
1905         // Wait on outstanding syncers; wait for them to finish syncing (unless we've been
1906         // shutdown or unless our latch has been thrown because we have been aborted).
1907         while (!this.shutdown && this.zigzagLatch.isCocked() &&
1908             highestSyncedSequence.get() < currentSequence) {
1909           synchronized (this.safePointWaiter) {
1910             this.safePointWaiter.wait(0, 1);
1911           }
1912         }
1913         // Tell waiting thread we've attained safe point
1914         this.zigzagLatch.safePointAttained();
1915       } catch (InterruptedException e) {
1916         LOG.warn("Interrupted ", e);
1917         Thread.currentThread().interrupt();
1918       }
1919     }
1920 
1921     /**
1922      * Append to the WAL.  Does all CP and WAL listener calls.
1923      * @param entry
1924      * @throws Exception
1925      */
1926     void append(final FSWALEntry entry) throws Exception {
1927       // TODO: WORK ON MAKING THIS APPEND FASTER. DOING WAY TOO MUCH WORK WITH CPs, PBing, etc.
1928       atHeadOfRingBufferEventHandlerAppend();
1929 
1930       long start = EnvironmentEdgeManager.currentTime();
1931       byte [] encodedRegionName = entry.getKey().getEncodedRegionName();
1932       long regionSequenceId = WALKey.NO_SEQUENCE_ID;
1933       try {
1934         // We are about to append this edit; update the region-scoped sequence number.  Do it
1935         // here inside this single appending/writing thread.  Events are ordered on the ringbuffer
1936         // so region sequenceids will also be in order.
1937         regionSequenceId = entry.stampRegionSequenceId();
1938         
1939         // If the edits are empty, there is nothing to append.  They may be empty when we are
1940         // looking for a region sequence id only, a region edit/sequence id that is not associated
1941         // with an actual edit. It still has to go through all the rigmarole to get the right ordering.
1942         if (entry.getEdit().isEmpty()) {
1943           return;
1944         }
1945         
1946         // Coprocessor hook.
1947         if (!coprocessorHost.preWALWrite(entry.getHRegionInfo(), entry.getKey(),
1948             entry.getEdit())) {
1949           if (entry.getEdit().isReplay()) {
1950             // Set replication scope null so that this won't be replicated
1951             entry.getKey().setScopes(null);
1952           }
1953         }
1954         if (!listeners.isEmpty()) {
1955           for (WALActionsListener i: listeners) {
1956             // TODO: Why does listener take a table description and CPs take a regioninfo?  Fix.
1957             i.visitLogEntryBeforeWrite(entry.getHTableDescriptor(), entry.getKey(),
1958               entry.getEdit());
1959           }
1960         }
1961 
1962         writer.append(entry);
1963         assert highestUnsyncedSequence < entry.getSequence();
1964         highestUnsyncedSequence = entry.getSequence();
1965         Long lRegionSequenceId = Long.valueOf(regionSequenceId);
1966         highestRegionSequenceIds.put(encodedRegionName, lRegionSequenceId);
1967         if (entry.isInMemstore()) {
1968           oldestUnflushedRegionSequenceIds.putIfAbsent(encodedRegionName, lRegionSequenceId);
1969         }
1970         
1971         coprocessorHost.postWALWrite(entry.getHRegionInfo(), entry.getKey(), entry.getEdit());
1972         // Update metrics.
1973         postAppend(entry, EnvironmentEdgeManager.currentTime() - start);
1974       } catch (Exception e) {
1975         LOG.fatal("Could not append. Requesting close of wal", e);
1976         requestLogRoll();
1977         throw e;
1978       }
1979       numEntries.incrementAndGet();
1980     }
1981 
1982     @Override
1983     public void onStart() {
1984       for (SyncRunner syncRunner: this.syncRunners) syncRunner.start();
1985     }
1986 
1987     @Override
1988     public void onShutdown() {
1989       for (SyncRunner syncRunner: this.syncRunners) syncRunner.interrupt();
1990     }
1991   }
1992 
1993   /**
1994    * Exposed for testing only.  Used for tricks like halting the ring buffer appending.
1995    */
1996   @VisibleForTesting
1997   void atHeadOfRingBufferEventHandlerAppend() {
1998     // Noop
1999   }
2000 
2001   private static IOException ensureIOException(final Throwable t) {
2002     return (t instanceof IOException)? (IOException)t: new IOException(t);
2003   }
2004 
2005   private static void usage() {
2006     System.err.println("Usage: FSHLog <ARGS>");
2007     System.err.println("Arguments:");
2008     System.err.println(" --dump  Dump textual representation of passed one or more files");
2009     System.err.println("         For example: " +
2010       "FSHLog --dump hdfs://example.com:9000/hbase/.logs/MACHINE/LOGFILE");
2011     System.err.println(" --split Split the passed directory of WAL logs");
2012     System.err.println("         For example: " +
2013       "FSHLog --split hdfs://example.com:9000/hbase/.logs/DIR");
2014   }
2015 
2016   /**
2017    * Pass one or more log file names and it will either dump out a text version
2018    * on <code>stdout</code> or split the specified log files.
2019    *
2020    * @param args
2021    * @throws IOException
2022    */
2023   public static void main(String[] args) throws IOException {
2024     if (args.length < 2) {
2025       usage();
2026       System.exit(-1);
2027     }
2028     // either dump using the WALPrettyPrinter or split, depending on args
2029     if (args[0].compareTo("--dump") == 0) {
2030       WALPrettyPrinter.run(Arrays.copyOfRange(args, 1, args.length));
2031     } else if (args[0].compareTo("--perf") == 0) {
2032       LOG.fatal("Please use the WALPerformanceEvaluation tool instead. i.e.:");
2033       LOG.fatal("\thbase org.apache.hadoop.hbase.wal.WALPerformanceEvaluation --iterations " +
2034           args[1]);
2035       System.exit(-1);
2036     } else if (args[0].compareTo("--split") == 0) {
2037       Configuration conf = HBaseConfiguration.create();
2038       for (int i = 1; i < args.length; i++) {
2039         try {
2040           Path logPath = new Path(args[i]);
2041           FSUtils.setFsDefault(conf, logPath);
2042           split(conf, logPath);
2043         } catch (IOException t) {
2044           t.printStackTrace(System.err);
2045           System.exit(-1);
2046         }
2047       }
2048     } else {
2049       usage();
2050       System.exit(-1);
2051     }
2052   }
2053   
2054   /**
2055    * Find the 'getPipeline' on the passed <code>os</code> stream.
2056    * @return Method or null.
2057    */
2058   private Method getGetPipeline(final FSDataOutputStream os) {
2059     Method m = null;
2060     if (os != null) {
2061       Class<? extends OutputStream> wrappedStreamClass = os.getWrappedStream()
2062           .getClass();
2063       try {
2064         m = wrappedStreamClass.getDeclaredMethod("getPipeline",
2065           new Class<?>[] {});
2066         m.setAccessible(true);
2067       } catch (NoSuchMethodException e) {
2068         LOG.info("FileSystem's output stream doesn't support"
2069             + " getPipeline; not available; fsOut="
2070             + wrappedStreamClass.getName());
2071       } catch (SecurityException e) {
2072         LOG.info(
2073           "Doesn't have access to getPipeline on "
2074               + "FileSystems's output stream ; fsOut="
2075               + wrappedStreamClass.getName(), e);
2076         m = null; // could happen on setAccessible()
2077       }
2078     }
2079     return m;
2080   }
2081 
2082   /**
2083    * This method gets the pipeline for the current WAL.
2084    */
2085   @VisibleForTesting
2086   DatanodeInfo[] getPipeLine() {
2087     if (this.getPipeLine != null && this.hdfs_out != null) {
2088       Object repl;
2089       try {
2090         repl = this.getPipeLine.invoke(getOutputStream(), NO_ARGS);
2091         if (repl instanceof DatanodeInfo[]) {
2092           return ((DatanodeInfo[]) repl);
2093         }
2094       } catch (Exception e) {
2095         LOG.info("Get pipeline failed", e);
2096       }
2097     }
2098     return new DatanodeInfo[0];
2099   }
2100 }