1   /**
2    * Copyright 2010 The Apache Software Foundation
3    *
4    * Licensed to the Apache Software Foundation (ASF) under one
5    * or more contributor license agreements.  See the NOTICE file
6    * distributed with this work for additional information
7    * regarding copyright ownership.  The ASF licenses this file
8    * to you under the Apache License, Version 2.0 (the
9    * "License"); you may not use this file except in compliance
10   * with the License.  You may obtain a copy of the License at
11   *
12   *     http://www.apache.org/licenses/LICENSE-2.0
13   *
14   * Unless required by applicable law or agreed to in writing, software
15   * distributed under the License is distributed on an "AS IS" BASIS,
16   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17   * See the License for the specific language governing permissions and
18   * limitations under the License.
19   */
20  package org.apache.hadoop.hbase.regionserver.wal;
21  
22  import java.io.DataInput;
23  import java.io.DataOutput;
24  import java.io.FileNotFoundException;
25  import java.io.IOException;
26  import java.io.OutputStream;
27  import java.io.UnsupportedEncodingException;
28  import java.lang.reflect.InvocationTargetException;
29  import java.lang.reflect.Method;
30  import java.net.URLEncoder;
31  import java.util.ArrayList;
32  import java.util.Collections;
33  import java.util.List;
34  import java.util.Map;
35  import java.util.NavigableSet;
36  import java.util.SortedMap;
37  import java.util.TreeMap;
38  import java.util.TreeSet;
39  import java.util.concurrent.ConcurrentSkipListMap;
40  import java.util.concurrent.CopyOnWriteArrayList;
41  import java.util.concurrent.atomic.AtomicInteger;
42  import java.util.concurrent.atomic.AtomicLong;
43  import java.util.concurrent.locks.Condition;
44  import java.util.concurrent.locks.Lock;
45  import java.util.concurrent.locks.ReentrantLock;
46  import java.util.regex.Matcher;
47  import java.util.regex.Pattern;
48  
49  import org.apache.commons.logging.Log;
50  import org.apache.commons.logging.LogFactory;
51  import org.apache.hadoop.conf.Configuration;
52  import org.apache.hadoop.fs.FileStatus;
53  import org.apache.hadoop.fs.FileSystem;
54  import org.apache.hadoop.fs.Path;
55  import org.apache.hadoop.fs.PathFilter;
56  import org.apache.hadoop.fs.Syncable;
57  import org.apache.hadoop.hbase.HBaseConfiguration;
58  import org.apache.hadoop.hbase.HConstants;
59  import org.apache.hadoop.hbase.HRegionInfo;
60  import org.apache.hadoop.hbase.HServerInfo;
61  import org.apache.hadoop.hbase.KeyValue;
62  import org.apache.hadoop.hbase.util.Bytes;
63  import org.apache.hadoop.hbase.util.ClassSize;
64  import org.apache.hadoop.hbase.util.FSUtils;
65  import org.apache.hadoop.hbase.util.Threads;
66  import org.apache.hadoop.io.Writable;
67  import org.apache.hadoop.util.StringUtils;
68  
69  /**
70   * HLog stores all the edits to the HStore.  It's the HBase write-ahead-log
71   * implementation.
72   *
73   * It performs logfile-rolling, so external callers are not aware that the
74   * underlying file is being rolled.
75   *
76   * <p>
77   * There is one HLog per RegionServer.  All edits for all Regions carried by
78   * a particular RegionServer are entered first in the HLog.
79   *
80   * <p>
81   * Each HRegion is identified by a unique <code>long</code> id. HRegions do
82   * not need to declare themselves before using the HLog; they simply include
83   * their HRegion-id in the <code>append</code> or
84   * <code>completeCacheFlush</code> calls.
85   *
86   * <p>
87   * An HLog consists of multiple on-disk files, which have a chronological order.
88   * As data is flushed to other (better) on-disk structures, the log becomes
89   * obsolete. We can destroy all the log messages for a given HRegion-id up to
90   * the most-recent CACHEFLUSH message from that HRegion.
91   *
92   * <p>
93   * It's only practical to delete entire files. Thus, we delete an entire on-disk
94   * file F when all of the messages in F have a log-sequence-id that's older
95   * (smaller) than the most-recent CACHEFLUSH message for every HRegion that has
96   * a message in F.
97   *
98   * <p>
99   * Synchronized methods can never execute in parallel. However, between the
100  * start of a cache flush and the completion point, appends are allowed but log
101  * rolling is not. To prevent log rolling taking place during this period, a
102  * separate reentrant lock is used.
103  *
104  * <p>To read an HLog, call {@link #getReader(org.apache.hadoop.fs.FileSystem,
105  * org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration)}.
106  *
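 * <p>A rough usage sketch (the paths and the region/edit objects here are
 * hypothetical, shown only to illustrate the call sequence):
 * <pre>
 * Configuration conf = HBaseConfiguration.create();
 * FileSystem fs = FileSystem.get(conf);
 * HLog wal = new HLog(fs, new Path("/hbase/.logs/server1"),
 *     new Path("/hbase/.oldlogs"), conf);
 * wal.append(regionInfo, tableName, edits, System.currentTimeMillis());
 * wal.sync();    // force the edits to the filesystem
 * wal.close();
 * </pre>
 *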
107  */
108 public class HLog implements Syncable {
109   static final Log LOG = LogFactory.getLog(HLog.class);
110   public static final byte [] METAFAMILY = Bytes.toBytes("METAFAMILY");
111   static final byte [] METAROW = Bytes.toBytes("METAROW");
112 
113   /*
114    * Name of directory that holds recovered edits written by the wal log
115    * splitting code, one per region
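   * (i.e. regiondir/recovered.edits/; see getRegionDirRecoveredEditsDir below)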
116    */
117   private static final String RECOVERED_EDITS_DIR = "recovered.edits";
118   private static final Pattern EDITFILES_NAME_PATTERN =
119     Pattern.compile("-?[0-9]+");
120   
121   private final FileSystem fs;
122   private final Path dir;
123   private final Configuration conf;
124   // Listeners that are called on WAL events.
125   private List<WALObserver> listeners =
126     new CopyOnWriteArrayList<WALObserver>();
127   private final long optionalFlushInterval;
128   private final long blocksize;
129   private final int flushlogentries;
130   private final String prefix;
131   private final Path oldLogDir;
132   private boolean logRollRequested;
133 
134 
135   private static Class<? extends Writer> logWriterClass;
136   private static Class<? extends Reader> logReaderClass;
137 
138   static void resetLogReaderClass() {
139     HLog.logReaderClass = null;
140   }
141 
142   private OutputStream hdfs_out;     // OutputStream associated with the current SequenceFile.writer
143   private int initialReplication;    // initial replication factor of SequenceFile.writer
144   private Method getNumCurrentReplicas; // refers to DFSOutputStream.getNumCurrentReplicas
145   final static Object [] NO_ARGS = new Object []{};
146 
147   // used to indirectly tell syncFs to force the sync
148   private boolean forceSync = false;
149 
150   public interface Reader {
151     void init(FileSystem fs, Path path, Configuration c) throws IOException;
152     void close() throws IOException;
153     Entry next() throws IOException;
154     Entry next(Entry reuse) throws IOException;
155     void seek(long pos) throws IOException;
156     long getPosition() throws IOException;
157   }
158 
159   public interface Writer {
160     void init(FileSystem fs, Path path, Configuration c) throws IOException;
161     void close() throws IOException;
162     void sync() throws IOException;
163     void append(Entry entry) throws IOException;
164     long getLength() throws IOException;
165   }
166 
167   /*
168    * Current log file.
169    */
170   Writer writer;
171 
172   /*
173    * Map of all log files but the current one.
174    */
175   final SortedMap<Long, Path> outputfiles =
176     Collections.synchronizedSortedMap(new TreeMap<Long, Path>());
177 
178   /*
179    * Map of regions to most recent sequence/edit id in their memstore.
180    * Key is encoded region name.
181    */
182   private final ConcurrentSkipListMap<byte [], Long> lastSeqWritten =
183     new ConcurrentSkipListMap<byte [], Long>(Bytes.BYTES_COMPARATOR);
184 
185   private volatile boolean closed = false;
186 
187   private final AtomicLong logSeqNum = new AtomicLong(0);
188 
189   // The timestamp (in ms) when the log file was created.
190   private volatile long filenum = -1;
191 
192   // Number of transactions in the current HLog.
193   private final AtomicInteger numEntries = new AtomicInteger(0);
194 
195   // If > this size, roll the log. This is typically 0.95 times the
196   // default HDFS block size.
197   private final long logrollsize;
198 
199   // This lock prevents starting a log roll during a cache flush.
200   // synchronized is insufficient because a cache flush spans two method calls.
201   private final Lock cacheFlushLock = new ReentrantLock();
202 
203   // We synchronize on updateLock to prevent updates and to prevent a log roll
204   // during an update
205   // locked during appends
206   private final Object updateLock = new Object();
207 
208   private final boolean enabled;
209 
210   /*
211    * If there are more than this many logs, force a flush of the region with
212    * the oldest edit so its edits go to disk.  If too many accumulate and we crash, replay takes forever.
213    * Keep the number of logs tidy.
214    */
215   private final int maxLogs;
216 
217   /**
218    * Thread that handles optional sync'ing
219    */
220   private final LogSyncer logSyncerThread;
221 
222   /**
223    * Pattern used to validate a HLog file name
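   * (anything followed by a dot and digits; e.g. a hypothetical
   * <code>hlog.1281034967541</code>)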
224    */
225   private static final Pattern pattern = Pattern.compile(".*\\.\\d*");
226 
227   static byte [] COMPLETE_CACHE_FLUSH;
228   static {
229     try {
230       COMPLETE_CACHE_FLUSH =
231         "HBASE::CACHEFLUSH".getBytes(HConstants.UTF8_ENCODING);
232     } catch (UnsupportedEncodingException e) {
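      // UTF-8 is a charset every JVM is required to support, so this
      // should never happen.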
233       assert(false);
234     }
235   }
236 
237   // For measuring latency of writes
238   private static volatile long writeOps;
239   private static volatile long writeTime;
240   // For measuring latency of syncs
241   private static volatile long syncOps;
242   private static volatile long syncTime;
243   
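  // Note: these read-and-reset getters are unsynchronized and the counters
  // are bumped without atomicity, so a concurrent update can be lost; the
  // values are rough metrics only.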
244   public static long getWriteOps() {
245     long ret = writeOps;
246     writeOps = 0;
247     return ret;
248   }
249 
250   public static long getWriteTime() {
251     long ret = writeTime;
252     writeTime = 0;
253     return ret;
254   }
255 
256   public static long getSyncOps() {
257     long ret = syncOps;
258     syncOps = 0;
259     return ret;
260   }
261 
262   public static long getSyncTime() {
263     long ret = syncTime;
264     syncTime = 0;
265     return ret;
266   }
267 
268   /**
269    * Constructor.
270    *
271    * @param fs filesystem handle
272    * @param dir path to where hlogs are stored
273    * @param oldLogDir path to where hlogs are archived
274    * @param conf configuration to use
275    * @throws IOException
276    */
277   public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
278               final Configuration conf)
279   throws IOException {
280     this(fs, dir, oldLogDir, conf, null, true, null);
281   }
282 
283   /**
284    * Create an edit log at the given <code>dir</code> location.
285    *
286    * You should never have to load an existing log. If there is a log at
287    * startup, it should have already been processed and deleted by the time the
288    * HLog object is started up.
289    *
290    * @param fs filesystem handle
291    * @param dir path to where hlogs are stored
292    * @param oldLogDir path to where hlogs are archived
293    * @param conf configuration to use
294    * @param listeners Listeners on WAL events. Listeners passed here will
295    * be registered before we do anything else; e.g. before the
296    * constructor calls {@link #rollWriter()}.
297    * @param prefix should always be hostname and port in distributed env and
298    *        it will be URL encoded before being used.
299    *        If prefix is null, "hlog" will be used
300    * @throws IOException
301    */
302   public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
303       final Configuration conf, final List<WALObserver> listeners,
304       final String prefix) throws IOException {
305     this(fs, dir, oldLogDir, conf, listeners, true, prefix);
306   }
307 
308   /**
309    * Create an edit log at the given <code>dir</code> location.
310    *
311    * You should never have to load an existing log. If there is a log at
312    * startup, it should have already been processed and deleted by the time the
313    * HLog object is started up.
314    *
315    * @param fs filesystem handle
316    * @param dir path to where hlogs are stored
317    * @param oldLogDir path to where hlogs are archived
318    * @param conf configuration to use
319    * @param listeners Listeners on WAL events. Listeners passed here will
320    * be registered before we do anything else; e.g. before the
321    * constructor calls {@link #rollWriter()}.
322    * @param failIfLogDirExists If true IOException will be thrown if dir already exists.
323    * @param prefix should always be hostname and port in distributed env and
324    *        it will be URL encoded before being used.
325    *        If prefix is null, "hlog" will be used
326    * @throws IOException
327    */
328   public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
329       final Configuration conf, final List<WALObserver> listeners,
330       final boolean failIfLogDirExists, final String prefix)
331  throws IOException {
332     super();
333     this.fs = fs;
334     this.dir = dir;
335     this.conf = conf;
336     if (listeners != null) {
337       for (WALObserver i: listeners) {
338         registerWALActionsListener(i);
339       }
340     }
341     this.flushlogentries =
342       conf.getInt("hbase.regionserver.flushlogentries", 1);
343     this.blocksize = conf.getLong("hbase.regionserver.hlog.blocksize",
344       this.fs.getDefaultBlockSize());
345     // Roll at 95% of block size.
346     float multi = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.95f);
347     this.logrollsize = (long)(this.blocksize * multi);
348     this.optionalFlushInterval =
349       conf.getLong("hbase.regionserver.optionallogflushinterval", 1 * 1000);
350     if (failIfLogDirExists && fs.exists(dir)) {
351       throw new IOException("Target HLog directory already exists: " + dir);
352     }
353     if (!fs.mkdirs(dir)) {
354       throw new IOException("Unable to mkdir " + dir);
355     }
356     this.oldLogDir = oldLogDir;
357     if (!fs.exists(oldLogDir)) {
358       if (!fs.mkdirs(this.oldLogDir)) {
359         throw new IOException("Unable to mkdir " + this.oldLogDir);
360       }
361     }
362     this.maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
363     this.enabled = conf.getBoolean("hbase.regionserver.hlog.enabled", true);
364     LOG.info("HLog configuration: blocksize=" +
365       StringUtils.byteDesc(this.blocksize) +
366       ", rollsize=" + StringUtils.byteDesc(this.logrollsize) +
367       ", enabled=" + this.enabled +
368       ", flushlogentries=" + this.flushlogentries +
369       ", optionallogflushinterval=" + this.optionalFlushInterval + "ms");
370     // If prefix is null||empty then just name it hlog
371     this.prefix = prefix == null || prefix.isEmpty() ?
372         "hlog" : URLEncoder.encode(prefix, "UTF8");
373     // rollWriter sets this.hdfs_out if it can.
374     rollWriter();
375 
376     // handle the reflection necessary to call getNumCurrentReplicas()
377     this.getNumCurrentReplicas = null;
378     Exception exception = null;
379     if (this.hdfs_out != null) {
380       try {
381         this.getNumCurrentReplicas = this.hdfs_out.getClass().
382           getMethod("getNumCurrentReplicas", new Class<?> []{});
383         this.getNumCurrentReplicas.setAccessible(true);
384       } catch (NoSuchMethodException e) {
385         // Thrown if getNumCurrentReplicas() function isn't available
386         exception = e;
387       } catch (SecurityException e) {
388         // Thrown if we can't get access to getNumCurrentReplicas()
389         exception = e;
390         this.getNumCurrentReplicas = null; // could happen on setAccessible()
391       }
392     }
393     if (this.getNumCurrentReplicas != null) {
394       LOG.info("Using getNumCurrentReplicas--HDFS-826");
395     } else {
396       LOG.info("getNumCurrentReplicas--HDFS-826 not available; hdfs_out=" +
397         this.hdfs_out + ", exception=" + (exception == null? "none": exception.getMessage()));
398     }
399 
400     logSyncerThread = new LogSyncer(this.optionalFlushInterval);
401     Threads.setDaemonThreadRunning(logSyncerThread,
402         Thread.currentThread().getName() + ".logSyncer");
403   }
404 
405   public void registerWALActionsListener (final WALObserver listener) {
406     this.listeners.add(listener);
407   }
408 
409   public boolean unregisterWALActionsListener(final WALObserver listener) {
410     return this.listeners.remove(listener);
411   }
412 
413   /**
414    * @return Current state of the monotonically increasing file id.
415    */
416   public long getFilenum() {
417     return this.filenum;
418   }
419 
420   /**
421    * Called by HRegionServer when it opens a new region to ensure that log
422    * sequence numbers are always greater than the latest sequence number of the
423    * region being brought on-line.
424    *
425    * @param newvalue We'll set log edit/sequence number to this value if it
426    * is greater than the current value.
427    */
428   public void setSequenceNumber(final long newvalue) {
429     for (long id = this.logSeqNum.get(); id < newvalue &&
430         !this.logSeqNum.compareAndSet(id, newvalue); id = this.logSeqNum.get()) {
431       // This could spin on occasion but better the occasional spin than locking
432       // every increment of sequence number.
433       LOG.debug("Changed sequenceid from " + logSeqNum + " to " + newvalue);
434     }
435   }
436 
437   /**
438    * @return log sequence number
439    */
440   public long getSequenceNumber() {
441     return logSeqNum.get();
442   }
443 
444   // usage: see TestLogRolling.java
445   OutputStream getOutputStream() {
446     return this.hdfs_out;
447   }
448 
449   /**
450    * Roll the log writer. That is, start writing log messages to a new file.
451    *
452    * Because a log cannot be rolled during a cache flush, and a cache flush
453    * spans two method calls, a special lock needs to be obtained so that a cache
454    * flush cannot start when the log is being rolled and the log cannot be
455    * rolled during a cache flush.
456    *
457    * <p>Note that this method cannot be synchronized because it is possible that
458    * startCacheFlush runs, obtaining the cacheFlushLock, then this method could
459    * start which would obtain the lock on this but block on obtaining the
460    * cacheFlushLock and then completeCacheFlush could be called which would wait
461    * for the lock on this and consequently never release the cacheFlushLock.
462    *
463    * @return If lots of logs, flush the returned regions so next time through
464    * we can clean logs. Returns null if nothing to flush.  Names are encoded
465    * region names as returned by {@link HRegionInfo#getEncodedName()}.
466    * @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException
467    * @throws IOException
468    */
469   public byte [][] rollWriter() throws FailedLogCloseException, IOException {
470     // Return if nothing to flush.
471     if (this.writer != null && this.numEntries.get() <= 0) {
472       return null;
473     }
474     byte [][] regionsToFlush = null;
475     this.cacheFlushLock.lock();
476     try {
477       if (closed) {
478         return regionsToFlush;
479       }
480       // Do all the preparation outside of the updateLock to block
481       // incoming writes as little as possible
482       long currentFilenum = this.filenum;
483       this.filenum = System.currentTimeMillis();
484       Path newPath = computeFilename();
485       HLog.Writer nextWriter = this.createWriterInstance(fs, newPath, conf);
486       int nextInitialReplication = fs.getFileStatus(newPath).getReplication();
487       // Can we get at the dfsclient outputstream?  If an instance of
488       // SFLW, it'll have done the necessary reflection to get at the
489       // protected field name.
490       OutputStream nextHdfsOut = null;
491       if (nextWriter instanceof SequenceFileLogWriter) {
492         nextHdfsOut =
493           ((SequenceFileLogWriter)nextWriter).getDFSCOutputStream();
494       }
495       // Tell our listeners that a new log was created
496       if (!this.listeners.isEmpty()) {
497         for (WALObserver i : this.listeners) {
498           i.logRolled(newPath);
499         }
500       }
501 
502       synchronized (updateLock) {
503         // Clean up current writer.
504         Path oldFile = cleanupCurrentWriter(currentFilenum);
505         this.writer = nextWriter;
506         this.initialReplication = nextInitialReplication;
507         this.hdfs_out = nextHdfsOut;
508 
509         LOG.info((oldFile != null?
510             "Roll " + FSUtils.getPath(oldFile) + ", entries=" +
511             this.numEntries.get() +
512             ", filesize=" +
513             this.fs.getFileStatus(oldFile).getLen() + ". ": "") +
514           "New hlog " + FSUtils.getPath(newPath));
515         this.numEntries.set(0);
516         this.logRollRequested = false;
517       }
518       // Can we delete any of the old log files?
519       if (this.outputfiles.size() > 0) {
520         if (this.lastSeqWritten.isEmpty()) {
521           LOG.debug("Last sequenceid written is empty. Deleting all old hlogs");
522           // If so, then no new writes have come in since all regions were
523           // flushed (and removed from the lastSeqWritten map). Means can
524           // remove all but currently open log file.
525           for (Map.Entry<Long, Path> e : this.outputfiles.entrySet()) {
526             archiveLogFile(e.getValue(), e.getKey());
527           }
528           this.outputfiles.clear();
529         } else {
530           regionsToFlush = cleanOldLogs();
531         }
532       }
533     } finally {
534       this.cacheFlushLock.unlock();
535     }
536     return regionsToFlush;
537   }
538 
539   /**
540    * This method allows subclasses to inject different writers without having to
541    * override other methods like rollWriter().
542    * 
543    * @param fs
544    * @param path
545    * @param conf
546    * @return Writer instance
547    * @throws IOException
548    */
549   protected Writer createWriterInstance(final FileSystem fs, final Path path,
550       final Configuration conf) throws IOException {
551     return createWriter(fs, path, conf);
552   }
553 
554   /**
555    * Get a reader for the WAL.
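   * <p>A typical read loop (this mirrors the <code>dump</code> utility near
   * the bottom of this file):
   * <pre>
   * HLog.Reader reader = HLog.getReader(fs, path, conf);
   * try {
   *   HLog.Entry entry;
   *   while ((entry = reader.next()) != null) {
   *     System.out.println(entry);
   *   }
   * } finally {
   *   reader.close();
   * }
   * </pre>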
556    * @param fs
557    * @param path
558    * @param conf
559    * @return A WAL reader.  Close when done with it.
560    * @throws IOException
561    */
562   public static Reader getReader(final FileSystem fs,
563     final Path path, Configuration conf)
564   throws IOException {
565     try {
566       if (logReaderClass == null) {
567         logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
568             SequenceFileLogReader.class, Reader.class);
569       }
574       HLog.Reader reader = logReaderClass.newInstance();
575       reader.init(fs, path, conf);
576       return reader;
577     } catch (IOException e) {
578       throw e;
579     } catch (Exception e) {
581       throw new IOException("Cannot get log reader", e);
582     }
583   }
584 
585   /**
586    * Get a writer for the WAL.
587    * @param fs
588    * @param path
588    * @param conf
589    * @return A WAL writer.  Close when done with it.
590    * @throws IOException
591    */
592   public static Writer createWriter(final FileSystem fs,
593       final Path path, Configuration conf)
594   throws IOException {
595     try {
596       if (logWriterClass == null) {
597         logWriterClass = conf.getClass("hbase.regionserver.hlog.writer.impl",
598             SequenceFileLogWriter.class, Writer.class);
599       }
600       HLog.Writer writer = (HLog.Writer) logWriterClass.newInstance();
601       writer.init(fs, path, conf);
602       return writer;
603     } catch (Exception e) {
604       IOException ie = new IOException("cannot get log writer");
605       ie.initCause(e);
606       throw ie;
607     }
608   }
609 
610   /*
611    * Clean up old commit logs.
612    * @return If lots of logs, flush the returned region so next time through
613    * we can clean logs. Returns null if nothing to flush.  Returns array of
614    * encoded region names to flush.
615    * @throws IOException
616    */
617   private byte [][] cleanOldLogs() throws IOException {
618     Long oldestOutstandingSeqNum = getOldestOutstandingSeqNum();
619     // Get the set of all log files whose last sequence number is smaller than
620     // the oldest edit's sequence number.
621     TreeSet<Long> sequenceNumbers =
622       new TreeSet<Long>(this.outputfiles.headMap(oldestOutstandingSeqNum).keySet());
624     // Now remove old log files (if any)
625     int logsToRemove = sequenceNumbers.size();
626     if (logsToRemove > 0) {
627       if (LOG.isDebugEnabled()) {
628         // Find associated region; helps debugging.
629         byte [] oldestRegion = getOldestRegion(oldestOutstandingSeqNum);
630         LOG.debug("Found " + logsToRemove + " hlogs to remove" +
631           " out of total " + this.outputfiles.size() + ";" +
632           " oldest outstanding sequenceid is " + oldestOutstandingSeqNum +
633           " from region " + Bytes.toString(oldestRegion));
634       }
635       for (Long seq : sequenceNumbers) {
636         archiveLogFile(this.outputfiles.remove(seq), seq);
637       }
638     }
639 
640     // If too many log files, figure which regions we need to flush.
641     // Array is an array of encoded region names.
642     byte [][] regions = null;
643     int logCount = this.outputfiles.size();
644     if (logCount > this.maxLogs && logCount > 0) {
646       // This is an array of encoded region names.
647       regions = findMemstoresWithEditsEqualOrOlderThan(this.outputfiles.firstKey(),
648         this.lastSeqWritten);
649       if (regions != null) {
650         StringBuilder sb = new StringBuilder();
651         for (int i = 0; i < regions.length; i++) {
652           if (i > 0) sb.append(", ");
653           sb.append(Bytes.toStringBinary(regions[i]));
654         }
655         LOG.info("Too many hlogs: logs=" + logCount + ", maxlogs=" +
656            this.maxLogs + "; forcing flush of " + regions.length + " region(s): " +
657            sb.toString());
658       }
659     }
660     return regions;
661   }
662 
663   /**
664    * Return regions (memstores) that have edits with sequence ids less than
665    * or equal to the passed <code>oldestWALseqid</code>.
666    * @param oldestWALseqid
667    * @param regionsToSeqids
668    * @return All regions whose seqid is &lt;= <code>oldestWALseqid</code> (not
669    * necessarily in order).  Null if no regions found.
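   * <p>A small illustration with hypothetical values: if
   * <code>regionsToSeqids</code> maps regionA to 3 and regionB to 7, a call
   * with <code>oldestWALseqid</code> of 5 returns only regionA, because
   * 3 &lt;= 5 while 7 &gt; 5.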
670    */
671   static byte [][] findMemstoresWithEditsEqualOrOlderThan(final long oldestWALseqid,
672       final Map<byte [], Long> regionsToSeqids) {
673     //  This method is static so it can be unit tested more easily.
674     List<byte []> regions = null;
675     for (Map.Entry<byte [], Long> e: regionsToSeqids.entrySet()) {
676       if (e.getValue().longValue() <= oldestWALseqid) {
677         if (regions == null) regions = new ArrayList<byte []>();
678         regions.add(e.getKey());
679       }
680     }
681     return regions == null?
682       null: regions.toArray(new byte [regions.size()][]);
683   }
684 
685   /*
686    * @return Logs older than this id are safe to remove.
687    */
688   private Long getOldestOutstandingSeqNum() {
689     return Collections.min(this.lastSeqWritten.values());
690   }
691 
692   /**
693    * @param oldestOutstandingSeqNum
694    * @return (Encoded) name of oldest outstanding region.
695    */
696   private byte [] getOldestRegion(final Long oldestOutstandingSeqNum) {
697     byte [] oldestRegion = null;
698     for (Map.Entry<byte [], Long> e: this.lastSeqWritten.entrySet()) {
699       if (e.getValue().longValue() == oldestOutstandingSeqNum.longValue()) {
700         oldestRegion = e.getKey();
701         break;
702       }
703     }
704     return oldestRegion;
705   }
706 
707   /*
708    * Cleans up the current writer, closing it and adding it to outputfiles.
709    * Presumes we're operating inside an updateLock scope.
710    * @return Path to current writer or null if none.
711    * @throws IOException
712    */
713   private Path cleanupCurrentWriter(final long currentfilenum)
714   throws IOException {
715     Path oldFile = null;
716     if (this.writer != null) {
717       // Close the current writer, get a new one.
718       try {
719         this.writer.close();
720       } catch (IOException e) {
721         // Failed close of log file.  Means we're losing edits.  For now,
722         // shut ourselves down to minimize loss.  Alternative is to try and
723         // keep going.  See HBASE-930.
724         FailedLogCloseException flce =
725           new FailedLogCloseException("#" + currentfilenum);
726         flce.initCause(e);
727         throw flce;
728       }
729       if (currentfilenum >= 0) {
730         oldFile = computeFilename(currentfilenum);
731         this.outputfiles.put(Long.valueOf(this.logSeqNum.get()), oldFile);
732       }
733     }
734     return oldFile;
735   }
736 
737   private void archiveLogFile(final Path p, final Long seqno) throws IOException {
738     Path newPath = getHLogArchivePath(this.oldLogDir, p);
739     LOG.info("moving old hlog file " + FSUtils.getPath(p) +
740       " whose highest sequenceid is " + seqno + " to " +
741       FSUtils.getPath(newPath));
742     if (!this.fs.rename(p, newPath)) {
743       throw new IOException("Unable to rename " + p + " to " + newPath);
744     }
745   }
746 
747   /**
748    * This is a convenience method that computes a new filename
749    * using the current HLog file-number.
750    * @return Path
751    */
752   protected Path computeFilename() {
753     return computeFilename(this.filenum);
754   }
755 
756   /**
757    * This is a convenience method that computes a new filename with a given
758    * file-number.
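   * <p>The result is <code>dir/prefix.filenum</code>; because the file number
   * is the creation timestamp, a hypothetical example is
   * <code>hlog.1281034967541</code>.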
759    * @param filenum to use
760    * @return Path
761    */
762   protected Path computeFilename(long filenum) {
763     if (filenum < 0) {
764       throw new RuntimeException("hlog file number can't be < 0");
765     }
766     return new Path(dir, prefix + "." + filenum);
767   }
768 
769   /**
770    * Shut down the log and delete the log directory
771    *
772    * @throws IOException
773    */
774   public void closeAndDelete() throws IOException {
775     close();
776     FileStatus[] files = fs.listStatus(this.dir);
777     for(FileStatus file : files) {
778       Path p = getHLogArchivePath(this.oldLogDir, file.getPath());
779       if (!fs.rename(file.getPath(),p)) {
780         throw new IOException("Unable to rename " + file.getPath() + " to " + p);
781       }
782     }
783     LOG.debug("Moved " + files.length + " log files to " +
784         FSUtils.getPath(this.oldLogDir));
785     if (!fs.delete(dir, true)) {
786       LOG.info("Unable to delete " + dir);
787     }
788   }
789 
790   /**
791    * Shut down the log.
792    *
793    * @throws IOException
794    */
795   public void close() throws IOException {
796     try {
797       logSyncerThread.interrupt();
798       // Make sure we synced everything
799       logSyncerThread.join(this.optionalFlushInterval*2);
800     } catch (InterruptedException e) {
801       LOG.error("Exception while waiting for syncer thread to die", e);
802     }
803 
804     cacheFlushLock.lock();
805     try {
806       // Tell our listeners that the log is closing
807       if (!this.listeners.isEmpty()) {
808         for (WALObserver i : this.listeners) {
809           i.logCloseRequested();
810         }
811       }
812       synchronized (updateLock) {
813         this.closed = true;
814         if (LOG.isDebugEnabled()) {
815           LOG.debug("closing hlog writer in " + this.dir.toString());
816         }
817         this.writer.close();
818       }
819     } finally {
820       cacheFlushLock.unlock();
821     }
822   }
823 
824   /** Append an entry to the log.
825    *
826    * @param regionInfo
827    * @param logEdit
828    * @param now Time of this edit write.
829    * @param isMetaRegion
830    * @throws IOException
831    */
831   public void append(HRegionInfo regionInfo, WALEdit logEdit,
832     final long now,
833     final boolean isMetaRegion)
834   throws IOException {
835     byte [] regionName = regionInfo.getEncodedNameAsBytes();
836     byte [] tableName = regionInfo.getTableDesc().getName();
837     this.append(regionInfo, makeKey(regionName, tableName, -1, now), logEdit);
838   }
839 
840   /**
841    * @param regionName
842    * @param tableName
843    * @param seqnum
844    * @param now
845    * @return New log key.
845    */
846   protected HLogKey makeKey(byte[] regionName, byte[] tableName, long seqnum, long now) {
847     return new HLogKey(regionName, tableName, seqnum, now);
848   }
849 
850 
851 
852   /** Append an entry to the log.
853    *
854    * @param regionInfo
855    * @param logEdit
856    * @param logKey
857    * @throws IOException
858    */
859   public void append(HRegionInfo regionInfo, HLogKey logKey, WALEdit logEdit)
860   throws IOException {
861     if (this.closed) {
862       throw new IOException("Cannot append; log is closed");
863     }
864     synchronized (updateLock) {
865       long seqNum = obtainSeqNum();
866       logKey.setLogSeqNum(seqNum);
867       // The 'lastSeqWritten' map holds the sequence number of the oldest
868       // write for each region (i.e. the first edit added to the particular
869       // memstore). When the cache is flushed, the entry for the
870       // region being flushed is removed if the sequence number of the flush
871       // is greater than or equal to the value in lastSeqWritten.
872       this.lastSeqWritten.putIfAbsent(regionInfo.getEncodedNameAsBytes(),
873         Long.valueOf(seqNum));
874       doWrite(regionInfo, logKey, logEdit);
875       this.numEntries.incrementAndGet();
876     }
877 
878     // Sync if catalog region, and if not then check if that table supports
879     // deferred log flushing
880     if (regionInfo.isMetaRegion() ||
881         !regionInfo.getTableDesc().isDeferredLogFlush()) {
882       // sync txn to file system
883       this.sync();
884     }
885   }
886 
887   /**
888    * Append a set of edits to the log. Log edits are keyed by (encoded)
889    * regionName, rowname, and log-sequence-id.
890    *
891    * Later, if we sort by these keys, we obtain all the relevant edits for a
892    * given key-range of the HRegion (TODO). Any edits that do not have a
893    * matching COMPLETE_CACHEFLUSH message can be discarded.
894    *
895    * <p>
896    * Logs cannot be restarted once closed, or once the HLog process dies. Each
897    * time the HLog starts, it must create a new log. This means that other
898    * systems should process the log appropriately upon each startup (and prior
899    * to initializing HLog).
900    *
901    * synchronized prevents appends during the completion of a cache flush or for
902    * the duration of a log roll.
903    *
904    * @param info
905    * @param tableName
906    * @param edits
907    * @param now
908    * @throws IOException
909    */
910   public void append(HRegionInfo info, byte [] tableName, WALEdit edits,
911     final long now)
912   throws IOException {
913     if (edits.isEmpty()) return;
914     if (this.closed) {
915       throw new IOException("Cannot append; log is closed");
916     }
917     synchronized (this.updateLock) {
918       long seqNum = obtainSeqNum();
919       // The 'lastSeqWritten' map holds the sequence number of the oldest
920       // write for each region (i.e. the first edit added to the particular
921       // memstore).  When the cache is flushed, the entry for the
922       // region being flushed is removed if the sequence number of the flush
923       // is greater than or equal to the value in lastSeqWritten.
924       // Use encoded name.  It's shorter, guaranteed unique and a subset of the
925       // actual name.
926       byte [] hriKey = info.getEncodedNameAsBytes();
927       this.lastSeqWritten.putIfAbsent(hriKey, seqNum);
928       HLogKey logKey = makeKey(hriKey, tableName, seqNum, now);
929       doWrite(info, logKey, edits);
930       this.numEntries.incrementAndGet();
931     }
932     // Sync if catalog region, and if not then check if that table supports
933     // deferred log flushing
934     if (info.isMetaRegion() ||
935         !info.getTableDesc().isDeferredLogFlush()) {
936       // sync txn to file system
937       this.sync();
938     }
939   }
940 
941   /**
942    * Thread responsible for periodically calling sync() so that edits reach
943    * the filesystem even when callers do not request an explicit sync.
944    */
945    class LogSyncer extends Thread {
946 
947     private final long optionalFlushInterval;
948 
949     private boolean syncerShuttingDown = false;
950 
951     LogSyncer(long optionalFlushInterval) {
952       this.optionalFlushInterval = optionalFlushInterval;
953     }
954 
955     @Override
956     public void run() {
957       try {
958         // Sleep between syncs; an interrupt ends the loop.
959         while (!this.isInterrupted()) {
962           Thread.sleep(this.optionalFlushInterval);
963           sync();
964         }
965       } catch (IOException e) {
966         LOG.error("Error while syncing, requesting close of hlog ", e);
967         requestLogRoll();
968       } catch (InterruptedException e) {
969         LOG.debug(getName() + " interrupted while waiting for sync requests");
970       } finally {
971         syncerShuttingDown = true;
972         LOG.info(getName() + " exiting");
973       }
974     }
975   }
976 
977   public void sync() throws IOException {
978     synchronized (this.updateLock) {
979       if (this.closed) {
980         return;
981       }
982     }
983     try {
984       long now = System.currentTimeMillis();
985       // Done in parallel for all writer threads, thanks to HDFS-895
986       this.writer.sync();
987       synchronized (this.updateLock) {
988         syncTime += System.currentTimeMillis() - now;
989         syncOps++;
990         if (!logRollRequested) {
991           checkLowReplication();
992           if (this.writer.getLength() > this.logrollsize) {
993             requestLogRoll();
994           }
995         }
996       }
997 
998     } catch (IOException e) {
999       LOG.fatal("Could not sync. Requesting close of hlog", e);
1000       requestLogRoll();
1001       throw e;
1002     }
1003   }
1004 
1005   private void checkLowReplication() {
1006     // if the number of replicas in HDFS has fallen below the initial
1007     // value, then roll logs.
1008     try {
1009       int numCurrentReplicas = getLogReplication();
1010       if (numCurrentReplicas != 0 &&
1011           numCurrentReplicas < this.initialReplication) {
1012         LOG.warn("HDFS pipeline error detected. " +
1013             "Found " + numCurrentReplicas + " replicas but expecting " +
1014             this.initialReplication + " replicas. " +
1015             " Requesting close of hlog.");
1016         requestLogRoll();
1017         logRollRequested = true;
1018       }
1019     } catch (Exception e) {
1020       LOG.warn("Unable to invoke DFSOutputStream.getNumCurrentReplicas: " + e +
1021           "; still proceeding ahead...");
1022     }
1023   }
1024 
1025   /**
1026    * This method gets the datanode replication count for the current HLog.
1027    *
1028    * If the pipeline isn't started yet or is empty, you will get the default
1029    * replication factor.  Therefore, if this function returns 0, it means you
1030    * are not properly running with the HDFS-826 patch.
1031    * @throws InvocationTargetException
1032    * @throws IllegalAccessException
1033    * @throws IllegalArgumentException
1036    */
1037   int getLogReplication() throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
1038     if(this.getNumCurrentReplicas != null && this.hdfs_out != null) {
1039       Object repl = this.getNumCurrentReplicas.invoke(this.hdfs_out, NO_ARGS);
1040       if (repl instanceof Integer) {
1041         return ((Integer)repl).intValue();
1042       }
1043     }
1044     return 0;
1045   }
1046 
1047   boolean canGetCurReplicas() {
1048     return this.getNumCurrentReplicas != null;
1049   }
1050 
1051   public void hsync() throws IOException {
1052     // Not yet implemented up in hdfs so just call hflush.
1053     sync();
1054   }
1055 
1056   private void requestLogRoll() {
1057     if (!this.listeners.isEmpty()) {
1058       for (WALObserver i: this.listeners) {
1059         i.logRollRequested();
1060       }
1061     }
1062   }
1063 
1064   protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit)
1065   throws IOException {
1066     if (!this.enabled) {
1067       return;
1068     }
1069     if (!this.listeners.isEmpty()) {
1070       for (WALObserver i: this.listeners) {
1071         i.visitLogEntryBeforeWrite(info, logKey, logEdit);
1072       }
1073     }
1074     try {
1075       long now = System.currentTimeMillis();
1076       this.writer.append(new HLog.Entry(logKey, logEdit));
1077       long took = System.currentTimeMillis() - now;
1078       writeTime += took;
1079       writeOps++;
1080       if (took > 1000) {
1081         long len = 0;
1082         for(KeyValue kv : logEdit.getKeyValues()) { 
1083           len += kv.getLength(); 
1084         }
1085         LOG.warn(String.format(
1086           "%s took %d ms appending an edit to hlog; editcount=%d, len~=%s",
1087           Thread.currentThread().getName(), took, this.numEntries.get(), 
1088           StringUtils.humanReadableInt(len)));
1089       }
1090     } catch (IOException e) {
1091       LOG.fatal("Could not append. Requesting close of hlog", e);
1092       requestLogRoll();
1093       throw e;
1094     }
1095   }
1096 
1097   /** @return How many items have been added to the log */
1098   int getNumEntries() {
1099     return numEntries.get();
1100   }
1101 
1102   /**
1103    * Obtain a log sequence number.
1104    */
1105   private long obtainSeqNum() {
1106     return this.logSeqNum.incrementAndGet();
1107   }
1108 
1109   /** @return the number of log files in use */
1110   int getNumLogFiles() {
1111     return outputfiles.size();
1112   }
1113 
1114   /**
1115    * By acquiring a log sequence ID, we can allow log messages to continue while
1116    * we flush the cache.
1117    *
1118    * Acquire a lock so that we do not roll the log between the start and
1119    * completion of a cache-flush. Otherwise the log-seq-id for the flush will
1120    * not appear in the correct logfile.
1121    *
1122    * @return sequence ID to pass
1123    * {@link #completeCacheFlush(byte[], byte[], long, boolean)}
1124    * @see #completeCacheFlush(byte[], byte[], long, boolean)
1125    * @see #abortCacheFlush()
1126    */
1127   public long startCacheFlush() {
1128     this.cacheFlushLock.lock();
1129     return obtainSeqNum();
1130   }
1131 
1132   /**
1133    * Complete the cache flush
1134    *
1135    * Protected by cacheFlushLock
1136    *
1137    * @param encodedRegionName
1138    * @param tableName
1139    * @param logSeqId
1140    * @throws IOException
1141    */
1142   public void completeCacheFlush(final byte [] encodedRegionName,
1143       final byte [] tableName, final long logSeqId, final boolean isMetaRegion)
1144   throws IOException {
1145     try {
1146       if (this.closed) {
1147         return;
1148       }
1149       synchronized (updateLock) {
1150         long now = System.currentTimeMillis();
1151         WALEdit edit = completeCacheFlushLogEdit();
1152         HLogKey key = makeKey(encodedRegionName, tableName, logSeqId, now);
1154         this.writer.append(new Entry(key, edit));
1155         writeTime += System.currentTimeMillis() - now;
1156         writeOps++;
1157         this.numEntries.incrementAndGet();
1158         Long seq = this.lastSeqWritten.get(encodedRegionName);
1159         if (seq != null && logSeqId >= seq.longValue()) {
1160           this.lastSeqWritten.remove(encodedRegionName);
1161         }
1162       }
1163       // sync txn to file system
1164       this.sync();
1165 
1166     } finally {
1167       this.cacheFlushLock.unlock();
1168     }
1169   }
1170 
1171   private WALEdit completeCacheFlushLogEdit() {
1172     KeyValue kv = new KeyValue(METAROW, METAFAMILY, null,
1173       System.currentTimeMillis(), COMPLETE_CACHE_FLUSH);
1174     WALEdit e = new WALEdit();
1175     e.add(kv);
1176     return e;
1177   }
1178 
1179   /**
1180    * Abort a cache flush.
1181    * Call if the flush fails. Note that the only recovery for an aborted flush
1182    * currently is a restart of the regionserver so the snapshot content dropped
1183    * by the failure gets restored to the memstore.
1184    */
1185   public void abortCacheFlush() {
1186     this.cacheFlushLock.unlock();
1187   }
1188 
1189   /**
1190    * @param family
1191    * @return true if the column is a meta column
1192    */
1193   public static boolean isMetaFamily(byte [] family) {
1194     return Bytes.equals(METAFAMILY, family);
1195   }
1196 
1197   @SuppressWarnings("unchecked")
1198   public static Class<? extends HLogKey> getKeyClass(Configuration conf) {
1199      return (Class<? extends HLogKey>)
1200        conf.getClass("hbase.regionserver.hlog.keyclass", HLogKey.class);
1201   }
1202 
1203   public static HLogKey newKey(Configuration conf) throws IOException {
1204     Class<? extends HLogKey> keyClass = getKeyClass(conf);
1205     try {
1206       return keyClass.newInstance();
1207     } catch (InstantiationException e) {
1208       throw new IOException("cannot create hlog key", e);
1209     } catch (IllegalAccessException e) {
1210       throw new IOException("cannot create hlog key", e);
1211     }
1212   }
1213 
1214   /**
1215    * Utility class that lets us keep track of the edit with its key.
1216    * Only used when splitting logs.
1217    */
1218   public static class Entry implements Writable {
1219     private WALEdit edit;
1220     private HLogKey key;
1221 
1222     public Entry() {
1223       edit = new WALEdit();
1224       key = new HLogKey();
1225     }
1226 
1227     /**
1228      * Constructor taking both the key and the edit.
1229      * @param key log's key
1230      * @param edit log's edit
1231      */
1232     public Entry(HLogKey key, WALEdit edit) {
1233       super();
1234       this.key = key;
1235       this.edit = edit;
1236     }
1237     /**
1238      * Gets the edit
1239      * @return edit
1240      */
1241     public WALEdit getEdit() {
1242       return edit;
1243     }
1244     /**
1245      * Gets the key
1246      * @return key
1247      */
1248     public HLogKey getKey() {
1249       return key;
1250     }
1251 
1252     @Override
1253     public String toString() {
1254       return this.key + "=" + this.edit;
1255     }
1256 
1257     @Override
1258     public void write(DataOutput dataOutput) throws IOException {
1259       this.key.write(dataOutput);
1260       this.edit.write(dataOutput);
1261     }
1262 
1263     @Override
1264     public void readFields(DataInput dataInput) throws IOException {
1265       this.key.readFields(dataInput);
1266       this.edit.readFields(dataInput);
1267     }
1268   }
1269 
1270   /**
1271    * Construct the HLog directory name
1272    *
1273    * @param info HServerInfo for server
1274    * @return the HLog directory name
1275    */
1276   public static String getHLogDirectoryName(HServerInfo info) {
1277     return getHLogDirectoryName(info.getServerName());
1278   }
1279 
1280   /**
1281    * Construct the HLog directory name
1282    *
1283    * @param serverAddress
1284    * @param startCode
1285    * @return the HLog directory name
1286    */
1287   public static String getHLogDirectoryName(String serverAddress,
1288       long startCode) {
1289     if (serverAddress == null || serverAddress.length() == 0) {
1290       return null;
1291     }
1292     return getHLogDirectoryName(
1293         HServerInfo.getServerName(serverAddress, startCode));
1294   }
1295 
1296   /**
1297    * Construct the HLog directory name
1298    *
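   * <p>The result is <code>HREGION_LOGDIR_NAME + "/" + serverName</code>
   * (with the usual constant value this looks like
   * <code>.logs/serverName</code>; the ".logs" value is an assumption here).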
1299    * @param serverName
1300    * @return the HLog directory name
1301    */
1302   public static String getHLogDirectoryName(String serverName) {
1303     StringBuilder dirName = new StringBuilder(HConstants.HREGION_LOGDIR_NAME);
1304     dirName.append("/");
1305     dirName.append(serverName);
1306     return dirName.toString();
1307   }
1308 
1309   /**
1310    * Get the directory we are making logs in.
1311    * 
1312    * @return dir
1313    */
1314   protected Path getDir() {
1315     return dir;
1316   }
1317   
1318   public static boolean validateHLogFilename(String filename) {
1319     return pattern.matcher(filename).matches();
1320   }
1321 
1322   static Path getHLogArchivePath(Path oldLogDir, Path p) {
1323     return new Path(oldLogDir, p.getName());
1324   }
1325 
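  /**
   * Format a sequence id as a zero-padded, fixed-width (19 digit) name so
   * that lexicographic and numeric orderings agree; e.g.
   * <code>formatRecoveredEditsFileName(53)</code> yields
   * <code>"0000000000000000053"</code>.
   */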
1326   static String formatRecoveredEditsFileName(final long seqid) {
1327     return String.format("%019d", seqid);
1328   }
1329 
1330   /**
1331    * Returns sorted set of edit files made by wal-log splitter.
1332    * @param fs
1333    * @param regiondir
1334    * @return Files in passed <code>regiondir</code> as a sorted set.
1335    * @throws IOException
1336    */
1337   public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem fs,
1338       final Path regiondir)
1339   throws IOException {
1340     Path editsdir = getRegionDirRecoveredEditsDir(regiondir);
1341     FileStatus[] files = fs.listStatus(editsdir, new PathFilter() {
1342       @Override
1343       public boolean accept(Path p) {
1344         boolean result = false;
1345         try {
1346           // Return files and only files that match the editfile names pattern.
1347           // There can be other files in this directory other than edit files.
1348           // In particular, on error, we'll move aside the bad edit file giving
1349           // it a timestamp suffix.  See moveAsideBadEditsFile.
1350           Matcher m = EDITFILES_NAME_PATTERN.matcher(p.getName());
1351           result = fs.isFile(p) && m.matches();
1352         } catch (IOException e) {
1353           LOG.warn("Failed isFile check on " + p, e);
1354         }
1355         return result;
1356       }
1357     });
1358     NavigableSet<Path> filesSorted = new TreeSet<Path>();
1359     if (files == null) return filesSorted;
1360     for (FileStatus status: files) {
1361       filesSorted.add(status.getPath());
1362     }
1363     return filesSorted;
1364   }
1365 
1366   /**
1367    * Move aside a bad edits file.
1368    * @param fs
1369    * @param edits Edits file to move aside.
1370    * @return The name of the moved aside file.
1371    * @throws IOException
1372    */
1373   public static Path moveAsideBadEditsFile(final FileSystem fs,
1374       final Path edits)
1375   throws IOException {
1376     Path moveAsideName = new Path(edits.getParent(), edits.getName() + "." +
1377       System.currentTimeMillis());
1378     if (!fs.rename(edits, moveAsideName)) {
1379       LOG.warn("Rename failed from " + edits + " to " + moveAsideName);
1380     }
1381     return moveAsideName;
1382   }
1383 
1384   /**
1385    * @param regiondir This regions directory in the filesystem.
1386    * @return The directory that holds recovered edits files for the region
1387    * <code>regiondir</code>
1388    */
1389   public static Path getRegionDirRecoveredEditsDir(final Path regiondir) {
1390     return new Path(regiondir, RECOVERED_EDITS_DIR);
1391   }
1392 
1393   public static final long FIXED_OVERHEAD = ClassSize.align(
1394     ClassSize.OBJECT + (5 * ClassSize.REFERENCE) +
1395     ClassSize.ATOMIC_INTEGER + Bytes.SIZEOF_INT + (3 * Bytes.SIZEOF_LONG));
1396 
1397   private static void usage() {
1398     System.err.println("Usage: HLog <ARGS>");
1399     System.err.println("Arguments:");
1400     System.err.println(" --dump  Dump textual representation of one or more passed files");
1401     System.err.println("         For example: HLog --dump hdfs://example.com:9000/hbase/.logs/MACHINE/LOGFILE");
1402     System.err.println(" --split Split the passed directory of WAL logs");
1403     System.err.println("         For example: HLog --split hdfs://example.com:9000/hbase/.logs/DIR");
1404   }
1405 
1406   private static void dump(final Configuration conf, final Path p)
1407   throws IOException {
1408     FileSystem fs = FileSystem.get(conf);
1409     if (!fs.exists(p)) {
1410       throw new FileNotFoundException(p.toString());
1411     }
1412     if (!fs.isFile(p)) {
1413       throw new IOException(p + " is not a file");
1414     }
1415     Reader log = getReader(fs, p, conf);
1416     try {
1417       int count = 0;
1418       HLog.Entry entry;
1419       while ((entry = log.next()) != null) {
1420         System.out.println("#" + count + ", pos=" + log.getPosition() + " " +
1421           entry.toString());
1422         count++;
1423       }
1424     } finally {
1425       log.close();
1426     }
1427   }
1428 
1429   private static void split(final Configuration conf, final Path p)
1430   throws IOException {
1431     FileSystem fs = FileSystem.get(conf);
1432     if (!fs.exists(p)) {
1433       throw new FileNotFoundException(p.toString());
1434     }
1435     final Path baseDir = new Path(conf.get(HConstants.HBASE_DIR));
1436     final Path oldLogDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
1437     if (!fs.getFileStatus(p).isDir()) {
1438       throw new IOException(p + " is not a directory");
1439     }
1440 
1441     HLogSplitter logSplitter = HLogSplitter.createLogSplitter(
1442         conf, baseDir, p, oldLogDir, fs);
1443     logSplitter.splitLog();
1444   }
1445 
1446   /**
1447    * Pass one or more log file names and it will either dump out a text version
1448    * on <code>stdout</code> or split the specified log files.
1449    *
1450    * @param args
1451    * @throws IOException
1452    */
1453   public static void main(String[] args) throws IOException {
1454     if (args.length < 2) {
1455       usage();
1456       System.exit(-1);
1457     }
1458     boolean dump = true;
1459     if (args[0].compareTo("--dump") != 0) {
1460       if (args[0].compareTo("--split") == 0) {
1461         dump = false;
1462       } else {
1463         usage();
1464         System.exit(-1);
1465       }
1466     }
1467     Configuration conf = HBaseConfiguration.create();
1468     for (int i = 1; i < args.length; i++) {
1469       try {
1470         conf.set("fs.default.name", args[i]);
1471         conf.set("fs.defaultFS", args[i]);
1472         Path logPath = new Path(args[i]);
1473         if (dump) {
1474           dump(conf, logPath);
1475         } else {
1476           split(conf, logPath);
1477         }
1478       } catch (Throwable t) {
1479         t.printStackTrace(System.err);
1480         System.exit(-1);
1481       }
1482     }
1483   }
1484 }