1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20 package org.apache.hadoop.hbase.regionserver.wal;
21
22 import java.io.DataInput;
23 import java.io.DataOutput;
24 import java.io.FileNotFoundException;
25 import java.io.IOException;
26 import java.io.OutputStream;
27 import java.io.UnsupportedEncodingException;
28 import java.lang.reflect.InvocationTargetException;
29 import java.lang.reflect.Method;
30 import java.net.URLEncoder;
31 import java.util.ArrayList;
32 import java.util.Arrays;
33 import java.util.Collections;
34 import java.util.LinkedList;
35 import java.util.List;
36 import java.util.Map;
37 import java.util.NavigableSet;
38 import java.util.SortedMap;
39 import java.util.TreeMap;
40 import java.util.TreeSet;
41 import java.util.UUID;
42 import java.util.concurrent.ConcurrentSkipListMap;
43 import java.util.concurrent.CopyOnWriteArrayList;
44 import java.util.concurrent.atomic.AtomicBoolean;
45 import java.util.concurrent.atomic.AtomicInteger;
46 import java.util.concurrent.atomic.AtomicLong;
47 import java.util.concurrent.locks.Lock;
48 import java.util.concurrent.locks.ReentrantLock;
49 import java.util.regex.Matcher;
50 import java.util.regex.Pattern;
51
52 import org.apache.commons.logging.Log;
53 import org.apache.commons.logging.LogFactory;
54 import org.apache.hadoop.conf.Configuration;
55 import org.apache.hadoop.fs.FSDataOutputStream;
56 import org.apache.hadoop.fs.FileStatus;
57 import org.apache.hadoop.fs.FileSystem;
58 import org.apache.hadoop.fs.Path;
59 import org.apache.hadoop.fs.PathFilter;
60 import org.apache.hadoop.fs.Syncable;
61 import org.apache.hadoop.hbase.HBaseConfiguration;
62 import org.apache.hadoop.hbase.HBaseFileSystem;
63 import org.apache.hadoop.hbase.HConstants;
64 import org.apache.hadoop.hbase.HRegionInfo;
65 import org.apache.hadoop.hbase.HTableDescriptor;
66 import org.apache.hadoop.hbase.KeyValue;
67 import org.apache.hadoop.hbase.ServerName;
68 import org.apache.hadoop.hbase.util.Bytes;
69 import org.apache.hadoop.hbase.util.ClassSize;
70 import org.apache.hadoop.hbase.util.FSUtils;
71 import org.apache.hadoop.hbase.util.HasThread;
72 import org.apache.hadoop.hbase.util.Threads;
73 import org.apache.hadoop.io.Writable;
74 import org.apache.hadoop.util.StringUtils;
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
public class HLog implements Syncable {
  static final Log LOG = LogFactory.getLog(HLog.class);
  /** Column family name reserved for WAL "meta" entries (e.g. cache-flush markers). */
  public static final byte [] METAFAMILY = Bytes.toBytes("METAFAMILY");
  static final byte [] METAROW = Bytes.toBytes("METAROW");

  /** Suffix appended to a server's log directory while its logs are being split. */
  public static final String SPLITTING_EXT = "-splitting";
  public static final boolean SPLIT_SKIP_ERRORS_DEFAULT = false;

  /** File name extension for WALs that carry edits for META regions only. */
  public static final String META_HLOG_FILE_EXTN = ".meta";
  /** Conf key: keep META edits in a separate HLog from user-table edits. */
  public static final String SEPARATE_HLOG_FOR_META = "hbase.regionserver.separate.hlog.for.meta";

  /** Directory name holding edits recovered from split logs. */
  public static final String RECOVERED_EDITS_DIR = "recovered.edits";
  // Recovered-edits files are named by a (possibly negative) sequence number.
  private static final Pattern EDITFILES_NAME_PATTERN =
      Pattern.compile("-?[0-9]+");
  public static final String RECOVERED_LOG_TMPFILE_SUFFIX = ".temp";

  private final FileSystem fs;
  private final Path dir;            // directory holding the live WAL files
  private final Configuration conf;
  private final HLogFileSystem hlogFs;

  // Listeners notified of rolls, archivals and appends. CopyOnWrite because
  // registration is rare while iteration happens on every roll/append.
  private List<WALActionsListener> listeners =
      new CopyOnWriteArrayList<WALActionsListener>();
  private final long optionalFlushInterval;  // ms between background syncs
  private final long blocksize;
  private final String prefix;               // file name prefix for this HLog's files
  private final AtomicLong unflushedEntries = new AtomicLong(0);
  // Highest transaction id known to be durably synced to the filesystem.
  private volatile long syncedTillHere = 0;
  private long lastDeferredTxid;             // last txid appended with deferred flush
  private final Path oldLogDir;              // archive directory for rolled logs
  private volatile boolean logRollRunning;   // true while rollWriter is in progress

  // Lazily resolved implementation classes for reading/writing log files.
  private static Class<? extends Writer> logWriterClass;
  private static Class<? extends Reader> logReaderClass;

  private WALCoprocessorHost coprocessorHost;

  /** Test hook: forces re-resolution of the configured reader class. */
  static void resetLogReaderClass() {
    HLog.logReaderClass = null;
  }

  // Underlying HDFS stream of the current writer; used for replication probing.
  private FSDataOutputStream hdfs_out;

  // Request a log roll if the pipeline replication drops below this.
  private int minTolerableReplication;
  // Reflection handle to DFSOutputStream#getNumCurrentReplicas (HDFS-826), if present.
  private Method getNumCurrentReplicas;
  final static Object [] NO_ARGS = new Object []{};
167
  /** Reads entries back out of a written log file. */
  public interface Reader {
    /** Opens this reader over the given log file. */
    void init(FileSystem fs, Path path, Configuration c) throws IOException;
    void close() throws IOException;
    /** @return the next entry, or null at end of file */
    Entry next() throws IOException;
    /** Like {@link #next()} but may fill and return the passed entry to avoid allocation. */
    Entry next(Entry reuse) throws IOException;
    /** Positions the reader at the given byte offset. */
    void seek(long pos) throws IOException;
    long getPosition() throws IOException;
    void reset() throws IOException;
  }

  /** Appends entries to an open log file. */
  public interface Writer {
    /** Creates/opens the log file this writer will append to. */
    void init(FileSystem fs, Path path, Configuration c) throws IOException;
    void close() throws IOException;
    /** Flushes appended entries durably to the filesystem. */
    void sync() throws IOException;
    void append(Entry entry) throws IOException;
    /** @return current length of the file being written */
    long getLength() throws IOException;
  }
185
186
187
188
  // Current active log writer; swapped under updateLock during a roll.
  Writer writer;

  // Rolled-out log files keyed by the highest sequence id written to each;
  // consulted when deciding which old logs can be archived.
  final SortedMap<Long, Path> outputfiles =
      Collections.synchronizedSortedMap(new TreeMap<Long, Path>());

  // Per-region (encoded name) oldest sequence id whose edits are not yet
  // flushed to HFiles. An entry here pins the corresponding logs from archival.
  private final ConcurrentSkipListMap<byte [], Long> lastSeqWritten =
      new ConcurrentSkipListMap<byte [], Long>(Bytes.BYTES_COMPARATOR);

  private volatile boolean closed = false;

  // Monotonically increasing WAL sequence-id generator.
  private final AtomicLong logSeqNum = new AtomicLong(0);

  private boolean forMeta = false;  // true if this HLog carries META edits only

  // Timestamp-based id of the current log file; -1 until the first roll.
  private volatile long filenum = -1;

  // Number of entries appended to the current log file.
  private final AtomicInteger numEntries = new AtomicInteger(0);

  // Consecutive rolls triggered by low replication; bounded by
  // lowReplicationRollLimit to avoid roll storms when datanodes are scarce.
  private volatile int consecutiveLogRolls = 0;
  private final int lowReplicationRollLimit;

  // When false, low-replication detection no longer requests rolls (it is
  // re-enabled once replication recovers; see checkLowReplication()).
  private volatile boolean lowReplicationRollEnabled = true;

  // Roll the log once the file grows past this many bytes.
  private final long logrollsize;

  // Held for the full duration of a roll or close; cache flushes take it too,
  // so a roll never races a memstore flush.
  private final Lock cacheFlushLock = new ReentrantLock();

  // updateLock serializes appends and writer swaps; flushLock serializes
  // draining of pending writes into the writer.
  private final Object updateLock = new Object();
  private final Object flushLock = new Object();

  // If false, appends are silently dropped (testing/diagnostic switch).
  private final boolean enabled;

  // Soft cap on the number of un-archived log files; exceeding it forces
  // memstore flushes so old logs can be released.
  private final int maxLogs;

  // Background thread that periodically syncs deferred edits.
  private final LogSyncer logSyncer;

  // Number of writer-close failures to ride over before aborting.
  private final int closeErrorsTolerated;

  private final AtomicInteger closeErrorCount = new AtomicInteger();

  // Matches hlog file names, with or without the .meta suffix.
  private static final Pattern pattern =
      Pattern.compile(".*\\.\\d*("+HLog.META_HLOG_FILE_EXTN+")*");

  // Marker payload written with cache-flush meta edits.
  static byte [] COMPLETE_CACHE_FLUSH;
  static {
    try {
      COMPLETE_CACHE_FLUSH =
        "HBASE::CACHEFLUSH".getBytes(HConstants.UTF8_ENCODING);
    } catch (UnsupportedEncodingException e) {
      // UTF-8 is mandated by the JVM spec, so this cannot happen.
      assert(false);
    }
  }
276
277 public static class Metric {
278 public long min = Long.MAX_VALUE;
279 public long max = 0;
280 public long total = 0;
281 public int count = 0;
282
283 synchronized void inc(final long val) {
284 min = Math.min(min, val);
285 max = Math.max(max, val);
286 total += val;
287 ++count;
288 }
289
290 synchronized Metric get() {
291 Metric copy = new Metric();
292 copy.min = min;
293 copy.max = max;
294 copy.total = total;
295 copy.count = count;
296 this.min = Long.MAX_VALUE;
297 this.max = 0;
298 this.total = 0;
299 this.count = 0;
300 return copy;
301 }
302 }
303
304
  // Rolling metrics exposed via the static getters below. Each get() call
  // returns an interval snapshot and resets the underlying metric.
  private static Metric writeTime = new Metric();
  private static Metric writeSize = new Metric();

  private static Metric syncTime = new Metric();

  // Appends that exceeded the slow-append threshold (count is cumulative).
  private static AtomicLong slowHLogAppendCount = new AtomicLong();
  private static Metric slowHLogAppendTime = new Metric();

  /** @return write-latency stats since the last call; resets the metric. */
  public static Metric getWriteTime() {
    return writeTime.get();
  }

  /** @return write-size stats since the last call; resets the metric. */
  public static Metric getWriteSize() {
    return writeSize.get();
  }

  /** @return sync-latency stats since the last call; resets the metric. */
  public static Metric getSyncTime() {
    return syncTime.get();
  }

  /** @return cumulative count of slow appends (never reset). */
  public static long getSlowAppendCount() {
    return slowHLogAppendCount.get();
  }

  /** @return slow-append latency stats since the last call; resets the metric. */
  public static Metric getSlowAppendTime() {
    return slowHLogAppendTime.get();
  }
332
333
334
335
336
337
338
339
340
341
  /**
   * Constructor with no listeners, a default "hlog" file prefix, and a
   * failure if the log directory already exists.
   *
   * @param fs filesystem handle
   * @param dir directory where new log files are created
   * @param oldLogDir directory where rolled log files are archived
   * @param conf configuration to use
   * @throws IOException if the directories cannot be created/validated
   */
  public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
              final Configuration conf)
  throws IOException {
    this(fs, dir, oldLogDir, conf, null, true, null, false);
  }

  /**
   * Constructor registering the given listeners and using the given file
   * prefix; fails if the log directory already exists.
   *
   * @param fs filesystem handle
   * @param dir directory where new log files are created
   * @param oldLogDir directory where rolled log files are archived
   * @param conf configuration to use
   * @param listeners listeners to notify of log events, may be null
   * @param prefix log file name prefix; null/empty means "hlog"
   * @throws IOException if the directories cannot be created/validated
   */
  public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
              final Configuration conf, final List<WALActionsListener> listeners,
              final String prefix) throws IOException {
    this(fs, dir, oldLogDir, conf, listeners, true, prefix, false);
  }
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
  /**
   * Fully parameterized constructor: validates/creates the log directories,
   * reads tuning knobs from the configuration, opens the first log file
   * (via {@link #rollWriter()}) and starts the background log syncer.
   *
   * @param fs filesystem handle
   * @param dir directory where new log files are created
   * @param oldLogDir directory where rolled log files are archived
   * @param conf configuration to use
   * @param listeners listeners to notify of log events, may be null
   * @param failIfLogDirExists throw if {@code dir} already exists
   * @param prefix log file name prefix; null/empty means "hlog"
   * @param forMeta whether this HLog is dedicated to META edits
   * @throws IOException on directory or writer creation failure
   */
  public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
              final Configuration conf, final List<WALActionsListener> listeners,
              final boolean failIfLogDirExists, final String prefix, boolean forMeta)
  throws IOException {
    super();
    this.fs = fs;
    this.dir = dir;
    this.conf = conf;
    this.hlogFs = new HLogFileSystem(conf);
    if (listeners != null) {
      for (WALActionsListener i: listeners) {
        registerWALActionsListener(i);
      }
    }
    this.blocksize = conf.getLong("hbase.regionserver.hlog.blocksize",
        FSUtils.getDefaultBlockSize(this.fs, this.dir));
    // Roll when the file reaches blocksize * multiplier (default 95% of a block).
    float multi = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.95f);
    this.logrollsize = (long)(this.blocksize * multi);
    this.optionalFlushInterval =
        conf.getLong("hbase.regionserver.optionallogflushinterval", 1 * 1000);
    boolean dirExists = false;
    if (failIfLogDirExists && (dirExists = this.fs.exists(dir))) {
      throw new IOException("Target HLog directory already exists: " + dir);
    }
    if (!dirExists && !HBaseFileSystem.makeDirOnFileSystem(fs, dir)) {
      throw new IOException("Unable to mkdir " + dir);
    }
    this.oldLogDir = oldLogDir;
    if (!fs.exists(oldLogDir) && !HBaseFileSystem.makeDirOnFileSystem(fs, oldLogDir)) {
      throw new IOException("Unable to mkdir " + this.oldLogDir);
    }
    this.forMeta = forMeta;
    this.maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
    this.minTolerableReplication = conf.getInt(
        "hbase.regionserver.hlog.tolerable.lowreplication",
        FSUtils.getDefaultReplication(this.fs, this.dir));
    this.lowReplicationRollLimit = conf.getInt(
        "hbase.regionserver.hlog.lowreplication.rolllimit", 5);
    this.enabled = conf.getBoolean("hbase.regionserver.hlog.enabled", true);
    this.closeErrorsTolerated = conf.getInt(
        "hbase.regionserver.logroll.errors.tolerated", 0);

    LOG.info("HLog configuration: blocksize=" +
        StringUtils.byteDesc(this.blocksize) +
        ", rollsize=" + StringUtils.byteDesc(this.logrollsize) +
        ", enabled=" + this.enabled +
        ", optionallogflushinternal=" + this.optionalFlushInterval + "ms");

    // URL-encode the prefix so it is safe to embed in a file name.
    this.prefix = prefix == null || prefix.isEmpty() ?
        "hlog" : URLEncoder.encode(prefix, "UTF8");

    // Open the first log file.
    rollWriter();

    // Probe for the HDFS-826 replication API on the just-opened stream.
    this.getNumCurrentReplicas = getGetNumCurrentReplicas(this.hdfs_out);

    logSyncer = new LogSyncer(this.optionalFlushInterval);
    // A non-positive flush interval disables the background syncer; deferred
    // edits are then effectively synced inline on every append.
    if (this.optionalFlushInterval > 0) {
      Threads.setDaemonThreadRunning(logSyncer.getThread(), Thread.currentThread().getName()
          + ".logSyncer");
    } else {
      LOG.info("hbase.regionserver.optionallogflushinterval is set as "
          + this.optionalFlushInterval + ". Deferred log syncing won't work. "
          + "Any Mutation, marked to be deferred synced, will be flushed immediately.");
    }
    coprocessorHost = new WALCoprocessorHost(this, conf);
  }
463
464
465
466
467
468 private Method getGetNumCurrentReplicas(final FSDataOutputStream os) {
469 Method m = null;
470 if (os != null) {
471 Class<? extends OutputStream> wrappedStreamClass = os.getWrappedStream()
472 .getClass();
473 try {
474 m = wrappedStreamClass.getDeclaredMethod("getNumCurrentReplicas",
475 new Class<?>[] {});
476 m.setAccessible(true);
477 } catch (NoSuchMethodException e) {
478 LOG.info("FileSystem's output stream doesn't support"
479 + " getNumCurrentReplicas; --HDFS-826 not available; fsOut="
480 + wrappedStreamClass.getName());
481 } catch (SecurityException e) {
482 LOG.info("Doesn't have access to getNumCurrentReplicas on "
483 + "FileSystems's output stream --HDFS-826 not available; fsOut="
484 + wrappedStreamClass.getName(), e);
485 m = null;
486 }
487 }
488 if (m != null) {
489 LOG.info("Using getNumCurrentReplicas--HDFS-826");
490 }
491 return m;
492 }
493
  /** Registers a listener for roll/archive/append notifications. */
  public void registerWALActionsListener(final WALActionsListener listener) {
    this.listeners.add(listener);
  }

  /** @return true if the listener was registered and has been removed */
  public boolean unregisterWALActionsListener(final WALActionsListener listener) {
    return this.listeners.remove(listener);
  }

  /** @return id (creation timestamp) of the current log file, or -1 before the first roll */
  public long getFilenum() {
    return this.filenum;
  }

  /**
   * Advances the sequence-id generator to {@code newvalue} if it is ahead of
   * the current value; used when opening regions whose edits carry higher
   * ids than this log has issued. CAS loop: retries while another thread
   * concurrently moves the value, giving up once current >= newvalue.
   *
   * @param newvalue candidate new sequence id
   */
  public void setSequenceNumber(final long newvalue) {
    for (long id = this.logSeqNum.get(); id < newvalue &&
        !this.logSeqNum.compareAndSet(id, newvalue); id = this.logSeqNum.get()) {
      // Logged per retry; terminates when the CAS wins or id catches up.
      LOG.debug("Changed sequenceid from " + logSeqNum + " to " + newvalue);
    }
  }

  /** @return the most recently issued WAL sequence id */
  public long getSequenceNumber() {
    return logSeqNum.get();
  }
532
533
534
535
536
537
538
539
  /**
   * @return the raw stream wrapped by the current writer's output stream;
   *   used for the reflective replication check (see getLogReplication()).
   */
  OutputStream getOutputStream() {
    return this.hdfs_out.getWrappedStream();
  }
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
  /**
   * Rolls the log unless the current file is empty.
   * Equivalent to {@code rollWriter(false)}.
   *
   * @return regions whose memstores should be flushed so old logs can be
   *   archived, or null
   */
  public byte [][] rollWriter() throws FailedLogCloseException, IOException {
    return rollWriter(false);
  }
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
  /**
   * Rolls the log writer: closes the current log file and opens a new one.
   * Holds cacheFlushLock for the duration so a roll never races a memstore
   * flush, and updateLock while swapping writers so appends never see a
   * half-closed writer.
   *
   * @param force roll even if the current log file has no entries
   * @return regions whose memstores should be flushed so old logs can be
   *   archived, or null if none
   * @throws FailedLogCloseException if the old writer cannot be closed cleanly
   * @throws IOException on other filesystem errors
   */
  public byte [][] rollWriter(boolean force)
      throws FailedLogCloseException, IOException {
    // Fast path: nothing written to the current file, nothing to roll.
    if (!force && this.writer != null && this.numEntries.get() <= 0) {
      return null;
    }
    byte [][] regionsToFlush = null;
    this.cacheFlushLock.lock();
    this.logRollRunning = true;
    try {
      if (closed) {
        LOG.debug("HLog closed. Skipping rolling of writer");
        return regionsToFlush;
      }
      // Compute the old file's path (if any) and derive a new file id from
      // the wall clock.
      long currentFilenum = this.filenum;
      Path oldPath = null;
      if (currentFilenum > 0) {
        oldPath = computeFilename(currentFilenum);
      }
      this.filenum = System.currentTimeMillis();
      Path newPath = computeFilename();

      if (!this.listeners.isEmpty()) {
        for (WALActionsListener i : this.listeners) {
          i.preLogRoll(oldPath, newPath);
        }
      }
      // Create the new writer before closing the old one, so a create
      // failure leaves the current log usable.
      HLog.Writer nextWriter = this.createWriterInstance(fs, newPath, conf);

      // Grab the underlying HDFS stream for replication probing, if exposed.
      FSDataOutputStream nextHdfsOut = null;
      if (nextWriter instanceof SequenceFileLogWriter) {
        nextHdfsOut = ((SequenceFileLogWriter)nextWriter).getWriterFSDataOutputStream();
      }

      synchronized (updateLock) {
        // Close/record the old writer and install the new one atomically
        // with respect to appenders.
        Path oldFile = cleanupCurrentWriter(currentFilenum);
        this.writer = nextWriter;
        this.hdfs_out = nextHdfsOut;

        LOG.info((oldFile != null?
            "Roll " + FSUtils.getPath(oldFile) + ", entries=" +
            this.numEntries.get() +
            ", filesize=" +
            this.fs.getFileStatus(oldFile).getLen() + ". ": "") +
          " for " + FSUtils.getPath(newPath));
        this.numEntries.set(0);
      }

      if (!this.listeners.isEmpty()) {
        for (WALActionsListener i : this.listeners) {
          i.postLogRoll(oldPath, newPath);
        }
      }

      // If no region has unflushed edits, every rolled log is archivable;
      // otherwise work out which regions must flush to release old logs.
      if (this.outputfiles.size() > 0) {
        if (this.lastSeqWritten.isEmpty()) {
          LOG.debug("Last sequenceid written is empty. Deleting all old hlogs");
          for (Map.Entry<Long, Path> e : this.outputfiles.entrySet()) {
            archiveLogFile(e.getValue(), e.getKey());
          }
          this.outputfiles.clear();
        } else {
          regionsToFlush = cleanOldLogs();
        }
      }
    } finally {
      this.logRollRunning = false;
      this.cacheFlushLock.unlock();
    }
    return regionsToFlush;
  }
672
673
674
675
676
677
678
679
680
681
682
  /**
   * Creates a writer for a new log file. Subclasses may override to supply a
   * different writer implementation.
   *
   * @param fs filesystem to create the file on
   * @param path path of the new log file
   * @param conf configuration passed to the writer
   * @return an initialized writer for {@code path}
   */
  protected Writer createWriterInstance(final FileSystem fs, final Path path,
      final Configuration conf) throws IOException {
    if (forMeta) {
      // NOTE(review): intentionally empty here — no META-specific writer
      // customization is applied; confirm whether one was intended.
    }
    return this.hlogFs.createWriter(fs, conf, path);
  }
690
691
692
693
694
695
696
697
698
699
700
701
  /**
   * Gets a reader for the given log file. The implementation class is read
   * once from "hbase.regionserver.hlog.reader.impl" (default
   * SequenceFileLogReader) and cached in a static for subsequent calls.
   *
   * @param fs filesystem holding the file
   * @param path log file to open
   * @param conf configuration used to resolve the reader class
   * @return an initialized reader positioned at the start of the file
   * @throws IOException if the reader cannot be instantiated or opened
   */
  public static Reader getReader(final FileSystem fs, final Path path,
      Configuration conf)
  throws IOException {
    try {
      // Lazily resolve the configured implementation; cached across calls.
      if (logReaderClass == null) {
        logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
            SequenceFileLogReader.class, Reader.class);
      }
      HLog.Reader reader = logReaderClass.newInstance();
      reader.init(fs, path, conf);
      return reader;
    } catch (IOException e) {
      // Rethrow IOExceptions unchanged so callers see the original failure;
      // only reflection/other failures get wrapped below.
      throw e;
    }
    catch (Exception e) {
      throw new IOException("Cannot get log reader", e);
    }
  }
724
725
726
727
728
729
730
731
  /**
   * Creates a writer for the given log file. The implementation class is
   * read once from "hbase.regionserver.hlog.writer.impl" (default
   * SequenceFileLogWriter) and cached in a static for subsequent calls.
   *
   * @param fs filesystem to create the file on
   * @param path log file to create
   * @param conf configuration used to resolve the writer class
   * @return an initialized writer for {@code path}
   * @throws IOException if the writer cannot be instantiated or initialized
   */
  public static Writer createWriter(final FileSystem fs,
      final Path path, Configuration conf)
  throws IOException {
    try {
      if (logWriterClass == null) {
        logWriterClass = conf.getClass("hbase.regionserver.hlog.writer.impl",
            SequenceFileLogWriter.class, Writer.class);
      }
      HLog.Writer writer = (HLog.Writer) logWriterClass.newInstance();
      writer.init(fs, path, conf);
      return writer;
    } catch (Exception e) {
      throw new IOException("cannot get log writer", e);
    }
  }
747
748
749
750
751
752
753
754
  /**
   * Archives rolled log files that are no longer needed: every log whose
   * highest sequence id is below the oldest outstanding (un-flushed) edit
   * can go. If too many logs remain pinned afterwards, returns the regions
   * that should be flushed to release them.
   *
   * @return regions to force-flush, or null if the log count is acceptable
   */
  private byte [][] cleanOldLogs() throws IOException {
    Long oldestOutstandingSeqNum = getOldestOutstandingSeqNum();
    // Keys strictly below the oldest outstanding seq id identify archivable logs.
    TreeSet<Long> sequenceNumbers =
        new TreeSet<Long>(this.outputfiles.headMap(
            (Long.valueOf(oldestOutstandingSeqNum.longValue()))).keySet());

    int logsToRemove = sequenceNumbers.size();
    if (logsToRemove > 0) {
      if (LOG.isDebugEnabled()) {
        byte [] oldestRegion = getOldestRegion(oldestOutstandingSeqNum);
        LOG.debug("Found " + logsToRemove + " hlogs to remove" +
            " out of total " + this.outputfiles.size() + ";" +
            " oldest outstanding sequenceid is " + oldestOutstandingSeqNum +
            " from region " + Bytes.toStringBinary(oldestRegion));
      }
      for (Long seq : sequenceNumbers) {
        archiveLogFile(this.outputfiles.remove(seq), seq);
      }
    }

    // If we still retain more than maxLogs files, ask for flushes of the
    // regions pinning the oldest log so the next roll can archive it.
    byte [][] regions = null;
    int logCount = this.outputfiles == null? 0: this.outputfiles.size();
    if (logCount > this.maxLogs && logCount > 0) {
      regions = findMemstoresWithEditsEqualOrOlderThan(this.outputfiles.firstKey(),
          this.lastSeqWritten);
      if (regions != null) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < regions.length; i++) {
          if (i > 0) sb.append(", ");
          sb.append(Bytes.toStringBinary(regions[i]));
        }
        LOG.info("Too many hlogs: logs=" + logCount + ", maxlogs=" +
            this.maxLogs + "; forcing flush of " + regions.length + " regions(s): " +
            sb.toString());
      }
    }
    return regions;
  }
799
800
801
802
803
804
805
806
807
808 static byte [][] findMemstoresWithEditsEqualOrOlderThan(final long oldestWALseqid,
809 final Map<byte [], Long> regionsToSeqids) {
810
811 List<byte []> regions = null;
812 for (Map.Entry<byte [], Long> e: regionsToSeqids.entrySet()) {
813 if (e.getValue().longValue() <= oldestWALseqid) {
814 if (regions == null) regions = new ArrayList<byte []>();
815
816 regions.add(e.getKey());
817 }
818 }
819 return regions == null?
820 null: regions.toArray(new byte [][] {HConstants.EMPTY_BYTE_ARRAY});
821 }
822
823
824
825
  /**
   * @return the smallest un-flushed sequence id across all regions.
   *   Callers must ensure lastSeqWritten is non-empty (Collections.min
   *   throws NoSuchElementException on an empty collection).
   */
  private Long getOldestOutstandingSeqNum() {
    return Collections.min(this.lastSeqWritten.values());
  }

  /**
   * Finds a region whose oldest un-flushed edit equals the given sequence
   * id; used for debug logging only.
   *
   * @param oldestOutstandingSeqNum value returned by getOldestOutstandingSeqNum()
   * @return encoded name of the first matching region, or null if the map
   *   changed concurrently and no entry matches any more
   */
  private byte [] getOldestRegion(final Long oldestOutstandingSeqNum) {
    byte [] oldestRegion = null;
    for (Map.Entry<byte [], Long> e: this.lastSeqWritten.entrySet()) {
      if (e.getValue().longValue() == oldestOutstandingSeqNum.longValue()) {
        oldestRegion = e.getKey();
        break;
      }
    }
    return oldestRegion;
  }
845
846
847
848
849
850
851
  /**
   * Closes the current writer (syncing any unflushed transactions first) and
   * records the finished file in {@link #outputfiles} keyed by the highest
   * sequence id it contains. Close failures are ridden over up to
   * closeErrorsTolerated times, but only while no deferred-flush edits would
   * be lost; otherwise a FailedLogCloseException aborts the roll.
   *
   * @param currentfilenum id of the file being closed, or < 0 if none
   * @return path of the closed file, or null if there was no writer
   * @throws IOException as FailedLogCloseException when the close failure
   *   cannot be tolerated
   */
  Path cleanupCurrentWriter(final long currentfilenum) throws IOException {
    Path oldFile = null;
    if (this.writer != null) {
      try {
        // Make everything appended so far durable before closing the file.
        if (this.unflushedEntries.get() != this.syncedTillHere) {
          LOG.debug("cleanupCurrentWriter " +
              " waiting for transactions to get synced " +
              " total " + this.unflushedEntries.get() +
              " synced till here " + syncedTillHere);
          sync();
        }
        this.writer.close();
        this.writer = null;
        closeErrorCount.set(0);
      } catch (IOException e) {
        LOG.error("Failed close of HLog writer", e);
        int errors = closeErrorCount.incrementAndGet();
        if (errors <= closeErrorsTolerated && !hasDeferredEntries()) {
          LOG.warn("Riding over HLog close failure! error count="+errors);
        } else {
          if (hasDeferredEntries()) {
            LOG.error("Aborting due to unflushed edits in HLog");
          }
          // Escalate: the caller treats this as fatal for the roll.
          FailedLogCloseException flce =
            new FailedLogCloseException("#" + currentfilenum);
          flce.initCause(e);
          throw flce;
        }
      }
      if (currentfilenum >= 0) {
        oldFile = computeFilename(currentfilenum);
        // logSeqNum.get() is the highest id issued, hence the highest id
        // that can be in the file just closed.
        this.outputfiles.put(Long.valueOf(this.logSeqNum.get()), oldFile);
      }
    }
    return oldFile;
  }
894
  /**
   * Moves a rolled log file into the archive directory, notifying listeners
   * before and after the move.
   *
   * @param p log file to archive
   * @param seqno highest sequence id contained in the file (for logging)
   * @throws IOException if the rename fails
   */
  private void archiveLogFile(final Path p, final Long seqno) throws IOException {
    Path newPath = getHLogArchivePath(this.oldLogDir, p);
    LOG.info("moving old hlog file " + FSUtils.getPath(p) +
        " whose highest sequenceid is " + seqno + " to " +
        FSUtils.getPath(newPath));

    if (!this.listeners.isEmpty()) {
      for (WALActionsListener i : this.listeners) {
        i.preLogArchive(p, newPath);
      }
    }
    if (!HBaseFileSystem.renameAndSetModifyTime(this.fs, p, newPath)) {
      throw new IOException("Unable to rename " + p + " to " + newPath);
    }

    if (!this.listeners.isEmpty()) {
      for (WALActionsListener i : this.listeners) {
        i.postLogArchive(p, newPath);
      }
    }
  }
917
918
919
920
921
922
  /**
   * @return path of the current log file, derived from {@link #filenum}
   */
  protected Path computeFilename() {
    return computeFilename(this.filenum);
  }
926
927
928
929
930
931
932
933 protected Path computeFilename(long filenum) {
934 if (filenum < 0) {
935 throw new RuntimeException("hlog file number can't be < 0");
936 }
937 String child = prefix + "." + filenum;
938 if (forMeta) {
939 child += HLog.META_HLOG_FILE_EXTN;
940 }
941 return new Path(dir, child);
942 }
943
944 public static boolean isMetaFile(Path p) {
945 if (p.getName().endsWith(HLog.META_HLOG_FILE_EXTN)) {
946 return true;
947 }
948 return false;
949 }
950
951
952
953
954
955
  /**
   * Closes the log, moves every remaining log file to the archive directory
   * and deletes the (now empty) log directory.
   *
   * @throws IOException if a file cannot be archived
   */
  public void closeAndDelete() throws IOException {
    close();
    if (!fs.exists(this.dir)) return;
    FileStatus[] files = fs.listStatus(this.dir);
    for(FileStatus file : files) {
      Path p = getHLogArchivePath(this.oldLogDir, file.getPath());
      // Notify listeners around each archive move, same as archiveLogFile().
      if (!this.listeners.isEmpty()) {
        for (WALActionsListener i : this.listeners) {
          i.preLogArchive(file.getPath(), p);
        }
      }
      if (!HBaseFileSystem.renameAndSetModifyTime(fs, file.getPath(), p)) {
        throw new IOException("Unable to rename " + file.getPath() + " to " + p);
      }

      if (!this.listeners.isEmpty()) {
        for (WALActionsListener i : this.listeners) {
          i.postLogArchive(file.getPath(), p);
        }
      }
    }
    LOG.debug("Moved " + files.length + " log files to " +
        FSUtils.getPath(this.oldLogDir));
    // Deletion failure is logged, not fatal — the files are already archived.
    if (!HBaseFileSystem.deleteDirFromFileSystem(fs, dir)) {
      LOG.info("Unable to delete " + dir);
    }
  }
985
986
987
988
989
990
  /**
   * Shuts down the background syncer, notifies listeners and closes the
   * current writer. Further appends are rejected once {@link #closed} is set
   * (under updateLock, so no append can race the final close).
   *
   * @throws IOException if the writer close fails
   */
  public void close() throws IOException {
    // Only started when the flush interval is positive; see constructor.
    if (this.optionalFlushInterval > 0) {
      try {
        logSyncer.close();
        // Give the syncer thread a bounded time to exit.
        logSyncer.join(this.optionalFlushInterval * 2);
      } catch (InterruptedException e) {
        LOG.error("Exception while waiting for syncer thread to die", e);
      }
    }

    cacheFlushLock.lock();
    try {
      if (!this.listeners.isEmpty()) {
        for (WALActionsListener i : this.listeners) {
          i.logCloseRequested();
        }
      }
      synchronized (updateLock) {
        this.closed = true;
        if (LOG.isDebugEnabled()) {
          LOG.debug("closing hlog writer in " + this.dir.toString());
        }
        if (this.writer != null) {
          this.writer.close();
        }
      }
    } finally {
      cacheFlushLock.unlock();
    }
  }
1024
1025
1026
1027
1028
1029
1030
1031
  /**
   * Factory hook for WAL keys; subclasses may return an HLogKey subclass.
   *
   * @param regionName encoded region name the edit belongs to
   * @param tableName table of the edit
   * @param seqnum WAL sequence id assigned to the edit
   * @param now edit timestamp
   * @param clusterId originating cluster (for replication)
   * @return a new key wrapping the given fields
   */
  protected HLogKey makeKey(byte[] regionName, byte[] tableName, long seqnum,
      long now, UUID clusterId) {
    return new HLogKey(regionName, tableName, seqnum, now, clusterId);
  }
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
  /**
   * Appends an edit under a caller-supplied key, assigning it the next WAL
   * sequence id. Optionally syncs before returning (META regions and
   * non-deferred tables are always synced when {@code doSync} is true).
   *
   * @param regionInfo region the edit belongs to
   * @param logKey key to stamp with the new sequence id
   * @param logEdit the edit payload
   * @param htd table descriptor (consulted for deferred-log-flush)
   * @param doSync whether to sync the appended edit before returning
   * @return transaction id of the append, usable with {@link #sync(long)}
   * @throws IOException if the log is closed or the write fails
   */
  public long append(HRegionInfo regionInfo, HLogKey logKey, WALEdit logEdit,
      HTableDescriptor htd, boolean doSync)
  throws IOException {
    if (this.closed) {
      throw new IOException("Cannot append; log is closed");
    }
    long txid = 0;
    synchronized (updateLock) {
      long seqNum = obtainSeqNum();
      logKey.setLogSeqNum(seqNum);
      // putIfAbsent: only the region's OLDEST un-flushed sequence id is
      // tracked; it pins this region's logs until the memstore flushes.
      this.lastSeqWritten.putIfAbsent(regionInfo.getEncodedNameAsBytes(),
          Long.valueOf(seqNum));
      doWrite(regionInfo, logKey, logEdit, htd);
      txid = this.unflushedEntries.incrementAndGet();
      this.numEntries.incrementAndGet();
      if (htd.isDeferredLogFlush()) {
        lastDeferredTxid = txid;
      }
    }

    // Sync outside updateLock so appenders are not blocked on the filesystem.
    if (doSync &&
        (regionInfo.isMetaRegion() ||
        !htd.isDeferredLogFlush())) {
      this.sync(txid);
    }
    return txid;
  }
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
  /**
   * Appends edits tagged with the default (local) cluster id; see
   * {@link #append(HRegionInfo, byte[], WALEdit, UUID, long, HTableDescriptor)}.
   */
  public void append(HRegionInfo info, byte [] tableName, WALEdit edits,
      final long now, HTableDescriptor htd)
  throws IOException {
    append(info, tableName, edits, HConstants.DEFAULT_CLUSTER_ID, now, htd);
  }
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125 private long append(HRegionInfo info, byte [] tableName, WALEdit edits, UUID clusterId,
1126 final long now, HTableDescriptor htd, boolean doSync)
1127 throws IOException {
1128 if (edits.isEmpty()) return this.unflushedEntries.get();;
1129 if (this.closed) {
1130 throw new IOException("Cannot append; log is closed");
1131 }
1132 long txid = 0;
1133 synchronized (this.updateLock) {
1134 long seqNum = obtainSeqNum();
1135
1136
1137
1138
1139
1140
1141
1142 byte [] encodedRegionName = info.getEncodedNameAsBytes();
1143 this.lastSeqWritten.putIfAbsent(encodedRegionName, seqNum);
1144 HLogKey logKey = makeKey(encodedRegionName, tableName, seqNum, now, clusterId);
1145 doWrite(info, logKey, edits, htd);
1146 this.numEntries.incrementAndGet();
1147 txid = this.unflushedEntries.incrementAndGet();
1148 if (htd.isDeferredLogFlush()) {
1149 lastDeferredTxid = txid;
1150 }
1151 }
1152
1153
1154 if (doSync &&
1155 (info.isMetaRegion() ||
1156 !htd.isDeferredLogFlush())) {
1157
1158 this.sync(txid);
1159 }
1160 return txid;
1161 }
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
  /**
   * Appends edits without syncing; the caller must later invoke
   * {@link #sync(long)} with the returned transaction id to make them durable.
   *
   * @return transaction id of the append
   */
  public long appendNoSync(HRegionInfo info, byte [] tableName, WALEdit edits,
      UUID clusterId, final long now, HTableDescriptor htd)
  throws IOException {
    return append(info, tableName, edits, clusterId, now, htd, false);
  }

  /**
   * Appends edits and syncs them (unless the table uses deferred log flush
   * and the region is not META).
   *
   * @return transaction id of the append
   */
  public long append(HRegionInfo info, byte [] tableName, WALEdit edits,
      UUID clusterId, final long now, HTableDescriptor htd)
  throws IOException {
    return append(info, tableName, edits, clusterId, now, htd, true);
  }
1200
1201
1202
1203
1204
1205
1206
1207
1208
  /**
   * Background thread that periodically syncs the WAL so that edits
   * appended with deferred log flush eventually become durable. Also
   * buffers appended entries until a sync drains them to the writer.
   */
  class LogSyncer extends HasThread {

    private final long optionalFlushInterval;

    // Shutdown flag; the AtomicBoolean object doubles as the wait/notify
    // monitor used by run() and close().
    private AtomicBoolean closeLogSyncer = new AtomicBoolean(false);

    // Entries appended but not yet handed to the writer; drained via
    // getPendingWrites()/hlogFlush() under the HLog's flushLock.
    private List<Entry> pendingWrites = new LinkedList<Entry>();

    LogSyncer(long optionalFlushInterval) {
      this.optionalFlushInterval = optionalFlushInterval;
    }

    @Override
    public void run() {
      try {
        // Loop until interrupted or asked to close.
        while(!this.isInterrupted() && !closeLogSyncer.get()) {

          try {
            // Sleep only when nothing is outstanding; close() notifies us
            // so shutdown does not wait out the full interval.
            if (unflushedEntries.get() <= syncedTillHere) {
              synchronized (closeLogSyncer) {
                closeLogSyncer.wait(this.optionalFlushInterval);
              }
            }
            sync();
          } catch (IOException e) {
            // A failed sync usually means a bad pipeline; ask for a roll.
            LOG.error("Error while syncing, requesting close of hlog ", e);
            requestLogRoll();
          }
        }
      } catch (InterruptedException e) {
        LOG.debug(getName() + " interrupted while waiting for sync requests");
      } finally {
        LOG.info(getName() + " exiting");
      }
    }

    /** Buffers an entry for the next flush. */
    synchronized void append(Entry e) throws IOException {
      pendingWrites.add(e);
    }

    /** Atomically takes ownership of all buffered entries. */
    synchronized List<Entry> getPendingWrites() {
      List<Entry> save = this.pendingWrites;
      this.pendingWrites = new LinkedList<Entry>();
      return save;
    }

    /** Appends the given (already dequeued) entries to the writer. */
    void hlogFlush(Writer writer, List<Entry> pending) throws IOException {
      if (pending == null) return;
      for (Entry e : pending) {
        writer.append(e);
      }
    }

    /** Asks the syncer thread to exit, waking it if it is waiting. */
    void close() {
      synchronized (closeLogSyncer) {
        closeLogSyncer.set(true);
        closeLogSyncer.notifyAll();
      }
    }
  }
1288
1289
  /** Syncs everything appended so far (up to the current unflushed txid). */
  private void syncer() throws IOException {
    syncer(this.unflushedEntries.get());
  }
1293
1294
  /**
   * Makes all transactions up to {@code txid} durable: drains pending
   * writes into the writer (under flushLock) and syncs the writer. A flush
   * or sync failure is retried once against the (possibly rolled) current
   * writer before propagating. Also triggers size- and replication-based
   * log rolls when no roll is already running.
   *
   * @param txid transaction id to sync up to; a no-op if already synced
   * @throws IOException if the retried flush/sync still fails
   */
  private void syncer(long txid) throws IOException {
    // Fast path: somebody else already synced past our txid.
    if (txid <= this.syncedTillHere) {
      return;
    }
    Writer tempWriter;
    synchronized (this.updateLock) {
      if (this.closed) return;
      tempWriter = this.writer;
    }
    try {
      long doneUpto;
      long now = System.currentTimeMillis();
      // Flush buffered entries into the writer; only one thread at a time
      // drains pendingWrites (flushLock), re-checking txid to avoid
      // duplicate work.
      IOException ioe = null;
      List<Entry> pending = null;
      synchronized (flushLock) {
        if (txid <= this.syncedTillHere) {
          return;
        }
        doneUpto = this.unflushedEntries.get();
        pending = logSyncer.getPendingWrites();
        try {
          logSyncer.hlogFlush(tempWriter, pending);
        } catch(IOException io) {
          ioe = io;
          LOG.error("syncer encountered error, will retry. txid=" + txid, ioe);
        }
      }
      // Retry the failed flush once against the current writer: a roll may
      // have replaced it since we snapshot tempWriter above.
      if (ioe != null && pending != null) {
        synchronized (this.updateLock) {
          synchronized (flushLock) {
            tempWriter = this.writer;
            logSyncer.hlogFlush(tempWriter, pending);
          }
        }
      }
      // Another syncer may have raced past us while we flushed.
      if (txid <= this.syncedTillHere) {
        return;
      }
      try {
        tempWriter.sync();
      } catch (IOException io) {
        // Same rationale: retry once against the current writer.
        synchronized (this.updateLock) {
          tempWriter = this.writer;
          tempWriter.sync();
        }
      }
      this.syncedTillHere = Math.max(this.syncedTillHere, doneUpto);

      syncTime.inc(System.currentTimeMillis() - now);
      if (!this.logRollRunning) {
        checkLowReplication();
        try {
          if (tempWriter.getLength() > this.logrollsize) {
            requestLogRoll();
          }
        } catch (IOException x) {
          LOG.debug("Log roll failed and will be retried. (This is not an error)");
        }
      }
    } catch (IOException e) {
      LOG.fatal("Could not sync. Requesting close of hlog", e);
      requestLogRoll();
      throw e;
    }
  }
1369
  /**
   * Requests a log roll when the write pipeline's replication has dropped
   * below the tolerable minimum (a sign of a failed datanode). Rolls are
   * capped at lowReplicationRollLimit consecutive attempts so a genuinely
   * under-replicated cluster doesn't trigger a roll storm; rolling is
   * re-enabled once replication recovers.
   */
  private void checkLowReplication() {
    try {
      int numCurrentReplicas = getLogReplication();
      // 0 means the replication API is unavailable; skip the check then.
      if (numCurrentReplicas != 0
          && numCurrentReplicas < this.minTolerableReplication) {
        if (this.lowReplicationRollEnabled) {
          if (this.consecutiveLogRolls < this.lowReplicationRollLimit) {
            LOG.warn("HDFS pipeline error detected. " + "Found "
                + numCurrentReplicas + " replicas but expecting no less than "
                + this.minTolerableReplication + " replicas. "
                + " Requesting close of hlog.");
            requestLogRoll();
            // Count consecutive low-replication rolls; reset on recovery below.
            this.consecutiveLogRolls++;
          } else {
            LOG.warn("Too many consecutive RollWriter requests, it's a sign of "
                + "the total number of live datanodes is lower than the tolerable replicas.");
            this.consecutiveLogRolls = 0;
            this.lowReplicationRollEnabled = false;
          }
        }
      } else if (numCurrentReplicas >= this.minTolerableReplication) {
        if (!this.lowReplicationRollEnabled) {
          // A fresh file reports full replication trivially; wait until it
          // has real entries before trusting the signal and re-enabling.
          if (this.numEntries.get() <= 1) {
            return;
          }
          this.lowReplicationRollEnabled = true;
          LOG.info("LowReplication-Roller was enabled.");
        }
      }
    } catch (Exception e) {
      // Reflection failure is non-fatal; syncing continues without the check.
      LOG.warn("Unable to invoke DFSOutputStream.getNumCurrentReplicas" + e +
          " still proceeding ahead...");
    }
  }
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428 int getLogReplication()
1429 throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
1430 if (this.getNumCurrentReplicas != null && this.hdfs_out != null) {
1431 Object repl = this.getNumCurrentReplicas.invoke(getOutputStream(), NO_ARGS);
1432 if (repl instanceof Integer) {
1433 return ((Integer)repl).intValue();
1434 }
1435 }
1436 return 0;
1437 }
1438
  /**
   * @return true if the reflective {@code getNumCurrentReplicas} hook was
   *         found on the HDFS output stream, i.e. {@link #getLogReplication()}
   *         can report a real replica count
   */
  boolean canGetCurReplicas() {
    return this.getNumCurrentReplicas != null;
  }
1442
  /**
   * Syncs the WAL.  NOTE(review): delegates to the same {@code syncer()} as
   * {@link #hflush()} and {@link #sync()} -- no stronger hsync-to-disk
   * semantics are implemented here; confirm that is intended.
   * @throws IOException if the sync fails
   */
  public void hsync() throws IOException {
    syncer();
  }
1446
  /**
   * Flushes the WAL to the filesystem; delegates to {@code syncer()}.
   * @throws IOException if the flush fails
   */
  public void hflush() throws IOException {
    syncer();
  }
1450
  /**
   * Syncs all pending WAL entries; delegates to {@code syncer()}.
   * @throws IOException if the sync fails
   */
  public void sync() throws IOException {
    syncer();
  }
1454
  /**
   * Syncs the WAL at least up to the given transaction id; returns
   * immediately if that txid has already been synced.
   * @param txid transaction id to sync up to
   * @throws IOException if the sync fails
   */
  public void sync(long txid) throws IOException {
    syncer(txid);
  }
1458
1459 private void requestLogRoll() {
1460 if (!this.listeners.isEmpty()) {
1461 for (WALActionsListener i: this.listeners) {
1462 i.logRollRequested();
1463 }
1464 }
1465 }
1466
  /**
   * Appends one edit to the WAL: notifies listeners, runs WAL coprocessor
   * pre/post hooks, hands the entry to the log syncer, and updates
   * write-time/size metrics.  No-op when the log is disabled.
   *
   * @param info    region the edit belongs to
   * @param logKey  key of the WAL entry
   * @param logEdit edit to append
   * @param htd     descriptor of the table being written to
   * @throws IOException on append failure (a log roll is requested first)
   */
  protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit,
    HTableDescriptor htd)
  throws IOException {
    // Writing can be switched off (e.g. for tests); silently drop the edit.
    if (!this.enabled) {
      return;
    }
    if (!this.listeners.isEmpty()) {
      for (WALActionsListener i: this.listeners) {
        i.visitLogEntryBeforeWrite(htd, logKey, logEdit);
      }
    }
    try {
      long now = System.currentTimeMillis();
      // A coprocessor returning true from preWALWrite suppresses the default
      // append (the coprocessor is taken to have handled the entry itself).
      if (!coprocessorHost.preWALWrite(info, logKey, logEdit)) {
        // write to our buffer for the Hlog file.
        logSyncer.append(new HLog.Entry(logKey, logEdit));
      }
      long took = System.currentTimeMillis() - now;
      coprocessorHost.postWALWrite(info, logKey, logEdit);
      writeTime.inc(took);
      // Account the payload size of the edit for the writeSize metric.
      long len = 0;
      for (KeyValue kv : logEdit.getKeyValues()) {
        len += kv.getLength();
      }
      writeSize.inc(len);
      // Appends slower than one second are worth flagging individually.
      if (took > 1000) {
        LOG.warn(String.format(
          "%s took %d ms appending an edit to hlog; editcount=%d, len~=%s",
          Thread.currentThread().getName(), took, this.numEntries.get(),
          StringUtils.humanReadableInt(len)));
        slowHLogAppendCount.incrementAndGet();
        slowHLogAppendTime.inc(took);
      }
    } catch (IOException e) {
      LOG.fatal("Could not append. Requesting close of hlog", e);
      requestLogRoll();
      throw e;
    }
  }
1507
1508
1509
  /** @return current value of the WAL entry counter */
  int getNumEntries() {
    return numEntries.get();
  }
1513
1514
1515
1516
  /**
   * Atomically increments and returns the next WAL sequence number.
   * @return the freshly obtained sequence number
   */
  public long obtainSeqNum() {
    return this.logSeqNum.incrementAndGet();
  }
1520
1521
  /** @return number of rolled log files currently tracked in {@code outputfiles} */
  int getNumLogFiles() {
    return outputfiles.size();
  }
1525
1526 private byte[] getSnapshotName(byte[] encodedRegionName) {
1527 byte snp[] = new byte[encodedRegionName.length + 3];
1528
1529
1530
1531 snp[0] = 's'; snp[1] = 'n'; snp[2] = 'p';
1532 for (int i = 0; i < encodedRegionName.length; i++) {
1533 snp[i+3] = encodedRegionName[i];
1534 }
1535 return snp;
1536 }
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
  /**
   * Called at the start of a memstore flush.  Acquires the cache-flush lock
   * (released later by {@code completeCacheFlush} or {@code abortCacheFlush})
   * and moves the region's earliest unflushed sequence id aside under a
   * "snp"-prefixed snapshot key, so edits arriving during the flush can
   * record a fresh earliest-seq for the region.
   *
   * @param encodedRegionName region about to be flushed
   * @return a newly obtained sequence number for the flush
   */
  public long startCacheFlush(final byte[] encodedRegionName) {
    this.cacheFlushLock.lock();
    Long seq = this.lastSeqWritten.remove(encodedRegionName);
    // seq is null when the region has no unflushed edits on record;
    // in that case there is nothing to park under the snapshot key.
    if (seq != null) {
      // Park the earliest-seq entry under the snapshot key; it is restored
      // by abortCacheFlush() or discarded by completeCacheFlush().
      Long oldseq =
        lastSeqWritten.put(getSnapshotName(encodedRegionName), seq);
      if (oldseq != null) {
        // A snapshot entry left over from an earlier flush means the WAL
        // bookkeeping is corrupt; halt hard rather than risk data loss.
        LOG.error("Logic Error Snapshot seq id from earlier flush still" +
          " present! for region " + Bytes.toString(encodedRegionName) +
          " overwritten oldseq=" + oldseq + "with new seq=" + seq);
        Runtime.getRuntime().halt(1);
      }
    }
    return obtainSeqNum();
  }
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
  /**
   * Called at the end of a successful memstore flush.  Appends and syncs a
   * COMPLETE_CACHE_FLUSH marker entry for the region, then (always) drops
   * the region's snapshot bookkeeping entry and releases the cache-flush
   * lock taken by {@link #startCacheFlush(byte[])}.
   *
   * @param encodedRegionName region that finished flushing
   * @param tableName         table the region belongs to
   * @param logSeqId          sequence id the flush completed at
   * @param isMetaRegion      unused here (kept for interface compatibility)
   * @throws IOException if appending or syncing the marker entry fails
   */
  public void completeCacheFlush(final byte [] encodedRegionName,
      final byte [] tableName, final long logSeqId, final boolean isMetaRegion)
  throws IOException {
    try {
      if (this.closed) {
        return;
      }
      long txid = 0;
      synchronized (updateLock) {
        long now = System.currentTimeMillis();
        WALEdit edit = completeCacheFlushLogEdit();
        HLogKey key = makeKey(encodedRegionName, tableName, logSeqId,
            System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
        logSyncer.append(new Entry(key, edit));
        txid = this.unflushedEntries.incrementAndGet();
        writeTime.inc(System.currentTimeMillis() - now);
        long len = 0;
        for (KeyValue kv : edit.getKeyValues()) {
          len += kv.getLength();
        }
        writeSize.inc(len);
        this.numEntries.incrementAndGet();
      }
      // Sync outside the updateLock so concurrent appends are not blocked.
      this.sync(txid);

    } finally {
      // The flush succeeded, so the parked snapshot seq id is obsolete;
      // remove it and release the lock taken in startCacheFlush() even if
      // the append/sync above threw.
      this.lastSeqWritten.remove(getSnapshotName(encodedRegionName));
      this.cacheFlushLock.unlock();
    }
  }
1633
1634 private WALEdit completeCacheFlushLogEdit() {
1635 KeyValue kv = new KeyValue(METAROW, METAFAMILY, null,
1636 System.currentTimeMillis(), COMPLETE_CACHE_FLUSH);
1637 WALEdit e = new WALEdit();
1638 e.add(kv);
1639 return e;
1640 }
1641
1642
1643
1644
1645
1646
1647
  /**
   * Called when a memstore flush fails.  Restores the sequence id parked
   * under the snapshot key by {@link #startCacheFlush(byte[])} back to the
   * region's own key, then releases the cache-flush lock.
   *
   * @param encodedRegionName region whose flush was aborted
   */
  public void abortCacheFlush(byte[] encodedRegionName) {
    Long snapshot_seq =
      this.lastSeqWritten.remove(getSnapshotName(encodedRegionName));
    if (snapshot_seq != null) {
      // Put the parked seq id back under the region key.  Any seq id that
      // arrived for the region during the flush must be newer (larger) than
      // the snapshot value we are restoring.
      Long current_memstore_earliest_seq =
        this.lastSeqWritten.put(encodedRegionName, snapshot_seq);
      if (current_memstore_earliest_seq != null &&
          (current_memstore_earliest_seq.longValue() <=
            snapshot_seq.longValue())) {
        // Edits were recorded out of order -- the bookkeeping is corrupt,
        // so halt the process rather than risk losing edits.
        LOG.error("Logic Error region " + Bytes.toString(encodedRegionName) +
          "acquired edits out of order current memstore seq=" +
          current_memstore_earliest_seq + " snapshot seq=" + snapshot_seq);
        Runtime.getRuntime().halt(1);
      }
    }
    // Release the lock taken in startCacheFlush().
    this.cacheFlushLock.unlock();
  }
1669
1670
1671
1672
1673
  /**
   * @param family column family name to test
   * @return true if {@code family} is the WAL's internal meta family
   */
  public static boolean isMetaFamily(byte [] family) {
    return Bytes.equals(METAFAMILY, family);
  }
1677
1678
1679
1680
1681
1682
  /**
   * @return true if the low-replication log roller is currently enabled
   *         (it disables itself after too many consecutive roll requests;
   *         see {@code checkLowReplication})
   */
  public boolean isLowReplicationRollEnabled() {
    return lowReplicationRollEnabled;
  }
1686
  /**
   * @param conf configuration to read
   * @return the HLogKey implementation named by
   *         {@code hbase.regionserver.hlog.keyclass}, defaulting to
   *         {@link HLogKey}
   */
  @SuppressWarnings("unchecked")
  public static Class<? extends HLogKey> getKeyClass(Configuration conf) {
    return (Class<? extends HLogKey>)
      conf.getClass("hbase.regionserver.hlog.keyclass", HLogKey.class);
  }
1692
1693 public static HLogKey newKey(Configuration conf) throws IOException {
1694 Class<? extends HLogKey> keyClass = getKeyClass(conf);
1695 try {
1696 return keyClass.newInstance();
1697 } catch (InstantiationException e) {
1698 throw new IOException("cannot create hlog key");
1699 } catch (IllegalAccessException e) {
1700 throw new IOException("cannot create hlog key");
1701 }
1702 }
1703
1704
1705
1706
1707
1708 public static class Entry implements Writable {
1709 private WALEdit edit;
1710 private HLogKey key;
1711
1712 public Entry() {
1713 edit = new WALEdit();
1714 key = new HLogKey();
1715 }
1716
1717
1718
1719
1720
1721
1722 public Entry(HLogKey key, WALEdit edit) {
1723 super();
1724 this.key = key;
1725 this.edit = edit;
1726 }
1727
1728
1729
1730
1731 public WALEdit getEdit() {
1732 return edit;
1733 }
1734
1735
1736
1737
1738 public HLogKey getKey() {
1739 return key;
1740 }
1741
1742 @Override
1743 public String toString() {
1744 return this.key + "=" + this.edit;
1745 }
1746
1747 @Override
1748 public void write(DataOutput dataOutput) throws IOException {
1749 this.key.write(dataOutput);
1750 this.edit.write(dataOutput);
1751 }
1752
1753 @Override
1754 public void readFields(DataInput dataInput) throws IOException {
1755 this.key.readFields(dataInput);
1756 this.edit.readFields(dataInput);
1757 }
1758 }
1759
1760
1761
1762
1763
1764
1765
1766 public static String getHLogDirectoryName(final String serverName) {
1767 StringBuilder dirName = new StringBuilder(HConstants.HREGION_LOGDIR_NAME);
1768 dirName.append("/");
1769 dirName.append(serverName);
1770 return dirName.toString();
1771 }
1772
1773
1774
1775
1776
1777
  /** @return the directory this HLog writes its log files to */
  protected Path getDir() {
    return dir;
  }
1781
1782
1783
1784
1785
1786
  /**
   * @param filename file name to test
   * @return true if {@code filename} matches the expected HLog file-name
   *         pattern (the class-level {@code pattern})
   */
  public static boolean validateHLogFilename(String filename) {
    return pattern.matcher(filename).matches();
  }
1790
  /**
   * @param oldLogDir archive directory for retired WALs
   * @param p         log file being archived
   * @return the archive location: same file name under {@code oldLogDir}
   */
  static Path getHLogArchivePath(Path oldLogDir, Path p) {
    return new Path(oldLogDir, p.getName());
  }
1794
1795 static String formatRecoveredEditsFileName(final long seqid) {
1796 return String.format("%019d", seqid);
1797 }
1798
1799
1800
1801
1802
1803
1804
1805
1806
  /**
   * Lists the recovered-edits files under a region directory, sorted by
   * path (file names are zero-padded sequence ids, so this is sequence-id
   * order).  Temporary files (RECOVERED_LOG_TMPFILE_SUFFIX) and anything not
   * matching {@code EDITFILES_NAME_PATTERN} are excluded.
   *
   * @param fs        filesystem to read from
   * @param regiondir region directory to look under
   * @return sorted set of recovered-edits file paths; empty when the
   *         recovered-edits directory does not exist or holds no matches
   * @throws IOException on filesystem errors while listing
   */
  public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem fs,
      final Path regiondir)
  throws IOException {
    NavigableSet<Path> filesSorted = new TreeSet<Path>();
    Path editsdir = getRegionDirRecoveredEditsDir(regiondir);
    if (!fs.exists(editsdir)) return filesSorted;
    FileStatus[] files = FSUtils.listStatus(fs, editsdir, new PathFilter() {
      @Override
      public boolean accept(Path p) {
        boolean result = false;
        try {
          // Accept only regular files whose name is all digits (a formatted
          // sequence id; see formatRecoveredEditsFileName).
          Matcher m = EDITFILES_NAME_PATTERN.matcher(p.getName());
          result = fs.isFile(p) && m.matches();
          // Skip in-progress temporary files regardless of name match.
          if (p.getName().endsWith(RECOVERED_LOG_TMPFILE_SUFFIX)) {
            result = false;
          }
        } catch (IOException e) {
          // Best effort: an unreadable path is simply excluded.
          LOG.warn("Failed isFile check on " + p);
        }
        return result;
      }
    });
    if (files == null) return filesSorted;
    for (FileStatus status: files) {
      filesSorted.add(status.getPath());
    }
    return filesSorted;
  }
1841
1842
1843
1844
1845
1846
1847
1848
  /**
   * Renames a corrupt edits file out of the way by appending the current
   * timestamp to its name (same directory).  A failed rename is logged but
   * not fatal; the intended destination is returned either way.
   *
   * @param fs    filesystem holding the file
   * @param edits the bad edits file
   * @return the path the file was (or should have been) moved to
   * @throws IOException on filesystem errors
   */
  public static Path moveAsideBadEditsFile(final FileSystem fs,
      final Path edits)
  throws IOException {
    Path moveAsideName = new Path(edits.getParent(), edits.getName() + "." +
      System.currentTimeMillis());
    if (!HBaseFileSystem.renameDirForFileSystem(fs, edits, moveAsideName)) {
      LOG.warn("Rename failed from " + edits + " to " + moveAsideName);
    }
    return moveAsideName;
  }
1859
1860
1861
1862
1863
1864
  /**
   * @param regiondir region directory
   * @return the recovered-edits subdirectory ({@code RECOVERED_EDITS_DIR})
   *         under that region directory
   */
  public static Path getRegionDirRecoveredEditsDir(final Path regiondir) {
    return new Path(regiondir, RECOVERED_EDITS_DIR);
  }
1868
  /**
   * Approximate fixed heap overhead of an HLog instance, used for heap
   * accounting.  NOTE(review): the counts (5 references, 1 int, 3 longs,
   * 1 AtomicInteger) must be kept in step with the instance fields --
   * confirm whenever fields are added or removed.
   */
  public static final long FIXED_OVERHEAD = ClassSize.align(
    ClassSize.OBJECT + (5 * ClassSize.REFERENCE) +
    ClassSize.ATOMIC_INTEGER + Bytes.SIZEOF_INT + (3 * Bytes.SIZEOF_LONG));
1872
  /** Prints command-line usage for the HLog tool to stderr. */
  private static void usage() {
    System.err.println("Usage: HLog <ARGS>");
    System.err.println("Arguments:");
    System.err.println(" --dump  Dump textual representation of passed one or more files");
    System.err.println("         For example: HLog --dump hdfs://example.com:9000/hbase/.logs/MACHINE/LOGFILE");
    System.err.println(" --split Split the passed directory of WAL logs");
    System.err.println("         For example: HLog --split hdfs://example.com:9000/hbase/.logs/DIR");
  }
1881
  /**
   * Splits a directory of WAL files into per-region recovered-edits files
   * (command-line '--split' support).
   *
   * @param conf configuration; {@code hbase.rootdir} must be set
   * @param p    directory of WAL files to split
   * @throws FileNotFoundException if {@code p} does not exist
   * @throws IOException if {@code p} is not a directory or the split fails
   */
  private static void split(final Configuration conf, final Path p)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(p)) {
      throw new FileNotFoundException(p.toString());
    }
    final Path baseDir = new Path(conf.get(HConstants.HBASE_DIR));
    final Path oldLogDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    if (!fs.getFileStatus(p).isDir()) {
      throw new IOException(p + " is not a directory");
    }
    // Delegate the actual splitting to HLogSplitter; finished logs are
    // archived into oldLogDir.
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(
      conf, baseDir, p, oldLogDir, fs);
    logSplitter.splitLog();
  }
1898
1899
1900
1901
  /** @return the WAL coprocessor host attached to this log */
  public WALCoprocessorHost getCoprocessorHost() {
    return coprocessorHost;
  }
1905
1906
  /**
   * @return true if deferred-flush entries have been appended past the last
   *         synced transaction id (i.e. there are unsynced deferred edits)
   */
  boolean hasDeferredEntries() {
    return lastDeferredTxid > syncedTillHere;
  }
1910
1911
1912
1913
1914
1915
1916
1917
1918 public static void main(String[] args) throws IOException {
1919 if (args.length < 2) {
1920 usage();
1921 System.exit(-1);
1922 }
1923
1924 if (args[0].compareTo("--dump") == 0) {
1925 HLogPrettyPrinter.run(Arrays.copyOfRange(args, 1, args.length));
1926 } else if (args[0].compareTo("--split") == 0) {
1927 Configuration conf = HBaseConfiguration.create();
1928 for (int i = 1; i < args.length; i++) {
1929 try {
1930 conf.set("fs.default.name", args[i]);
1931 conf.set("fs.defaultFS", args[i]);
1932 Path logPath = new Path(args[i]);
1933 split(conf, logPath);
1934 } catch (Throwable t) {
1935 t.printStackTrace(System.err);
1936 System.exit(-1);
1937 }
1938 }
1939 } else {
1940 usage();
1941 System.exit(-1);
1942 }
1943 }
1944 }