/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import static org.apache.hadoop.hbase.util.FSUtils.recoverFileLease;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.io.UnsupportedEncodingException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.io.Writable;

import com.google.common.util.concurrent.NamingThreadFactory;

/**
 * HLog stores all the edits to the HStore. It is the HBase write-ahead-log
 * implementation.
 *
 * It performs logfile-rolling, so external callers are not aware that the
 * underlying file is being rolled.
 *
 * <p>
 * There is one HLog per RegionServer. All edits for all regions carried by
 * a particular RegionServer are entered first in the HLog.
 *
 * <p>
 * A region does not need to declare itself before using the HLog; it simply
 * includes its region name in the append and completeCacheFlush calls.
 *
 * <p>
 * An HLog consists of multiple on-disk files with a chronological order.
 * As edits are flushed from memstores into store files, older log files
 * become obsolete: once every region's edits up to a log's highest sequence
 * id have been flushed, that log can be archived.
 *
 * <p>
 * It is only practical to delete entire files, so the log is rolled
 * regularly: on request, or once the current file passes a size threshold.
 * Sequence ids keep increasing across files, so edits stay totally ordered.
 *
 * <p>
 * A cache flush and a log roll must not overlap; startCacheFlush() and
 * completeCacheFlush() bracket flushes with a lock that rollWriter() also
 * takes. On RegionServer failure, the server's logs are split into one
 * recovered-edits file per region; see splitLog().
 */
public class HLog implements Syncable {
  static final Log LOG = LogFactory.getLog(HLog.class);
  public static final byte [] METAFAMILY = Bytes.toBytes("METAFAMILY");
  static final byte [] METAROW = Bytes.toBytes("METAROW");

  /** Directory name, under each region directory, into which the log
   * splitter writes recovered edits. */
  private static final String RECOVERED_EDITS_DIR = "recovered.edits";

  /** Recovered-edits files are named by sequence id: all digits, with an
   * optional leading '-'. */
  private static final Pattern EDITFILES_NAME_PATTERN =
    Pattern.compile("-?[0-9]+");

  private final FileSystem fs;
  private final Path dir;
  private final Configuration conf;
  private final LogRollListener listener;
  private final long optionalFlushInterval;
  private final long blocksize;
  private final int flushlogentries;
  private final String prefix;
  private final AtomicInteger unflushedEntries = new AtomicInteger(0);
  private final Path oldLogDir;
  private final List<LogActionsListener> actionListeners =
    Collections.synchronizedList(new ArrayList<LogActionsListener>());

  // Cached WAL reader/writer implementation classes, resolved once from conf.
  private static Class<? extends Writer> logWriterClass;
  private static Class<? extends Reader> logReaderClass;

  // The underlying HDFS OutputStream of the current writer, used via
  // reflection (HDFS-826) to query the pipeline's replica count.
  private OutputStream hdfs_out;
  private int initialReplication; // replication factor of the current log file
  private Method getNumCurrentReplicas; // reflected HDFS-826 method; may be null
  final static Object [] NO_ARGS = new Object []{};

  // When set, the next hflush() syncs regardless of how few entries are
  // unflushed.
  private boolean forceSync = false;

  /** Interface for reading entries from an hlog file. */
  public interface Reader {
    void init(FileSystem fs, Path path, Configuration c) throws IOException;
    void close() throws IOException;
    Entry next() throws IOException;
    Entry next(Entry reuse) throws IOException;
    void seek(long pos) throws IOException;
    long getPosition() throws IOException;
  }

  /** Interface for appending entries to an hlog file. */
  public interface Writer {
    void init(FileSystem fs, Path path, Configuration c) throws IOException;
    void close() throws IOException;
    void sync() throws IOException;
    void append(Entry entry) throws IOException;
    long getLength() throws IOException;
  }

  /*
   * Current log writer. Replaced on roll; guarded by updateLock.
   */
  Writer writer;

  /*
   * Map of all log files in use, keyed by the highest sequence id written
   * to each; consulted when cleaning old logs.
   */
  final SortedMap<Long, Path> outputfiles =
    Collections.synchronizedSortedMap(new TreeMap<Long, Path>());

  /*
   * Map of region name to the sequence id of its oldest unflushed edit; an
   * entry is added on the first append after a flush and removed when the
   * region's cache flush completes.
   */
  private final ConcurrentSkipListMap<byte [], Long> lastSeqWritten =
    new ConcurrentSkipListMap<byte [], Long>(Bytes.BYTES_COMPARATOR);

  // Set when the log is closed; appends are refused afterwards.
  private volatile boolean closed = false;

  // The monotonically increasing log sequence number, shared by all regions.
  private final AtomicLong logSeqNum = new AtomicLong(0);

  // The current log file number; also the suffix of the current file's name.
  private volatile long filenum = -1;

  // Number of entries written to the current log file.
  private final AtomicInteger numEntries = new AtomicInteger(0);

  // Size in bytes at which the current log file is rolled.
  private final long logrollsize;

  // This lock prevents starting a log roll during a cache flush.
  // synchronized is insufficient because a cache flush spans two method calls.
  private final Lock cacheFlushLock = new ReentrantLock();

  // We synchronize on updateLock to prevent updates and to prevent a log
  // roll during an update.
  private final Object updateLock = new Object();

  // If false, edits are not written out by doWrite (useful only for
  // performance testing).
  private final boolean enabled;

  /*
   * If more than this many logs, force a flush of the oldest region so its
   * oldest edit reaches disk. If too many pile up and we crash, replay takes
   * forever. Keep the number of logs tight.
   */
  private final int maxLogs;

  /*
   * Thread that handles group commit: it syncs on request or on a timer.
   */
  private final LogSyncer logSyncerThread;

  // Visitors called on each log entry just before it is written.
  private final List<LogEntryVisitor> logEntryVisitors =
    new CopyOnWriteArrayList<LogEntryVisitor>();

  /**
   * Pattern used to validate an hlog file name: anything ending in a dot
   * followed by digits, e.g. "hlog.1273573215732".
   */
  private static final Pattern pattern = Pattern.compile(".*\\.\\d*");

  // Value written as a meta edit to mark that a region's cache flush
  // completed.
  static byte [] COMPLETE_CACHE_FLUSH;
  static {
    try {
      COMPLETE_CACHE_FLUSH =
        "HBASE::CACHEFLUSH".getBytes(HConstants.UTF8_ENCODING);
    } catch (UnsupportedEncodingException e) {
      // UTF-8 is always supported, so this cannot happen.
      assert(false);
    }
  }

  // Rough write/sync metrics kept for periodic logging; volatile with racy
  // read-and-reset, so the returned values are approximations.
  private static volatile long writeOps;
  private static volatile long writeTime;
  private static volatile long syncOps;
  private static volatile long syncTime;

  public static long getWriteOps() {
    long ret = writeOps;
    writeOps = 0;
    return ret;
  }

  public static long getWriteTime() {
    long ret = writeTime;
    writeTime = 0;
    return ret;
  }

  public static long getSyncOps() {
    long ret = syncOps;
    syncOps = 0;
    return ret;
  }

  public static long getSyncTime() {
    long ret = syncTime;
    syncTime = 0;
    return ret;
  }

  /**
   * Constructor.
   *
   * @param fs filesystem handle
   * @param dir path to where hlogs are stored
   * @param oldLogDir path to where hlogs are archived
   * @param conf configuration to use
   * @param listener listener used to request log rolls
   * @throws IOException
   */
  public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
    final Configuration conf, final LogRollListener listener)
  throws IOException {
    this(fs, dir, oldLogDir, conf, listener, null, null);
  }
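
  // ---------------------------------------------------------------------
  // Illustrative sketch, not part of the original class: how a caller might
  // wire up an HLog. The server name below and the no-op LogRollListener
  // (assumed here to be a single-method interface) are assumptions made for
  // the example; a real region server derives the name from its address and
  // wires the listener to its LogRoller thread.
  // ---------------------------------------------------------------------
  static HLog exampleCreateLog(final Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path baseDir = new Path(conf.get(HConstants.HBASE_DIR));
    // Server-specific log dir, e.g. /hbase/.logs/example-host,60020,12730...
    Path logDir = new Path(baseDir,
      getHLogDirectoryName("example-host,60020,1273000000000"));
    Path oldLogDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    return new HLog(fs, logDir, oldLogDir, conf, new LogRollListener() {
      public void logRollRequested() {
        // A real server triggers its roller thread here; no-op in the sketch.
      }
    });
  }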

  /**
   * Create an edit log at the given <code>dir</code> location.
   *
   * You should never have to load an existing log. If there is a log at
   * startup, it should have already been processed and deleted by the time
   * the HLog object is started up.
   *
   * @param fs filesystem handle
   * @param dir path to where hlogs are stored
   * @param oldLogDir path to where hlogs are archived
   * @param conf configuration to use
   * @param listener listener used to request log rolls
   * @param actionListener optional listener for hlog actions like archiving
   * @param prefix prefix for log file names; should be the server's host and
   *        port in a distributed setup. It is URL-encoded before use; if
   *        null or empty, "hlog" is used.
   * @throws IOException
   */
  public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
    final Configuration conf, final LogRollListener listener,
    final LogActionsListener actionListener, final String prefix)
  throws IOException {
    super();
    this.fs = fs;
    this.dir = dir;
    this.conf = conf;
    this.listener = listener;
    this.flushlogentries =
      conf.getInt("hbase.regionserver.flushlogentries", 1);
    this.blocksize = conf.getLong("hbase.regionserver.hlog.blocksize",
      this.fs.getDefaultBlockSize());
    // Roll at a fraction of the block size (95% by default) so the roll
    // happens before we hit the block boundary.
    float multi = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.95f);
    this.logrollsize = (long)(this.blocksize * multi);
    this.optionalFlushInterval =
      conf.getLong("hbase.regionserver.optionallogflushinterval", 1 * 1000);
    if (fs.exists(dir)) {
      throw new IOException("Target HLog directory already exists: " + dir);
    }
    fs.mkdirs(dir);
    this.oldLogDir = oldLogDir;
    if (!fs.exists(oldLogDir)) {
      fs.mkdirs(this.oldLogDir);
    }
    this.maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
    this.enabled = conf.getBoolean("hbase.regionserver.hlog.enabled", true);
    LOG.info("HLog configuration: blocksize=" + this.blocksize +
      ", rollsize=" + this.logrollsize +
      ", enabled=" + this.enabled +
      ", flushlogentries=" + this.flushlogentries +
      ", optionallogflushinterval=" + this.optionalFlushInterval + "ms");
    if (actionListener != null) {
      addLogActionsListener(actionListener);
    }

    this.prefix = prefix == null || prefix.isEmpty() ?
      "hlog" : URLEncoder.encode(prefix, "UTF8");

    rollWriter();

    // Find out if getNumCurrentReplicas is available on the writer's output
    // stream (HDFS-826); it is absent on older Hadoop versions.
    this.getNumCurrentReplicas = null;
    if (this.hdfs_out != null) {
      try {
        this.getNumCurrentReplicas = this.hdfs_out.getClass().
          getMethod("getNumCurrentReplicas", new Class<?> []{});
        this.getNumCurrentReplicas.setAccessible(true);
      } catch (NoSuchMethodException e) {
        // Method not available; we log below and skip replication checks.
      } catch (SecurityException e) {
        // Reflection not permitted; same fallback as above.
        this.getNumCurrentReplicas = null;
      }
    }
    if (this.getNumCurrentReplicas != null) {
      LOG.info("Using getNumCurrentReplicas--HDFS-826");
    } else {
      LOG.info("getNumCurrentReplicas--HDFS-826 not available");
    }

    logSyncerThread = new LogSyncer(this.optionalFlushInterval);
    Threads.setDaemonThreadRunning(logSyncerThread,
      Thread.currentThread().getName() + ".logSyncer");
  }

  /**
   * @return the current log file number (the suffix of the current hlog
   *         file's name)
   */
  public long getFilenum() {
    return this.filenum;
  }

  /**
   * Called by HRegionServer when it opens a new region, so the log sequence
   * id can be advanced past any edit already present in that region. Keeps
   * sequence ids monotonically increasing across the server.
   *
   * @param newvalue we'll set log edit/sequence number to this value if it
   *        is greater than the current value
   */
  public void setSequenceNumber(final long newvalue) {
    for (long id = this.logSeqNum.get(); id < newvalue &&
        !this.logSeqNum.compareAndSet(id, newvalue); id = this.logSeqNum.get()) {
      // This could spin on occasion, but better an occasional spin than
      // locking every increment of the sequence number.
      LOG.debug("Changed sequenceid from " + logSeqNum + " to " + newvalue);
    }
  }

  /**
   * @return log sequence number
   */
  public long getSequenceNumber() {
    return logSeqNum.get();
  }

  /** @return the underlying output stream; exposed for tests and for the
   * HDFS-826 replication check */
  OutputStream getOutputStream() {
    return this.hdfs_out;
  }

  /**
   * Roll the log writer. That is, start writing log messages to a new file.
   *
   * Because a log cannot be rolled during a cache flush, and a cache flush
   * spans two method calls, a special lock needs to be obtained so that a
   * cache flush cannot start when the log is being rolled and the log
   * cannot be rolled during a cache flush.
   *
   * <p>Note that this method cannot be synchronized: startCacheFlush could
   * run and obtain the cacheFlushLock, then this method could start, obtain
   * the lock on this but block on the cacheFlushLock; completeCacheFlush
   * would then wait for the lock on this and never release the
   * cacheFlushLock.
   *
   * @return if there are too many logs, the names of the regions to flush
   *         so that logs can be cleaned next time through; null if nothing
   *         needs flushing
   * @throws FailedLogCloseException if the current writer cannot be closed
   * @throws IOException
   */
  public byte [][] rollWriter() throws FailedLogCloseException, IOException {
    // Return if nothing to flush.
    if (this.writer != null && this.numEntries.get() <= 0) {
      return null;
    }
    byte [][] regionsToFlush = null;
    this.cacheFlushLock.lock();
    try {
      if (closed) {
        return regionsToFlush;
      }

      // Compute the name of the new log file; the current time is used as
      // the file number so names sort chronologically.
      long currentFilenum = this.filenum;
      this.filenum = System.currentTimeMillis();
      Path newPath = computeFilename();
      HLog.Writer nextWriter = createWriter(fs, newPath, HBaseConfiguration.create(conf));
      int nextInitialReplication = fs.getFileStatus(newPath).getReplication();

      // Can we get at the dfsclient output stream? If an instance of
      // SequenceFileLogWriter, it will have exposed it; used later to query
      // pipeline replication (HDFS-826).
      OutputStream nextHdfsOut = null;
      if (nextWriter instanceof SequenceFileLogWriter) {
        nextHdfsOut =
          ((SequenceFileLogWriter)nextWriter).getDFSCOutputStream();
      }
      synchronized (updateLock) {
        // Swap in the new writer under the update lock so no append races
        // the roll.
        Path oldFile = cleanupCurrentWriter(currentFilenum);
        this.writer = nextWriter;
        this.initialReplication = nextInitialReplication;
        this.hdfs_out = nextHdfsOut;

        LOG.info((oldFile != null?
          "Roll " + FSUtils.getPath(oldFile) + ", entries=" +
          this.numEntries.get() +
          ", filesize=" +
          this.fs.getFileStatus(oldFile).getLen() + ". ": "") +
          "New hlog " + FSUtils.getPath(newPath));
        this.numEntries.set(0);
      }

      // Tell our listeners that a new log was created.
      if (!this.actionListeners.isEmpty()) {
        for (LogActionsListener list : this.actionListeners) {
          list.logRolled(newPath);
        }
      }

      // Can we delete any of the old log files?
      if (this.outputfiles.size() > 0) {
        if (this.lastSeqWritten.size() <= 0) {
          LOG.debug("Last sequenceid written is empty. Deleting all old hlogs");
          // No region has unflushed edits, so every rolled log is already
          // persisted in store files and can be archived.
          for (Map.Entry<Long, Path> e : this.outputfiles.entrySet()) {
            archiveLogFile(e.getValue(), e.getKey());
          }
          this.outputfiles.clear();
        } else {
          regionsToFlush = cleanOldLogs();
        }
      }
    } finally {
      this.cacheFlushLock.unlock();
    }
    return regionsToFlush;
  }
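
  // Illustrative sketch, not part of the original class: a roll driver (the
  // LogRoller thread in a real server) calls rollWriter() and then flushes
  // whatever regions it names, so the next roll can let old log files go.
  void exampleRollAndFlush() throws IOException {
    byte [][] regionsToFlush = rollWriter();
    if (regionsToFlush != null) {
      for (byte [] region : regionsToFlush) {
        // A real caller asks the region server to flush this region's
        // memstore here; the sketch just logs it.
        LOG.info("Roll requested flush of " + Bytes.toStringBinary(region));
      }
    }
  }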

  /**
   * Get a reader for the WAL.
   *
   * @param fs filesystem handle
   * @param path path to the hlog file
   * @param conf configuration to use
   * @return a WAL reader on top of the passed file
   * @throws IOException
   */
  public static Reader getReader(final FileSystem fs,
    final Path path, Configuration conf)
  throws IOException {
    try {
      if (logReaderClass == null) {
        logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
          SequenceFileLogReader.class, Reader.class);
      }

      HLog.Reader reader = logReaderClass.newInstance();
      reader.init(fs, path, conf);
      return reader;
    } catch (IOException e) {
      throw e;
    } catch (Exception e) {
      throw new IOException("Cannot get log reader", e);
    }
  }
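
  // Illustrative sketch, not part of the original class: reading an hlog
  // file back with getReader(), much as the --dump tool at the bottom of
  // this file does.
  static void exampleReadLog(final FileSystem fs, final Path path,
      final Configuration conf) throws IOException {
    Reader reader = getReader(fs, path, conf);
    try {
      Entry entry;
      while ((entry = reader.next()) != null) {
        System.out.println("pos=" + reader.getPosition() + " " + entry);
      }
    } finally {
      reader.close();
    }
  }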

  /**
   * Get a writer for the WAL.
   *
   * @param fs filesystem handle
   * @param path path to the hlog file
   * @param conf configuration to use
   * @return a WAL writer
   * @throws IOException
   */
  public static Writer createWriter(final FileSystem fs,
    final Path path, Configuration conf)
  throws IOException {
    try {
      if (logWriterClass == null) {
        logWriterClass = conf.getClass("hbase.regionserver.hlog.writer.impl",
          SequenceFileLogWriter.class, Writer.class);
      }
      HLog.Writer writer = logWriterClass.newInstance();
      writer.init(fs, path, conf);
      return writer;
    } catch (Exception e) {
      IOException ie = new IOException("Cannot get log writer");
      ie.initCause(e);
      throw ie;
    }
  }
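
  // Illustrative sketch, not part of the original class: writing an entry
  // with createWriter(). The row, family, and region/table names below are
  // made up for the example.
  static void exampleWriteLog(final FileSystem fs, final Path path,
      final Configuration conf) throws IOException {
    Writer w = createWriter(fs, path, conf);
    try {
      WALEdit edit = new WALEdit();
      edit.add(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"),
        null, System.currentTimeMillis(), Bytes.toBytes("value")));
      w.append(new Entry(new HLogKey(Bytes.toBytes("example-region"),
        Bytes.toBytes("example-table"), 1L, System.currentTimeMillis()),
        edit));
      w.sync(); // make the entry durable before closing
    } finally {
      w.close();
    }
  }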

  /**
   * Clean up old commit logs. A log can be archived once every region's
   * edits up to the log's highest sequence id have been flushed.
   *
   * @return if too many logs remain, the names of regions that should be
   *         flushed so logs can be removed; otherwise null
   * @throws IOException
   */
  private byte [][] cleanOldLogs() throws IOException {
    Long oldestOutstandingSeqNum = getOldestOutstandingSeqNum();

    // Get the set of all log files whose highest sequence id is less than
    // or equal to the oldest outstanding (unflushed) sequence number.
    TreeSet<Long> sequenceNumbers =
      new TreeSet<Long>(this.outputfiles.headMap(
        (Long.valueOf(oldestOutstandingSeqNum.longValue() + 1L))).keySet());

    int logsToRemove = sequenceNumbers.size();
    if (logsToRemove > 0) {
      if (LOG.isDebugEnabled()) {
        // Finding the oldest region is an O(n) scan; only do it when debug
        // logging is enabled.
        byte [] oldestRegion = getOldestRegion(oldestOutstandingSeqNum);
        LOG.debug("Found " + logsToRemove + " hlogs to remove " +
          " out of total " + this.outputfiles.size() + "; " +
          "oldest outstanding sequenceid is " + oldestOutstandingSeqNum +
          " from region " + Bytes.toString(oldestRegion));
      }
      for (Long seq : sequenceNumbers) {
        archiveLogFile(this.outputfiles.remove(seq), seq);
      }
    }

    // If too many log files remain, figure out which regions to flush so
    // their oldest edits move into store files and more logs can be let go.
    // The logs archived above were already removed from outputfiles, so its
    // current size is the remaining log count.
    byte [][] regions = null;
    int logCount = this.outputfiles.size();
    if (logCount > this.maxLogs && logCount > 0) {
      regions = findMemstoresWithEditsOlderThan(this.outputfiles.firstKey(),
        this.lastSeqWritten);
      StringBuilder sb = new StringBuilder();
      for (int i = 0; i < regions.length; i++) {
        if (i > 0) sb.append(", ");
        sb.append(Bytes.toStringBinary(regions[i]));
      }
      LOG.info("Too many hlogs: logs=" + logCount + ", maxlogs=" +
        this.maxLogs + "; forcing flush of " + regions.length + " region(s): " +
        sb.toString());
    }
    return regions;
  }

  /**
   * Return regions (memstores) that have edits that are strictly older than
   * the passed <code>oldestWALseqid</code>.
   *
   * @param oldestWALseqid oldest sequence id still present in the WAL
   * @param regionsToSeqids map of region name to oldest unflushed seqid
   * @return regions with edits older than <code>oldestWALseqid</code>, in
   *         no particular order; null if there are none
   */
  static byte [][] findMemstoresWithEditsOlderThan(final long oldestWALseqid,
      final Map<byte [], Long> regionsToSeqids) {
    List<byte []> regions = null;
    for (Map.Entry<byte [], Long> e: regionsToSeqids.entrySet()) {
      if (e.getValue().longValue() < oldestWALseqid) {
        if (regions == null) regions = new ArrayList<byte []>();
        regions.add(e.getKey());
      }
    }
    // toArray with a one-element array: it is reused when it fits, else a
    // new properly-typed byte[][] is allocated.
    return regions == null?
      null: regions.toArray(new byte [][] {HConstants.EMPTY_BYTE_ARRAY});
  }

  /**
   * @return the oldest (smallest) first-unflushed sequence id across all
   *         regions
   */
  private Long getOldestOutstandingSeqNum() {
    return Collections.min(this.lastSeqWritten.values());
  }

  /**
   * @param oldestOutstandingSeqNum the oldest outstanding sequence number
   * @return the name of the region holding that oldest sequence number
   */
  private byte [] getOldestRegion(final Long oldestOutstandingSeqNum) {
    byte [] oldestRegion = null;
    for (Map.Entry<byte [], Long> e: this.lastSeqWritten.entrySet()) {
      if (e.getValue().longValue() == oldestOutstandingSeqNum.longValue()) {
        oldestRegion = e.getKey();
        break;
      }
    }
    return oldestRegion;
  }

  /**
   * Close the current writer, if any, and index the file it wrote by the
   * highest sequence number it contains, in preparation for a log roll.
   *
   * @param currentfilenum file number of the log being closed
   * @return Path of the file just closed, or null if there was no writer
   * @throws FailedLogCloseException if the close fails
   */
  private Path cleanupCurrentWriter(final long currentfilenum)
  throws IOException {
    Path oldFile = null;
    if (this.writer != null) {
      // Close the current writer; wrap a failure in FailedLogCloseException
      // so callers can tell a failed close from other IO errors.
      try {
        this.writer.close();
      } catch (IOException e) {
        FailedLogCloseException flce =
          new FailedLogCloseException("#" + currentfilenum);
        flce.initCause(e);
        throw flce;
      }
      if (currentfilenum >= 0) {
        oldFile = computeFilename(currentfilenum);
        // logSeqNum - 1 is the highest sequence number written to this file.
        this.outputfiles.put(Long.valueOf(this.logSeqNum.get() - 1), oldFile);
      }
    }
    return oldFile;
  }

  private void archiveLogFile(final Path p, final Long seqno) throws IOException {
    Path newPath = getHLogArchivePath(this.oldLogDir, p);
    LOG.info("moving old hlog file " + FSUtils.getPath(p) +
      " whose highest sequenceid is " + seqno + " to " +
      FSUtils.getPath(newPath));
    this.fs.rename(p, newPath);
  }

  /**
   * @return Path of the current hlog file, computed from the current file
   *         number
   */
  protected Path computeFilename() {
    return computeFilename(this.filenum);
  }

  /**
   * Compute the path of an hlog file for the given file number.
   *
   * @param filenum log file number
   * @return Path for the log file named by <code>filenum</code>
   */
  protected Path computeFilename(long filenum) {
    if (filenum < 0) {
      throw new RuntimeException("hlog file number can't be < 0");
    }
    return new Path(dir, prefix + "." + filenum);
  }

  /**
   * Shut down the log, archive all its files to the old-log directory, and
   * delete the log directory.
   * @throws IOException
   */
  public void closeAndDelete() throws IOException {
    close();
    FileStatus[] files = fs.listStatus(this.dir);
    for(FileStatus file : files) {
      fs.rename(file.getPath(),
        getHLogArchivePath(this.oldLogDir, file.getPath()));
    }
    LOG.debug("Moved " + files.length + " log files to " +
      FSUtils.getPath(this.oldLogDir));
    fs.delete(dir, true);
  }

  /**
   * Shut down the log.
   * @throws IOException
   */
  public void close() throws IOException {
    try {
      logSyncerThread.interrupt();
      // Make sure everything is synced and give the thread a chance to exit.
      logSyncerThread.join(this.optionalFlushInterval*2);
    } catch (InterruptedException e) {
      LOG.error("Exception while waiting for syncer thread to die", e);
    }

    cacheFlushLock.lock();
    try {
      synchronized (updateLock) {
        this.closed = true;
        if (LOG.isDebugEnabled()) {
          LOG.debug("closing hlog writer in " + this.dir.toString());
        }
        this.writer.close();
      }
    } finally {
      cacheFlushLock.unlock();
    }
  }

  /**
   * Append an edit without a preassigned sequence number; one is obtained
   * at write time.
   *
   * @param regionInfo region the edit belongs to
   * @param logEdit edit to append
   * @param now timestamp to use for the entry's key
   * @param isMetaRegion unused here; the meta check is made on regionInfo
   * @throws IOException
   */
  public void append(HRegionInfo regionInfo, WALEdit logEdit,
    final long now,
    final boolean isMetaRegion)
  throws IOException {
    byte [] regionName = regionInfo.getRegionName();
    byte [] tableName = regionInfo.getTableDesc().getName();
    this.append(regionInfo, makeKey(regionName, tableName, -1, now), logEdit);
  }

  /**
   * @param regionName region the edit applies to
   * @param tableName table the edit applies to
   * @param seqnum sequence number for the key, or -1 to assign at write time
   * @param now timestamp for the key
   * @return a new HLogKey; subclasses may override to supply their own type
   */
  protected HLogKey makeKey(byte[] regionName, byte[] tableName, long seqnum, long now) {
    return new HLogKey(regionName, tableName, seqnum, now);
  }

  /**
   * Append an entry to the log.
   *
   * @param regionInfo region the edit belongs to
   * @param logKey key for the entry; its sequence number is assigned here
   * @param logEdit edit to append
   * @throws IOException if the log is closed or the write fails
   */
  public void append(HRegionInfo regionInfo, HLogKey logKey, WALEdit logEdit)
  throws IOException {
    if (this.closed) {
      throw new IOException("Cannot append; log is closed");
    }
    byte [] regionName = regionInfo.getRegionName();
    synchronized (updateLock) {
      long seqNum = obtainSeqNum();
      logKey.setLogSeqNum(seqNum);
      // The 'lastSeqWritten' map holds the sequence number of the oldest
      // unflushed write for each region (the first edit added to a memstore
      // since the last flush); completeCacheFlush() removes the entry once
      // the flush's sequence id covers it, hence putIfAbsent.
      this.lastSeqWritten.putIfAbsent(regionName, Long.valueOf(seqNum));
      doWrite(regionInfo, logKey, logEdit);
      this.unflushedEntries.incrementAndGet();
      this.numEntries.incrementAndGet();
    }

    // Sync outside the update lock; force the sync for meta-region edits.
    this.sync(regionInfo.isMetaRegion());
  }

  /**
   * Append a set of edits to the log. Log edits are keyed by regionName,
   * rowname, and log-sequence-id.
   *
   * Later, if we sort by these keys, we obtain all the relevant edits for a
   * given key-range of the HRegion. Any edits that do not have a matching
   * COMPLETE_CACHE_FLUSH message can be discarded.
   *
   * <p>Logs cannot be restarted once closed, or once the HLog process dies.
   * Each time the HLog starts, it must create a new log. This means that
   * other systems should process the log appropriately upon each startup
   * (and prior to initializing HLog).
   *
   * Synchronizing on updateLock prevents appends during the completion of
   * a cache flush or for the duration of a log roll.
   *
   * @param info region the edits belong to
   * @param tableName name of the table the edits belong to
   * @param edits edits to append; written as a single entry
   * @param now timestamp to use for the entry's key
   * @throws IOException
   */
  public void append(HRegionInfo info, byte [] tableName, WALEdit edits,
    final long now)
  throws IOException {
    if (edits.isEmpty()) return;

    byte[] regionName = info.getRegionName();
    if (this.closed) {
      throw new IOException("Cannot append; log is closed");
    }
    synchronized (this.updateLock) {
      long seqNum = obtainSeqNum();
      // Only the first append since the region's last flush registers in
      // 'lastSeqWritten': that sequence number marks the region's oldest
      // edit still living only in the log, hence putIfAbsent.
      this.lastSeqWritten.putIfAbsent(regionName, seqNum);
      HLogKey logKey = makeKey(regionName, tableName, seqNum, now);
      doWrite(info, logKey, edits);
      this.numEntries.incrementAndGet();

      // An edit holding many KeyValues still counts as one unflushed entry.
      this.unflushedEntries.incrementAndGet();
    }

    // Sync outside the update lock; force the sync for meta-region edits.
    this.sync(info.isMetaRegion());
  }
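
  // Illustrative sketch, not part of the original class: what a client of
  // this API does per mutation. The HRegionInfo is assumed to come from an
  // open region; the row, family, and qualifier are made up.
  void exampleAppend(final HRegionInfo info) throws IOException {
    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"),
      Bytes.toBytes("qualifier"), System.currentTimeMillis(),
      Bytes.toBytes("value")));
    // Blocks until the edit has been group-committed by the LogSyncer.
    append(info, info.getTableDesc().getName(), edit,
      System.currentTimeMillis());
  }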

  /**
   * Thread responsible for doing the sync (hflush), either on a timer or
   * when an appending thread explicitly requests one, so that appends are
   * group-committed rather than synced one at a time.
   */
  class LogSyncer extends Thread {

    // Fair lock guarding the sync handshake between appenders and the
    // syncer, so waiters are served in order.
    private final ReentrantLock lock = new ReentrantLock(true);

    // Signaled by appending threads when they want a sync.
    private final Condition queueEmpty = lock.newCondition();

    // Signaled by this thread when a sync has completed.
    private final Condition syncDone = lock.newCondition();

    private final long optionalFlushInterval;

    private boolean syncerShuttingDown = false;

    LogSyncer(long optionalFlushInterval) {
      this.optionalFlushInterval = optionalFlushInterval;
    }

    @Override
    public void run() {
      try {
        lock.lock();
        // We hold the lock for the whole run; awaiting on a Condition
        // releases it while we sleep.
        while(!this.isInterrupted()) {

          // awaitNanos() returns the remaining wait time: a value <= 0
          // means the timeout elapsed unsignaled. Being signaled before the
          // timeout means a caller explicitly requested a sync, so force it.
          if (!(queueEmpty.awaitNanos(
              this.optionalFlushInterval*1000000) <= 0)) {
            forceSync = true;
          }

          // Whether we timed out or were signaled, flush what is pending.
          hflush();

          // Wake any callers blocked in addToSyncQueue() on this sync.
          syncDone.signalAll();
        }
      } catch (IOException e) {
        LOG.error("Error while syncing, requesting close of hlog ", e);
        requestLogRoll();
      } catch (InterruptedException e) {
        LOG.debug(getName() + " interrupted while waiting for sync requests");
      } finally {
        syncerShuttingDown = true;
        syncDone.signalAll();
        lock.unlock();
        LOG.info(getName() + " exiting");
      }
    }

    /**
     * Called by appending threads to request a sync; blocks until the
     * syncer thread signals that the flush has happened.
     *
     * @param force sync even if fewer than flushlogentries edits are pending
     */
    public void addToSyncQueue(boolean force) {
      // Don't bother if our appends have somehow already been flushed.
      if (unflushedEntries.get() == 0) {
        return;
      }
      lock.lock();
      try {
        if (syncerShuttingDown) {
          LOG.warn(getName() + " was shut down while waiting for sync");
          return;
        }
        if (force) {
          forceSync = true;
        }
        // Wake the syncer thread.
        queueEmpty.signal();

        // Wait for it to finish the sync.
        syncDone.await();
      } catch (InterruptedException e) {
        LOG.debug(getName() + " was interrupted while waiting for sync", e);
      }
      finally {
        lock.unlock();
      }
    }
  }

  /** Request a non-forced sync of the log; see {@link #sync(boolean)}. */
  public void sync() {
    sync(false);
  }

  /**
   * Request a sync of the log; returns once the sync has completed.
   *
   * @param force sync even if fewer than flushlogentries edits are unflushed
   */
  public void sync(boolean force) {
    logSyncerThread.addToSyncQueue(force);
  }

  public void hflush() throws IOException {
    synchronized (this.updateLock) {
      if (this.closed) {
        return;
      }
      boolean logRollRequested = false;
      if (this.forceSync ||
          this.unflushedEntries.get() >= this.flushlogentries) {
        try {
          long now = System.currentTimeMillis();
          this.writer.sync();
          syncTime += System.currentTimeMillis() - now;
          syncOps++;
          this.forceSync = false;
          this.unflushedEntries.set(0);

          // The sync went through; check the pipeline's replica count
          // (HDFS-826). A drop below the initial replication means a
          // datanode died under us, so ask for a log roll.
          try {
            int numCurrentReplicas = getLogReplication();
            if (numCurrentReplicas != 0 &&
                numCurrentReplicas < this.initialReplication) {
              LOG.warn("HDFS pipeline error detected. " +
                "Found " + numCurrentReplicas + " replicas but expecting " +
                this.initialReplication + " replicas. " +
                " Requesting close of hlog.");
              requestLogRoll();
              logRollRequested = true;
            }
          } catch (Exception e) {
            LOG.warn("Unable to invoke DFSOutputStream.getNumCurrentReplicas: " +
              e + "; still proceeding...");
          }
        } catch (IOException e) {
          LOG.fatal("Could not sync. Requesting close of hlog", e);
          requestLogRoll();
          throw e;
        }
      }

      // If the file has grown past the roll threshold, ask for a roll.
      if (!logRollRequested && (this.writer.getLength() > this.logrollsize)) {
        requestLogRoll();
      }
    }
  }

  /**
   * Get the datanode replica count of the current hlog file's pipeline via
   * the reflected HDFS-826 method.
   *
   * @return current replica count, or 0 if the pipeline is not started or
   *         the method is unavailable (running without the HDFS-826 patch)
   * @throws IllegalArgumentException
   * @throws IllegalAccessException
   * @throws InvocationTargetException
   */
  int getLogReplication()
  throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
    if (this.getNumCurrentReplicas != null && this.hdfs_out != null) {
      Object repl = this.getNumCurrentReplicas.invoke(this.hdfs_out, NO_ARGS);
      if (repl instanceof Integer) {
        return ((Integer)repl).intValue();
      }
    }
    return 0;
  }

  /** @return true if the replica count can be queried via reflection (HDFS-826) */
  boolean canGetCurReplicas() {
    return this.getNumCurrentReplicas != null;
  }

  public void hsync() throws IOException {
    // Not implemented separately yet; hflush is the strongest sync we have.
    hflush();
  }

  private void requestLogRoll() {
    if (this.listener != null) {
      this.listener.logRollRequested();
    }
  }

  protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit)
  throws IOException {
    if (!this.enabled) {
      return;
    }
    if (!this.logEntryVisitors.isEmpty()) {
      for (LogEntryVisitor visitor : this.logEntryVisitors) {
        visitor.visitLogEntryBeforeWrite(info, logKey, logEdit);
      }
    }
    try {
      long now = System.currentTimeMillis();
      this.writer.append(new HLog.Entry(logKey, logEdit));
      long took = System.currentTimeMillis() - now;
      writeTime += took;
      writeOps++;
      if (took > 1000) {
        LOG.warn(Thread.currentThread().getName() + " took " + took +
          "ms appending an edit to hlog; editcount=" + this.numEntries.get());
      }
    } catch (IOException e) {
      LOG.fatal("Could not append. Requesting close of hlog", e);
      requestLogRoll();
      throw e;
    }
  }

  /** @return the number of entries written to the current log file */
  int getNumEntries() {
    return numEntries.get();
  }

  /**
   * Obtain a log sequence number.
   * @return the next, never-before-used, sequence number
   */
  private long obtainSeqNum() {
    return this.logSeqNum.incrementAndGet();
  }

  /** @return the number of rolled log files still tracked by this HLog */
  int getNumLogFiles() {
    return outputfiles.size();
  }

  /**
   * By acquiring a log sequence ID, we can allow log messages to continue
   * while we flush the cache.
   *
   * Acquire a lock so that we do not roll the log between the start and
   * completion of a cache flush; otherwise the log-seq-id for the flush
   * could land in the wrong logfile.
   *
   * @return sequence ID to pass to
   *         {@link #completeCacheFlush(byte[], byte[], long, boolean)}
   * @see #completeCacheFlush(byte[], byte[], long, boolean)
   * @see #abortCacheFlush()
   */
  public long startCacheFlush() {
    this.cacheFlushLock.lock();
    return obtainSeqNum();
  }

  /**
   * Complete the cache flush: write a COMPLETE_CACHE_FLUSH meta edit, drop
   * the region's oldest-unflushed marker if covered, and release the cache
   * flush lock taken by {@link #startCacheFlush()}.
   *
   * @param regionName name of the flushed region
   * @param tableName table the region belongs to
   * @param logSeqId sequence id returned by startCacheFlush
   * @param isMetaRegion whether the region is a meta region (forces a sync)
   * @throws IOException
   */
  public void completeCacheFlush(final byte [] regionName, final byte [] tableName,
    final long logSeqId,
    final boolean isMetaRegion)
  throws IOException {
    try {
      if (this.closed) {
        return;
      }
      synchronized (updateLock) {
        long now = System.currentTimeMillis();
        WALEdit edit = completeCacheFlushLogEdit();
        HLogKey key = makeKey(regionName, tableName, logSeqId, now);
        this.writer.append(new Entry(key, edit));
        writeTime += System.currentTimeMillis() - now;
        writeOps++;
        this.numEntries.incrementAndGet();
        Long seq = this.lastSeqWritten.get(regionName);
        if (seq != null && logSeqId >= seq.longValue()) {
          // All the region's edits up to the flush id are now durable in
          // store files; drop its oldest-unflushed marker.
          this.lastSeqWritten.remove(regionName);
        }
      }

      this.sync(isMetaRegion);

    } finally {
      this.cacheFlushLock.unlock();
    }
  }

  private WALEdit completeCacheFlushLogEdit() {
    KeyValue kv = new KeyValue(METAROW, METAFAMILY, null,
      System.currentTimeMillis(), COMPLETE_CACHE_FLUSH);
    WALEdit e = new WALEdit();
    e.add(kv);
    return e;
  }

  /**
   * Abort a cache flush; call if the flush fails after
   * {@link #startCacheFlush()} and before
   * {@link #completeCacheFlush(byte[], byte[], long, boolean)}.
   * Releases the cache flush lock.
   */
  public void abortCacheFlush() {
    this.cacheFlushLock.unlock();
  }
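
  // Illustrative sketch, not part of the original class: the cache flush
  // protocol this log expects from a region. The memstore work in the
  // middle is elided; only the locking choreography is shown.
  void exampleCacheFlush(final HRegionInfo info) throws IOException {
    long flushSeqId = startCacheFlush(); // takes cacheFlushLock
    boolean snapshotPersisted = false;
    try {
      // ... a real region persists its memstore snapshot to store files
      // here, remembering flushSeqId as the flush's sequence id ...
      snapshotPersisted = true;
    } finally {
      if (!snapshotPersisted) {
        // The flush never happened; release the lock taken above.
        abortCacheFlush();
      }
    }
    // Writes the COMPLETE_CACHE_FLUSH marker and releases the lock itself.
    completeCacheFlush(info.getRegionName(),
      info.getTableDesc().getName(), flushSeqId, info.isMetaRegion());
  }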

  /**
   * @param family column family name to test
   * @return true if the column family is the hlog meta family
   */
  public static boolean isMetaFamily(byte [] family) {
    return Bytes.equals(METAFAMILY, family);
  }

  /**
   * Split up a bunch of regionserver commit log files that are no longer
   * being written to, into new files, one per region, for recovery, and
   * archive the originals when finished.
   *
   * @param rootDir qualified root directory of the HBase instance
   * @param srcDir directory of log files to split, e.g. a dead server's
   *        log directory
   * @param oldLogDir directory processed logs are archived into
   * @param fs filesystem handle
   * @param conf configuration to use
   * @return the list of recovered-edits files written, or null if there was
   *         nothing to split
   * @throws IOException if corrupted hlogs are not tolerated by the
   *         configuration
   */
  public static List<Path> splitLog(final Path rootDir, final Path srcDir,
    Path oldLogDir, final FileSystem fs, final Configuration conf)
  throws IOException {

    long millis = System.currentTimeMillis();
    List<Path> splits = null;
    if (!fs.exists(srcDir)) {
      // Nothing to do.
      return splits;
    }
    FileStatus [] logfiles = fs.listStatus(srcDir);
    if (logfiles == null || logfiles.length == 0) {
      // Nothing to do.
      return splits;
    }
    LOG.info("Splitting " + logfiles.length + " hlog(s) in " +
      srcDir.toString());
    splits = splitLog(rootDir, srcDir, oldLogDir, logfiles, fs, conf);
    try {
      FileStatus[] files = fs.listStatus(srcDir);
      for(FileStatus file : files) {
        Path newPath = getHLogArchivePath(oldLogDir, file.getPath());
        LOG.info("Moving " + FSUtils.getPath(file.getPath()) + " to " +
          FSUtils.getPath(newPath));
        fs.rename(file.getPath(), newPath);
      }
      LOG.debug("Moved " + files.length + " log files to " +
        FSUtils.getPath(oldLogDir));
      fs.delete(srcDir, true);
    } catch (IOException e) {
      e = RemoteExceptionHandler.checkIOException(e);
      IOException io = new IOException("Cannot archive logs or delete " + srcDir);
      io.initCause(e);
      throw io;
    }
    long endMillis = System.currentTimeMillis();
    LOG.info("hlog file splitting completed in " + (endMillis - millis) +
      " millis for " + srcDir.toString());
    return splits;
  }
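
  // Illustrative sketch, not part of the original class: how the master (or
  // the --split tool at the bottom of this file) drives a split of a dead
  // server's log directory. The server name is made up for the example.
  static void exampleSplit(final Configuration conf, final String serverName)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
    Path srcDir = new Path(rootDir, getHLogDirectoryName(serverName));
    Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
    List<Path> splits = splitLog(rootDir, srcDir, oldLogDir, fs, conf);
    LOG.info("Wrote " + (splits == null ? 0 : splits.size()) +
      " recovered-edits file(s)");
  }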

  /** Pairs a split-output writer with the path it writes to. */
  private final static class WriterAndPath {
    final Path p;
    final Writer w;
    WriterAndPath(final Path p, final Writer w) {
      this.p = p;
      this.w = w;
    }
  }

  @SuppressWarnings("unchecked")
  public static Class<? extends HLogKey> getKeyClass(Configuration conf) {
    return (Class<? extends HLogKey>)
      conf.getClass("hbase.regionserver.hlog.keyclass", HLogKey.class);
  }

  public static HLogKey newKey(Configuration conf) throws IOException {
    Class<? extends HLogKey> keyClass = getKeyClass(conf);
    try {
      return keyClass.newInstance();
    } catch (InstantiationException e) {
      throw new IOException("cannot create hlog key", e);
    } catch (IllegalAccessException e) {
      throw new IOException("cannot create hlog key", e);
    }
  }

  /**
   * Sorts the HLog edits in the given list of logfiles (that are a mix of
   * edits on multiple regions) by region, and then splits them per region
   * directory, in batches of (hbase.hlog.split.batch.size).
   *
   * A batch consists of a set of log files that are sorted into a single
   * map of edits indexed by region; the resulting map is written
   * concurrently by multiple threads to their corresponding region
   * directories. Each log file in a batch is recovered (lease-recovered so
   * no process is still writing to it), parsed into per-region edit lists
   * (see {@link #parseHLog}), and marked as either processed or corrupt
   * depending on the outcome; the originals are archived afterwards.
   *
   * @param rootDir hbase root directory
   * @param srcDir logs directory
   * @param oldLogDir directory where processed logs are archived to
   * @param logfiles the list of log files to split
   * @param fs filesystem handle
   * @param conf configuration to use
   * @return the list of recovered-edits files written
   * @throws IOException if a log is unparseable and skip-errors is off
   */
  private static List<Path> splitLog(final Path rootDir, final Path srcDir,
    Path oldLogDir, final FileStatus[] logfiles, final FileSystem fs,
    final Configuration conf)
  throws IOException {
    List<Path> processedLogs = new ArrayList<Path>();
    List<Path> corruptedLogs = new ArrayList<Path>();
    final Map<byte [], WriterAndPath> logWriters =
      Collections.synchronizedMap(
        new TreeMap<byte [], WriterAndPath>(Bytes.BYTES_COMPARATOR));
    List<Path> splits = null;

    // Number of logs to read into memory before writing their edits out in
    // one concurrent batch; bounds memory use during the split.
    int logFilesPerStep = conf.getInt("hbase.hlog.split.batch.size", 3);
    // Whether to tolerate (and sideline) corrupted logs instead of failing.
    boolean skipErrors = conf.getBoolean("hbase.hlog.split.skip.errors", false);

    try {
      int i = -1;
      while (i < logfiles.length) {
        final Map<byte[], LinkedList<Entry>> editsByRegion =
          new TreeMap<byte[], LinkedList<Entry>>(Bytes.BYTES_COMPARATOR);
        for (int j = 0; j < logFilesPerStep; j++) {
          i++;
          if (i == logfiles.length) {
            break;
          }
          FileStatus log = logfiles[i];
          Path logPath = log.getPath();
          long logLength = log.getLen();
          LOG.debug("Splitting hlog " + (i + 1) + " of " + logfiles.length +
            ": " + logPath + ", length=" + logLength);
          try {
            recoverFileLease(fs, logPath, conf);
            parseHLog(log, editsByRegion, fs, conf);
            processedLogs.add(logPath);
          } catch (EOFException eof) {
            // An EOF usually means the last write was truncated mid-record;
            // everything read so far is still good, so keep going.
            LOG.info("EOF from hlog " + logPath + ". Continuing");
            processedLogs.add(logPath);
          } catch (IOException e) {
            if (skipErrors) {
              LOG.warn("Exception while parsing hlog " + logPath +
                ". Marking as corrupted", e);
              corruptedLogs.add(logPath);
            } else {
              throw e;
            }
          }
        }
        writeEditsBatchToRegions(editsByRegion, logWriters, rootDir, fs, conf);
      }
      if (fs.listStatus(srcDir).length > processedLogs.size() + corruptedLogs.size()) {
        throw new IOException("Discovered orphan hlog after split. Maybe " +
          "HRegionServer was not dead when we started");
      }
      archiveLogs(corruptedLogs, processedLogs, oldLogDir, fs, conf);
    } finally {
      splits = new ArrayList<Path>(logWriters.size());
      for (WriterAndPath wap : logWriters.values()) {
        wap.w.close();
        splits.add(wap.p);
        LOG.debug("Closed " + wap.p);
      }
    }
    return splits;
  }

  /**
   * Utility class that lets us keep track of an edit together with its key.
   */
  public static class Entry implements Writable {
    private WALEdit edit;
    private HLogKey key;

    public Entry() {
      edit = new WALEdit();
      key = new HLogKey();
    }

    /**
     * Constructor taking both a key and an edit.
     *
     * @param key log's key
     * @param edit log's edit
     */
    public Entry(HLogKey key, WALEdit edit) {
      super();
      this.key = key;
      this.edit = edit;
    }

    /**
     * @return the edit
     */
    public WALEdit getEdit() {
      return edit;
    }

    /**
     * @return the key
     */
    public HLogKey getKey() {
      return key;
    }

    @Override
    public String toString() {
      return this.key + "=" + this.edit;
    }

    @Override
    public void write(DataOutput dataOutput) throws IOException {
      this.key.write(dataOutput);
      this.edit.write(dataOutput);
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException {
      this.key.readFields(dataInput);
      this.edit.readFields(dataInput);
    }
  }

  /**
   * Construct the HLog directory name for the passed server.
   *
   * @param info HServerInfo for the server
   * @return the HLog directory name
   */
  public static String getHLogDirectoryName(HServerInfo info) {
    return getHLogDirectoryName(info.getServerName());
  }

  /**
   * Construct the HLog directory name from a server address and start code.
   *
   * @param serverAddress address of the region server
   * @param startCode start code of the region server
   * @return the HLog directory name, or null if the address is empty
   */
  public static String getHLogDirectoryName(String serverAddress,
      long startCode) {
    if (serverAddress == null || serverAddress.length() == 0) {
      return null;
    }
    return getHLogDirectoryName(
      HServerInfo.getServerName(serverAddress, startCode));
  }

  /**
   * Construct the HLog directory name from a server name.
   *
   * @param serverName name of the region server
   * @return the HLog directory name
   */
  public static String getHLogDirectoryName(String serverName) {
    StringBuilder dirName = new StringBuilder(HConstants.HREGION_LOGDIR_NAME);
    dirName.append("/");
    dirName.append(serverName);
    return dirName.toString();
  }

  /**
   * @param filename name to check
   * @return true if the given name looks like an hlog file name
   */
  public static boolean validateHLogFilename(String filename) {
    return pattern.matcher(filename).matches();
  }
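
  // Illustrative sketch, not part of the original class: the validator
  // accepts exactly the "<prefix>.<filenum>" names computeFilename()
  // produces.
  static void exampleValidateNames() {
    assert validateHLogFilename("hlog.1273573215732");
    assert !validateHLogFilename("not-an-hlog-name");
  }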

  /** @return the archive location under <code>oldLogDir</code> for <code>p</code> */
  private static Path getHLogArchivePath(Path oldLogDir, Path p) {
    return new Path(oldLogDir, p.getName());
  }

  /**
   * Concurrently writes the edits in <code>splitLogsMap</code> to their
   * corresponding region directories, one writer thread per region.
   *
   * @param splitLogsMap map of edits keyed by region name
   * @param logWriters map of region name to writer; populated here and
   *        closed by the caller
   * @param rootDir hbase root directory
   * @param fs filesystem handle
   * @param conf configuration to use
   * @throws IOException if any writer thread fails
   */
  private static void writeEditsBatchToRegions(
    final Map<byte[], LinkedList<Entry>> splitLogsMap,
    final Map<byte[], WriterAndPath> logWriters,
    final Path rootDir, final FileSystem fs, final Configuration conf)
  throws IOException {

    // Number of threads concurrently writing per-region split files.
    int logWriterThreads =
      conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);
    boolean skipErrors = conf.getBoolean("hbase.skip.errors", false);
    HashMap<byte[], Future<Void>> writeFutureResult =
      new HashMap<byte[], Future<Void>>();
    NamingThreadFactory f = new NamingThreadFactory(
      "SplitWriter-%1$d", Executors.defaultThreadFactory());
    ThreadPoolExecutor threadPool =
      (ThreadPoolExecutor)Executors.newFixedThreadPool(logWriterThreads, f);
    for (final byte [] region : splitLogsMap.keySet()) {
      Callable<Void> splitter =
        createNewSplitter(rootDir, logWriters, splitLogsMap, region, fs, conf);
      writeFutureResult.put(region, threadPool.submit(splitter));
    }

    threadPool.shutdown();
    // Wait for writer threads to terminate, logging progress as we wait.
    try {
      for (int j = 0; !threadPool.awaitTermination(5, TimeUnit.SECONDS); j++) {
        String message = "Waiting for hlog writers to terminate, elapsed " + j * 5 + " seconds";
        if (j < 30) {
          LOG.debug(message);
        } else {
          LOG.info(message);
        }
      }
    } catch(InterruptedException ex) {
      LOG.warn("Hlog writers were interrupted, possible data loss!");
      if (!skipErrors) {
        throw new IOException("Could not finish writing log entries", ex);
      }
    }

    for (Map.Entry<byte[], Future<Void>> entry : writeFutureResult.entrySet()) {
      try {
        entry.getValue().get();
      } catch (ExecutionException e) {
        throw new IOException(e.getCause());
      } catch (InterruptedException e1) {
        LOG.warn("Writer for region " + Bytes.toString(entry.getKey()) +
          " was interrupted, though the write process should have " +
          "finished; rethrowing", e1);
        throw new IOException(e1);
      }
    }
  }

  /**
   * Parse a single hlog and put the edits in <code>splitLogsMap</code>.
   *
   * @param logfile log file to split
   * @param splitLogsMap output parameter: map of region name to the list of
   *        its edits
   * @param fs filesystem handle
   * @param conf configuration to use
   * @throws IOException if the hlog is corrupted or cannot be opened
   */
  private static void parseHLog(final FileStatus logfile,
    final Map<byte[], LinkedList<Entry>> splitLogsMap, final FileSystem fs,
    final Configuration conf)
  throws IOException {
    // Check for a possibly empty file. With appends, Hadoop currently
    // reports a zero length even if the file has been sync'd.
    long length = logfile.getLen();
    if (length <= 0) {
      LOG.warn("File " + logfile.getPath() + " might still be open, length is 0");
    }
    Path path = logfile.getPath();
    Reader in;
    int editsCount = 0;
    try {
      in = HLog.getReader(fs, path, conf);
    } catch (EOFException e) {
      if (length <= 0) {
        // A zero-length file is expected if the region server crashed right
        // after creating the log but before writing to it; safe to skip.
        LOG.warn("Could not open " + path + " for reading. File is empty: " + e);
        return;
      } else {
        throw e;
      }
    }
    try {
      Entry entry;
      while ((entry = in.next()) != null) {
        byte[] region = entry.getKey().getRegionName();
        LinkedList<Entry> queue = splitLogsMap.get(region);
        if (queue == null) {
          queue = new LinkedList<Entry>();
          splitLogsMap.put(region, queue);
        }
        queue.addLast(entry);
        editsCount++;
      }
    } finally {
      LOG.debug("Pushed=" + editsCount + " entries from " + path);
      try {
        if (in != null) {
          in.close();
        }
      } catch (IOException e) {
        LOG.warn("Close log reader in finally threw exception -- continuing", e);
      }
    }
  }

  private static Callable<Void> createNewSplitter(final Path rootDir,
    final Map<byte[], WriterAndPath> logWriters,
    final Map<byte[], LinkedList<Entry>> logEntries,
    final byte[] region, final FileSystem fs, final Configuration conf) {
    return new Callable<Void>() {
      public String getName() {
        return "Split writer thread for region " + Bytes.toStringBinary(region);
      }

      @Override
      public Void call() throws IOException {
        LinkedList<Entry> entries = logEntries.get(region);
        LOG.debug(this.getName() + " got " + entries.size() + " to process");
        long threadTime = System.currentTimeMillis();
        try {
          int editsCount = 0;
          WriterAndPath wap = logWriters.get(region);
          for (Entry logEntry: entries) {
            if (wap == null) {
              Path regionedits = getRegionSplitEditsPath(fs, logEntry, rootDir);
              if (fs.exists(regionedits)) {
                LOG.warn("Found existing old edits file. It could be the " +
                  "result of a previous failed split attempt. Deleting " +
                  regionedits + ", length=" + fs.getFileStatus(regionedits).getLen());
                if (!fs.delete(regionedits, false)) {
                  LOG.warn("Failed delete of old " + regionedits);
                }
              }
              Writer w = createWriter(fs, regionedits, conf);
              wap = new WriterAndPath(regionedits, w);
              logWriters.put(region, wap);
              LOG.debug("Creating writer path=" + regionedits +
                " region=" + Bytes.toStringBinary(region));
            }
            wap.w.append(logEntry);
            editsCount++;
          }
          LOG.debug(this.getName() + " Applied " + editsCount +
            " total edits to " + Bytes.toStringBinary(region) +
            " in " + (System.currentTimeMillis() - threadTime) + "ms");
        } catch (IOException e) {
          e = RemoteExceptionHandler.checkIOException(e);
          LOG.fatal(this.getName() + " Exception while writing log entries " +
            "to the split file", e);
          throw e;
        }
        return null;
      }
    };
  }

  /**
   * Moves processed logs to the old-logs directory after successful
   * processing, and corrupted logs (any log that could not be successfully
   * parsed) to the .corrupt directory for later investigation.
   *
   * @param corruptedLogs list of corrupted hlogs
   * @param processedLogs list of processed hlogs
   * @param oldLogDir directory to archive the processed logs into
   * @param fs filesystem handle
   * @param conf configuration to use
   * @throws IOException
   */
  private static void archiveLogs(final List<Path> corruptedLogs,
    final List<Path> processedLogs, final Path oldLogDir,
    final FileSystem fs, final Configuration conf)
  throws IOException {
    final Path corruptDir = new Path(conf.get(HConstants.HBASE_DIR),
      conf.get("hbase.regionserver.hlog.splitlog.corrupt.dir", ".corrupt"));

    fs.mkdirs(corruptDir);
    fs.mkdirs(oldLogDir);

    for (Path corrupted: corruptedLogs) {
      Path p = new Path(corruptDir, corrupted.getName());
      LOG.info("Moving corrupted log " + corrupted + " to " + p);
      fs.rename(corrupted, p);
    }

    for (Path p: processedLogs) {
      Path newPath = getHLogArchivePath(oldLogDir, p);
      fs.rename(p, newPath);
      LOG.info("Archived processed log " + p + " to " + newPath);
    }
  }

  /**
   * Path to a file under the RECOVERED_EDITS_DIR directory of the region
   * found in <code>logEntry</code>, named for the entry's sequence id: e.g.
   * /hbase/some_table/2323432434/recovered.edits/0000000000000002332.
   * Creates RECOVERED_EDITS_DIR under the region if necessary.
   *
   * @param fs filesystem handle
   * @param logEntry entry to compute the path for
   * @param rootDir HBase root directory
   * @return Path to the file into which to dump the split log edits
   * @throws IOException
   */
  private static Path getRegionSplitEditsPath(final FileSystem fs,
    final Entry logEntry, final Path rootDir)
  throws IOException {
    Path tableDir = HTableDescriptor.getTableDir(rootDir,
      logEntry.getKey().getTablename());
    Path regiondir = HRegion.getRegionDir(tableDir,
      HRegionInfo.encodeRegionName(logEntry.getKey().getRegionName()));
    Path dir = getRegionDirRecoveredEditsDir(regiondir);
    if (!fs.exists(dir)) {
      if (!fs.mkdirs(dir)) LOG.warn("mkdir failed on " + dir);
    }
    return new Path(dir,
      formatRecoveredEditsFileName(logEntry.getKey().getLogSeqNum()));
  }

  /**
   * @param seqid sequence id of the first edit in the file
   * @return the sequence id zero-padded to 19 digits, so lexical order
   *         matches numeric order
   */
  static String formatRecoveredEditsFileName(final long seqid) {
    return String.format("%019d", seqid);
  }
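
  // Illustrative sketch, not part of the original class: the zero padding
  // is what makes getSplitEditFilesSorted()'s TreeSet of Paths come back in
  // sequence-id order.
  static void exampleRecoveredEditsName() {
    // 19 digits total: 15 zeros followed by "2332".
    assert formatRecoveredEditsFileName(2332L).equals("0000000000000002332");
  }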

  /**
   * Returns the sorted set of recovered-edits files under the passed region
   * directory, as written by the log splitter.
   *
   * @param fs filesystem handle
   * @param regiondir region directory to look in
   * @return sorted set of recovered-edits file paths
   * @throws IOException
   */
  public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem fs,
    final Path regiondir)
  throws IOException {
    Path editsdir = getRegionDirRecoveredEditsDir(regiondir);
    FileStatus [] files = fs.listStatus(editsdir, new PathFilter () {
      @Override
      public boolean accept(Path p) {
        boolean result = false;
        try {
          // Return files and only files that match the editfile names
          // pattern; other files can live here too, e.g. bad edit files
          // moved aside with a timestamp suffix by moveAsideBadEditsFile.
          Matcher m = EDITFILES_NAME_PATTERN.matcher(p.getName());
          result = fs.isFile(p) && m.matches();
        } catch (IOException e) {
          LOG.warn("Failed isFile check on " + p);
        }
        return result;
      }
    });
    NavigableSet<Path> filesSorted = new TreeSet<Path>();
    if (files == null) return filesSorted;
    for (FileStatus status: files) {
      filesSorted.add(status.getPath());
    }
    return filesSorted;
  }
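
  // Illustrative sketch, not part of the original class: how a region
  // opening after a crash might walk its recovered-edits files in sequence
  // order. The replay itself is elided.
  static void exampleListRecoveredEdits(final FileSystem fs,
      final Path regiondir, final Configuration conf) throws IOException {
    for (Path edits : getSplitEditFilesSorted(fs, regiondir)) {
      Reader reader = getReader(fs, edits, conf);
      try {
        Entry entry;
        while ((entry = reader.next()) != null) {
          // A real region would re-apply entry.getEdit() to its memstore if
          // the entry's sequence id is newer than its store files.
          LOG.debug("Recovered edit: " + entry);
        }
      } finally {
        reader.close();
      }
    }
  }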

  /**
   * Move aside a bad edits file, giving it a timestamp suffix.
   *
   * @param fs filesystem handle
   * @param edits edits file to move aside
   * @return the path the file was moved to
   * @throws IOException
   */
  public static Path moveAsideBadEditsFile(final FileSystem fs,
    final Path edits)
  throws IOException {
    Path moveAsideName = new Path(edits.getParent(), edits.getName() + "." +
      System.currentTimeMillis());
    if (!fs.rename(edits, moveAsideName)) {
      LOG.warn("Rename failed from " + edits + " to " + moveAsideName);
    }
    return moveAsideName;
  }

  /**
   * @param regiondir the region directory
   * @return the directory under <code>regiondir</code> into which the log
   *         splitter writes recovered edits
   */
  public static Path getRegionDirRecoveredEditsDir(final Path regiondir) {
    return new Path(regiondir, RECOVERED_EDITS_DIR);
  }

  /**
   * @param visitor visitor to call on each log entry just before it is
   *        written
   */
  public void addLogEntryVisitor(LogEntryVisitor visitor) {
    this.logEntryVisitors.add(visitor);
  }

  /**
   * @param visitor visitor to remove
   */
  public void removeLogEntryVisitor(LogEntryVisitor visitor) {
    this.logEntryVisitors.remove(visitor);
  }

  public void addLogActionsListener(LogActionsListener list) {
    LOG.info("Adding a log actions listener");
    this.actionListeners.add(list);
  }

  public boolean removeLogActionsListener(LogActionsListener list) {
    return this.actionListeners.remove(list);
  }

  // Approximate fixed heap overhead of an HLog instance, for heap accounting.
  public static final long FIXED_OVERHEAD = ClassSize.align(
    ClassSize.OBJECT + (5 * ClassSize.REFERENCE) +
    ClassSize.ATOMIC_INTEGER + Bytes.SIZEOF_INT + (3 * Bytes.SIZEOF_LONG));

  private static void usage() {
    System.err.println("Usage: HLog <ARGS>");
    System.err.println("Arguments:");
    System.err.println(" --dump  Dump textual representation of passed one or more files");
    System.err.println("         For example: HLog --dump hdfs://example.com:9000/hbase/.logs/MACHINE/LOGFILE");
    System.err.println(" --split Split the passed directory of WAL logs");
    System.err.println("         For example: HLog --split hdfs://example.com:9000/hbase/.logs/DIR");
  }

  private static void dump(final Configuration conf, final Path p)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(p)) {
      throw new FileNotFoundException(p.toString());
    }
    if (!fs.isFile(p)) {
      throw new IOException(p + " is not a file");
    }
    Reader log = getReader(fs, p, conf);
    try {
      int count = 0;
      HLog.Entry entry;
      while ((entry = log.next()) != null) {
        System.out.println("#" + count + ", pos=" + log.getPosition() + " " +
          entry.toString());
        count++;
      }
    } finally {
      log.close();
    }
  }

  private static void split(final Configuration conf, final Path p)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(p)) {
      throw new FileNotFoundException(p.toString());
    }
    final Path baseDir = new Path(conf.get(HConstants.HBASE_DIR));
    final Path oldLogDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    if (!fs.getFileStatus(p).isDir()) {
      throw new IOException(p + " is not a directory");
    }
    splitLog(baseDir, p, oldLogDir, fs, conf);
  }

  /**
   * Pass one or more log file names: either dump a textual version of each
   * on <code>stdout</code> (--dump) or split the passed directories
   * (--split).
   *
   * @param args command-line arguments; see {@link #usage()}
   * @throws IOException
   */
  public static void main(String[] args) throws IOException {
    if (args.length < 2) {
      usage();
      System.exit(-1);
    }
    boolean dump = true;
    if (args[0].compareTo("--dump") != 0) {
      if (args[0].compareTo("--split") == 0) {
        dump = false;
      } else {
        usage();
        System.exit(-1);
      }
    }
    Configuration conf = HBaseConfiguration.create();
    for (int i = 1; i < args.length; i++) {
      Path logPath = new Path(args[i]);
      try {
        if (dump) {
          dump(conf, logPath);
        } else {
          split(conf, logPath);
        }
      } catch (Throwable t) {
        t.printStackTrace(System.err);
        System.exit(-1);
      }
    }
  }
}