1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20 package org.apache.hadoop.hbase.regionserver.wal;
21
22 import java.io.DataInput;
23 import java.io.DataOutput;
24 import java.io.FileNotFoundException;
25 import java.io.IOException;
26 import java.io.OutputStream;
27 import java.io.UnsupportedEncodingException;
28 import java.lang.reflect.InvocationTargetException;
29 import java.lang.reflect.Method;
30 import java.net.URLEncoder;
31 import java.util.ArrayList;
32 import java.util.Collections;
33 import java.util.List;
34 import java.util.Map;
35 import java.util.NavigableSet;
36 import java.util.SortedMap;
37 import java.util.TreeMap;
38 import java.util.TreeSet;
39 import java.util.concurrent.ConcurrentSkipListMap;
40 import java.util.concurrent.CopyOnWriteArrayList;
41 import java.util.concurrent.atomic.AtomicInteger;
42 import java.util.concurrent.atomic.AtomicLong;
43 import java.util.concurrent.locks.Condition;
44 import java.util.concurrent.locks.Lock;
45 import java.util.concurrent.locks.ReentrantLock;
46 import java.util.regex.Matcher;
47 import java.util.regex.Pattern;
48
49 import org.apache.commons.logging.Log;
50 import org.apache.commons.logging.LogFactory;
51 import org.apache.hadoop.conf.Configuration;
52 import org.apache.hadoop.fs.FileStatus;
53 import org.apache.hadoop.fs.FileSystem;
54 import org.apache.hadoop.fs.Path;
55 import org.apache.hadoop.fs.PathFilter;
56 import org.apache.hadoop.fs.Syncable;
57 import org.apache.hadoop.hbase.HBaseConfiguration;
58 import org.apache.hadoop.hbase.HConstants;
59 import org.apache.hadoop.hbase.HRegionInfo;
60 import org.apache.hadoop.hbase.HServerInfo;
61 import org.apache.hadoop.hbase.KeyValue;
62 import org.apache.hadoop.hbase.util.Bytes;
63 import org.apache.hadoop.hbase.util.ClassSize;
64 import org.apache.hadoop.hbase.util.FSUtils;
65 import org.apache.hadoop.hbase.util.Threads;
66 import org.apache.hadoop.io.Writable;
67 import org.apache.hadoop.util.StringUtils;
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
public class HLog implements Syncable {
  static final Log LOG = LogFactory.getLog(HLog.class);
  // Family/row used by the special "meta" edits this class writes itself
  // (e.g. the COMPLETE_CACHE_FLUSH marker appended on cache flush).
  public static final byte [] METAFAMILY = Bytes.toBytes("METAFAMILY");
  static final byte [] METAROW = Bytes.toBytes("METAROW");

  // Directory (under a region dir) where recovered edits are written, and
  // the name pattern those edit files must match (an optionally negative
  // integer -- the zero-padded sequence id).
  private static final String RECOVERED_EDITS_DIR = "recovered.edits";
  private static final Pattern EDITFILES_NAME_PATTERN =
    Pattern.compile("-?[0-9]+");

  private final FileSystem fs;
  private final Path dir;          // directory holding the live hlog files
  private final Configuration conf;
  // Observers notified of log events; CopyOnWriteArrayList so iteration is
  // safe while listeners register/unregister concurrently.
  private List<WALObserver> listeners =
    new CopyOnWriteArrayList<WALObserver>();
  private final long optionalFlushInterval;  // ms between LogSyncer syncs
  private final long blocksize;
  private final int flushlogentries;
  private final String prefix;     // hlog file name prefix
  private final Path oldLogDir;    // where rolled logs are archived
  private boolean logRollRequested;

  // Pluggable reader/writer implementations, lazily resolved from conf.
  // NOTE(review): lazy init of these statics is unsynchronized -- assumed
  // benign (worst case the class is looked up twice); confirm before
  // relying on single resolution.
  private static Class<? extends Writer> logWriterClass;
  private static Class<? extends Reader> logReaderClass;

  // Test hook: force re-resolution of the reader implementation from conf.
  static void resetLogReaderClass() {
    HLog.logReaderClass = null;
  }

  // Underlying HDFS output stream of the current writer; kept so the
  // replica count can be queried reflectively (see getLogReplication()).
  private OutputStream hdfs_out;
  private int initialReplication;  // replication of current log at creation
  // Reflected DFSOutputStream.getNumCurrentReplicas (HDFS-826), if present.
  private Method getNumCurrentReplicas;
  final static Object [] NO_ARGS = new Object []{};

  // NOTE(review): never read in this file -- looks vestigial; confirm
  // before removing.
  private boolean forceSync = false;
149
  /**
   * Reads entries back out of an hlog file.
   */
  public interface Reader {
    void init(FileSystem fs, Path path, Configuration c) throws IOException;
    void close() throws IOException;
    Entry next() throws IOException;
    Entry next(Entry reuse) throws IOException;  // reuse avoids allocation
    void seek(long pos) throws IOException;
    long getPosition() throws IOException;
  }

  /**
   * Appends entries to an hlog file.
   */
  public interface Writer {
    void init(FileSystem fs, Path path, Configuration c) throws IOException;
    void close() throws IOException;
    void sync() throws IOException;
    void append(Entry entry) throws IOException;
    long getLength() throws IOException;
  }
166
  // Current log writer.  Swapped under updateLock by rollWriter().
  Writer writer;

  // All retained log files except the current one.  Key is the highest
  // sequence id written into the file named by the value; used to decide
  // which files are safe to archive (see cleanOldLogs()).
  final SortedMap<Long, Path> outputfiles =
    Collections.synchronizedSortedMap(new TreeMap<Long, Path>());

  // Encoded region name -> sequence id of the OLDEST edit of that region
  // that exists only in the WAL (i.e. not yet flushed to an HFile).
  // Entries are added on append (putIfAbsent) and removed on cache flush.
  private final ConcurrentSkipListMap<byte [], Long> lastSeqWritten =
    new ConcurrentSkipListMap<byte [], Long>(Bytes.BYTES_COMPARATOR);

  private volatile boolean closed = false;

  // Monotonically increasing WAL sequence id.
  private final AtomicLong logSeqNum = new AtomicLong(0);

  // Timestamp used as the current log file's name suffix; -1 before the
  // first roll.
  private volatile long filenum = -1;

  // Number of entries appended to the current log file.
  private final AtomicInteger numEntries = new AtomicInteger(0);

  // Roll the log once its length exceeds this (blocksize * multiplier).
  private final long logrollsize;

  // Held for the whole duration of a cache flush, and exclusively by
  // rollWriter()/close(), so flushes and log rolls do not interleave.
  private final Lock cacheFlushLock = new ReentrantLock();

  // Guards appends and the writer swap, plus counters updated alongside.
  private final Object updateLock = new Object();

  private final boolean enabled;  // when false, doWrite() is a no-op

  // When more than this many log files are retained, regions with old
  // unflushed edits are asked to flush so older files become archivable.
  private final int maxLogs;

  // Background thread issuing a sync() every optionalFlushInterval ms.
  private final LogSyncer logSyncerThread;

  // Matches hlog file names: anything ending in "." followed by digits.
  private static final Pattern pattern = Pattern.compile(".*\\.\\d*");

  // Payload of the meta edit that marks a completed cache flush.
  static byte [] COMPLETE_CACHE_FLUSH;
  static {
    try {
      COMPLETE_CACHE_FLUSH =
        "HBASE::CACHEFLUSH".getBytes(HConstants.UTF8_ENCODING);
    } catch (UnsupportedEncodingException e) {
      assert(false);  // UTF-8 is mandated by the JVM spec; cannot happen
    }
  }
236
  // Rough write/sync metrics.  NOTE(review): these are plain volatile longs
  // incremented under updateLock but read-and-reset below WITHOUT it, so an
  // increment racing a get*() call can be lost -- assumed acceptable for
  // approximate metrics; confirm before depending on exactness.
  private static volatile long writeOps;
  private static volatile long writeTime;
  // For measuring latency of syncs
  private static volatile long syncOps;
  private static volatile long syncTime;

  /** @return accumulated number of write operations, resetting the counter */
  public static long getWriteOps() {
    long ret = writeOps;
    writeOps = 0;
    return ret;
  }

  /** @return accumulated write time in ms, resetting the counter */
  public static long getWriteTime() {
    long ret = writeTime;
    writeTime = 0;
    return ret;
  }

  /** @return accumulated number of sync operations, resetting the counter */
  public static long getSyncOps() {
    long ret = syncOps;
    syncOps = 0;
    return ret;
  }

  /** @return accumulated sync time in ms, resetting the counter */
  public static long getSyncTime() {
    long ret = syncTime;
    syncTime = 0;
    return ret;
  }
267
268
269
270
271
272
273
274
275
276
  /**
   * Constructor.  Fails if <code>dir</code> already exists; registers no
   * listeners and uses the default "hlog" file prefix.
   * @param fs filesystem handle
   * @param dir directory where hlogs are stored
   * @param oldLogDir directory where hlogs are archived
   * @param conf configuration to use
   * @throws IOException if a log directory cannot be created
   */
  public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
              final Configuration conf)
  throws IOException {
    this(fs, dir, oldLogDir, conf, null, true, null);
  }
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
  /**
   * Constructor.  Fails if <code>dir</code> already exists.
   * @param fs filesystem handle
   * @param dir directory where hlogs are stored
   * @param oldLogDir directory where hlogs are archived
   * @param conf configuration to use
   * @param listeners observers to register up front (may be null)
   * @param prefix log file name prefix; null/empty means "hlog"
   * @throws IOException if a log directory cannot be created
   */
  public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
      final Configuration conf, final List<WALObserver> listeners,
      final String prefix) throws IOException {
    this(fs, dir, oldLogDir, conf, listeners, true, prefix);
  }
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328 public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
329 final Configuration conf, final List<WALObserver> listeners,
330 final boolean failIfLogDirExists, final String prefix)
331 throws IOException {
332 super();
333 this.fs = fs;
334 this.dir = dir;
335 this.conf = conf;
336 if (listeners != null) {
337 for (WALObserver i: listeners) {
338 registerWALActionsListener(i);
339 }
340 }
341 this.flushlogentries =
342 conf.getInt("hbase.regionserver.flushlogentries", 1);
343 this.blocksize = conf.getLong("hbase.regionserver.hlog.blocksize",
344 this.fs.getDefaultBlockSize());
345
346 float multi = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.95f);
347 this.logrollsize = (long)(this.blocksize * multi);
348 this.optionalFlushInterval =
349 conf.getLong("hbase.regionserver.optionallogflushinterval", 1 * 1000);
350 if (failIfLogDirExists && fs.exists(dir)) {
351 throw new IOException("Target HLog directory already exists: " + dir);
352 }
353 if (!fs.mkdirs(dir)) {
354 throw new IOException("Unable to mkdir " + dir);
355 }
356 this.oldLogDir = oldLogDir;
357 if (!fs.exists(oldLogDir)) {
358 if (!fs.mkdirs(this.oldLogDir)) {
359 throw new IOException("Unable to mkdir " + this.oldLogDir);
360 }
361 }
362 this.maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
363 this.enabled = conf.getBoolean("hbase.regionserver.hlog.enabled", true);
364 LOG.info("HLog configuration: blocksize=" +
365 StringUtils.byteDesc(this.blocksize) +
366 ", rollsize=" + StringUtils.byteDesc(this.logrollsize) +
367 ", enabled=" + this.enabled +
368 ", flushlogentries=" + this.flushlogentries +
369 ", optionallogflushinternal=" + this.optionalFlushInterval + "ms");
370
371 this.prefix = prefix == null || prefix.isEmpty() ?
372 "hlog" : URLEncoder.encode(prefix, "UTF8");
373
374 rollWriter();
375
376
377 this.getNumCurrentReplicas = null;
378 Exception exception = null;
379 if (this.hdfs_out != null) {
380 try {
381 this.getNumCurrentReplicas = this.hdfs_out.getClass().
382 getMethod("getNumCurrentReplicas", new Class<?> []{});
383 this.getNumCurrentReplicas.setAccessible(true);
384 } catch (NoSuchMethodException e) {
385
386 exception = e;
387 } catch (SecurityException e) {
388
389 exception = e;
390 this.getNumCurrentReplicas = null;
391 }
392 }
393 if (this.getNumCurrentReplicas != null) {
394 LOG.info("Using getNumCurrentReplicas--HDFS-826");
395 } else {
396 LOG.info("getNumCurrentReplicas--HDFS-826 not available; hdfs_out=" +
397 this.hdfs_out + ", exception=" + exception.getMessage());
398 }
399
400 logSyncerThread = new LogSyncer(this.optionalFlushInterval);
401 Threads.setDaemonThreadRunning(logSyncerThread,
402 Thread.currentThread().getName() + ".logSyncer");
403 }
404
  /** Registers an observer to be notified of WAL events (rolls, appends, close). */
  public void registerWALActionsListener (final WALObserver listener) {
    this.listeners.add(listener);
  }

  /** @return true if the listener was registered and has now been removed */
  public boolean unregisterWALActionsListener(final WALObserver listener) {
    return this.listeners.remove(listener);
  }
412
413
414
415
  /** @return suffix (creation timestamp) of the current log file; -1 before the first roll */
  public long getFilenum() {
    return this.filenum;
  }
419
420
421
422
423
424
425
426
427
428 public void setSequenceNumber(final long newvalue) {
429 for (long id = this.logSeqNum.get(); id < newvalue &&
430 !this.logSeqNum.compareAndSet(id, newvalue); id = this.logSeqNum.get()) {
431
432
433 LOG.debug("Changed sequenceid from " + logSeqNum + " to " + newvalue);
434 }
435 }
436
437
438
439
  /** @return the current WAL sequence number */
  public long getSequenceNumber() {
    return logSeqNum.get();
  }

  /** @return raw HDFS output stream of the current writer; may be null */
  OutputStream getOutputStream() {
    return this.hdfs_out;
  }
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
  /**
   * Roll the log writer: start writing log messages to a new file.
   *
   * Because a log cannot be rolled during a cache flush, and a cache flush
   * spans two method calls, cacheFlushLock is taken for the whole roll so
   * rolls and flushes cannot interleave.
   *
   * @return if lots of logs are retained, the regions to flush so that next
   * time through we can clean logs; null if nothing needs flushing
   * @throws FailedLogCloseException if the old writer cannot be closed
   * @throws IOException
   */
  public byte [][] rollWriter() throws FailedLogCloseException, IOException {
    // Return if nothing has been written to the current file yet.
    if (this.writer != null && this.numEntries.get() <= 0) {
      return null;
    }
    byte [][] regionsToFlush = null;
    this.cacheFlushLock.lock();
    try {
      if (closed) {
        return regionsToFlush;
      }
      // Do all the preparation outside of updateLock to block incoming
      // writes for as short a time as possible.
      long currentFilenum = this.filenum;
      this.filenum = System.currentTimeMillis();
      Path newPath = computeFilename();
      HLog.Writer nextWriter = this.createWriterInstance(fs, newPath, conf);
      int nextInitialReplication = fs.getFileStatus(newPath).getReplication();
      // Can we get at the dfsclient output stream?  Only if the writer is a
      // SequenceFileLogWriter, which exposes it explicitly.
      OutputStream nextHdfsOut = null;
      if (nextWriter instanceof SequenceFileLogWriter) {
        nextHdfsOut =
          ((SequenceFileLogWriter)nextWriter).getDFSCOutputStream();
      }
      // Tell our listeners a new log was created.
      if (!this.listeners.isEmpty()) {
        for (WALObserver i : this.listeners) {
          i.logRolled(newPath);
        }
      }

      synchronized (updateLock) {
        // Close the old writer, record it in outputfiles, swap in the new.
        Path oldFile = cleanupCurrentWriter(currentFilenum);
        this.writer = nextWriter;
        this.initialReplication = nextInitialReplication;
        this.hdfs_out = nextHdfsOut;

        LOG.info((oldFile != null?
          "Roll " + FSUtils.getPath(oldFile) + ", entries=" +
          this.numEntries.get() +
          ", filesize=" +
          this.fs.getFileStatus(oldFile).getLen() + ". ": "") +
          "New hlog " + FSUtils.getPath(newPath));
        this.numEntries.set(0);
        this.logRollRequested = false;
      }
      // Can we delete any of the old log files?
      if (this.outputfiles.size() > 0) {
        if (this.lastSeqWritten.isEmpty()) {
          LOG.debug("Last sequenceid written is empty. Deleting all old hlogs");
          // No region has edits pending only in the WAL, so every retained
          // file can be archived.
          for (Map.Entry<Long, Path> e : this.outputfiles.entrySet()) {
            archiveLogFile(e.getValue(), e.getKey());
          }
          this.outputfiles.clear();
        } else {
          regionsToFlush = cleanOldLogs();
        }
      }
    } finally {
      this.cacheFlushLock.unlock();
    }
    return regionsToFlush;
  }
538
539
540
541
542
543
544
545
546
547
548
  /**
   * Factory hook: lets subclasses (e.g. tests) inject a different writer
   * without overriding rollWriter().
   * @return a WAL writer for the given filesystem/path
   * @throws IOException
   */
  protected Writer createWriterInstance(final FileSystem fs, final Path path,
      final Configuration conf) throws IOException {
    return createWriter(fs, path, conf);
  }
553
554
555
556
557
558
559
560
561
  /**
   * Get a reader for the WAL.  The implementation class comes from
   * "hbase.regionserver.hlog.reader.impl" (default SequenceFileLogReader)
   * and is cached in a static.
   * @return an initialized Reader positioned at the start of <code>path</code>
   * @throws IOException on init failure, or wrapping any reflection error
   */
  public static Reader getReader(final FileSystem fs,
    final Path path, Configuration conf)
  throws IOException {
    try {
      // NOTE(review): unsynchronized lazy init; worst case the class is
      // resolved twice from conf -- assumed harmless, confirm if different
      // confs can reach this concurrently.
      if (logReaderClass == null) {

        logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
          SequenceFileLogReader.class, Reader.class);
      }

      HLog.Reader reader = logReaderClass.newInstance();
      reader.init(fs, path, conf);
      return reader;
    } catch (IOException e) {
      // Rethrow as-is so the generic handler below does not re-wrap it.
      throw e;
    }
    catch (Exception e) {
      throw new IOException("Cannot get log reader", e);
    }
  }
584
585
586
587
588
589
590
591
592 public static Writer createWriter(final FileSystem fs,
593 final Path path, Configuration conf)
594 throws IOException {
595 try {
596 if (logWriterClass == null) {
597 logWriterClass = conf.getClass("hbase.regionserver.hlog.writer.impl",
598 SequenceFileLogWriter.class, Writer.class);
599 }
600 HLog.Writer writer = (HLog.Writer) logWriterClass.newInstance();
601 writer.init(fs, path, conf);
602 return writer;
603 } catch (Exception e) {
604 IOException ie = new IOException("cannot get log writer");
605 ie.initCause(e);
606 throw ie;
607 }
608 }
609
610
611
612
613
614
615
616
617 private byte [][] cleanOldLogs() throws IOException {
618 Long oldestOutstandingSeqNum = getOldestOutstandingSeqNum();
619
620
621 TreeSet<Long> sequenceNumbers =
622 new TreeSet<Long>(this.outputfiles.headMap(
623 (Long.valueOf(oldestOutstandingSeqNum.longValue()))).keySet());
624
625 int logsToRemove = sequenceNumbers.size();
626 if (logsToRemove > 0) {
627 if (LOG.isDebugEnabled()) {
628
629 byte [] oldestRegion = getOldestRegion(oldestOutstandingSeqNum);
630 LOG.debug("Found " + logsToRemove + " hlogs to remove" +
631 " out of total " + this.outputfiles.size() + ";" +
632 " oldest outstanding sequenceid is " + oldestOutstandingSeqNum +
633 " from region " + Bytes.toString(oldestRegion));
634 }
635 for (Long seq : sequenceNumbers) {
636 archiveLogFile(this.outputfiles.remove(seq), seq);
637 }
638 }
639
640
641
642 byte [][] regions = null;
643 int logCount = this.outputfiles.size();
644 if (logCount > this.maxLogs && this.outputfiles != null &&
645 this.outputfiles.size() > 0) {
646
647 regions = findMemstoresWithEditsEqualOrOlderThan(this.outputfiles.firstKey(),
648 this.lastSeqWritten);
649 if (regions != null) {
650 StringBuilder sb = new StringBuilder();
651 for (int i = 0; i < regions.length; i++) {
652 if (i > 0) sb.append(", ");
653 sb.append(Bytes.toStringBinary(regions[i]));
654 }
655 LOG.info("Too many hlogs: logs=" + logCount + ", maxlogs=" +
656 this.maxLogs + "; forcing flush of " + regions.length + " regions(s): " +
657 sb.toString());
658 }
659 }
660 return regions;
661 }
662
663
664
665
666
667
668
669
670
671 static byte [][] findMemstoresWithEditsEqualOrOlderThan(final long oldestWALseqid,
672 final Map<byte [], Long> regionsToSeqids) {
673
674 List<byte []> regions = null;
675 for (Map.Entry<byte [], Long> e: regionsToSeqids.entrySet()) {
676 if (e.getValue().longValue() <= oldestWALseqid) {
677 if (regions == null) regions = new ArrayList<byte []>();
678 regions.add(e.getKey());
679 }
680 }
681 return regions == null?
682 null: regions.toArray(new byte [][] {HConstants.EMPTY_BYTE_ARRAY});
683 }
684
685
686
687
  /**
   * @return sequence id of the oldest edit any region still holds only in
   * the WAL.  Caller must ensure lastSeqWritten is non-empty (rollWriter
   * checks before calling cleanOldLogs); Collections.min throws
   * NoSuchElementException on an empty collection.
   */
  private Long getOldestOutstandingSeqNum() {
    return Collections.min(this.lastSeqWritten.values());
  }
691
692
693
694
695
696 private byte [] getOldestRegion(final Long oldestOutstandingSeqNum) {
697 byte [] oldestRegion = null;
698 for (Map.Entry<byte [], Long> e: this.lastSeqWritten.entrySet()) {
699 if (e.getValue().longValue() == oldestOutstandingSeqNum.longValue()) {
700 oldestRegion = e.getKey();
701 break;
702 }
703 }
704 return oldestRegion;
705 }
706
707
708
709
710
711
712
713 private Path cleanupCurrentWriter(final long currentfilenum)
714 throws IOException {
715 Path oldFile = null;
716 if (this.writer != null) {
717
718 try {
719 this.writer.close();
720 } catch (IOException e) {
721
722
723
724 FailedLogCloseException flce =
725 new FailedLogCloseException("#" + currentfilenum);
726 flce.initCause(e);
727 throw e;
728 }
729 if (currentfilenum >= 0) {
730 oldFile = computeFilename(currentfilenum);
731 this.outputfiles.put(Long.valueOf(this.logSeqNum.get()), oldFile);
732 }
733 }
734 return oldFile;
735 }
736
  /**
   * Move a finished log file into the archive directory (oldLogDir).
   * @param p log file to move
   * @param seqno highest sequence id the file contains (for the log line)
   * @throws IOException if the rename fails
   */
  private void archiveLogFile(final Path p, final Long seqno) throws IOException {
    Path newPath = getHLogArchivePath(this.oldLogDir, p);
    LOG.info("moving old hlog file " + FSUtils.getPath(p) +
      " whose highest sequenceid is " + seqno + " to " +
      FSUtils.getPath(newPath));
    if (!this.fs.rename(p, newPath)) {
      throw new IOException("Unable to rename " + p + " to " + newPath);
    }
  }
746
747
748
749
750
751
  /**
   * Convenience overload: compute the path of the current log file from
   * this.filenum.
   * @return Path of the current hlog file
   */
  protected Path computeFilename() {
    return computeFilename(this.filenum);
  }
755
756
757
758
759
760
761
762 protected Path computeFilename(long filenum) {
763 if (filenum < 0) {
764 throw new RuntimeException("hlog file number can't be < 0");
765 }
766 return new Path(dir, prefix + "." + filenum);
767 }
768
769
770
771
772
773
  /**
   * Shut down the log, archive every remaining log file into oldLogDir,
   * then delete the (now empty) log directory.
   * @throws IOException if a file cannot be archived
   */
  public void closeAndDelete() throws IOException {
    close();
    FileStatus[] files = fs.listStatus(this.dir);
    for(FileStatus file : files) {
      Path p = getHLogArchivePath(this.oldLogDir, file.getPath());
      if (!fs.rename(file.getPath(),p)) {
        throw new IOException("Unable to rename " + file.getPath() + " to " + p);
      }
    }
    LOG.debug("Moved " + files.length + " log files to " +
      FSUtils.getPath(this.oldLogDir));
    // Deletion failure is only logged: the files were already archived.
    if (!fs.delete(dir, true)) {
      LOG.info("Unable to delete " + dir);
    }
  }
789
790
791
792
793
794
  /**
   * Shut down the log: stop the sync thread, notify listeners, and close
   * the current writer.  Takes cacheFlushLock so no flush is in flight.
   * @throws IOException
   */
  public void close() throws IOException {
    try {
      logSyncerThread.interrupt();
      // Make sure we synced everything; wait at most two sync intervals.
      logSyncerThread.join(this.optionalFlushInterval*2);
    } catch (InterruptedException e) {
      LOG.error("Exception while waiting for syncer thread to die", e);
    }

    cacheFlushLock.lock();
    try {
      // Tell our listeners that the log is closing.
      if (!this.listeners.isEmpty()) {
        for (WALObserver i : this.listeners) {
          i.logCloseRequested();
        }
      }
      synchronized (updateLock) {
        this.closed = true;
        if (LOG.isDebugEnabled()) {
          LOG.debug("closing hlog writer in " + this.dir.toString());
        }
        this.writer.close();
      }
    } finally {
      cacheFlushLock.unlock();
    }
  }
823
824
825
826
827
828
829
830
  /**
   * Append an edit for the given region; a key is built here with seqnum -1
   * (the real sequence number is assigned inside the three-arg append).
   * @param regionInfo region the edit belongs to
   * @param logEdit the edit to append
   * @param now timestamp to stamp into the key
   * @param isMetaRegion NOTE(review): not used by this method -- the meta
   * decision downstream is re-derived from regionInfo; confirm whether the
   * parameter is kept only for API compatibility.
   * @throws IOException
   */
  public void append(HRegionInfo regionInfo, WALEdit logEdit,
    final long now,
    final boolean isMetaRegion)
  throws IOException {
    byte [] regionName = regionInfo.getEncodedNameAsBytes();
    byte [] tableName = regionInfo.getTableDesc().getName();
    this.append(regionInfo, makeKey(regionName, tableName, -1, now), logEdit);
  }
839
840
841
842
843
844
845
  /**
   * Factory hook for the log key; subclasses may return an HLogKey subclass.
   * @param regionName encoded region name
   * @param tableName table the edit belongs to
   * @param seqnum sequence number (may be -1, assigned later)
   * @param now timestamp
   * @return a new HLogKey
   */
  protected HLogKey makeKey(byte[] regionName, byte[] tableName, long seqnum, long now) {
    return new HLogKey(regionName, tableName, seqnum, now);
  }
849
850
851
852
853
854
855
856
857
858
  /**
   * Append an entry to the log, assigning the next sequence number to the
   * key.  Syncs immediately unless the table opted into deferred log flush
   * (meta regions always sync).
   * @param regionInfo region the edit belongs to
   * @param logKey key; its sequence number is set here
   * @param logEdit the edit
   * @throws IOException if the log is closed or the write fails
   */
  public void append(HRegionInfo regionInfo, HLogKey logKey, WALEdit logEdit)
  throws IOException {
    if (this.closed) {
      throw new IOException("Cannot append; log is closed");
    }
    synchronized (updateLock) {
      long seqNum = obtainSeqNum();
      logKey.setLogSeqNum(seqNum);
      // Use the encoded region name as key: shorter and guaranteed unique.
      // putIfAbsent: record only the FIRST (oldest) unflushed sequence id
      // per region; completeCacheFlush() clears the entry once the region
      // flushes, which is what lets old log files be archived.
      this.lastSeqWritten.putIfAbsent(regionInfo.getEncodedNameAsBytes(),
        Long.valueOf(seqNum));
      doWrite(regionInfo, logKey, logEdit);
      this.numEntries.incrementAndGet();
    }
    // Sync if catalog region, or if the table does not allow deferred
    // log flushing.
    if (regionInfo.isMetaRegion() ||
        !regionInfo.getTableDesc().isDeferredLogFlush()) {
      // sync txn to file system
      this.sync();
    }
  }
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
  /**
   * Append a set of edits to the log, building the key here with the next
   * sequence number.  No-op on an empty edit.  Syncs immediately unless the
   * table opted into deferred log flush (meta regions always sync).
   * @param info region the edits belong to
   * @param tableName table the edits belong to
   * @param edits the edits to append; ignored if empty
   * @param now timestamp to stamp into the key
   * @throws IOException if the log is closed or the write fails
   */
  public void append(HRegionInfo info, byte [] tableName, WALEdit edits,
    final long now)
  throws IOException {
    if (edits.isEmpty()) return;
    if (this.closed) {
      throw new IOException("Cannot append; log is closed");
    }
    synchronized (this.updateLock) {
      long seqNum = obtainSeqNum();
      // The 'lastSeqWritten' map holds the sequence number of the oldest
      // write for each region; only the first write since the last cache
      // flush is recorded (putIfAbsent).  When the cache is flushed, the
      // entry for the region being flushed is removed, letting the oldest
      // log files be archived once no region's entry precedes them.
      byte [] hriKey = info.getEncodedNameAsBytes();
      this.lastSeqWritten.putIfAbsent(hriKey, seqNum);
      HLogKey logKey = makeKey(hriKey, tableName, seqNum, now);
      doWrite(info, logKey, edits);
      this.numEntries.incrementAndGet();
    }
    // Sync if catalog region, or if the table does not allow deferred
    // log flushing.
    if (info.isMetaRegion() ||
        !info.getTableDesc().isDeferredLogFlush()) {
      // sync txn to file system
      this.sync();
    }
  }
940
941
942
943
944
  /**
   * Background daemon that calls HLog.sync() every optionalFlushInterval ms
   * so deferred-flush edits eventually reach the filesystem.  Stopped by
   * interrupt from close().
   */
  class LogSyncer extends Thread {

    private final long optionalFlushInterval;

    // Set on the way out of run(); marks the thread as gone.
    private boolean syncerShuttingDown = false;

    LogSyncer(long optionalFlushInterval) {
      this.optionalFlushInterval = optionalFlushInterval;
    }

    @Override
    public void run() {
      try {
        // Loop until interrupted; sleep then sync on each pass.
        while(!this.isInterrupted()) {

          Thread.sleep(this.optionalFlushInterval);
          sync();  // the enclosing HLog's sync(), not a Thread method
        }
      } catch (IOException e) {
        // A failed sync means edits are at risk; ask for a roll and exit.
        LOG.error("Error while syncing, requesting close of hlog ", e);
        requestLogRoll();
      } catch (InterruptedException e) {
        // Normal shutdown path (close() interrupts us).
        LOG.debug(getName() + " interrupted while waiting for sync requests");
      } finally {
        syncerShuttingDown = true;
        LOG.info(getName() + " exiting");
      }
    }
  }
976
  /**
   * Sync the current writer to the filesystem, updating sync metrics, and
   * request a log roll if the file has outgrown logrollsize or the HDFS
   * pipeline has lost replicas.
   * NOTE(review): this.writer is read outside updateLock for the sync()
   * call itself -- assumed safe against the roll path's swap-under-lock;
   * confirm before changing the locking here.
   * @throws IOException if the underlying sync fails (a roll is requested
   * first)
   */
  public void sync() throws IOException {
    synchronized (this.updateLock) {
      if (this.closed) {
        return;
      }
    }
    try {
      long now = System.currentTimeMillis();
      // Sync outside the lock so appends are not blocked for its duration.
      this.writer.sync();
      synchronized (this.updateLock) {
        syncTime += System.currentTimeMillis() - now;
        syncOps++;
        if (!logRollRequested) {
          checkLowReplication();
          if (this.writer.getLength() > this.logrollsize) {
            requestLogRoll();
          }
        }
      }

    } catch (IOException e) {
      LOG.fatal("Could not append. Requesting close of hlog", e);
      requestLogRoll();
      throw e;
    }
  }
1004
  /**
   * If the current log file has fewer live replicas than it was created
   * with, the HDFS pipeline is degraded: request a roll so a fresh pipeline
   * is established.  Best-effort -- reflection failures are logged and
   * ignored.
   */
  private void checkLowReplication() {
    // if the number of replicas in HDFS has fallen below the initial
    // value, then roll logs.
    try {
      int numCurrentReplicas = getLogReplication();
      // 0 means "could not determine" (see getLogReplication()), so only a
      // positive-but-lower count triggers the roll.
      if (numCurrentReplicas != 0 &&
          numCurrentReplicas < this.initialReplication) {
        LOG.warn("HDFS pipeline error detected. " +
            "Found " + numCurrentReplicas + " replicas but expecting " +
            this.initialReplication + " replicas. " +
            " Requesting close of hlog.");
        requestLogRoll();
        logRollRequested = true;
      }
    } catch (Exception e) {
      // Broad catch is deliberate: replica probing must never kill a sync.
      LOG.warn("Unable to invoke DFSOutputStream.getNumCurrentReplicas" + e +
          " still proceeding ahead...");
    }
  }
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
  /**
   * Get the live datanode replica count of the current hlog file via the
   * reflected DFSOutputStream.getNumCurrentReplicas (HDFS-826).
   * @return the replica count, or 0 if it cannot be determined (method or
   * stream unavailable, or unexpected return type)
   * @throws IllegalArgumentException reflection failure
   * @throws IllegalAccessException reflection failure
   * @throws InvocationTargetException reflection failure
   */
  int getLogReplication() throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
    if(this.getNumCurrentReplicas != null && this.hdfs_out != null) {
      Object repl = this.getNumCurrentReplicas.invoke(this.hdfs_out, NO_ARGS);
      if (repl instanceof Integer) {
        return ((Integer)repl).intValue();
      }
    }
    return 0;
  }
1046
  /** @return true if the HDFS-826 replica-count reflection is available */
  boolean canGetCurReplicas() {
    return this.getNumCurrentReplicas != null;
  }

  /** Syncable implementation; delegates to sync(). */
  public void hsync() throws IOException {
    // Not yet implemented up in hdfs so just call hflush.
    sync();
  }
1055
1056 private void requestLogRoll() {
1057 if (!this.listeners.isEmpty()) {
1058 for (WALObserver i: this.listeners) {
1059 i.logRollRequested();
1060 }
1061 }
1062 }
1063
  /**
   * Write one entry to the current log file, updating write metrics and
   * warning on slow (> 1s) appends.  No-op when the hlog is disabled via
   * "hbase.regionserver.hlog.enabled".  Caller holds updateLock.
   * @param info region the edit belongs to (passed to observers)
   * @param logKey key to write
   * @param logEdit edit to write
   * @throws IOException if the append fails (a roll is requested first)
   */
  protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit)
  throws IOException {
    if (!this.enabled) {
      return;
    }
    if (!this.listeners.isEmpty()) {
      for (WALObserver i: this.listeners) {
        i.visitLogEntryBeforeWrite(info, logKey, logEdit);
      }
    }
    try {
      long now = System.currentTimeMillis();
      this.writer.append(new HLog.Entry(logKey, logEdit));
      long took = System.currentTimeMillis() - now;
      writeTime += took;
      writeOps++;
      if (took > 1000) {
        // Slow append: log enough context (entry count, payload size) to
        // diagnose the stall.
        long len = 0;
        for(KeyValue kv : logEdit.getKeyValues()) {
          len += kv.getLength();
        }
        LOG.warn(String.format(
          "%s took %d ms appending an edit to hlog; editcount=%d, len~=%s",
          Thread.currentThread().getName(), took, this.numEntries.get(),
          StringUtils.humanReadableInt(len)));
      }
    } catch (IOException e) {
      LOG.fatal("Could not append. Requesting close of hlog", e);
      requestLogRoll();
      throw e;
    }
  }
1096
1097
  /** @return number of entries in the current log file */
  int getNumEntries() {
    return numEntries.get();
  }

  /**
   * Obtain the next WAL sequence number.
   * @return a new, incremented sequence number
   */
  private long obtainSeqNum() {
    return this.logSeqNum.incrementAndGet();
  }

  /** @return the number of retained (already rolled) log files */
  int getNumLogFiles() {
    return outputfiles.size();
  }
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
  /**
   * Begin a cache-flush transaction.  Acquires cacheFlushLock (blocking log
   * rolls) -- the caller MUST later call completeCacheFlush() or
   * abortCacheFlush() to release it.
   * @return sequence id to pass to completeCacheFlush()
   */
  public long startCacheFlush() {
    this.cacheFlushLock.lock();
    return obtainSeqNum();
  }
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
  /**
   * Complete the cache-flush transaction started by startCacheFlush():
   * append a COMPLETE_CACHE_FLUSH marker edit, sync it, and drop the
   * region's lastSeqWritten entry if all its edits are now flushed (which
   * un-pins old log files for archival).  Always releases cacheFlushLock.
   * @param encodedRegionName region that flushed
   * @param tableName table of the region
   * @param logSeqId sequence id returned by startCacheFlush()
   * @param isMetaRegion unused here; part of the public signature
   * @throws IOException if the marker append fails
   */
  public void completeCacheFlush(final byte [] encodedRegionName,
      final byte [] tableName, final long logSeqId, final boolean isMetaRegion)
  throws IOException {
    try {
      // Early return when closed is safe: the finally clause below still
      // releases the lock taken in startCacheFlush().
      if (this.closed) {
        return;
      }
      synchronized (updateLock) {
        long now = System.currentTimeMillis();
        WALEdit edit = completeCacheFlushLogEdit();
        HLogKey key = makeKey(encodedRegionName, tableName, logSeqId,
            System.currentTimeMillis());
        this.writer.append(new Entry(key, edit));
        writeTime += System.currentTimeMillis() - now;
        writeOps++;
        this.numEntries.incrementAndGet();
        // Only clear the region's "oldest unflushed edit" marker if no
        // newer edit arrived after the flush began.
        Long seq = this.lastSeqWritten.get(encodedRegionName);
        if (seq != null && logSeqId >= seq.longValue()) {
          this.lastSeqWritten.remove(encodedRegionName);
        }
      }
      // sync txn to file system
      this.sync();

    } finally {
      this.cacheFlushLock.unlock();
    }
  }
1170
1171 private WALEdit completeCacheFlushLogEdit() {
1172 KeyValue kv = new KeyValue(METAROW, METAFAMILY, null,
1173 System.currentTimeMillis(), COMPLETE_CACHE_FLUSH);
1174 WALEdit e = new WALEdit();
1175 e.add(kv);
1176 return e;
1177 }
1178
1179
1180
1181
1182
1183
1184
  /**
   * Abort a cache-flush transaction started by startCacheFlush(); only
   * releases cacheFlushLock (no marker edit is written).
   */
  public void abortCacheFlush() {
    this.cacheFlushLock.unlock();
  }
1188
1189
1190
1191
1192
  /**
   * @param family column family to test
   * @return true if the family is the internal METAFAMILY used for hlog
   * marker edits
   */
  public static boolean isMetaFamily(byte [] family) {
    return Bytes.equals(METAFAMILY, family);
  }

  /** @return configured HLogKey class ("hbase.regionserver.hlog.keyclass", default HLogKey) */
  @SuppressWarnings("unchecked")
  public static Class<? extends HLogKey> getKeyClass(Configuration conf) {
    return (Class<? extends HLogKey>)
      conf.getClass("hbase.regionserver.hlog.keyclass", HLogKey.class);
  }
1202
1203 public static HLogKey newKey(Configuration conf) throws IOException {
1204 Class<? extends HLogKey> keyClass = getKeyClass(conf);
1205 try {
1206 return keyClass.newInstance();
1207 } catch (InstantiationException e) {
1208 throw new IOException("cannot create hlog key");
1209 } catch (IllegalAccessException e) {
1210 throw new IOException("cannot create hlog key");
1211 }
1212 }
1213
1214
1215
1216
1217
  /**
   * A WAL entry: an HLogKey plus the WALEdit it keys.  Writable so it can
   * be serialized into SequenceFiles and over RPC.
   */
  public static class Entry implements Writable {
    private WALEdit edit;
    private HLogKey key;

    /** No-arg constructor for Writable deserialization. */
    public Entry() {
      edit = new WALEdit();
      key = new HLogKey();
    }

    /**
     * Constructor for an explicit key/edit pair.
     * @param key the entry's key
     * @param edit the entry's edit
     */
    public Entry(HLogKey key, WALEdit edit) {
      super();
      this.key = key;
      this.edit = edit;
    }

    /** @return the edit */
    public WALEdit getEdit() {
      return edit;
    }

    /** @return the key */
    public HLogKey getKey() {
      return key;
    }

    @Override
    public String toString() {
      return this.key + "=" + this.edit;
    }

    @Override
    public void write(DataOutput dataOutput) throws IOException {
      // Key first, then edit; readFields() must mirror this order.
      this.key.write(dataOutput);
      this.edit.write(dataOutput);
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException {
      this.key.readFields(dataInput);
      this.edit.readFields(dataInput);
    }
  }
1269
1270
1271
1272
1273
1274
1275
  /**
   * Construct the HLog directory name for the given server.
   * @param info server info whose server name is used
   * @return the relative HLog directory name
   */
  public static String getHLogDirectoryName(HServerInfo info) {
    return getHLogDirectoryName(info.getServerName());
  }
1279
1280
1281
1282
1283
1284
1285
1286
  /**
   * Construct the HLog directory name from a server address and start code.
   * @param serverAddress the server address; null/empty yields null
   * @param startCode server start code
   * @return the relative HLog directory name, or null on empty address
   */
  public static String getHLogDirectoryName(String serverAddress,
      long startCode) {
    if (serverAddress == null || serverAddress.length() == 0) {
      return null;
    }
    return getHLogDirectoryName(
        HServerInfo.getServerName(serverAddress, startCode));
  }
1295
1296
1297
1298
1299
1300
1301
1302 public static String getHLogDirectoryName(String serverName) {
1303 StringBuilder dirName = new StringBuilder(HConstants.HREGION_LOGDIR_NAME);
1304 dirName.append("/");
1305 dirName.append(serverName);
1306 return dirName.toString();
1307 }
1308
1309
1310
1311
1312
1313
  /** @return the directory the live hlogs are written into */
  protected Path getDir() {
    return dir;
  }

  /**
   * @param filename file name to test
   * @return true if the name looks like an hlog file (anything ending in a
   * dot followed by digits).  NOTE(review): the pattern ".*\.\d*" also
   * accepts a trailing dot with NO digits (e.g. "hlog.") -- confirm whether
   * that is intentional before tightening to \d+.
   */
  public static boolean validateHLogFilename(String filename) {
    return pattern.matcher(filename).matches();
  }

  /**
   * @param oldLogDir archive directory
   * @param p log file being archived
   * @return archive path: same file name under oldLogDir
   */
  static Path getHLogArchivePath(Path oldLogDir, Path p) {
    return new Path(oldLogDir, p.getName());
  }

  /**
   * @param seqid sequence id
   * @return seqid zero-padded to 19 digits so lexicographic file-name order
   * matches numeric order
   */
  static String formatRecoveredEditsFileName(final long seqid) {
    return String.format("%019d", seqid);
  }
1329
1330
1331
1332
1333
1334
1335
1336
  /**
   * Returns sorted set of edit files made by log-splitting, excluding files
   * whose names do not match EDITFILES_NAME_PATTERN or that are not plain
   * files.
   * @param fs filesystem to list on
   * @param regiondir region directory whose recovered.edits dir is scanned
   * @return sorted (by name, i.e. by seqid thanks to zero-padding) set of
   * edit file paths; empty if the directory does not exist
   * @throws IOException
   */
  public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem fs,
      final Path regiondir)
  throws IOException {
    Path editsdir = getRegionDirRecoveredEditsDir(regiondir);
    FileStatus[] files = fs.listStatus(editsdir, new PathFilter() {
      @Override
      public boolean accept(Path p) {
        boolean result = false;
        try {
          // Return files and only files that match the editfile names
          // pattern.  There can be other files in this directory other
          // than edit files.  In particular, on error or ctrl-c, we could
          // leave some half-made files behind.
          Matcher m = EDITFILES_NAME_PATTERN.matcher(p.getName());
          result = fs.isFile(p) && m.matches();
        } catch (IOException e) {
          // Best effort: a failed stat just excludes the file.
          LOG.warn("Failed isFile check on " + p);
        }
        return result;
      }
    });
    NavigableSet<Path> filesSorted = new TreeSet<Path>();
    if (files == null) return filesSorted;
    for (FileStatus status: files) {
      filesSorted.add(status.getPath());
    }
    return filesSorted;
  }
1365
1366
1367
1368
1369
1370
1371
1372
  /**
   * Move aside a bad edits file by renaming it with a timestamp suffix so
   * it no longer matches the edit-file name pattern.
   * @param fs filesystem to rename on
   * @param edits edits file to move aside
   * @return the path the file was renamed to (even if the rename failed,
   * which is only logged)
   * @throws IOException
   */
  public static Path moveAsideBadEditsFile(final FileSystem fs,
      final Path edits)
  throws IOException {
    Path moveAsideName = new Path(edits.getParent(), edits.getName() + "." +
      System.currentTimeMillis());
    if (!fs.rename(edits, moveAsideName)) {
      LOG.warn("Rename failed from " + edits + " to " + moveAsideName);
    }
    return moveAsideName;
  }
1383
1384
1385
1386
1387
1388
  /**
   * @param regiondir a region directory
   * @return the recovered.edits directory under <code>regiondir</code>
   */
  public static Path getRegionDirRecoveredEditsDir(final Path regiondir) {
    return new Path(regiondir, RECOVERED_EDITS_DIR);
  }

  // Rough fixed heap-size estimate of an HLog instance for memory
  // accounting.  NOTE(review): the reference/primitive counts are
  // hand-maintained; re-verify whenever fields are added or removed.
  public static final long FIXED_OVERHEAD = ClassSize.align(
    ClassSize.OBJECT + (5 * ClassSize.REFERENCE) +
    ClassSize.ATOMIC_INTEGER + Bytes.SIZEOF_INT + (3 * Bytes.SIZEOF_LONG));
1396
  /** Print command-line usage for the main() entry point to stderr. */
  private static void usage() {
    System.err.println("Usage: HLog <ARGS>");
    System.err.println("Arguments:");
    System.err.println(" --dump  Dump textual representation of passed one or more files");
    System.err.println("         For example: HLog --dump hdfs://example.com:9000/hbase/.logs/MACHINE/LOGFILE");
    System.err.println(" --split Split the passed directory of WAL logs");
    System.err.println("         For example: HLog --split hdfs://example.com:9000/hbase/.logs/DIR");
  }
1405
  /**
   * Print every entry of the given hlog file to stdout, one line per entry
   * with its index and file position.
   * @param conf configuration (names the filesystem)
   * @param p path to a single hlog file
   * @throws FileNotFoundException if p does not exist
   * @throws IOException if p is not a plain file or reading fails
   */
  private static void dump(final Configuration conf, final Path p)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(p)) {
      throw new FileNotFoundException(p.toString());
    }
    if (!fs.isFile(p)) {
      throw new IOException(p + " is not a file");
    }
    Reader log = getReader(fs, p, conf);
    try {
      int count = 0;
      HLog.Entry entry;
      while ((entry = log.next()) != null) {
        System.out.println("#" + count + ", pos=" + log.getPosition() + " " +
          entry.toString());
        count++;
      }
    } finally {
      // Close the reader even if iteration throws.
      log.close();
    }
  }
1428
  /**
   * Run the log splitter over a directory of WAL files.
   * @param conf configuration (names the filesystem and HBase root dir)
   * @param p directory of WAL logs to split
   * @throws FileNotFoundException if p does not exist
   * @throws IOException if p is not a directory or splitting fails
   */
  private static void split(final Configuration conf, final Path p)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(p)) {
      throw new FileNotFoundException(p.toString());
    }
    final Path baseDir = new Path(conf.get(HConstants.HBASE_DIR));
    final Path oldLogDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    if (!fs.getFileStatus(p).isDir()) {
      throw new IOException(p + " is not a directory");
    }

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(
      conf, baseDir, p, oldLogDir, fs);
    logSplitter.splitLog();
  }
1445
1446
1447
1448
1449
1450
1451
1452
1453 public static void main(String[] args) throws IOException {
1454 if (args.length < 2) {
1455 usage();
1456 System.exit(-1);
1457 }
1458 boolean dump = true;
1459 if (args[0].compareTo("--dump") != 0) {
1460 if (args[0].compareTo("--split") == 0) {
1461 dump = false;
1462 } else {
1463 usage();
1464 System.exit(-1);
1465 }
1466 }
1467 Configuration conf = HBaseConfiguration.create();
1468 for (int i = 1; i < args.length; i++) {
1469 try {
1470 conf.set("fs.default.name", args[i]);
1471 conf.set("fs.defaultFS", args[i]);
1472 Path logPath = new Path(args[i]);
1473 if (dump) {
1474 dump(conf, logPath);
1475 } else {
1476 split(conf, logPath);
1477 }
1478 } catch (Throwable t) {
1479 t.printStackTrace(System.err);
1480 System.exit(-1);
1481 }
1482 }
1483 }
1484 }