/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.regionserver.wal;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALTrailer;
import org.apache.hadoop.hbase.util.FSUtils;

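/**
 * Base class for {@link HLog.Reader} implementations: performs the setup that is
 * common to all WAL readers (file length, trailer warn size, optional compression
 * context) and delegates format-specific reading to subclasses.
 *
 * <p>A minimal read loop over an already-constructed reader might look like the
 * sketch below; {@code reader}, {@code fs}, {@code path}, {@code conf} and
 * {@code stream} are caller-supplied and shown for illustration only:
 * <pre>
 * reader.init(fs, path, conf, stream);
 * for (HLog.Entry entry = reader.next(); entry != null; entry = reader.next()) {
 *   // process each WAL entry
 * }
 * </pre>
 */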
@InterfaceAudience.Private
public abstract class ReaderBase implements HLog.Reader {
  private static final Log LOG = LogFactory.getLog(ReaderBase.class);
  protected Configuration conf;
  protected FileSystem fs;
  protected Path path;
  protected long edit = 0;
  protected long fileLength;
  protected WALTrailer trailer;

  /**
   * Trailer size above which a warning is logged; read from
   * {@link HLog#WAL_TRAILER_WARN_SIZE}.
   */
  protected int trailerWarnSize;

  /**
   * Compression context to use while reading; null when compression is not enabled.
   */
  protected CompressionContext compressionContext = null;
  protected boolean emptyCompressionContext = true;

  /**
   * Default constructor.
   */
  public ReaderBase() {
  }

  @Override
  public void init(FileSystem fs, Path path, Configuration conf, FSDataInputStream stream)
      throws IOException {
    this.conf = conf;
    this.path = path;
    this.fs = fs;
    this.fileLength = this.fs.getFileStatus(path).getLen();
    this.trailerWarnSize = conf.getInt(HLog.WAL_TRAILER_WARN_SIZE,
        HLog.DEFAULT_WAL_TRAILER_WARN_SIZE);
    initReader(stream);

    boolean compression = hasCompression();
    if (compression) {
      // Compression is enabled: create a fresh dictionary-backed context, or
      // clear the existing one so it can be reused for this file.
      try {
        if (compressionContext == null) {
          compressionContext = new CompressionContext(LRUDictionary.class,
              FSUtils.isRecoveredEdits(path));
        } else {
          compressionContext.clear();
        }
      } catch (Exception e) {
        throw new IOException("Failed to initialize CompressionContext", e);
      }
    }
    initAfterCompression();
  }

  @Override
  public HLog.Entry next() throws IOException {
    return next(null);
  }

  @Override
  public HLog.Entry next(HLog.Entry reuse) throws IOException {
    HLog.Entry e = reuse;
    if (e == null) {
      e = new HLog.Entry(new HLogKey(), new WALEdit());
    }
    if (compressionContext != null) {
      e.setCompressionContext(compressionContext);
    }

    boolean hasEntry = false;
    try {
      hasEntry = readNext(e);
    } catch (IllegalArgumentException iae) {
      TableName tableName = e.getKey().getTablename();
      if (tableName != null && tableName.equals(TableName.OLD_ROOT_TABLE_NAME)) {
        // This is an edit for the old ROOT table; ignore it and read the next entry.
        LOG.info("Got an old ROOT edit, ignoring ");
        return next(e);
      }
      else throw iae;
    }
    edit++;
    if (compressionContext != null && emptyCompressionContext) {
      emptyCompressionContext = false;
    }
    return hasEntry ? e : null;
  }

  @Override
  public void seek(long pos) throws IOException {
    if (compressionContext != null && emptyCompressionContext) {
      // The compression dictionary is built up from the entries themselves, so if
      // nothing has been read yet we read forward to the requested position instead
      // of seeking directly, otherwise later entries could not be decompressed.
      while (next() != null) {
        if (getPosition() == pos) {
          emptyCompressionContext = false;
          break;
        }
      }
    }
    seekOnFs(pos);
  }

  /**
   * Initializes the format-specific reader on the given input stream.
   */
  protected abstract void initReader(FSDataInputStream stream) throws IOException;

  /**
   * Completes initialization once the compression context (if any) has been set up.
   * Called once from {@link #init(FileSystem, Path, Configuration, FSDataInputStream)}.
   */
  protected abstract void initAfterCompression() throws IOException;

  /**
   * @return Whether compression is enabled for this log.
   */
  protected abstract boolean hasCompression();

  /**
   * Reads the next entry from the log.
   * @param e The entry to read into.
   * @return Whether there was anything to read.
   */
  protected abstract boolean readNext(HLog.Entry e) throws IOException;

  /**
   * Performs a filesystem-level seek to the given position in the underlying file.
   */
  protected abstract void seekOnFs(long pos) throws IOException;

  @Override
  public WALTrailer getWALTrailer() {
    return null;
  }
}