/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import java.io.OutputStream;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.hbase.HBaseClusterTestCase;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.log4j.Level;
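/**
 * Tests log rolling and the deletion of old log files as logs are rolled.
 */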
public class TestLogRolling extends HBaseClusterTestCase {
  private static final Log LOG = LogFactory.getLog(TestLogRolling.class);
  private HRegionServer server;
  private HLog log;
  private String tableName;
  private byte[] value;
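  // Verbose logging on the classes these tests exercise.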
  {
    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)HRegionServer.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)HRegion.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)HLog.LOG).getLogger().setLevel(Level.ALL);
  }
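  /**
   * Constructor: initializes the fixture fields and builds the value that
   * every put in these tests writes.
   * @throws Exception
   */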
  public TestLogRolling() throws Exception {
    super();
    try {
      this.server = null;
      this.log = null;
      this.tableName = null;
      this.value = null;

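      // Build a value of at least 1000 bytes by repeating the class name.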
      String className = this.getClass().getName();
      StringBuilder v = new StringBuilder(className);
      while (v.length() < 1000) {
        v.append(className);
      }
      value = Bytes.toBytes(v.toString());
    } catch (Exception e) {
      LOG.fatal("error in constructor", e);
      throw e;
    }
  }
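  // Override setUp so the configuration can be edited before it is handed
  // to the HDFS and HBase mini-cluster startup.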
  @Override
  protected void setUp() throws Exception {
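    /**** configuration for testLogRolling ****/
    // Force a region split after every 768KB of writes.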
    conf.setLong("hbase.hregion.max.filesize", 768L * 1024L);
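    // Roll the log after every 32 log entries.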
    conf.setInt("hbase.regionserver.maxlogentries", 32);
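    // For less frequently updated regions, flush after every 2 optional
    // flush periods.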
    conf.setInt("hbase.hregion.memstore.optionalflushcount", 2);
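    // Flush the memstore after every 8192 bytes.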
    conf.setInt("hbase.hregion.memstore.flush.size", 8192);
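    // Increase the amount of time between client retries.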
    conf.setLong("hbase.client.pause", 15 * 1000);
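    // Reduce thread wake frequency so that other threads can get
    // a chance to run.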
    conf.setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
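    /**** configuration for testLogRollOnDatanodeDeath ****/
    // Enable append support so the log writer syncs via a write pipeline.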
    conf.setBoolean("dfs.support.append", true);
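    // Lower the namenode and datanode heartbeat intervals so the namenode
    // quickly detects datanode failures.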
    conf.setInt("heartbeat.recheck.interval", 5000);
    conf.setInt("dfs.heartbeat.interval", 1);
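    // The namenode might still try to choose the recently-dead datanode
    // for a pipeline, so retry a new pipeline multiple times.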
    conf.setInt("dfs.client.block.write.retries", 30);

    super.setUp();
  }

  private void startAndWriteData() throws Exception {
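    // When the META table can be opened, the region servers are running.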
    new HTable(conf, HConstants.META_TABLE_NAME);
    this.server = cluster.getRegionServerThreads().get(0).getRegionServer();
    this.log = server.getLog();

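    // Create the test table and open it.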
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.createTable(desc);
    HTable table = new HTable(conf, tableName);
    for (int i = 1; i <= 256; i++) {    // 256 writes should cause 8 log rolls
      Put put = new Put(Bytes.toBytes("row" + String.format("%1$04d", i)));
      put.add(HConstants.CATALOG_FAMILY, null, value);
      table.put(put);
      if (i % 32 == 0) {
        // After every 32 writes sleep to let the log roller run.
        try {
          Thread.sleep(2000);
        } catch (InterruptedException e) {
          // continue
        }
      }
    }
  }
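  /**
   * Tests that old log files are cleaned up as logs roll.
   * @throws Exception
   */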
  public void testLogRolling() throws Exception {
    this.tableName = getName();
    try {
      startAndWriteData();
      LOG.info("after writing there are " + log.getNumLogFiles() + " log files");

      // Flush all regions so their edits are persisted and the old logs
      // become eligible for deletion.
      List<HRegion> regions =
        new ArrayList<HRegion>(server.getOnlineRegions());
      for (HRegion r: regions) {
        r.flushcache();
      }

      // Now roll the log.
      log.rollWriter();

      int count = log.getNumLogFiles();
      LOG.info("after flushing all regions and rolling logs there are " +
          log.getNumLogFiles() + " log files");
      assertTrue(("actual count: " + count), count <= 2);
    } catch (Exception e) {
      LOG.fatal("unexpected exception", e);
      throw e;
    }
  }

  void writeData(HTable table, int rownum) throws Exception {
    Put put = new Put(Bytes.toBytes("row" + String.format("%1$04d", rownum)));
    put.add(HConstants.CATALOG_FAMILY, null, value);
    table.put(put);

    // Sleep to let the log roller run if it needs to.
    try {
      Thread.sleep(2000);
    } catch (InterruptedException e) {
      // continue
    }
  }
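  /**
   * Tests that the log is rolled upon detecting the death of a datanode in
   * the write pipeline. Requires HDFS-826 (pipeline introspection) and
   * append support in the underlying HDFS.
   * @throws Exception
   */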
  public void testLogRollOnDatanodeDeath() throws Exception {
    assertTrue("This test requires HLog file replication.",
      fs.getDefaultReplication() > 1);

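    // When the META table can be opened, the region servers are running.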
    new HTable(conf, HConstants.META_TABLE_NAME);
    this.server = cluster.getRegionServer(0);
    this.log = server.getLog();

    assertTrue("Need HDFS-826 for this test", log.canGetCurReplicas());
    // Don't run this test without append support in the underlying HDFS.
    assertTrue("Need append support for this test",
      FSUtils.isAppendSupported(conf));

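    // Add a datanode so full replication can be restored after we kill one.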
    dfsCluster.startDataNodes(conf, 1, true, null, null);
    dfsCluster.waitActive();
    assertTrue(dfsCluster.getDataNodes().size() >=
      fs.getDefaultReplication() + 1);

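    // Create the test table and open it.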
    String tableName = getName();
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.createTable(desc);
    HTable table = new HTable(conf, tableName);
    table.setAutoFlush(true);

    long curTime = System.currentTimeMillis();
    long oldFilenum = log.getFilenum();
    assertTrue("Log should have a timestamp older than now",
      curTime > oldFilenum && oldFilenum != -1);

    // A normal write; the log should not roll.
    writeData(table, 1);
    assertTrue("The log shouldn't have rolled yet",
      oldFilenum == log.getFilenum());

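    // Kill a datanode in the pipeline to force a log roll on the next sync.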
    OutputStream stm = log.getOutputStream();
    Method getPipeline = null;
    for (Method m : stm.getClass().getDeclaredMethods()) {
      if (m.getName().endsWith("getPipeline")) {
        getPipeline = m;
        getPipeline.setAccessible(true);
        break;
      }
    }
    assertTrue("Need DFSOutputStream.getPipeline() for this test",
      getPipeline != null);
    Object repl = getPipeline.invoke(stm, new Object[] {});
    DatanodeInfo[] pipeline = (DatanodeInfo[]) repl;
    assertTrue(pipeline.length == fs.getDefaultReplication());
    DataNodeProperties dnprop = dfsCluster.stopDataNode(pipeline[0].getName());
    assertTrue(dnprop != null);

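    // This write should succeed, but it should also trigger a log roll.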
    writeData(table, 2);
    long newFilenum = log.getFilenum();
    assertTrue("Missing datanode should've triggered a log roll",
      newFilenum > oldFilenum && newFilenum > curTime);

    // Write some more data; the log should not roll again and the new log
    // file should be fully replicated.
    writeData(table, 3);
    assertTrue("The log should not roll again.",
      log.getFilenum() == newFilenum);
    assertTrue("New log file should have the default replication",
      log.getLogReplication() == fs.getDefaultReplication());
  }
}