package org.apache.hadoop.hbase.regionserver.wal;

import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

import static org.junit.Assert.assertTrue;
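
/**
 * Tests HLog rolling: that old log files are cleaned up once their edits have
 * been flushed, and that a datanode failure in the write pipeline forces a
 * log roll.
 */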
public class TestLogRolling {
  private static final Log LOG = LogFactory.getLog(TestLogRolling.class);
  private HRegionServer server;
  private HLog log;
  private String tableName;
  private byte[] value;
  private static FileSystem fs;
  private static MiniDFSCluster dfsCluster;
  private static HBaseAdmin admin;
  private static MiniHBaseCluster cluster;
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  // Verbose logging on the classes that are exercised by these tests.
  {
    ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) HRegionServer.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) HRegion.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) HLog.LOG).getLogger().setLevel(Level.ALL);
  }
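
  /**
   * Builds a cell value of at least 1000 bytes (the test class name repeated)
   * so that each put carries a sizable payload.
   */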
  public TestLogRolling() {
    super();
    this.server = null;
    this.log = null;
    this.tableName = null;
    this.value = null;

    // Repeat the class name until the value is at least 1000 characters.
    String className = this.getClass().getName();
    StringBuilder v = new StringBuilder(className);
    while (v.length() < 1000) {
      v.append(className);
    }
    value = Bytes.toBytes(v.toString());
  }
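
  // Bring up a mini cluster with two region servers, tuned so that regions
  // split, memstores flush, and logs roll frequently, and so that HDFS
  // notices datanode death quickly.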
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Settings for testLogRolling:
    // Force a region split after every 768KB of data.
    TEST_UTIL.getConfiguration().setLong("hbase.hregion.max.filesize",
        768L * 1024L);

    // Roll the log after every 32 writes.
    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.maxlogentries", 32);

    // Flush less frequently updated regions after every 2 optional flushes.
    TEST_UTIL.getConfiguration().setInt(
        "hbase.hregion.memstore.optionalflushcount", 2);

    // Flush the memstore after every 8192 bytes.
    TEST_UTIL.getConfiguration().setInt("hbase.hregion.memstore.flush.size",
        8192);

    // Increase the amount of time between client retries.
    TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 15 * 1000);

    // Reduce thread wake frequency so that other threads can get
    // a chance to run.
    TEST_UTIL.getConfiguration().setInt(HConstants.THREAD_WAKE_FREQUENCY,
        2 * 1000);

    // Settings for testLogRollOnDatanodeDeath:
    // Enable HDFS append support so log writes can sync to the pipeline.
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);

    // Lower the namenode and datanode heartbeat intervals so the namenode
    // quickly detects datanode failures.
    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);

    // The namenode may still choose the recently dead datanode for a
    // pipeline, so retry the block write enough times to get past it.
    TEST_UTIL.getConfiguration().setInt("dfs.client.block.write.retries", 30);

    TEST_UTIL.startMiniCluster(2);

    cluster = TEST_UTIL.getHBaseCluster();
    dfsCluster = TEST_UTIL.getDFSCluster();
    fs = TEST_UTIL.getTestFileSystem();
    admin = TEST_UTIL.getHBaseAdmin();
  }

  @AfterClass
  public static void tearDownAfterClass() throws IOException {
    TEST_UTIL.cleanupTestDir();
    TEST_UTIL.shutdownMiniCluster();
  }
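
  /**
   * Creates the test table and writes 256 rows, pausing after every 32 writes
   * so the log roller and the cache flusher get a chance to run.
   */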
  private void startAndWriteData() throws IOException {
    // When the META table can be opened, the region servers are running.
    new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);

    // Create the test table and open it.
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc);
    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);

    server = TEST_UTIL.getRSForFirstRegionInTable(Bytes.toBytes(tableName));
    this.log = server.getLog();
    for (int i = 1; i <= 256; i++) { // 256 writes / 32 per roll = 8 log rolls
      Put put = new Put(Bytes.toBytes("row" + String.format("%1$04d", i)));
      put.add(HConstants.CATALOG_FAMILY, null, value);
      table.put(put);
      if (i % 32 == 0) {
        // Sleep to let the log roller run.
        try {
          Thread.sleep(2000);
        } catch (InterruptedException e) {
          // continue
        }
      }
    }
  }
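
  /**
   * Tests that old log files are deleted: after flushing every region and
   * rolling the writer, at most two log files should remain.
   * @throws FailedLogCloseException
   * @throws IOException
   */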
  @Test
  public void testLogRolling() throws FailedLogCloseException, IOException {
    this.tableName = getName();
    startAndWriteData();
    LOG.info("after writing there are " + log.getNumLogFiles() + " log files");

    // Flush all regions so their log entries are no longer needed.
    List<HRegion> regions = new ArrayList<HRegion>(server.getOnlineRegions());
    for (HRegion r : regions) {
      r.flushcache();
    }

    // Now roll the log; obsolete log files should be removed.
    log.rollWriter();

    int count = log.getNumLogFiles();
    LOG.info("after flushing all regions and rolling logs there are " +
        count + " log files");
    assertTrue(("actual count: " + count), count <= 2);
  }

  private static String getName() {
    return "TestLogRolling";
  }
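
  /**
   * Writes a single row, then sleeps so the region server has time to react
   * to the write before the next one arrives.
   */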
  void writeData(HTable table, int rownum) throws IOException {
    Put put = new Put(Bytes.toBytes("row" + String.format("%1$04d", rownum)));
    put.add(HConstants.CATALOG_FAMILY, null, value);
    table.put(put);

    // Sleep to give the region server a chance to react to the write.
    try {
      Thread.sleep(2000);
    } catch (InterruptedException e) {
      // continue
    }
  }
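
  /**
   * Looks up the datanode pipeline of the HLog's current output stream via
   * reflection, since getPipeline() is not part of the public OutputStream
   * API.
   */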
  @SuppressWarnings("null")
  DatanodeInfo[] getPipeline(HLog log) throws IllegalArgumentException,
      IllegalAccessException, InvocationTargetException {
    OutputStream stm = log.getOutputStream();
    Method getPipeline = null;
    for (Method m : stm.getClass().getDeclaredMethods()) {
      if (m.getName().endsWith("getPipeline")) {
        getPipeline = m;
        getPipeline.setAccessible(true);
        break;
      }
    }

    assertTrue("Need DFSOutputStream.getPipeline() for this test",
        null != getPipeline);
    Object repl = getPipeline.invoke(stm, new Object[] {});
    return (DatanodeInfo[]) repl;
  }
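
  /**
   * Tests that the log is rolled when a datanode in the write pipeline dies:
   * the first write after the failure should succeed and land in a freshly
   * rolled log file with full replication. Requires HDFS-826 (current-replica
   * reporting) and append support.
   */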
  @Test
  public void testLogRollOnDatanodeDeath() throws IOException,
      InterruptedException, IllegalArgumentException, IllegalAccessException,
      InvocationTargetException {
    assertTrue("This test requires HLog file replication.",
        fs.getDefaultReplication() > 1);

    // When the META table can be opened, the region servers are running.
    new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);

    String tableName = getName();
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));

    if (admin.tableExists(tableName)) {
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }
    admin.createTable(desc);
    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);

    server = TEST_UTIL.getRSForFirstRegionInTable(Bytes.toBytes(tableName));
    this.log = server.getLog();

    assertTrue("Need HDFS-826 for this test", log.canGetCurReplicas());
    assertTrue("Need append support for this test",
        FSUtils.isAppendSupported(TEST_UTIL.getConfiguration()));

    // Add to the datanode count, to ensure proper replication when we kill 1.
    dfsCluster.startDataNodes(TEST_UTIL.getConfiguration(), 1, true, null,
        null);
    dfsCluster.waitActive();
    assertTrue(dfsCluster.getDataNodes().size() >=
        fs.getDefaultReplication() + 1);

    writeData(table, 2);

    table.setAutoFlush(true);

    long curTime = System.currentTimeMillis();
    long oldFilenum = log.getFilenum();
    assertTrue("Log should have a timestamp older than now",
        curTime > oldFilenum && oldFilenum != -1);
    assertTrue("The log shouldn't have rolled yet",
        oldFilenum == log.getFilenum());

    // Kill a datanode in the pipeline to force a log roll on the next sync.
    DatanodeInfo[] pipeline = getPipeline(log);
    assertTrue(pipeline.length == fs.getDefaultReplication());
    assertTrue(dfsCluster.stopDataNode(pipeline[0].getName()) != null);

    // Give the namenode time to notice the dead datanode.
    Thread.sleep(10000);

    // This write should succeed, but trigger a log roll.
    writeData(table, 2);
    long newFilenum = log.getFilenum();
    assertTrue("Missing datanode should've triggered a log roll",
        newFilenum > oldFilenum && newFilenum > curTime);

    // Write some more log data; this should use the new log file.
    writeData(table, 3);
    assertTrue("The log should not roll again.",
        log.getFilenum() == newFilenum);
    assertTrue("New log file should have the default replication",
        log.getLogReplication() == fs.getDefaultReplication());
  }
}