/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSHDFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(LargeTests.class)
public class TestHLog {
  private static final Log LOG = LogFactory.getLog(TestHLog.class);
  {
    // Up the log levels on HDFS and WAL internals so failures leave a useful trail.
    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
        .getLogger().setLevel(Level.ALL);
    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)HLog.LOG).getLogger().setLevel(Level.ALL);
  }

  private static Configuration conf;
  private static FileSystem fs;
  private static Path dir;
  private static MiniDFSCluster cluster;
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Path hbaseDir;
  private static Path oldLogDir;

  @Before
  public void setUp() throws Exception {
    // Clear out anything left under the DFS root by the previous test.
    FileStatus[] entries = fs.listStatus(new Path("/"));
    for (FileStatus fileStatus : entries) {
      fs.delete(fileStatus.getPath(), true);
    }
  }

  @After
  public void tearDown() throws Exception {
  }
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Make block sizes small so block-boundary cases are cheap to hit.
    TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
    // Append support is needed for testAppendClose below.
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
    // Quicker heartbeats so dead datanodes are noticed fast.
    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.socket.timeout", 5000);
    // Faster failover when we shut the cluster down mid-test.
    TEST_UTIL.getConfiguration().setInt("ipc.client.connect.max.retries", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.client.block.recovery.retries", 1);
    TEST_UTIL.getConfiguration().setInt("ipc.client.connection.maxidletime", 500);
    TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
        SampleRegionWALObserver.class.getName());
    TEST_UTIL.startMiniDFSCluster(3);

    conf = TEST_UTIL.getConfiguration();
    cluster = TEST_UTIL.getDFSCluster();
    fs = cluster.getFileSystem();

    hbaseDir = TEST_UTIL.createRootDir();
    oldLogDir = new Path(hbaseDir, ".oldlogs");
    dir = new Path(hbaseDir, getName());
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  private static String getName() {
    // All tests share this name, and hence the same directories.
    return "TestHLog";
  }

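  /**
   * Write to the log with three concurrent threads and verify the entries are
   * read back in write order; delegates to {@link HLogPerformanceEvaluation}
   * run with its -verify flag.
   */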
  @Test
  public void testMaintainOrderWithConcurrentWrites() throws Exception {
    int errCode = HLogPerformanceEvaluation.innerMain(
        new String[] {"-threads", "3", "-verify", "-iterations", "3000"});
    assertEquals(0, errCode);
  }

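  /**
   * Write edits for three regions across three log rolls, split the logs, and
   * verify each region's split file holds all of that region's edits in
   * sequence-id order.
   */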
  @Test
  public void testSplit() throws IOException {
    final byte [] tableName = Bytes.toBytes(getName());
    final byte [] rowName = tableName;
    Path logdir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
    HLog log = new HLog(fs, logdir, oldLogDir, conf);
    final int howmany = 3;
    HRegionInfo[] infos = new HRegionInfo[3];
    Path tabledir = new Path(hbaseDir, getName());
    fs.mkdirs(tabledir);
    for (int i = 0; i < howmany; i++) {
      infos[i] = new HRegionInfo(tableName,
          Bytes.toBytes("" + i), Bytes.toBytes("" + (i + 1)), false);
      fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
      LOG.info("allo " + new Path(tabledir, infos[i].getEncodedName()).toString());
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("column"));

    // Add edits for all three regions, then roll the log; repeat three times.
    try {
      for (int ii = 0; ii < howmany; ii++) {
        for (int i = 0; i < howmany; i++) {
          for (int j = 0; j < howmany; j++) {
            WALEdit edit = new WALEdit();
            byte [] family = Bytes.toBytes("column");
            byte [] qualifier = Bytes.toBytes(Integer.toString(j));
            byte [] column = Bytes.toBytes("column:" + Integer.toString(j));
            edit.add(new KeyValue(rowName, family, qualifier,
                System.currentTimeMillis(), column));
            LOG.info("Region " + i + ": " + edit);
            log.append(infos[i], tableName, edit,
                System.currentTimeMillis(), htd);
          }
        }
        log.rollWriter();
      }
      log.close();
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, logdir, oldLogDir, fs);
      List<Path> splits = logSplitter.splitLog();
      verifySplits(splits, howmany);
      log = null;
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
    }
  }

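  /**
   * Test new HDFS-265 style sync: edits appended and synced should be visible
   * to a Reader opened on the still-live log file.
   */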
  @Test
  public void Broken_testSync() throws Exception {
    byte [] bytes = Bytes.toBytes(getName());
    // First verify that plain stream write/sync/read works.
    Path p = new Path(dir, getName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(bytes);
    Method syncMethod = null;
    try {
      syncMethod = out.getClass().getMethod("hflush", new Class<?>[]{});
    } catch (NoSuchMethodException e) {
      try {
        syncMethod = out.getClass().getMethod("sync", new Class<?>[]{});
      } catch (NoSuchMethodException ex) {
        fail("This version of Hadoop supports neither Syncable.sync() " +
            "nor Syncable.hflush().");
      }
    }
    syncMethod.invoke(out, new Object[]{});
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte [] buffer = new byte[1024];
    int read = in.read(buffer);
    assertEquals(bytes.length, read);
    out.close();
    in.close();
    Path subdir = new Path(dir, "hlogdir");
    HLog wal = new HLog(fs, subdir, oldLogDir, conf);
    final int total = 20;
    HLog.Reader reader = null;

    try {
      HRegionInfo info = new HRegionInfo(bytes,
          null, null, false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor(bytes));

      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
        wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
      }
      // Now call sync and try reading. Opening a Reader before the sync
      // would just give EOF.
      wal.sync();
      // Open a Reader on the live log file.
      Path walPath = wal.computeFilename();
      reader = HLog.getReader(fs, walPath, conf);
      int count = 0;
      HLog.Entry entry = new HLog.Entry();
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total, count);
      reader.close();
      // Check that opening a Reader works on a file that has had a sync done
      // on it; edits appended after the sync may or may not be visible yet.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
        wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
      }
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertTrue(count >= total);
      reader.close();
      // If I sync, should see double the edits.
      wal.sync();
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 2, count);
      // Now ensure things work when we go over a block boundary (blocksize is
      // 1MB in this test), especially that we return a good length on the file.
      final byte [] value = new byte[1025 * 1024];  // Make a 1M value.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), bytes, value));
        wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
      }
      // Now I should have written out lots of blocks. Sync then read.
      wal.sync();
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
      // Close the log and ensure a Reader gets the same count.
      wal.close();
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
    } finally {
      if (wal != null) wal.closeAndDelete();
      if (reader != null) reader.close();
    }
  }

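  /**
   * Given a map of region name to highest sequence id, verify that
   * findMemstoresWithEditsEqualOrOlderThan returns exactly the regions whose
   * sequence id is less than or equal to the cutoff.
   */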
  @Test
  public void testFindMemstoresWithEditsEqualOrOlderThan() throws IOException {
    Map<byte [], Long> regionsToSeqids = new HashMap<byte [], Long>();
    for (int i = 0; i < 10; i++) {
      Long l = Long.valueOf(i);
      regionsToSeqids.put(l.toString().getBytes(), l);
    }
    byte [][] regions =
        HLog.findMemstoresWithEditsEqualOrOlderThan(1, regionsToSeqids);
    assertEquals(2, regions.length);
    assertTrue(Bytes.equals(regions[0], "0".getBytes()) ||
        Bytes.equals(regions[0], "1".getBytes()));
    regions = HLog.findMemstoresWithEditsEqualOrOlderThan(3, regionsToSeqids);
    int count = 4;
    assertEquals(count, regions.length);
    // Regions are not returned in any particular order.
    for (int i = 0; i < count; i++) {
      assertTrue(Bytes.equals(regions[i], "0".getBytes()) ||
          Bytes.equals(regions[i], "1".getBytes()) ||
          Bytes.equals(regions[i], "2".getBytes()) ||
          Bytes.equals(regions[i], "3".getBytes()));
    }
  }

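  /**
   * Check each split file: every entry belongs to a single region, sequence
   * ids strictly increase, and the expected number of edits is present.
   */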
  private void verifySplits(List<Path> splits, final int howmany)
      throws IOException {
    assertEquals(howmany, splits.size());
    for (int i = 0; i < splits.size(); i++) {
      LOG.info("Verifying=" + splits.get(i));
      HLog.Reader reader = HLog.getReader(fs, splits.get(i), conf);
      try {
        int count = 0;
        String previousRegion = null;
        long seqno = -1;
        HLog.Entry entry = new HLog.Entry();
        while ((entry = reader.next(entry)) != null) {
          HLogKey key = entry.getKey();
          String region = Bytes.toString(key.getEncodedRegionName());
          // Assert that all edits are for the same region.
          if (previousRegion != null) {
            assertEquals(previousRegion, region);
          }
          LOG.info("oldseqno=" + seqno + ", newseqno=" + key.getLogSeqNum());
          assertTrue(seqno < key.getLogSeqNum());
          seqno = key.getLogSeqNum();
          previousRegion = region;
          count++;
        }
        assertEquals(howmany * howmany, count);
      } finally {
        reader.close();
      }
    }
  }

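  /**
   * Runs the append-and-close scenario with both values of
   * triggerDirectAppend so both recoverFileLease() code paths are covered.
   */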
  @Test
  public void testAppendClose() throws Exception {
    testAppendClose(true);
    testAppendClose(false);
  }

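  /**
   * Write edits to a WAL, kill the DFS cluster while the log is still open,
   * bring a new cluster up on the same port, recover the file lease, and
   * verify all the edits can be read back.
   */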
  public void testAppendClose(final boolean triggerDirectAppend) throws Exception {
    byte [] tableName = Bytes.toBytes(getName());
    HRegionInfo regioninfo = new HRegionInfo(tableName,
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
    Path subdir = new Path(dir, "hlogdir" + triggerDirectAppend);
    Path archdir = new Path(dir, "hlogdir_archive");
    HLog wal = new HLog(fs, subdir, archdir, conf);
    final int total = 20;

    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor(tableName));

    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName));
      wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd);
    }
    // Now call sync to send the data to the datanodes.
    wal.sync();
    int namenodePort = cluster.getNameNodePort();
    final Path walPath = wal.computeFilename();

    // Put the namenode in safe mode and kill the cluster while the WAL is
    // still open, simulating a crash with an unclosed log.
    try {
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
      dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
      cluster.shutdown();
      try {
        // wal.close() may throw since the cluster is down, but call it anyway
        // so the log's internal threads get stopped.
        wal.close();
      } catch (IOException e) {
        LOG.info(e);
      }
      fs.close(); // Close the FileSystem handle pointing at the dead cluster.
      LOG.info("STOPPED first instance of the cluster");
    } finally {
      // Restart the cluster on the same port.
      while (cluster.isClusterUp()) {
        LOG.error("Waiting for cluster to go down");
        Thread.sleep(1000);
      }
      // Brief pause before bringing up a new cluster on the same port.
      Thread.sleep(2000);
      cluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
      TEST_UTIL.setDFSCluster(cluster);
      cluster.waitActive();
      fs = cluster.getFileSystem();
      LOG.info("START second instance.");
    }

    // Set the lease period to one second so the namenode triggers lease
    // recovery promptly on the append/recover request.
    Method setLeasePeriod = cluster.getClass()
        .getDeclaredMethod("setLeasePeriod", new Class[]{Long.TYPE, Long.TYPE});
    setLeasePeriod.setAccessible(true);
    setLeasePeriod.invoke(cluster,
        new Object[]{new Long(1000), new Long(1000)});
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      LOG.info(e);
    }

    // Now try recovering the log, like the HMaster would do.
    final FileSystem recoveredFs = fs;
    final Configuration rlConf = conf;

    class RecoverLogThread extends Thread {
      public Exception exception = null;
      @Override
      public void run() {
        try {
          rlConf.setBoolean(FSHDFSUtils.TEST_TRIGGER_DFS_APPEND, triggerDirectAppend);
          FSUtils.getInstance(fs, rlConf)
              .recoverFileLease(recoveredFs, walPath, rlConf);
        } catch (IOException e) {
          exception = e;
        }
      }
    }

    RecoverLogThread t = new RecoverLogThread();
    t.start();
    // Timeout after 60 sec. Without the required HDFS support this would
    // otherwise hang forever.
    t.join(60 * 1000);
    if (t.isAlive()) {
      t.interrupt();
      throw new Exception("Timed out waiting for HLog.recoverLog()");
    }

    if (t.exception != null)
      throw t.exception;

    // Make sure all the content can be read back.
    HLog.Reader reader = HLog.getReader(fs, walPath, conf);
    int count = 0;
    HLog.Entry entry = new HLog.Entry();
    while (reader.next(entry) != null) {
      count++;
      assertTrue("Should be one KeyValue per WALEdit",
          entry.getEdit().getKeyValues().size() == 1);
    }
    assertEquals(total, count);
    reader.close();

    // Reset the lease period.
    setLeasePeriod.invoke(cluster, new Object[]{new Long(60000), new Long(3600000)});
  }

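  /**
   * Append a batch of columns as a single edit, complete a cache flush, and
   * verify a reader sees the data edit followed by the flush marker.
   */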
  @Test
  public void testEditAdd() throws IOException {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog.Reader reader = null;
    HLog log = null;
    try {
      log = new HLog(fs, dir, oldLogDir, conf);
      // Write columns named 0, 1, 2, etc., each with a single-byte value.
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo info = new HRegionInfo(tableName,
          row, Bytes.toBytes(Bytes.toString(row) + "1"), false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      log.append(info, tableName, cols, System.currentTimeMillis(), htd);
      long logSeqId = log.startCacheFlush(info.getEncodedNameAsBytes());
      log.completeCacheFlush(info.getEncodedNameAsBytes(), tableName, logSeqId,
          info.isMetaRegion());
      log.close();
      Path filename = log.computeFilename();
      log = null;
      // Now open a reader on the log and assert the append worked. All
      // columns went in as a single edit, hence the single iteration below.
      reader = HLog.getReader(fs, filename, conf);
      for (int i = 0; i < 1; i++) {
        HLog.Entry entry = reader.next(null);
        if (entry == null) break;
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(row, kv.getRow()));
        assertEquals((byte)(i + '0'), kv.getValue()[0]);
        System.out.println(key + " " + val);
      }
      // The rest of the log should be the cache-flush marker edit.
      HLog.Entry entry = null;
      while ((entry = reader.next(null)) != null) {
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(HLog.METAROW, kv.getRow()));
        assertTrue(Bytes.equals(HLog.METAFAMILY, kv.getFamily()));
        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
            val.getKeyValues().get(0).getValue()));
        System.out.println(key + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

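  /**
   * Same flow as testEditAdd, but reads entries back with the no-argument
   * reader.next() and checks every KeyValue in each returned entry.
   */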
  @Test
  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      // Write columns named 0, 1, 2, etc., each with a single-byte value.
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      long logSeqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, logSeqId, false);
      log.close();
      Path filename = log.computeFilename();
      log = null;
      // Now open a reader on the log and assert the append worked.
      reader = HLog.getReader(fs, filename, conf);
      HLog.Entry entry = reader.next();
      assertEquals(COL_COUNT, entry.getEdit().size());
      int idx = 0;
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
            entry.getKey().getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte)(idx + '0'), val.getValue()[0]);
        System.out.println(entry.getKey() + " " + val);
        idx++;
      }

      // Get the next entry: the cache-flush marker edit.
      entry = reader.next();
      assertEquals(1, entry.getEdit().size());
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
            entry.getKey().getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
        assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
        assertTrue(Bytes.equals(HLog.METAFAMILY, val.getFamily()));
        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
            val.getValue()));
        System.out.println(entry.getKey() + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

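  /**
   * A registered WALActionsListener is notified once per append and stops
   * being notified once it is unregistered.
   */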
  @Test
  public void testVisitors() throws Exception {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      DumbWALActionsListener visitor = new DumbWALActionsListener();
      log.registerWALActionsListener(visitor);
      long timestamp = System.currentTimeMillis();
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      for (int i = 0; i < COL_COUNT; i++) {
        WALEdit cols = new WALEdit();
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[]{(byte) (i + '0')}));
        log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      }
      assertEquals(COL_COUNT, visitor.increments);
      // After unregistering, further appends must not increment the count.
      log.unregisterWALActionsListener(visitor);
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(11)),
          timestamp, new byte[]{(byte) (11 + '0')}));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      assertEquals(COL_COUNT, visitor.increments);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

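  /**
   * Roll the log after writing edits from one or two regions and verify that
   * log files are only cleaned up once every region with edits in them has
   * flushed.
   */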
  @Test
  public void testLogCleaning() throws Exception {
    LOG.info("testLogCleaning");
    final byte [] tableName = Bytes.toBytes("testLogCleaning");
    final byte [] tableName2 = Bytes.toBytes("testLogCleaning2");

    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HRegionInfo hri2 = new HRegionInfo(tableName2,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

      // Add a single edit and make sure that rolling won't remove the file,
      // since the region hasn't been flushed.
      addEdits(log, hri, tableName, 1);
      log.rollWriter();
      assertEquals(1, log.getNumLogFiles());

      // See if there's anything wrong with more than one edit.
      addEdits(log, hri, tableName, 2);
      log.rollWriter();
      assertEquals(2, log.getNumLogFiles());

      // Now mix edits from two regions, still without flushing.
      addEdits(log, hri, tableName, 1);
      addEdits(log, hri2, tableName2, 1);
      addEdits(log, hri, tableName, 1);
      addEdits(log, hri2, tableName2, 1);
      log.rollWriter();
      assertEquals(3, log.getNumLogFiles());

      // Flush the first region: the first two files hold only its edits, so
      // rolling should archive them.
      long seqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, seqId, false);
      log.rollWriter();
      assertEquals(2, log.getNumLogFiles());

      // Flush the second region, which removes all the remaining log files
      // since the oldest was completely flushed and the others only contain
      // flush information.
      seqId = log.startCacheFlush(hri2.getEncodedNameAsBytes());
      log.completeCacheFlush(hri2.getEncodedNameAsBytes(), tableName2, seqId, false);
      log.rollWriter();
      assertEquals(0, log.getNumLogFiles());
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

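  /**
   * The SampleRegionWALObserver configured in setUpBeforeClass should be
   * loaded on the WAL's coprocessor host.
   */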
  @Test
  public void testWALCoprocessorLoaded() throws Exception {
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      WALCoprocessorHost host = log.getCoprocessorHost();
      Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
      assertNotNull(c);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

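  /** Appends the given number of single-KeyValue edits for the given region. */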
  private void addEdits(HLog log, HRegionInfo hri, byte [] tableName,
      int times) throws IOException {
    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor("row"));

    final byte [] row = Bytes.toBytes("row");
    for (int i = 0; i < times; i++) {
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, row, row, timestamp, row));
      log.append(hri, tableName, cols, timestamp, htd);
    }
  }

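  /** Listener that just counts how many times it is notified of an append. */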
  static class DumbWALActionsListener implements WALActionsListener {
    int increments = 0;

    @Override
    public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
        WALEdit logEdit) {
      increments++;
    }

    @Override
    public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
      // Not interested in the descriptor; just count the visit.
      increments++;
    }

    @Override
    public void preLogRoll(Path oldFile, Path newFile) {
      // Not interested.
    }

    @Override
    public void postLogRoll(Path oldFile, Path newFile) {
      // Not interested.
    }

    @Override
    public void preLogArchive(Path oldFile, Path newFile) {
      // Not interested.
    }

    @Override
    public void postLogArchive(Path oldFile, Path newFile) {
      // Not interested.
    }

    @Override
    public void logRollRequested() {
      // Not interested.
    }

    @Override
    public void logCloseRequested() {
      // Not interested.
    }
  }

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
      new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}