/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.*;

import java.io.IOException;
import java.lang.reflect.Method;
import java.net.BindException;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(LargeTests.class)
@SuppressWarnings("deprecation")
public class TestHLog {
  private static final Log LOG = LogFactory.getLog(TestHLog.class);
  {
    // Turn up logging on the HDFS and WAL classes this test exercises.
    ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
        .getLogger().setLevel(Level.ALL);
    ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) HLog.LOG).getLogger().setLevel(Level.ALL);
  }

  private static Configuration conf;
  private static FileSystem fs;
  private static Path dir;
  private static MiniDFSCluster cluster;
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Path hbaseDir;
  private static Path oldLogDir;

  @Before
  public void setUp() throws Exception {
    // Wipe the MiniDFSCluster root between tests so runs don't interfere.
    FileStatus[] entries = fs.listStatus(new Path("/"));
    for (FileStatus entry : entries) {
      fs.delete(entry.getPath(), true);
    }
  }

  @After
  public void tearDown() throws Exception {
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Make block sizes small so logs span blocks quickly.
    TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
    // Append/sync support, needed by testAppendClose().
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
    // Quicker heartbeats and timeouts so datanode death is noticed fast.
    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.socket.timeout", 5000);
    // Fail fast on connection and block-recovery retries.
    TEST_UTIL.getConfiguration().setInt("ipc.client.connect.max.retries", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.client.block.recovery.retries", 1);
    TEST_UTIL.getConfiguration().setInt("ipc.client.connection.maxidletime", 500);
    TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
        SampleRegionWALObserver.class.getName());
    TEST_UTIL.startMiniDFSCluster(3);

    conf = TEST_UTIL.getConfiguration();
    cluster = TEST_UTIL.getDFSCluster();
    fs = cluster.getFileSystem();

    hbaseDir = TEST_UTIL.createRootDir();
    oldLogDir = new Path(hbaseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    dir = new Path(hbaseDir, getName());
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  private static String getName() {
    return "TestHLog";
  }
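
  /**
   * Write edits from several concurrent threads and verify they are written
   * to the log in order, using HLogPerformanceEvaluation's -verify mode.
   */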
  @Test
  public void testMaintainOrderWithConcurrentWrites() throws Exception {
    // Run the evaluation tool with three writer threads, 3000 edits each,
    // and let its -verify option check that edits come back in order.
    int errCode = HLogPerformanceEvaluation.innerMain(
        new String[] {"-threads", "3", "-verify", "-iterations", "3000"});
    assertEquals(0, errCode);
  }
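
  /**
   * Write edits for three regions across three log rolls, then split the log
   * directory and verify the expected per-region split files come out.
   */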
  @Test
  public void testSplit() throws IOException {
    final TableName tableName = TableName.valueOf(getName());
    final byte[] rowName = tableName.getName();
    Path logdir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
    HLog log = HLogFactory.createHLog(fs, hbaseDir,
        HConstants.HREGION_LOGDIR_NAME, conf);
    final int howmany = 3;
    HRegionInfo[] infos = new HRegionInfo[howmany];
    Path tabledir = FSUtils.getTableDir(hbaseDir, tableName);
    fs.mkdirs(tabledir);
    for (int i = 0; i < howmany; i++) {
      infos[i] = new HRegionInfo(tableName,
          Bytes.toBytes("" + i), Bytes.toBytes("" + (i + 1)), false);
      fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
      LOG.info("allo " + new Path(tabledir, infos[i].getEncodedName()).toString());
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("column"));

    // Add edits for the three regions, rolling the log after each batch.
    try {
      for (int ii = 0; ii < howmany; ii++) {
        for (int i = 0; i < howmany; i++) {
          for (int j = 0; j < howmany; j++) {
            WALEdit edit = new WALEdit();
            byte[] family = Bytes.toBytes("column");
            byte[] qualifier = Bytes.toBytes(Integer.toString(j));
            byte[] value = Bytes.toBytes("column:" + Integer.toString(j));
            edit.add(new KeyValue(rowName, family, qualifier,
                System.currentTimeMillis(), value));
            LOG.info("Region " + i + ": " + edit);
            log.append(infos[i], tableName, edit,
                System.currentTimeMillis(), htd);
          }
        }
        log.rollWriter();
      }
      log.close();
      List<Path> splits = HLogSplitter.split(hbaseDir, logdir, oldLogDir, fs, conf);
      verifySplits(splits, howmany);
      log = null;
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
    }
  }
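
  /**
   * Verifies that edits appended to an open WAL become visible to a new
   * reader once sync() runs, including edits larger than the block size.
   */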
  @Test
  public void Broken_testSync() throws Exception {
    TableName tableName = TableName.valueOf(getName());
    // First verify that writing and syncing a plain stream works.
    Path p = new Path(dir, getName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(tableName.getName());
    Method syncMethod = null;
    try {
      syncMethod = out.getClass().getMethod("hflush", new Class<?>[]{});
    } catch (NoSuchMethodException e) {
      try {
        syncMethod = out.getClass().getMethod("sync", new Class<?>[]{});
      } catch (NoSuchMethodException ex) {
        fail("This version of Hadoop supports neither Syncable.sync() " +
            "nor Syncable.hflush().");
      }
    }
    syncMethod.invoke(out, new Object[]{});
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte[] buffer = new byte[1024];
    int read = in.read(buffer);
    assertEquals(tableName.getName().length, read);
    out.close();
    in.close();

    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir", conf);

    final int total = 20;
    HLog.Reader reader = null;

    try {
      HRegionInfo info = new HRegionInfo(tableName, null, null, false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor(tableName.getName()));

      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        wal.append(info, tableName, kvs, System.currentTimeMillis(), htd);
      }
      // Sync, then open a reader: all appended edits should be visible.
      wal.sync();

      Path walPath = ((FSHLog) wal).computeFilename();
      reader = HLogFactory.createReader(fs, walPath, conf);
      int count = 0;
      HLog.Entry entry = new HLog.Entry();
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total, count);
      reader.close();

      // Append more edits without syncing; a new reader must at least see
      // everything written before the earlier sync.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        wal.append(info, tableName, kvs, System.currentTimeMillis(), htd);
      }
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertTrue(count >= total);
      reader.close();

      // After a sync, double the edits should be visible.
      wal.sync();
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 2, count);
      reader.close();

      // Now cross a block boundary: values just over the 1MB block size.
      final byte[] value = new byte[1025 * 1024];
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), value));
        wal.append(info, tableName, kvs, System.currentTimeMillis(), htd);
      }
      wal.sync();
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();

      // Close the log and ensure a reader still sees the right count.
      wal.close();
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
    } finally {
      if (wal != null) wal.closeAndDelete();
      if (reader != null) reader.close();
    }
  }
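
  /**
   * Checks that findMemstoresWithEditsEqualOrOlderThan returns exactly the
   * regions whose highest sequence id is at or below the given id.
   */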
  @Test
  public void testFindMemstoresWithEditsEqualOrOlderThan() throws IOException {
    Map<byte[], Long> regionsToSeqids = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
    for (int i = 0; i < 10; i++) {
      Long l = Long.valueOf(i);
      regionsToSeqids.put(l.toString().getBytes(), l);
    }
    byte[][] regions =
        FSHLog.findMemstoresWithEditsEqualOrOlderThan(1, regionsToSeqids);
    assertEquals(2, regions.length);
    assertTrue(Bytes.equals(regions[0], "0".getBytes()) ||
        Bytes.equals(regions[0], "1".getBytes()));
    regions = FSHLog.findMemstoresWithEditsEqualOrOlderThan(3, regionsToSeqids);
    int count = 4;
    assertEquals(count, regions.length);
    for (int i = 0; i < count; i++) {
      assertTrue(Bytes.equals(regions[i], "0".getBytes()) ||
          Bytes.equals(regions[i], "1".getBytes()) ||
          Bytes.equals(regions[i], "2".getBytes()) ||
          Bytes.equals(regions[i], "3".getBytes()));
    }
  }
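
  /**
   * Asserts the expected number of split files and that each file's entries
   * belong to a single region with strictly increasing sequence ids.
   */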
  private void verifySplits(List<Path> splits, final int howmany)
      throws IOException {
    assertEquals(howmany * howmany, splits.size());
    for (int i = 0; i < splits.size(); i++) {
      LOG.info("Verifying=" + splits.get(i));
      HLog.Reader reader = HLogFactory.createReader(fs, splits.get(i), conf);
      try {
        int count = 0;
        String previousRegion = null;
        long seqno = -1;
        HLog.Entry entry = new HLog.Entry();
        while ((entry = reader.next(entry)) != null) {
          HLogKey key = entry.getKey();
          String region = Bytes.toString(key.getEncodedRegionName());
          if (previousRegion != null) {
            assertEquals(previousRegion, region);
          }
          LOG.info("oldseqno=" + seqno + ", newseqno=" + key.getLogSeqNum());
          assertTrue(seqno < key.getLogSeqNum());
          seqno = key.getLogSeqNum();
          previousRegion = region;
          count++;
        }
        assertEquals(howmany, count);
      } finally {
        reader.close();
      }
    }
  }
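
  /**
   * Simulates a region server crash: writes edits, shuts the DFS cluster
   * down without closing the WAL cleanly, restarts DFS on the same port,
   * recovers the file lease, and verifies every edit can be read back.
   */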
  @Test(timeout = 300000)
  public void testAppendClose() throws Exception {
    TableName tableName = TableName.valueOf(getName());
    HRegionInfo regioninfo = new HRegionInfo(tableName,
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);

    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir",
        "hlogdir_archive", conf);
    final int total = 20;

    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor(tableName.getName()));

    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd);
    }
    // Sync so the edits reach the datanodes before the "crash".
    wal.sync();
    int namenodePort = cluster.getNameNodePort();
    final Path walPath = ((FSHLog) wal).computeFilename();

    // Stop the cluster while the WAL is still open, simulating a crash.
    try {
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
      dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
      TEST_UTIL.shutdownMiniDFSCluster();
      try {
        // wal.close() will throw since the cluster is down, but call it
        // anyway so its internal threads are stopped.
        wal.close();
      } catch (IOException e) {
        LOG.info(e);
      }
      fs.close();
      LOG.info("STOPPED first instance of the cluster");
    } finally {
      // Make sure the cluster is fully down, then bring up a new instance
      // on the same namenode port so walPath stays valid.
      while (cluster.isClusterUp()) {
        LOG.error("Waiting for cluster to go down");
        Thread.sleep(1000);
      }
      assertFalse(cluster.isClusterUp());
      cluster = null;
      for (int i = 0; i < 100; i++) {
        try {
          cluster = TEST_UTIL.startMiniDFSClusterForTestHLog(namenodePort);
          break;
        } catch (BindException e) {
          LOG.info("Sleeping. BindException bringing up new cluster");
          Threads.sleep(1000);
        }
      }
      cluster.waitActive();
      fs = cluster.getFileSystem();
      LOG.info("STARTED second instance.");
    }

    // Shorten the lease period to one second so the namenode expires the
    // dead writer's lease quickly.
    Method setLeasePeriod = cluster.getClass()
        .getDeclaredMethod("setLeasePeriod", new Class[]{Long.TYPE, Long.TYPE});
    setLeasePeriod.setAccessible(true);
    setLeasePeriod.invoke(cluster, 1000L, 1000L);
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      LOG.info(e);
    }

    // Now recover the lease on the WAL, as log splitting would.
    final FileSystem recoveredFs = fs;
    final Configuration rlConf = conf;

    class RecoverLogThread extends Thread {
      public Exception exception = null;
      public void run() {
        try {
          FSUtils.getInstance(fs, rlConf)
              .recoverFileLease(recoveredFs, walPath, rlConf, null);
        } catch (IOException e) {
          exception = e;
        }
      }
    }

    RecoverLogThread t = new RecoverLogThread();
    t.start();
    // Timeout after 60 seconds rather than hanging forever.
    t.join(60 * 1000);
    if (t.isAlive()) {
      t.interrupt();
      throw new Exception("Timed out waiting for HLog.recoverLog()");
    }

    if (t.exception != null)
      throw t.exception;

    // Make sure all the content can be read back.
    HLog.Reader reader = HLogFactory.createReader(fs, walPath, conf);
    int count = 0;
    HLog.Entry entry = new HLog.Entry();
    while (reader.next(entry) != null) {
      count++;
      assertTrue("Should be one KeyValue per WALEdit",
          entry.getEdit().getKeyValues().size() == 1);
    }
    assertEquals(total, count);
    reader.close();

    // Restore a normal lease period.
    setLeasePeriod.invoke(cluster, 60000L, 3600000L);
  }
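
  /**
   * Tests that we can write out an edit, close the log, and then read it
   * back in again.
   */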
  @Test
  public void testEditAdd() throws IOException {
    final int COL_COUNT = 10;
    final TableName tableName = TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    HLog.Reader reader = null;
    HLog log = null;
    try {
      log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);

      // Write columns named 0, 1, 2, ... with matching single-byte values.
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[] { (byte) (i + '0') }));
      }
      HRegionInfo info = new HRegionInfo(tableName,
          row, Bytes.toBytes(Bytes.toString(row) + "1"), false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      log.append(info, tableName, cols, System.currentTimeMillis(), htd);
      log.startCacheFlush(info.getEncodedNameAsBytes());
      log.completeCacheFlush(info.getEncodedNameAsBytes());
      log.close();
      Path filename = ((FSHLog) log).computeFilename();
      log = null;
      // Now open a reader on the log and assert the append worked. All the
      // columns went in as a single WALEdit, so only one entry comes back;
      // hence the loop bound of 1.
      reader = HLogFactory.createReader(fs, filename, conf);
      for (int i = 0; i < 1; i++) {
        HLog.Entry entry = reader.next(null);
        if (entry == null) break;
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
        assertTrue(tableName.equals(key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(row, kv.getRow()));
        assertEquals((byte) (i + '0'), kv.getValue()[0]);
        System.out.println(key + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }
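
  /**
   * Like testEditAdd, but reads the single appended entry back via
   * reader.next() and checks every KeyValue in it.
   */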
  @Test
  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final TableName tableName = TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    try {
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[] { (byte) (i + '0') }));
      }
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes());
      log.close();
      Path filename = ((FSHLog) log).computeFilename();
      log = null;
      reader = HLogFactory.createReader(fs, filename, conf);
      HLog.Entry entry = reader.next();
      assertEquals(COL_COUNT, entry.getEdit().size());
      int idx = 0;
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
            entry.getKey().getEncodedRegionName()));
        assertTrue(tableName.equals(entry.getKey().getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte) (idx + '0'), val.getValue()[0]);
        System.out.println(entry.getKey() + " " + val);
        idx++;
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }
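
  /**
   * A registered WALActionsListener must be notified once per append, and
   * must stop receiving notifications after it is unregistered.
   */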
  @Test
  public void testVisitors() throws Exception {
    final int COL_COUNT = 10;
    final TableName tableName = TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    try {
      DumbWALActionsListener visitor = new DumbWALActionsListener();
      log.registerWALActionsListener(visitor);
      long timestamp = System.currentTimeMillis();
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      for (int i = 0; i < COL_COUNT; i++) {
        WALEdit cols = new WALEdit();
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[]{(byte) (i + '0')}));
        log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      }
      assertEquals(COL_COUNT, visitor.increments);
      log.unregisterWALActionsListener(visitor);
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(11)),
          timestamp, new byte[]{(byte) (11 + '0')}));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      assertEquals(COL_COUNT, visitor.increments);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }
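
  /**
   * Rolled log files should only be cleaned up once every region with edits
   * in them has flushed those edits.
   */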
  @Test
  public void testLogCleaning() throws Exception {
    LOG.info("testLogCleaning");
    final TableName tableName = TableName.valueOf("testLogCleaning");
    final TableName tableName2 = TableName.valueOf("testLogCleaning2");

    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HRegionInfo hri2 = new HRegionInfo(tableName2,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

      // Add a single edit and make sure rolling won't remove the file:
      // the region still has an unflushed edit in it.
      addEdits(log, hri, tableName, 1);
      log.rollWriter();
      assertEquals(1, ((FSHLog) log).getNumLogFiles());

      // Roll again with more than one edit outstanding.
      addEdits(log, hri, tableName, 2);
      log.rollWriter();
      assertEquals(2, ((FSHLog) log).getNumLogFiles());

      // Now mix edits from two regions; still nothing has flushed, so all
      // three rolled files must be retained.
      addEdits(log, hri, tableName, 1);
      addEdits(log, hri2, tableName2, 1);
      addEdits(log, hri, tableName, 1);
      addEdits(log, hri2, tableName2, 1);
      log.rollWriter();
      assertEquals(3, ((FSHLog) log).getNumLogFiles());

      // Flush the first region: files holding only its edits can now be
      // archived. Append something first so the roll is not a no-op.
      addEdits(log, hri2, tableName2, 1);
      log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes());
      log.rollWriter();
      assertEquals(2, ((FSHLog) log).getNumLogFiles());

      // Flush the second region too; with both regions flushed, every
      // rolled file becomes removable.
      addEdits(log, hri2, tableName2, 1);
      log.startCacheFlush(hri2.getEncodedNameAsBytes());
      log.completeCacheFlush(hri2.getEncodedNameAsBytes());
      log.rollWriter();
      assertEquals(0, ((FSHLog) log).getNumLogFiles());
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }
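
  /**
   * getServerNameFromHLogDirectoryName should return null for anything that
   * is not a WAL directory path, and parse the server name out of standard,
   * nested, and -splitting WAL paths.
   */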
  @Test
  public void testGetServerNameFromHLogDirectoryName() throws IOException {
    ServerName sn = new ServerName("hn", 450, 1398);
    String hl = FSUtils.getRootDir(conf) + "/" + HLogUtil.getHLogDirectoryName(sn.toString());

    // Non-WAL paths must not throw; they should simply return null.
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, null));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf,
        FSUtils.getRootDir(conf).toUri().toString()));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, ""));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, " "));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, hl));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, hl + "qdf"));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, "sfqf" + hl + "qdf"));

    final String wals = "/WALs/";
    ServerName parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf,
        FSUtils.getRootDir(conf).toUri().toString() + wals + sn +
        "/localhost%2C32984%2C1343316388997.1343316390417");
    Assert.assertEquals("standard", sn, parsed);

    parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf, hl + "/qdf");
    Assert.assertEquals("subdir", sn, parsed);

    parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf,
        FSUtils.getRootDir(conf).toUri().toString() + wals + sn +
        "-splitting/localhost%3A57020.1340474893931");
    Assert.assertEquals("split", sn, parsed);
  }
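
  /**
   * The WAL coprocessor configured in setUpBeforeClass must be found on the
   * log's WALCoprocessorHost.
   */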
  @Test
  public void testWALCoprocessorLoaded() throws Exception {
    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    try {
      WALCoprocessorHost host = log.getCoprocessorHost();
      Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
      assertNotNull(c);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }
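
  /**
   * Appends the given number of single-KeyValue edits to the log for the
   * given region.
   */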
  private void addEdits(HLog log, HRegionInfo hri, TableName tableName,
      int times) throws IOException {
    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor("row"));

    final byte[] row = Bytes.toBytes("row");
    for (int i = 0; i < times; i++) {
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, row, row, timestamp, row));
      log.append(hri, tableName, cols, timestamp, htd);
    }
  }
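
  /**
   * Writes a log with the legacy SequenceFile-based writer and verifies the
   * reader factory hands back a SequenceFileLogReader that returns every
   * entry intact.
   */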
  @Test
  public void testReadLegacyLog() throws IOException {
    final int columnCount = 5;
    final int recordCount = 5;
    final TableName tableName = TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    long timestamp = System.currentTimeMillis();
    Path path = new Path(dir, "temphlog");
    SequenceFileLogWriter sflw = null;
    HLog.Reader reader = null;
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor(tableName);
      fs.mkdirs(dir);
      // Write the log in the legacy (pre-protobuf) format.
      sflw = new SequenceFileLogWriter();
      sflw.init(fs, path, conf);
      for (int i = 0; i < recordCount; ++i) {
        HLogKey key = new HLogKey(
            hri.getEncodedNameAsBytes(), tableName, i, timestamp, HConstants.DEFAULT_CLUSTER_ID);
        WALEdit edit = new WALEdit();
        for (int j = 0; j < columnCount; ++j) {
          if (i == 0) {
            htd.addFamily(new HColumnDescriptor("column" + j));
          }
          String value = i + "" + j;
          edit.add(new KeyValue(row, row, row, timestamp, Bytes.toBytes(value)));
        }
        sflw.append(new HLog.Entry(key, edit));
      }
      sflw.sync();
      sflw.close();

      // Now read the log back using the standard factory.
      reader = HLogFactory.createReader(fs, path, conf);
      assertTrue(reader instanceof SequenceFileLogReader);
      for (int i = 0; i < recordCount; ++i) {
        HLog.Entry entry = reader.next();
        assertNotNull(entry);
        assertEquals(columnCount, entry.getEdit().size());
        assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
        assertEquals(tableName, entry.getKey().getTablename());
        int idx = 0;
        for (KeyValue val : entry.getEdit().getKeyValues()) {
          assertTrue(Bytes.equals(row, val.getRow()));
          String value = i + "" + idx;
          assertArrayEquals(Bytes.toBytes(value), val.getValue());
          idx++;
        }
      }
      HLog.Entry entry = reader.next();
      assertNull(entry);
    } finally {
      if (sflw != null) {
        sflw.close();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }
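
  /**
   * Reads the WAL both with and without a WALTrailer present.
   */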
  @Test
  public void testWALTrailer() throws IOException {
    // Read with a trailer (writer closed cleanly).
    doRead(true);
    // Read without a trailer (writer never closed).
    doRead(false);
  }
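
  /**
   * Appends entries to the WAL, then reads them back and verifies the count
   * and contents, plus whether a WALTrailer is present.
   * @param withTrailer if true, close the writer so a trailer is appended to
   *          the log file
   */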
  private void doRead(boolean withTrailer) throws IOException {
    final int columnCount = 5;
    final int recordCount = 5;
    final TableName tableName = TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    long timestamp = System.currentTimeMillis();
    Path path = new Path(dir, "temphlog");
    HLog.Writer writer = null;
    HLog.Reader reader = null;
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor(tableName);
      fs.mkdirs(dir);
      // Write the log in the current (protobuf) format.
      writer = HLogFactory.createWriter(fs, path, conf);
      for (int i = 0; i < recordCount; ++i) {
        HLogKey key = new HLogKey(
            hri.getEncodedNameAsBytes(), tableName, i, timestamp, HConstants.DEFAULT_CLUSTER_ID);
        WALEdit edit = new WALEdit();
        for (int j = 0; j < columnCount; ++j) {
          if (i == 0) {
            htd.addFamily(new HColumnDescriptor("column" + j));
          }
          String value = i + "" + j;
          edit.add(new KeyValue(row, row, row, timestamp, Bytes.toBytes(value)));
        }
        writer.append(new HLog.Entry(key, edit));
      }
      writer.sync();
      if (withTrailer) writer.close();

      // Read the log back; a trailer should only be present if the writer
      // was closed.
      reader = HLogFactory.createReader(fs, path, conf);
      assertTrue(reader instanceof ProtobufLogReader);
      if (withTrailer) {
        assertNotNull(reader.getWALTrailer());
      } else {
        assertNull(reader.getWALTrailer());
      }
      for (int i = 0; i < recordCount; ++i) {
        HLog.Entry entry = reader.next();
        assertNotNull(entry);
        assertEquals(columnCount, entry.getEdit().size());
        assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
        assertEquals(tableName, entry.getKey().getTablename());
        int idx = 0;
        for (KeyValue val : entry.getEdit().getKeyValues()) {
          assertTrue(Bytes.equals(row, val.getRow()));
          String value = i + "" + idx;
          assertArrayEquals(Bytes.toBytes(value), val.getValue());
          idx++;
        }
      }
      HLog.Entry entry = reader.next();
      assertNull(entry);
    } finally {
      if (writer != null) {
        writer.close();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }
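
  /**
   * Listener that counts per-append notifications; all other callbacks are
   * no-ops.
   */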
  static class DumbWALActionsListener implements WALActionsListener {
    int increments = 0;

    @Override
    public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
        WALEdit logEdit) {
      increments++;
    }

    @Override
    public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
      increments++;
    }

    @Override
    public void preLogRoll(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void postLogRoll(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void preLogArchive(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void postLogArchive(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void logRollRequested() {
      // not interested
    }

    @Override
    public void logCloseRequested() {
      // not interested
    }
  }

}