/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.wal;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.lang.reflect.Method;
import java.net.BindException;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

// imports for things that haven't moved from regionserver.wal yet.
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogReader;
import org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogWriter;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

/**
 * WAL tests that can be reused across providers.
 */
@Category(MediumTests.class)
public class TestWALFactory {
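  // A rough sketch of the WAL lifecycle the tests below exercise (mirroring
  // calls made in this class, not a definitive API reference):
  //
  //   WALFactory factory = new WALFactory(conf, null, "factory-id");
  //   WAL wal = factory.getWAL(regionInfo.getEncodedNameAsBytes());
  //   long txid = wal.append(htd, regionInfo, walKey, edit, sequenceId, true, null);
  //   wal.sync(txid);    // block until the edit is durable
  //   factory.close();   // close all WALs the factory handed out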
  protected static final Log LOG = LogFactory.getLog(TestWALFactory.class);

  protected static Configuration conf;
  private static MiniDFSCluster cluster;
  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  protected static Path hbaseDir;

  protected FileSystem fs;
  protected Path dir;
  protected WALFactory wals;

  @Rule
  public final TestName currentTest = new TestName();

  @Before
  public void setUp() throws Exception {
    fs = cluster.getFileSystem();
    dir = new Path(hbaseDir, currentTest.getMethodName());
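    // One factory per test, named for the test method, so each test's WAL
    // files land in their own directory and cannot leak across tests.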
    wals = new WALFactory(conf, null, currentTest.getMethodName());
  }

  @After
  public void tearDown() throws Exception {
    // testAppendClose closes the FileSystem, which will prevent us from closing cleanly here.
    try {
      wals.close();
    } catch (IOException exception) {
      LOG.warn("Encountered exception while closing wal factory. If you have other errors, this" +
          " may be the cause. Message: " + exception);
      LOG.debug("Exception details for failure to close wal factory.", exception);
    }
    FileStatus[] entries = fs.listStatus(new Path("/"));
    for (FileStatus dir : entries) {
      fs.delete(dir.getPath(), true);
    }
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Make block sizes small.
    TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
    // needed for testAppendClose()
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
    // quicker heartbeat interval for faster DN death notification
    TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.client.socket-timeout", 5000);

    // faster failover with the cluster.shutdown(); fs.close() idiom
    TEST_UTIL.getConfiguration().setInt("hbase.ipc.client.connect.max.retries", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.client.block.recovery.retries", 1);
    TEST_UTIL.getConfiguration().setInt("hbase.ipc.client.connection.maxidletime", 500);
    TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
        SampleRegionWALObserver.class.getName());
    TEST_UTIL.startMiniDFSCluster(3);

    conf = TEST_UTIL.getConfiguration();
    cluster = TEST_UTIL.getDFSCluster();

    hbaseDir = TEST_UTIL.createRootDir();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void canCloseSingleton() throws IOException {
    WALFactory.getInstance(conf).close();
  }

  /**
   * Just write multiple logs then split.  Before the fix for HADOOP-2283, this
   * would fail.
   * @throws IOException
   */
  @Test
  public void testSplit() throws IOException {
    final TableName tableName = TableName.valueOf(currentTest.getMethodName());
    final byte [] rowName = tableName.getName();
    final Path logdir = new Path(hbaseDir,
        DefaultWALProvider.getWALDirectoryName(currentTest.getMethodName()));
    Path oldLogDir = new Path(hbaseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    final int howmany = 3;
    HRegionInfo[] infos = new HRegionInfo[howmany];
    Path tabledir = FSUtils.getTableDir(hbaseDir, tableName);
    fs.mkdirs(tabledir);
    for (int i = 0; i < howmany; i++) {
      infos[i] = new HRegionInfo(tableName,
          Bytes.toBytes("" + i), Bytes.toBytes("" + (i + 1)), false);
      fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
      LOG.info("Created region dir " + new Path(tabledir, infos[i].getEncodedName()));
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("column"));

    // Add edits for three regions.
    final AtomicLong sequenceId = new AtomicLong(1);
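    // Below: three passes over the three regions, three edits per region per
    // pass, with a sync and roll after each region's batch; verifySplits()
    // then expects howmany * howmany split files of howmany edits apiece.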
    for (int ii = 0; ii < howmany; ii++) {
      for (int i = 0; i < howmany; i++) {
        final WAL log = wals.getWAL(infos[i].getEncodedNameAsBytes());
        for (int j = 0; j < howmany; j++) {
          WALEdit edit = new WALEdit();
          byte [] family = Bytes.toBytes("column");
          byte [] qualifier = Bytes.toBytes(Integer.toString(j));
          byte [] column = Bytes.toBytes("column:" + Integer.toString(j));
          edit.add(new KeyValue(rowName, family, qualifier,
              System.currentTimeMillis(), column));
          LOG.info("Region " + i + ": " + edit);
          log.append(htd, infos[i], new WALKey(infos[i].getEncodedNameAsBytes(), tableName,
              System.currentTimeMillis()), edit, sequenceId, true, null);
        }
        log.sync();
        log.rollWriter();
      }
    }
    wals.shutdown();
    List<Path> splits = WALSplitter.split(hbaseDir, logdir, oldLogDir, fs, conf, wals);
    verifySplits(splits, howmany);
  }

  /**
   * Test new HDFS-265 sync.
   * @throws Exception
   */
  @Test
  public void Broken_testSync() throws Exception {
    TableName tableName = TableName.valueOf(currentTest.getMethodName());
    // First verify that using streams all works.
    Path p = new Path(dir, currentTest.getMethodName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(tableName.getName());
    Method syncMethod = null;
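    // Hadoop renamed Syncable.sync() to hflush() between versions, so look the
    // method up reflectively and accept whichever one this Hadoop provides.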
    try {
      syncMethod = out.getClass().getMethod("hflush", new Class<?>[]{});
    } catch (NoSuchMethodException e) {
      try {
        syncMethod = out.getClass().getMethod("sync", new Class<?>[]{});
      } catch (NoSuchMethodException ex) {
        fail("This version of Hadoop supports neither Syncable.sync() " +
            "nor Syncable.hflush().");
      }
    }
    syncMethod.invoke(out, new Object[]{});
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte [] buffer = new byte[1024];
    int read = in.read(buffer);
    assertEquals(tableName.getName().length, read);
    out.close();
    in.close();

    final AtomicLong sequenceId = new AtomicLong(1);
    final int total = 20;
    WAL.Reader reader = null;

    try {
      HRegionInfo info = new HRegionInfo(tableName, null, null, false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor(tableName.getName()));
      final WAL wal = wals.getWAL(info.getEncodedNameAsBytes());

      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        wal.append(htd, info, new WALKey(info.getEncodedNameAsBytes(), tableName,
            System.currentTimeMillis()), kvs, sequenceId, true, null);
      }
      // Now call sync and try reading.  Opening a Reader before you sync just
      // gives you an EOFException.
      wal.sync();
      // Open a Reader.
      Path walPath = DefaultWALProvider.getCurrentFileName(wal);
      reader = wals.createReader(fs, walPath);
      int count = 0;
      WAL.Entry entry = new WAL.Entry();
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total, count);
      reader.close();
      // Add test that checks to see that an open of a Reader works on a file
      // that has had a sync done on it.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        wal.append(htd, info, new WALKey(info.getEncodedNameAsBytes(), tableName,
            System.currentTimeMillis()), kvs, sequenceId, true, null);
      }
      wal.sync();
      reader = wals.createReader(fs, walPath);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertTrue(count >= total);
      reader.close();
      // If I sync, should see double the edits.
      wal.sync();
      reader = wals.createReader(fs, walPath);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 2, count);
      reader.close();
      // Now do a test that ensures stuff works when we go over block boundary,
      // especially that we return good length on file.
      final byte [] value = new byte[1025 * 1024];  // A value slightly over the 1MB block size set above.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), value));
        wal.append(htd, info, new WALKey(info.getEncodedNameAsBytes(), tableName,
            System.currentTimeMillis()), kvs, sequenceId, true, null);
      }
      // Now I should have written out lots of blocks.  Sync then read.
      wal.sync();
      reader = wals.createReader(fs, walPath);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
      // Shut down and ensure that the Reader gets the right length also.
      wal.shutdown();
      reader = wals.createReader(fs, walPath);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
    } finally {
      if (reader != null) reader.close();
    }
  }

  private void verifySplits(final List<Path> splits, final int howmany)
      throws IOException {
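    // testSplit syncs and rolls after each region's batch of howmany edits,
    // so we expect howmany * howmany split files, each holding the edits of a
    // single region with strictly increasing sequence numbers.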
    assertEquals(howmany * howmany, splits.size());
    for (int i = 0; i < splits.size(); i++) {
      LOG.info("Verifying=" + splits.get(i));
      WAL.Reader reader = wals.createReader(fs, splits.get(i));
      try {
        int count = 0;
        String previousRegion = null;
        long seqno = -1;
        WAL.Entry entry = new WAL.Entry();
        while ((entry = reader.next(entry)) != null) {
          WALKey key = entry.getKey();
          String region = Bytes.toString(key.getEncodedRegionName());
          // Assert that all edits are for the same region.
          if (previousRegion != null) {
            assertEquals(previousRegion, region);
          }
          LOG.info("oldseqno=" + seqno + ", newseqno=" + key.getLogSeqNum());
          assertTrue(seqno < key.getLogSeqNum());
          seqno = key.getLogSeqNum();
          previousRegion = region;
          count++;
        }
        assertEquals(howmany, count);
      } finally {
        reader.close();
      }
    }
  }

  /*
   * We pass different values to recoverFileLease() so that different code paths are covered.
   *
   * For this test to pass, the following are required:
   * 1. HDFS-200 (append support)
   * 2. HDFS-988 (SafeMode should freeze file operations
   *              [FSNamesystem.nextGenerationStampForBlock])
   * 3. HDFS-142 (on restart, maintain pendingCreates)
   */
  @Test (timeout=300000)
  public void testAppendClose() throws Exception {
    TableName tableName = TableName.valueOf(currentTest.getMethodName());
    HRegionInfo regioninfo = new HRegionInfo(tableName,
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);

    final WAL wal = wals.getWAL(regioninfo.getEncodedNameAsBytes());
    final AtomicLong sequenceId = new AtomicLong(1);
    final int total = 20;

    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor(tableName.getName()));

    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      wal.append(htd, regioninfo, new WALKey(regioninfo.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis()), kvs, sequenceId, true, null);
    }
    // Now call sync to send the data to HDFS datanodes
    wal.sync();
    int namenodePort = cluster.getNameNodePort();
    final Path walPath = DefaultWALProvider.getCurrentFileName(wal);

    // Stop the cluster.  (ensure restart since we're sharing MiniDFSCluster)
    try {
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
      dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
      TEST_UTIL.shutdownMiniDFSCluster();
      try {
        // wal.writer.close() will throw an exception,
        // but still call this since it closes the LogSyncer thread first
        wal.shutdown();
      } catch (IOException e) {
        LOG.info(e);
      }
      fs.close(); // closing FS last so DFSOutputStream can't call close
      LOG.info("STOPPED first instance of the cluster");
    } finally {
      // Restart the cluster
      while (cluster.isClusterUp()) {
        LOG.error("Waiting for cluster to go down");
        Thread.sleep(1000);
      }
      assertFalse(cluster.isClusterUp());
      cluster = null;
      for (int i = 0; i < 100; i++) {
        try {
          cluster = TEST_UTIL.startMiniDFSClusterForTestWAL(namenodePort);
          break;
        } catch (BindException e) {
          LOG.info("Sleeping.  BindException bringing up new cluster");
          Threads.sleep(1000);
        }
      }
      cluster.waitActive();
      fs = cluster.getFileSystem();
      LOG.info("STARTED second instance.");
    }

    // set the lease period to be 1 second so that the
    // namenode triggers lease recovery upon append request
    Method setLeasePeriod = cluster.getClass()
        .getDeclaredMethod("setLeasePeriod", new Class[]{Long.TYPE, Long.TYPE});
    setLeasePeriod.setAccessible(true);
    setLeasePeriod.invoke(cluster, 1000L, 1000L);
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      LOG.info(e);
    }

    // Now try recovering the log, like the HMaster would do
    final FileSystem recoveredFs = fs;
    final Configuration rlConf = conf;

    class RecoverLogThread extends Thread {
      public Exception exception = null;
      public void run() {
        try {
          FSUtils.getInstance(fs, rlConf)
              .recoverFileLease(recoveredFs, walPath, rlConf, null);
        } catch (IOException e) {
          exception = e;
        }
      }
    }

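    // Run recovery on its own thread so we can bound it with join(); without
    // the HDFS fixes listed above, recoverFileLease() could loop forever.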
    RecoverLogThread t = new RecoverLogThread();
    t.start();
    // Timeout after 60 sec. Without the correct patches, this would be an infinite loop.
    t.join(60 * 1000);
    if (t.isAlive()) {
      t.interrupt();
      throw new Exception("Timed out waiting for WAL.recoverLog()");
    }

    if (t.exception != null) {
      throw t.exception;
    }

    // Make sure you can read all the content.
    WAL.Reader reader = wals.createReader(fs, walPath);
    int count = 0;
    WAL.Entry entry = new WAL.Entry();
    while (reader.next(entry) != null) {
      count++;
      assertTrue("Should be one KeyValue per WALEdit",
          entry.getEdit().getCells().size() == 1);
    }
    assertEquals(total, count);
    reader.close();

    // Reset the lease period.
    setLeasePeriod.invoke(cluster, 60000L, 3600000L);
  }

  /**
   * Tests that we can write out an edit, close, and then read it back in again.
   * @throws IOException
   */
  @Test
  public void testEditAdd() throws IOException {
    final int COL_COUNT = 10;
    final TableName tableName = TableName.valueOf("tablename");
    final byte [] row = Bytes.toBytes("row");
    WAL.Reader reader = null;
    try {
      final AtomicLong sequenceId = new AtomicLong(1);

      // Write columns named 0, 1, 2, etc., each with a single-byte value of
      // '0', '1', '2', ...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo info = new HRegionInfo(tableName,
          row, Bytes.toBytes(Bytes.toString(row) + "1"), false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));
      final WAL log = wals.getWAL(info.getEncodedNameAsBytes());

      final long txid = log.append(htd, info, new WALKey(info.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis()), cols, sequenceId, true, null);
      log.sync(txid);
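      // Mark a cache flush so the WAL can treat the appended edits as
      // persisted before it is shut down.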
      log.startCacheFlush(info.getEncodedNameAsBytes());
      log.completeCacheFlush(info.getEncodedNameAsBytes());
      log.shutdown();
      Path filename = DefaultWALProvider.getCurrentFileName(log);
      // Now open a reader on the log and assert append worked.
      reader = wals.createReader(fs, filename);
      // Above we added all columns on a single row, so we only read one
      // entry below; that's why the loop bound is 1.
      for (int i = 0; i < 1; i++) {
        WAL.Entry entry = reader.next(null);
        if (entry == null) break;
        WALKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
        assertTrue(tableName.equals(key.getTablename()));
        Cell cell = val.getCells().get(0);
        assertTrue(Bytes.equals(row, cell.getRow()));
        assertEquals((byte)(i + '0'), cell.getValue()[0]);
        System.out.println(key + " " + val);
      }
    } finally {
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Tests that a single append carrying multiple columns can be read back
   * with all of its cells intact.
   * @throws IOException
   */
  @Test
  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final TableName tableName = TableName.valueOf("tablename");
    final byte [] row = Bytes.toBytes("row");
    WAL.Reader reader = null;
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
      // Write columns named 0, 1, 2, etc., each with a single-byte value of
      // '0', '1', '2', ...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));
      final WAL log = wals.getWAL(hri.getEncodedNameAsBytes());
      final long txid = log.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis()), cols, sequenceId, true, null);
      log.sync(txid);
      log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes());
      log.shutdown();
      Path filename = DefaultWALProvider.getCurrentFileName(log);
      // Now open a reader on the log and assert append worked.
      reader = wals.createReader(fs, filename);
      WAL.Entry entry = reader.next();
      assertEquals(COL_COUNT, entry.getEdit().size());
      int idx = 0;
      for (Cell val : entry.getEdit().getCells()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
            entry.getKey().getEncodedRegionName()));
        assertTrue(tableName.equals(entry.getKey().getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte)(idx + '0'), val.getValue()[0]);
        System.out.println(entry.getKey() + " " + val);
        idx++;
      }
    } finally {
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Tests that registered listeners observe each entry before it is written.
   * @throws Exception
   */
  @Test
  public void testVisitors() throws Exception {
    final int COL_COUNT = 10;
    final TableName tableName = TableName.valueOf("tablename");
    final byte [] row = Bytes.toBytes("row");
    final DumbWALActionsListener visitor = new DumbWALActionsListener();
    final AtomicLong sequenceId = new AtomicLong(1);
    long timestamp = System.currentTimeMillis();
    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor("column"));

    HRegionInfo hri = new HRegionInfo(tableName,
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    final WAL log = wals.getWAL(hri.getEncodedNameAsBytes());
    log.registerWALActionsListener(visitor);
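    // The listener's visitLogEntryBeforeWrite() runs once per append, so its
    // counter should match the number of edits written while registered and
    // stop moving once it is unregistered below.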
    for (int i = 0; i < COL_COUNT; i++) {
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[]{(byte) (i + '0')}));
      log.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis()), cols, sequenceId, true, null);
    }
    log.sync();
    assertEquals(COL_COUNT, visitor.increments);
    log.unregisterWALActionsListener(visitor);
    WALEdit cols = new WALEdit();
    cols.add(new KeyValue(row, Bytes.toBytes("column"),
        Bytes.toBytes(Integer.toString(11)),
        timestamp, new byte[]{(byte) (11 + '0')}));
    log.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), tableName,
        System.currentTimeMillis()), cols, sequenceId, true, null);
    log.sync();
    assertEquals(COL_COUNT, visitor.increments);
  }

  /**
   * A loaded WAL coprocessor won't break existing WAL test cases.
   */
  @Test
  public void testWALCoprocessorLoaded() throws Exception {
    // Test to see whether the coprocessor is loaded or not.
    WALCoprocessorHost host = wals.getWAL(UNSPECIFIED_REGION).getCoprocessorHost();
    Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
    assertNotNull(c);
  }

  /**
   * Tests that a log written with the legacy sequence-file writer can be read
   * back through the factory.
   * @throws IOException
   */
  @Test
  public void testReadLegacyLog() throws IOException {
    final int columnCount = 5;
    final int recordCount = 5;
    final TableName tableName = TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    long timestamp = System.currentTimeMillis();
    Path path = new Path(dir, "tempwal");
    SequenceFileLogWriter sflw = null;
    WAL.Reader reader = null;
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor(tableName);
      fs.mkdirs(dir);
      // Write the log in the pre-PB (protobuf) format.
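      // SequenceFileLogWriter is the legacy writer; the factory should hand
      // back a SequenceFileLogReader for a file written this way, which the
      // instanceof assertion below verifies.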
      sflw = new SequenceFileLogWriter();
      sflw.init(fs, path, conf, false);
      for (int i = 0; i < recordCount; ++i) {
        WALKey key = new HLogKey(
            hri.getEncodedNameAsBytes(), tableName, i, timestamp, HConstants.DEFAULT_CLUSTER_ID);
        WALEdit edit = new WALEdit();
        for (int j = 0; j < columnCount; ++j) {
          if (i == 0) {
            htd.addFamily(new HColumnDescriptor("column" + j));
          }
          String value = i + "" + j;
          edit.add(new KeyValue(row, row, row, timestamp, Bytes.toBytes(value)));
        }
        sflw.append(new WAL.Entry(key, edit));
      }
      sflw.sync();
      sflw.close();

      // Now read the log using standard means.
      reader = wals.createReader(fs, path);
      assertTrue(reader instanceof SequenceFileLogReader);
      for (int i = 0; i < recordCount; ++i) {
        WAL.Entry entry = reader.next();
        assertNotNull(entry);
        assertEquals(columnCount, entry.getEdit().size());
        assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
        assertEquals(tableName, entry.getKey().getTablename());
        int idx = 0;
        for (Cell val : entry.getEdit().getCells()) {
          assertTrue(Bytes.equals(row, val.getRow()));
          String value = i + "" + idx;
          assertArrayEquals(Bytes.toBytes(value), val.getValue());
          idx++;
        }
      }
      WAL.Entry entry = reader.next();
      assertNull(entry);
    } finally {
      if (sflw != null) {
        sflw.close();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  static class DumbWALActionsListener extends WALActionsListener.Base {
    int increments = 0;

    @Override
    public void visitLogEntryBeforeWrite(HRegionInfo info, WALKey logKey,
        WALEdit logEdit) {
      increments++;
    }

    @Override
    public void visitLogEntryBeforeWrite(HTableDescriptor htd, WALKey logKey, WALEdit logEdit) {
      increments++;
    }
  }

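  // Any value works here: testWALCoprocessorLoaded only needs some region name
  // to fetch a WAL whose coprocessor host it can inspect.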
  private static final byte[] UNSPECIFIED_REGION = new byte[]{};

}