/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/** JUnit test case for HLog */
@Category(LargeTests.class)
public class TestHLog {
  private static final Log LOG = LogFactory.getLog(TestHLog.class);
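  // Instance initializer block: raise logging on the HDFS and WAL classes these
  // tests exercise to ALL so their internals show up in the test output.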
  {
    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
      .getLogger().setLevel(Level.ALL);
    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)HLog.LOG).getLogger().setLevel(Level.ALL);
  }

  private static Configuration conf;
  private static FileSystem fs;
  private static Path dir;
  private static MiniDFSCluster cluster;
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Path hbaseDir;
  private static Path oldLogDir;

  @Before
  public void setUp() throws Exception {

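    // Wipe everything under the DFS root so every test starts against a clean filesystem.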
    FileStatus[] entries = fs.listStatus(new Path("/"));
    for (FileStatus dir : entries) {
      fs.delete(dir.getPath(), true);
    }

  }

  @After
  public void tearDown() throws Exception {
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Make block sizes small.
    TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
    // needed for testAppendClose()
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
    // quicker heartbeat interval for faster DN death notification
    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.socket.timeout", 5000);
    // faster failover with cluster.shutdown();fs.close() idiom
    TEST_UTIL.getConfiguration()
        .setInt("ipc.client.connect.max.retries", 1);
    TEST_UTIL.getConfiguration().setInt(
        "dfs.client.block.recovery.retries", 1);
    TEST_UTIL.getConfiguration().setInt(
        "ipc.client.connection.maxidletime", 500);
    TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
        SampleRegionWALObserver.class.getName());
    TEST_UTIL.startMiniDFSCluster(3);

    conf = TEST_UTIL.getConfiguration();
    cluster = TEST_UTIL.getDFSCluster();
    fs = cluster.getFileSystem();

    hbaseDir = TEST_UTIL.createRootDir();
    oldLogDir = new Path(hbaseDir, ".oldlogs");
    dir = new Path(hbaseDir, getName());
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  private static String getName() {
    // TODO Auto-generated method stub
    return "TestHLog";
  }

  /**
   * Test that with three concurrent threads we still write edits in
   * ascending sequence id order.
   * @throws Exception
   */
  @Test
  public void testMaintainOrderWithConcurrentWrites() throws Exception {
    // Run the HPE tool with three threads writing 3000 edits each concurrently.
    // When done, verify that all edits were written and that the order in the
    // WALs is of ascending edit sequence ids.
    int errCode =
      HLogPerformanceEvaluation.innerMain(new String [] {"-threads", "3", "-verify", "-iterations", "3000"});
    assertEquals(0, errCode);
  }

  /**
   * Just write multiple logs then split.  Before fix for HADOOP-2283, this
   * would fail.
   * @throws IOException
   */
  @Test
  public void testSplit() throws IOException {

    final byte [] tableName = Bytes.toBytes(getName());
    final byte [] rowName = tableName;
    Path logdir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
    HLog log = new HLog(fs, logdir, oldLogDir, conf);
    final int howmany = 3;
    HRegionInfo[] infos = new HRegionInfo[3];
    Path tabledir = new Path(hbaseDir, getName());
    fs.mkdirs(tabledir);
    for (int i = 0; i < howmany; i++) {
      infos[i] = new HRegionInfo(tableName,
          Bytes.toBytes("" + i), Bytes.toBytes("" + (i+1)), false);
      fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
      LOG.info("allo " + new Path(tabledir, infos[i].getEncodedName()).toString());
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("column"));

    // Add edits for three regions.
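    // Each outer iteration writes 'howmany' edits to every region and then rolls the
    // writer, so each region ends up with howmany * howmany edits spread across
    // howmany log files; verifySplits below checks exactly that.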
    try {
      for (int ii = 0; ii < howmany; ii++) {
        for (int i = 0; i < howmany; i++) {

          for (int j = 0; j < howmany; j++) {
            WALEdit edit = new WALEdit();
            byte [] family = Bytes.toBytes("column");
            byte [] qualifier = Bytes.toBytes(Integer.toString(j));
            byte [] column = Bytes.toBytes("column:" + Integer.toString(j));
            edit.add(new KeyValue(rowName, family, qualifier,
                System.currentTimeMillis(), column));
            LOG.info("Region " + i + ": " + edit);
            log.append(infos[i], tableName, edit,
              System.currentTimeMillis(), htd);
          }
        }
        log.rollWriter();
      }
      log.close();
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, logdir, oldLogDir, fs);
      List<Path> splits =
        logSplitter.splitLog();
      verifySplits(splits, howmany);
      log = null;
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
    }
  }

  /**
   * Test new HDFS-265 sync.
   * @throws Exception
   */
  @Test
  public void Broken_testSync() throws Exception {
    byte [] bytes = Bytes.toBytes(getName());
    // First verify that using streams all works.
    Path p = new Path(dir, getName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(bytes);
    Method syncMethod = null;
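    // Find the flush method reflectively: newer Hadoop versions expose
    // Syncable.hflush(), older ones only provide sync().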
    try {
      syncMethod = out.getClass().getMethod("hflush", new Class<?> []{});
    } catch (NoSuchMethodException e) {
      try {
        syncMethod = out.getClass().getMethod("sync", new Class<?> []{});
      } catch (NoSuchMethodException ex) {
        fail("This version of Hadoop supports neither Syncable.sync() " +
            "nor Syncable.hflush().");
      }
    }
    syncMethod.invoke(out, new Object[]{});
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte [] buffer = new byte [1024];
    int read = in.read(buffer);
    assertEquals(bytes.length, read);
    out.close();
    in.close();
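    // The raw FSDataOutputStream round trip above works; now exercise the same
    // sync-then-read behavior through the WAL itself.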
    Path subdir = new Path(dir, "hlogdir");
    HLog wal = new HLog(fs, subdir, oldLogDir, conf);
    final int total = 20;
    HLog.Reader reader = null;

    try {
      HRegionInfo info = new HRegionInfo(bytes,
          null, null, false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor(bytes));

      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
        wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
      }
      // Now call sync and try reading.  Opening a Reader before you sync just
      // gives you an EOFException.
      wal.sync();
      // Open a Reader.
      Path walPath = wal.computeFilename();
      reader = HLog.getReader(fs, walPath, conf);
      int count = 0;
      HLog.Entry entry = new HLog.Entry();
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total, count);
      reader.close();
      // Add test that checks to see that an open of a Reader works on a file
      // that has had a sync done on it.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
        wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
      }
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertTrue(count >= total);
      reader.close();
      // If I sync, should see double the edits.
      wal.sync();
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 2, count);
      // Now ensure everything works when we cross a block boundary,
      // especially that the reader sees the correct file length.
      final byte [] value = new byte[1025 * 1024];  // Make a 1M value.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), bytes, value));
        wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
      }
      // Now I should have written out lots of blocks.  Sync then read.
      wal.sync();
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
      // Close the WAL and ensure a Reader still sees the correct length afterwards.
      wal.close();
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
    } finally {
      if (wal != null) wal.closeAndDelete();
      if (reader != null) reader.close();
    }
  }

  /**
   * Test the findMemstoresWithEditsEqualOrOlderThan method.
   * @throws IOException
   */
  @Test
  public void testFindMemstoresWithEditsEqualOrOlderThan() throws IOException {
    Map<byte [], Long> regionsToSeqids = new HashMap<byte [], Long>();
    for (int i = 0; i < 10; i++) {
      Long l = Long.valueOf(i);
      regionsToSeqids.put(l.toString().getBytes(), l);
    }
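    // A cutoff of 1 should match only the regions whose last sequence id is 0 or 1.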
    byte [][] regions =
      HLog.findMemstoresWithEditsEqualOrOlderThan(1, regionsToSeqids);
    assertEquals(2, regions.length);
    assertTrue(Bytes.equals(regions[0], "0".getBytes()) ||
        Bytes.equals(regions[0], "1".getBytes()));
    regions = HLog.findMemstoresWithEditsEqualOrOlderThan(3, regionsToSeqids);
    int count = 4;
    assertEquals(count, regions.length);
    // Regions returned are not ordered.
    for (int i = 0; i < count; i++) {
      assertTrue(Bytes.equals(regions[i], "0".getBytes()) ||
        Bytes.equals(regions[i], "1".getBytes()) ||
        Bytes.equals(regions[i], "2".getBytes()) ||
        Bytes.equals(regions[i], "3".getBytes()));
    }
  }

  private void verifySplits(List<Path> splits, final int howmany)
  throws IOException {
    assertEquals(howmany, splits.size());
    for (int i = 0; i < splits.size(); i++) {
      LOG.info("Verifying=" + splits.get(i));
      HLog.Reader reader = HLog.getReader(fs, splits.get(i), conf);
      try {
        int count = 0;
        String previousRegion = null;
        long seqno = -1;
        HLog.Entry entry = new HLog.Entry();
        while ((entry = reader.next(entry)) != null) {
          HLogKey key = entry.getKey();
          String region = Bytes.toString(key.getEncodedRegionName());
          // Assert that all edits are for same region.
          if (previousRegion != null) {
            assertEquals(previousRegion, region);
          }
          LOG.info("oldseqno=" + seqno + ", newseqno=" + key.getLogSeqNum());
          assertTrue(seqno < key.getLogSeqNum());
          seqno = key.getLogSeqNum();
          previousRegion = region;
          count++;
        }
        assertEquals(howmany * howmany, count);
      } finally {
        reader.close();
      }
    }
  }

  /*
   * We pass different values to recoverFileLease() so that different code paths are covered
   *
   * For this test to pass, requires:
   * 1. HDFS-200 (append support)
   * 2. HDFS-988 (SafeMode should freeze file operations
   *              [FSNamesystem.nextGenerationStampForBlock])
   * 3. HDFS-142 (on restart, maintain pendingCreates)
   */
  @Test
  public void testAppendClose() throws Exception {
    byte [] tableName = Bytes.toBytes(getName());
    HRegionInfo regioninfo = new HRegionInfo(tableName,
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
    Path subdir = new Path(dir, "hlogdir");
    Path archdir = new Path(dir, "hlogdir_archive");
    HLog wal = new HLog(fs, subdir, archdir, conf);
    final int total = 20;

    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor(tableName));

    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName));
      wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd);
    }
    // Now call sync to send the data to HDFS datanodes
    wal.sync();
    int namenodePort = cluster.getNameNodePort();
    final Path walPath = wal.computeFilename();

    // Stop the cluster.  (ensure restart since we're sharing MiniDFSCluster)
    try {
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
      dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
      cluster.shutdown();
      try {
        // wal.writer.close() will throw an exception,
        // but still call this since it closes the LogSyncer thread first
        wal.close();
      } catch (IOException e) {
        LOG.info(e);
      }
      fs.close(); // closing FS last so DFSOutputStream can't call close
      LOG.info("STOPPED first instance of the cluster");
    } finally {
      // Restart the cluster
      while (cluster.isClusterUp()) {
        LOG.error("Waiting for cluster to go down");
        Thread.sleep(1000);
      }

      // Workaround a strange issue with Hadoop's RPC system - if we don't
      // sleep here, the new datanodes will pick up a cached IPC connection to
      // the old (dead) NN and fail to start. Sleeping 2 seconds goes past
      // the idle time threshold configured in the conf above
      Thread.sleep(2000);

      cluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
      TEST_UTIL.setDFSCluster(cluster);
      cluster.waitActive();
      fs = cluster.getFileSystem();
      LOG.info("START second instance.");
    }

    // set the lease period to be 1 second so that the
    // namenode triggers lease recovery upon append request
    Method setLeasePeriod = cluster.getClass()
      .getDeclaredMethod("setLeasePeriod", new Class[]{Long.TYPE, Long.TYPE});
    setLeasePeriod.setAccessible(true);
    setLeasePeriod.invoke(cluster, 1000L, 1000L);
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      LOG.info(e);
    }

    // Now try recovering the log, like the HMaster would do
    final FileSystem recoveredFs = fs;
    final Configuration rlConf = conf;

    class RecoverLogThread extends Thread {
      public Exception exception = null;
      public void run() {
        try {
          FSUtils.getInstance(fs, rlConf)
            .recoverFileLease(recoveredFs, walPath, rlConf);
        } catch (IOException e) {
          exception = e;
        }
      }
    }

    RecoverLogThread t = new RecoverLogThread();
    t.start();
    // Timeout after 60 sec. Without correct patches, would be an infinite loop
    t.join(60 * 1000);
    if (t.isAlive()) {
      t.interrupt();
      throw new Exception("Timed out waiting for HLog.recoverLog()");
    }

    if (t.exception != null)
      throw t.exception;

    // Make sure you can read all the content
    HLog.Reader reader = HLog.getReader(this.fs, walPath, this.conf);
    int count = 0;
    HLog.Entry entry = new HLog.Entry();
    while (reader.next(entry) != null) {
      count++;
      assertTrue("Should be one KeyValue per WALEdit",
                 entry.getEdit().getKeyValues().size() == 1);
    }
    assertEquals(total, count);
    reader.close();

    // Reset the lease period
    setLeasePeriod.invoke(cluster, new Object[]{new Long(60000), new Long(3600000)});
  }

  /**
   * Tests that we can write out an edit, close, and then read it back in again.
   * @throws IOException
   */
  @Test
  public void testEditAdd() throws IOException {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog.Reader reader = null;
    HLog log = null;
    try {
      log = new HLog(fs, dir, oldLogDir, conf);
      // Write columns named 1, 2, 3, etc. and then values of single byte
      // 1, 2, 3...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo info = new HRegionInfo(tableName,
          row, Bytes.toBytes(Bytes.toString(row) + "1"), false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

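      // Append the edit, then mark a cache flush as started and completed.  This
      // writes the data entry plus a "complete cache flush" meta entry; the two
      // reader loops below expect exactly those two records.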
      log.append(info, tableName, cols, System.currentTimeMillis(), htd);
      long logSeqId = log.startCacheFlush(info.getEncodedNameAsBytes());
      log.completeCacheFlush(info.getEncodedNameAsBytes(), tableName, logSeqId,
          info.isMetaRegion());
      log.close();
      Path filename = log.computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLog.getReader(fs, filename, conf);
      // Above we added all columns on a single row, so we only read one
      // entry below... that's why the loop bound is '1'.
      for (int i = 0; i < 1; i++) {
        HLog.Entry entry = reader.next(null);
        if (entry == null) break;
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(row, kv.getRow()));
        assertEquals((byte)(i + '0'), kv.getValue()[0]);
        System.out.println(key + " " + val);
      }
      HLog.Entry entry = null;
      while ((entry = reader.next(null)) != null) {
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        // Assert only one more row... the meta flushed row.
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(HLog.METAROW, kv.getRow()));
        assertTrue(Bytes.equals(HLog.METAFAMILY, kv.getFamily()));
        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
          val.getKeyValues().get(0).getValue()));
        System.out.println(key + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Tests appending a multi-KeyValue edit plus a cache-flush record and reading both back.
   * @throws IOException
   */
  @Test
  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      // Write columns named 1, 2, 3, etc. and then values of single byte
      // 1, 2, 3...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      long logSeqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, logSeqId, false);
      log.close();
      Path filename = log.computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLog.getReader(fs, filename, conf);
      HLog.Entry entry = reader.next();
      assertEquals(COL_COUNT, entry.getEdit().size());
      int idx = 0;
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte)(idx + '0'), val.getValue()[0]);
        System.out.println(entry.getKey() + " " + val);
        idx++;
      }

      // Get next row... the meta flushed row.
      entry = reader.next();
      assertEquals(1, entry.getEdit().size());
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
        assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
        assertTrue(Bytes.equals(HLog.METAFAMILY, val.getFamily()));
        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
          val.getValue()));
        System.out.println(entry.getKey() + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Test that a registered WALActionsListener sees each entry before it is appended.
   * @throws Exception
   */
  @Test
  public void testVisitors() throws Exception {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      DumbWALActionsListener visitor = new DumbWALActionsListener();
      log.registerWALActionsListener(visitor);
      long timestamp = System.currentTimeMillis();
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      for (int i = 0; i < COL_COUNT; i++) {
        WALEdit cols = new WALEdit();
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[]{(byte) (i + '0')}));
        log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      }
      assertEquals(COL_COUNT, visitor.increments);
      log.unregisterWALActionsListener(visitor);
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(11)),
          timestamp, new byte[]{(byte) (11 + '0')}));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      assertEquals(COL_COUNT, visitor.increments);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

  @Test
  public void testLogCleaning() throws Exception {
    LOG.info("testLogCleaning");
    final byte [] tableName = Bytes.toBytes("testLogCleaning");
    final byte [] tableName2 = Bytes.toBytes("testLogCleaning2");

    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HRegionInfo hri2 = new HRegionInfo(tableName2,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

      // Add a single edit and make sure that rolling won't remove the file
      // Before HBASE-3198 it used to delete it
      addEdits(log, hri, tableName, 1);
      log.rollWriter();
      assertEquals(1, log.getNumLogFiles());

      // See if there's anything wrong with more than 1 edit
      addEdits(log, hri, tableName, 2);
      log.rollWriter();
      assertEquals(2, log.getNumLogFiles());

      // Now mix edits from 2 regions, still no flushing
      addEdits(log, hri, tableName, 1);
      addEdits(log, hri2, tableName2, 1);
      addEdits(log, hri, tableName, 1);
      addEdits(log, hri2, tableName2, 1);
      log.rollWriter();
      assertEquals(3, log.getNumLogFiles());

      // Flush the first region, we expect to see the first two files getting
      // archived
      long seqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, seqId, false);
      log.rollWriter();
      assertEquals(2, log.getNumLogFiles());

      // Flush the second region, which removes all the remaining output files
      // since the oldest was completely flushed and the two others only contain
      // flush information
      seqId = log.startCacheFlush(hri2.getEncodedNameAsBytes());
      log.completeCacheFlush(hri2.getEncodedNameAsBytes(), tableName2, seqId, false);
      log.rollWriter();
      assertEquals(0, log.getNumLogFiles());
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

  /**
   * A loaded WAL coprocessor won't break existing HLog test cases.
   */
  @Test
  public void testWALCoprocessorLoaded() throws Exception {
    // test to see whether the coprocessor is loaded or not.
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      WALCoprocessorHost host = log.getCoprocessorHost();
      Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
      assertNotNull(c);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

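  /** Appends {@code times} single-KeyValue edits for the given region to the log. */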
  private void addEdits(HLog log, HRegionInfo hri, byte [] tableName,
                        int times) throws IOException {
    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor("row"));

    final byte [] row = Bytes.toBytes("row");
    for (int i = 0; i < times; i++) {
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, row, row, timestamp, row));
      log.append(hri, tableName, cols, timestamp, htd);
    }
  }

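  /**
   * Listener that simply counts visitLogEntryBeforeWrite() notifications; every
   * other callback is a no-op.
   */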
  static class DumbWALActionsListener implements WALActionsListener {
    int increments = 0;

    @Override
    public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
                                         WALEdit logEdit) {
      increments++;
    }

    @Override
    public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
      increments++;
    }

    @Override
    public void preLogRoll(Path oldFile, Path newFile) {
      // TODO Auto-generated method stub
    }

    @Override
    public void postLogRoll(Path oldFile, Path newFile) {
      // TODO Auto-generated method stub
    }

    @Override
    public void preLogArchive(Path oldFile, Path newFile) {
      // TODO Auto-generated method stub
    }

    @Override
    public void postLogArchive(Path oldFile, Path newFile) {
      // TODO Auto-generated method stub
    }

    @Override
    public void logRollRequested() {
      // TODO Auto-generated method stub
    }

    @Override
    public void logCloseRequested() {
      // not interested
    }
  }

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}