/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSHDFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.io.SequenceFile;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/** JUnit test case for HLog */
@Category(LargeTests.class)
public class TestHLog {
  private static final Log LOG = LogFactory.getLog(TestHLog.class);
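  // Crank up DFS, namenode and HLog logging to ALL so failures in the
  // mini-cluster scenarios below are easier to diagnose.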
  {
    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
      .getLogger().setLevel(Level.ALL);
    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)HLog.LOG).getLogger().setLevel(Level.ALL);
  }

  private static Configuration conf;
  private static FileSystem fs;
  private static Path dir;
  private static MiniDFSCluster cluster;
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Path hbaseDir;
  private static Path oldLogDir;

  @Before
  public void setUp() throws Exception {
    FileStatus[] entries = fs.listStatus(new Path("/"));
    for (FileStatus dir : entries) {
      fs.delete(dir.getPath(), true);
    }
  }

  @After
  public void tearDown() throws Exception {
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Make block sizes small.
    TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
    // needed for testAppendClose()
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
    // quicker heartbeat interval for faster DN death notification
    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.socket.timeout", 5000);
    // faster failover with the cluster.shutdown(); fs.close() idiom
    TEST_UTIL.getConfiguration().setInt("ipc.client.connect.max.retries", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.client.block.recovery.retries", 1);
    TEST_UTIL.getConfiguration().setInt("ipc.client.connection.maxidletime", 500);
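    // Register a sample WAL observer so that testWALCoprocessorLoaded below can
    // verify that WAL coprocessors are picked up from the configuration.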
    TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
        SampleRegionWALObserver.class.getName());
    TEST_UTIL.startMiniDFSCluster(3);

    conf = TEST_UTIL.getConfiguration();
    cluster = TEST_UTIL.getDFSCluster();
    fs = cluster.getFileSystem();

    hbaseDir = TEST_UTIL.createRootDir();
    oldLogDir = new Path(hbaseDir, ".oldlogs");
    dir = new Path(hbaseDir, getName());
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  private static String getName() {
    return "TestHLog";
  }

  /**
   * Test that with three concurrent threads we still write edits in
   * sequence-id order.
   * @throws Exception
   */
  @Test
  public void testMaintainOrderWithConcurrentWrites() throws Exception {
    // Run the HLogPerformanceEvaluation tool with three threads writing 3000
    // edits each concurrently.  When done, verify that all edits were written
    // and that the edits in each WAL appear in ascending sequence-id order.
    int errCode = HLogPerformanceEvaluation.innerMain(
      new String [] {"-threads", "3", "-verify", "-iterations", "3000"});
    assertEquals(0, errCode);
  }

  /**
   * Just write multiple logs then split.  Before fix for HADOOP-2283, this
   * would fail.
   * @throws IOException
   */
  @Test
  public void testSplit() throws IOException {
    final byte [] tableName = Bytes.toBytes(getName());
    final byte [] rowName = tableName;
    Path logdir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
    HLog log = new HLog(fs, logdir, oldLogDir, conf);
    final int howmany = 3;
    HRegionInfo[] infos = new HRegionInfo[3];
    Path tabledir = new Path(hbaseDir, getName());
    fs.mkdirs(tabledir);
    for (int i = 0; i < howmany; i++) {
      infos[i] = new HRegionInfo(tableName,
                Bytes.toBytes("" + i), Bytes.toBytes("" + (i + 1)), false);
      fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
      LOG.info("allo " + new Path(tabledir, infos[i].getEncodedName()).toString());
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("column"));

    // Add edits for three regions.
    try {
      for (int ii = 0; ii < howmany; ii++) {
        for (int i = 0; i < howmany; i++) {
          for (int j = 0; j < howmany; j++) {
            WALEdit edit = new WALEdit();
            byte [] family = Bytes.toBytes("column");
            byte [] qualifier = Bytes.toBytes(Integer.toString(j));
            byte [] column = Bytes.toBytes("column:" + Integer.toString(j));
            edit.add(new KeyValue(rowName, family, qualifier,
                System.currentTimeMillis(), column));
            LOG.info("Region " + i + ": " + edit);
            log.append(infos[i], tableName, edit,
              System.currentTimeMillis(), htd);
          }
        }
        log.rollWriter();
      }
      log.close();
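      // Split the logs the way the master would after a region server crash.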
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, logdir, oldLogDir, fs);
      List<Path> splits = logSplitter.splitLog();
      verifySplits(splits, howmany);
      log = null;
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
    }
  }

  /**
   * Test new HDFS-265 sync.
   * @throws Exception
   */
  @Test
  public void Broken_testSync() throws Exception {
    byte [] bytes = Bytes.toBytes(getName());
    // First verify that hflush/sync on a raw FSDataOutputStream makes the data
    // visible to a reader.
    Path p = new Path(dir, getName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(bytes);
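    // Find the Syncable flush method reflectively: newer Hadoop versions expose
    // hflush(), older releases only provide sync().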
    Method syncMethod = null;
    try {
      syncMethod = out.getClass().getMethod("hflush", new Class<?> []{});
    } catch (NoSuchMethodException e) {
      try {
        syncMethod = out.getClass().getMethod("sync", new Class<?> []{});
      } catch (NoSuchMethodException ex) {
        fail("This version of Hadoop supports neither Syncable.sync() " +
            "nor Syncable.hflush().");
      }
    }
    syncMethod.invoke(out, new Object[]{});
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte [] buffer = new byte [1024];
    int read = in.read(buffer);
    assertEquals(bytes.length, read);
    out.close();
    in.close();
    Path subdir = new Path(dir, "hlogdir");
    HLog wal = new HLog(fs, subdir, oldLogDir, conf);
    final int total = 20;
    HLog.Reader reader = null;

    try {
      HRegionInfo info = new HRegionInfo(bytes,
                  null, null, false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor(bytes));

      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
        wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
      }
      // Now call sync and try reading.  Opening a Reader before syncing just
      // gives an EOFException.
      wal.sync();
      // Open a Reader.
      Path walPath = wal.computeFilename();
      reader = HLog.getReader(fs, walPath, conf);
      int count = 0;
      HLog.Entry entry = new HLog.Entry();
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total, count);
      reader.close();
      // Check that opening a Reader also works on a file that has had a sync
      // done on it.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
        wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
      }
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertTrue(count >= total);
      reader.close();
      // After a sync we should see double the edits.
      wal.sync();
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 2, count);
      // Now make sure things still work when we cross a block boundary,
      // especially that the file reports a good length.
      final byte [] value = new byte[1025 * 1024];  // A value just over 1MB.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), bytes, value));
        wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
      }
      // Now I should have written out lots of blocks.  Sync then read.
      wal.sync();
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
      // Close the WAL and ensure that a Reader of the closed file also gets
      // the right length.
      wal.close();
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
    } finally {
      if (wal != null) wal.closeAndDelete();
      if (reader != null) reader.close();
    }
  }

  /**
   * Test the findMemstoresWithEditsEqualOrOlderThan method.
   * @throws IOException
   */
  @Test
  public void testFindMemstoresWithEditsEqualOrOlderThan() throws IOException {
    Map<byte [], Long> regionsToSeqids = new HashMap<byte [], Long>();
    for (int i = 0; i < 10; i++) {
      Long l = Long.valueOf(i);
      regionsToSeqids.put(l.toString().getBytes(), l);
    }
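    // With a threshold of 1, only the regions whose tracked sequence id is <= 1
    // (regions "0" and "1") should be returned.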
    byte [][] regions =
      HLog.findMemstoresWithEditsEqualOrOlderThan(1, regionsToSeqids);
    assertEquals(2, regions.length);
    assertTrue(Bytes.equals(regions[0], "0".getBytes()) ||
        Bytes.equals(regions[0], "1".getBytes()));
    regions = HLog.findMemstoresWithEditsEqualOrOlderThan(3, regionsToSeqids);
    int count = 4;
    assertEquals(count, regions.length);
    // Regions returned are not ordered.
    for (int i = 0; i < count; i++) {
      assertTrue(Bytes.equals(regions[i], "0".getBytes()) ||
        Bytes.equals(regions[i], "1".getBytes()) ||
        Bytes.equals(regions[i], "2".getBytes()) ||
        Bytes.equals(regions[i], "3".getBytes()));
    }
  }

  private void verifySplits(List<Path> splits, final int howmany)
  throws IOException {
    assertEquals(howmany, splits.size());
    for (int i = 0; i < splits.size(); i++) {
      LOG.info("Verifying=" + splits.get(i));
      HLog.Reader reader = HLog.getReader(fs, splits.get(i), conf);
      try {
        int count = 0;
        String previousRegion = null;
        long seqno = -1;
        HLog.Entry entry = new HLog.Entry();
        while ((entry = reader.next(entry)) != null) {
          HLogKey key = entry.getKey();
          String region = Bytes.toString(key.getEncodedRegionName());
          // Assert that all edits are for the same region.
          if (previousRegion != null) {
            assertEquals(previousRegion, region);
          }
          LOG.info("oldseqno=" + seqno + ", newseqno=" + key.getLogSeqNum());
          assertTrue(seqno < key.getLogSeqNum());
          seqno = key.getLogSeqNum();
          previousRegion = region;
          count++;
        }
        assertEquals(howmany * howmany, count);
      } finally {
        reader.close();
      }
    }
  }

  /*
   * We pass different values to recoverFileLease() so that different code paths
   * are covered.
   *
   * For this test to pass, the following are required:
   * 1. HDFS-200 (append support)
   * 2. HDFS-988 (SafeMode should freeze file operations
   *              [FSNamesystem.nextGenerationStampForBlock])
   * 3. HDFS-142 (on restart, maintain pendingCreates)
   */
  @Test
  public void testAppendClose() throws Exception {
    testAppendClose(true);
    testAppendClose(false);
  }

  /*
   * @param triggerDirectAppend whether to trigger direct call of fs.append()
   */
  public void testAppendClose(final boolean triggerDirectAppend) throws Exception {
    byte [] tableName = Bytes.toBytes(getName());
    HRegionInfo regioninfo = new HRegionInfo(tableName,
             HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
    Path subdir = new Path(dir, "hlogdir" + triggerDirectAppend);
    Path archdir = new Path(dir, "hlogdir_archive");
    HLog wal = new HLog(fs, subdir, archdir, conf);
    final int total = 20;

    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor(tableName));

    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName));
      wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd);
    }
    // Now call sync to send the data to HDFS datanodes
    wal.sync();
    int namenodePort = cluster.getNameNodePort();
    final Path walPath = wal.computeFilename();

    // Stop the cluster.  (ensure restart since we're sharing MiniDFSCluster)
    try {
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
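      // Enter safe mode so the namenode freezes file operations (see HDFS-988 in
      // the comment above), then shut the cluster down with the WAL still open.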
      dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
      cluster.shutdown();
      try {
        // wal.writer.close() will throw an exception,
        // but still call this since it closes the LogSyncer thread first
        wal.close();
      } catch (IOException e) {
        LOG.info(e);
      }
      fs.close(); // closing FS last so DFSOutputStream can't call close
      LOG.info("STOPPED first instance of the cluster");
    } finally {
      // Restart the cluster
      while (cluster.isClusterUp()) {
        LOG.error("Waiting for cluster to go down");
        Thread.sleep(1000);
      }

      // Workaround a strange issue with Hadoop's RPC system - if we don't
      // sleep here, the new datanodes will pick up a cached IPC connection to
      // the old (dead) NN and fail to start. Sleeping 2 seconds goes past
      // the idle time threshold configured in the conf above
      Thread.sleep(2000);

      cluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
      TEST_UTIL.setDFSCluster(cluster);
      cluster.waitActive();
      fs = cluster.getFileSystem();
      LOG.info("START second instance.");
    }

    // set the lease period to be 1 second so that the
    // namenode triggers lease recovery upon append request
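    // setLeasePeriod is reached via reflection since it may not be publicly
    // accessible in the Hadoop version under test.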
    Method setLeasePeriod = cluster.getClass()
      .getDeclaredMethod("setLeasePeriod", new Class[]{Long.TYPE, Long.TYPE});
    setLeasePeriod.setAccessible(true);
    setLeasePeriod.invoke(cluster,
                          new Object[]{new Long(1000), new Long(1000)});
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      LOG.info(e);
    }

    // Now try recovering the log, like the HMaster would do
    final FileSystem recoveredFs = fs;
    final Configuration rlConf = conf;

    class RecoverLogThread extends Thread {
      public Exception exception = null;
      public void run() {
        try {
          rlConf.setBoolean(FSHDFSUtils.TEST_TRIGGER_DFS_APPEND, triggerDirectAppend);
          FSUtils.getInstance(fs, rlConf)
            .recoverFileLease(recoveredFs, walPath, rlConf);
        } catch (IOException e) {
          exception = e;
        }
      }
    }

    RecoverLogThread t = new RecoverLogThread();
    t.start();
    // Timeout after 60 sec. Without correct patches, would be an infinite loop
    t.join(60 * 1000);
    if (t.isAlive()) {
      t.interrupt();
      throw new Exception("Timed out waiting for HLog.recoverLog()");
    }

    if (t.exception != null)
      throw t.exception;

    // Make sure you can read all the content
    HLog.Reader reader = HLog.getReader(fs, walPath, conf);
    int count = 0;
    HLog.Entry entry = new HLog.Entry();
    while (reader.next(entry) != null) {
      count++;
      assertTrue("Should be one KeyValue per WALEdit",
                 entry.getEdit().getKeyValues().size() == 1);
    }
    assertEquals(total, count);
    reader.close();

    // Reset the lease period
    setLeasePeriod.invoke(cluster, new Object[]{new Long(60000), new Long(3600000)});
  }

  /**
   * Tests that we can write out an edit, close, and then read it back in again.
   * @throws IOException
   */
  @Test
  public void testEditAdd() throws IOException {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog.Reader reader = null;
    HLog log = null;
    try {
      log = new HLog(fs, dir, oldLogDir, conf);
      // Write columns named 0, 1, 2, ... and single-byte values '0', '1', '2', ...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo info = new HRegionInfo(tableName,
        row, Bytes.toBytes(Bytes.toString(row) + "1"), false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      log.append(info, tableName, cols, System.currentTimeMillis(), htd);
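      // Simulate a memstore flush: completeCacheFlush appends the special
      // "complete cache flush" meta edit that the second read loop below checks for.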
      long logSeqId = log.startCacheFlush(info.getEncodedNameAsBytes());
      log.completeCacheFlush(info.getEncodedNameAsBytes(), tableName, logSeqId,
          info.isMetaRegion());
      log.close();
      Path filename = log.computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLog.getReader(fs, filename, conf);
      // Above we added all columns on a single row so we only read one
      // entry in the loop below... that's why the bound is '1'.
      for (int i = 0; i < 1; i++) {
        HLog.Entry entry = reader.next(null);
        if (entry == null) break;
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(row, kv.getRow()));
        assertEquals((byte)(i + '0'), kv.getValue()[0]);
        System.out.println(key + " " + val);
      }
      HLog.Entry entry = null;
      while ((entry = reader.next(null)) != null) {
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        // Assert only one more row... the meta flushed row.
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(HLog.METAROW, kv.getRow()));
        assertTrue(Bytes.equals(HLog.METAFAMILY, kv.getFamily()));
        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
          val.getKeyValues().get(0).getValue()));
        System.out.println(key + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Tests that a single append of many columns can be read back as one entry.
   * @throws IOException
   */
  @Test
  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      // Write columns named 0, 1, 2, ... and single-byte values '0', '1', '2', ...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      long logSeqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, logSeqId, false);
      log.close();
      Path filename = log.computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLog.getReader(fs, filename, conf);
      HLog.Entry entry = reader.next();
      assertEquals(COL_COUNT, entry.getEdit().size());
      int idx = 0;
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte)(idx + '0'), val.getValue()[0]);
        System.out.println(entry.getKey() + " " + val);
        idx++;
      }

      // Get the next entry... the meta flushed row.
      entry = reader.next();
      assertEquals(1, entry.getEdit().size());
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
        assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
        assertTrue(Bytes.equals(HLog.METAFAMILY, val.getFamily()));
        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
          val.getValue()));
        System.out.println(entry.getKey() + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Test that registered WALActionsListeners are called for each appended entry,
   * and are no longer called once unregistered.
   * @throws Exception
   */
  @Test
  public void testVisitors() throws Exception {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      DumbWALActionsListener visitor = new DumbWALActionsListener();
      log.registerWALActionsListener(visitor);
      long timestamp = System.currentTimeMillis();
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      for (int i = 0; i < COL_COUNT; i++) {
        WALEdit cols = new WALEdit();
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[]{(byte) (i + '0')}));
        log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      }
      assertEquals(COL_COUNT, visitor.increments);
      log.unregisterWALActionsListener(visitor);
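      // Once the listener is unregistered, further appends must not reach it.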
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(11)),
          timestamp, new byte[]{(byte) (11 + '0')}));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      assertEquals(COL_COUNT, visitor.increments);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

  @Test
  public void testLogCleaning() throws Exception {
    LOG.info("testLogCleaning");
    final byte [] tableName = Bytes.toBytes("testLogCleaning");
    final byte [] tableName2 = Bytes.toBytes("testLogCleaning2");

    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HRegionInfo hri2 = new HRegionInfo(tableName2,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

      // Add a single edit and make sure that rolling won't remove the file
      // Before HBASE-3198 it used to delete it
      addEdits(log, hri, tableName, 1);
      log.rollWriter();
      assertEquals(1, log.getNumLogFiles());

      // See if there's anything wrong with more than 1 edit
      addEdits(log, hri, tableName, 2);
      log.rollWriter();
      assertEquals(2, log.getNumLogFiles());

      // Now mix edits from 2 regions, still no flushing
      addEdits(log, hri, tableName, 1);
      addEdits(log, hri2, tableName2, 1);
      addEdits(log, hri, tableName, 1);
      addEdits(log, hri2, tableName2, 1);
      log.rollWriter();
      assertEquals(3, log.getNumLogFiles());

      // Flush the first region, we expect to see the first two files getting
      // archived
      long seqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, seqId, false);
      log.rollWriter();
      assertEquals(2, log.getNumLogFiles());

      // Flush the second region, which removes all the remaining output files
      // since the oldest was completely flushed and the two others only contain
      // flush information
      seqId = log.startCacheFlush(hri2.getEncodedNameAsBytes());
      log.completeCacheFlush(hri2.getEncodedNameAsBytes(), tableName2, seqId, false);
      log.rollWriter();
      assertEquals(0, log.getNumLogFiles());
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

  /**
   * A loaded WAL coprocessor won't break existing HLog test cases.
   */
  @Test
  public void testWALCoprocessorLoaded() throws Exception {
    // test to see whether the coprocessor is loaded or not.
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      WALCoprocessorHost host = log.getCoprocessorHost();
      Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
      assertNotNull(c);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

  private void addEdits(HLog log, HRegionInfo hri, byte [] tableName,
                        int times) throws IOException {
    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor("row"));

    final byte [] row = Bytes.toBytes("row");
    for (int i = 0; i < times; i++) {
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, row, row, timestamp, row));
      log.append(hri, tableName, cols, timestamp, htd);
    }
  }

  static class DumbWALActionsListener implements WALActionsListener {
    int increments = 0;

    @Override
    public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
                                         WALEdit logEdit) {
      increments++;
    }

    @Override
    public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
      increments++;
    }

    @Override
    public void preLogRoll(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void postLogRoll(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void preLogArchive(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void postLogArchive(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void logRollRequested() {
      // not interested
    }

    @Override
    public void logCloseRequested() {
      // not interested
    }
  }

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}