/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.lang.reflect.Method;
import java.net.BindException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/** JUnit test case for HLog */
@Category(LargeTests.class)
public class TestHLog {
  private static final Log LOG = LogFactory.getLog(TestHLog.class);
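  // Crank HDFS and WAL logging up to ALL so failures around sync and lease
  // recovery in these tests are easier to diagnose from the output.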
  {
    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
      .getLogger().setLevel(Level.ALL);
    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)HLog.LOG).getLogger().setLevel(Level.ALL);
  }

  private static Configuration conf;
  private static FileSystem fs;
  private static Path dir;
  private static MiniDFSCluster cluster;
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Path hbaseDir;
  private static Path oldLogDir;

  @Before
  public void setUp() throws Exception {
    // Wipe everything under the DFS root so each test starts from an empty filesystem.
    FileStatus[] entries = fs.listStatus(new Path("/"));
    for (FileStatus dir : entries) {
      fs.delete(dir.getPath(), true);
    }
  }

  @After
  public void tearDown() throws Exception {
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Make block sizes small.
    TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
    // needed for testAppendClose()
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
    // quicker heartbeat interval for faster DN death notification
    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.socket.timeout", 5000);
    // faster failover with cluster.shutdown();fs.close() idiom
    TEST_UTIL.getConfiguration()
        .setInt("ipc.client.connect.max.retries", 1);
    TEST_UTIL.getConfiguration().setInt(
        "dfs.client.block.recovery.retries", 1);
    TEST_UTIL.getConfiguration().setInt(
      "ipc.client.connection.maxidletime", 500);
    TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
        SampleRegionWALObserver.class.getName());
    TEST_UTIL.startMiniDFSCluster(3);

    conf = TEST_UTIL.getConfiguration();
    cluster = TEST_UTIL.getDFSCluster();
    fs = cluster.getFileSystem();

    hbaseDir = TEST_UTIL.createRootDir();
    oldLogDir = new Path(hbaseDir, ".oldlogs");
    dir = new Path(hbaseDir, getName());
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  private static String getName() {
    return "TestHLog";
  }

  /**
   * Test that edits are written in ascending sequence id order even when
   * three threads append to the log concurrently.
   * @throws Exception
   */
  @Test
  public void testMaintainOrderWithConcurrentWrites() throws Exception {
    // Run the HPE tool with three threads writing 3000 edits each concurrently.
    // When done, verify that all edits were written and that the order in the
    // WALs is of ascending edit sequence ids.
    int errCode =
      HLogPerformanceEvaluation.innerMain(new String [] {"-threads", "3", "-verify", "-iterations", "3000"});
    assertEquals(0, errCode);
  }

  /**
   * Just write multiple logs then split.  Before fix for HADOOP-2283, this
   * would fail.
   * @throws IOException
   */
  @Test
  public void testSplit() throws IOException {
    final byte [] tableName = Bytes.toBytes(getName());
    final byte [] rowName = tableName;
    Path logdir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
    HLog log = new HLog(fs, logdir, oldLogDir, conf);
    final int howmany = 3;
    HRegionInfo[] infos = new HRegionInfo[howmany];
    Path tabledir = new Path(hbaseDir, getName());
    fs.mkdirs(tabledir);
    for (int i = 0; i < howmany; i++) {
      infos[i] = new HRegionInfo(tableName,
                Bytes.toBytes("" + i), Bytes.toBytes("" + (i+1)), false);
      fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
      LOG.info("Created region directory " + new Path(tabledir, infos[i].getEncodedName()).toString());
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("column"));

    // Add edits for three regions.
    try {
      for (int ii = 0; ii < howmany; ii++) {
        for (int i = 0; i < howmany; i++) {
          for (int j = 0; j < howmany; j++) {
            WALEdit edit = new WALEdit();
            byte [] family = Bytes.toBytes("column");
            byte [] qualifier = Bytes.toBytes(Integer.toString(j));
            byte [] column = Bytes.toBytes("column:" + Integer.toString(j));
            edit.add(new KeyValue(rowName, family, qualifier,
                System.currentTimeMillis(), column));
            LOG.info("Region " + i + ": " + edit);
            log.append(infos[i], tableName, edit,
              System.currentTimeMillis(), htd);
          }
        }
        log.rollWriter();
      }
      log.close();
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, logdir, oldLogDir, fs);
      List<Path> splits =
        logSplitter.splitLog();
      verifySplits(splits, howmany);
      log = null;
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
    }
  }

  /**
   * Test new HDFS-265 sync.
   * @throws Exception
   */
  @Test
  public void Broken_testSync() throws Exception {
    byte [] bytes = Bytes.toBytes(getName());
    // First verify that using streams all works.
    Path p = new Path(dir, getName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(bytes);
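    // Newer Hadoop exposes Syncable.hflush() while older append-capable
    // versions only have sync(), so locate whichever one exists via reflection.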
    Method syncMethod = null;
    try {
      syncMethod = out.getClass().getMethod("hflush", new Class<?> []{});
    } catch (NoSuchMethodException e) {
      try {
        syncMethod = out.getClass().getMethod("sync", new Class<?> []{});
      } catch (NoSuchMethodException ex) {
        fail("This version of Hadoop supports neither Syncable.sync() " +
            "nor Syncable.hflush().");
      }
    }
    syncMethod.invoke(out, new Object[]{});
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte [] buffer = new byte [1024];
    int read = in.read(buffer);
    assertEquals(bytes.length, read);
    out.close();
    in.close();
    Path subdir = new Path(dir, "hlogdir");
    HLog wal = new HLog(fs, subdir, oldLogDir, conf);
    final int total = 20;
    HLog.Reader reader = null;

    try {
      HRegionInfo info = new HRegionInfo(bytes,
                  null, null, false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor(bytes));

      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
        wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
      }
      // Now call sync and try reading.  Opening a Reader before you sync just
      // gives you an EOFException.
      wal.sync();
      // Open a Reader.
      Path walPath = wal.computeFilename();
      reader = HLog.getReader(fs, walPath, conf);
      int count = 0;
      HLog.Entry entry = new HLog.Entry();
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total, count);
      reader.close();
      // Check that opening a Reader works on a file that has had a sync
      // done on it.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
        wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
      }
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while((entry = reader.next(entry)) != null) count++;
      assertTrue(count >= total);
      reader.close();
      // If I sync, should see double the edits.
      wal.sync();
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 2, count);
      // Now do a test that ensures stuff works when we go over block boundary,
      // especially that the file reports a correct length.
      final byte [] value = new byte[1025 * 1024];  // A value just over the 1MB block size.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), bytes, value));
        wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
      }
      // Now I should have written out lots of blocks.  Sync then read.
      wal.sync();
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
      // Close the log and ensure the Reader still gets the right length on the closed file.
      wal.close();
      reader = HLog.getReader(fs, walPath, conf);
      count = 0;
      while((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
    } finally {
      if (wal != null) wal.closeAndDelete();
      if (reader != null) reader.close();
    }
  }

  /**
   * Test the findMemstoresWithEditsEqualOrOlderThan method.
   * @throws IOException
   */
  @Test
  public void testFindMemstoresWithEditsEqualOrOlderThan() throws IOException {
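    // Map each region name to its last edit sequence id ("0" -> 0 through "9" -> 9),
    // then check that only regions at or below the queried sequence id come back.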
    Map<byte [], Long> regionsToSeqids = new HashMap<byte [], Long>();
    for (int i = 0; i < 10; i++) {
      Long l = Long.valueOf(i);
      regionsToSeqids.put(l.toString().getBytes(), l);
    }
    byte [][] regions =
      HLog.findMemstoresWithEditsEqualOrOlderThan(1, regionsToSeqids);
    assertEquals(2, regions.length);
    assertTrue(Bytes.equals(regions[0], "0".getBytes()) ||
        Bytes.equals(regions[0], "1".getBytes()));
    regions = HLog.findMemstoresWithEditsEqualOrOlderThan(3, regionsToSeqids);
    int count = 4;
    assertEquals(count, regions.length);
    // Regions returned are not ordered.
    for (int i = 0; i < count; i++) {
      assertTrue(Bytes.equals(regions[i], "0".getBytes()) ||
        Bytes.equals(regions[i], "1".getBytes()) ||
        Bytes.equals(regions[i], "2".getBytes()) ||
        Bytes.equals(regions[i], "3".getBytes()));
    }
  }

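  /**
   * Assert that we got one split file per region, that each file holds edits
   * for a single region only, and that sequence ids within a file are
   * strictly ascending.
   */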
  private void verifySplits(List<Path> splits, final int howmany)
  throws IOException {
    assertEquals(howmany, splits.size());
    for (int i = 0; i < splits.size(); i++) {
      LOG.info("Verifying=" + splits.get(i));
      HLog.Reader reader = HLog.getReader(fs, splits.get(i), conf);
      try {
        int count = 0;
        String previousRegion = null;
        long seqno = -1;
        HLog.Entry entry = new HLog.Entry();
        while((entry = reader.next(entry)) != null) {
          HLogKey key = entry.getKey();
          String region = Bytes.toString(key.getEncodedRegionName());
          // Assert that all edits are for same region.
          if (previousRegion != null) {
            assertEquals(previousRegion, region);
          }
          LOG.info("oldseqno=" + seqno + ", newseqno=" + key.getLogSeqNum());
          assertTrue(seqno < key.getLogSeqNum());
          seqno = key.getLogSeqNum();
          previousRegion = region;
          count++;
        }
        assertEquals(howmany * howmany, count);
      } finally {
        reader.close();
      }
    }
  }

  /*
   * We pass different values to recoverFileLease() so that different code paths are covered.
   *
   * For this test to pass, the underlying HDFS requires:
   * 1. HDFS-200 (append support)
   * 2. HDFS-988 (SafeMode should freeze file operations
   *              [FSNamesystem.nextGenerationStampForBlock])
   * 3. HDFS-142 (on restart, maintain pendingCreates)
   */
  @Test
  public void testAppendClose() throws Exception {
    byte [] tableName = Bytes.toBytes(getName());
    HRegionInfo regioninfo = new HRegionInfo(tableName,
             HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
    Path subdir = new Path(dir, "hlogdir");
    Path archdir = new Path(dir, "hlogdir_archive");
    HLog wal = new HLog(fs, subdir, archdir, conf);
    final int total = 20;

    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor(tableName));

    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName));
      wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd);
    }
    // Now call sync to send the data to HDFS datanodes
    wal.sync();
    int namenodePort = cluster.getNameNodePort();
    final Path walPath = wal.computeFilename();

    // Stop the cluster.  (ensure restart since we're sharing MiniDFSCluster)
    try {
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
      dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
      cluster.shutdown();
      try {
        // wal.writer.close() will throw an exception,
        // but still call this since it closes the LogSyncer thread first
        wal.close();
      } catch (IOException e) {
        LOG.info(e);
      }
      fs.close(); // closing FS last so DFSOutputStream can't call close
      LOG.info("STOPPED first instance of the cluster");
    } finally {
      // Restart the cluster
      while (cluster.isClusterUp()) {
        LOG.error("Waiting for cluster to go down");
        Thread.sleep(1000);
      }

      // Workaround a strange issue with Hadoop's RPC system - if we don't
      // sleep here, the new datanodes will pick up a cached IPC connection to
      // the old (dead) NN and fail to start. Sleeping 2 seconds goes past
      // the idle time threshold configured in the conf above
      Thread.sleep(2000);

      cluster = null;
      // retry a few times if the port is not freed, yet.
      for (int i = 0; i < 30; i++) {
        try {
          cluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
          break;
        } catch (BindException e) {
          LOG.info("Sleeping.  BindException bringing up new cluster");
          Thread.sleep(1000);
        }
      }
      TEST_UTIL.setDFSCluster(cluster);
      cluster.waitActive();
      fs = cluster.getFileSystem();
      LOG.info("START second instance.");
    }

    // set the lease period to be 1 second so that the
    // namenode triggers lease recovery upon append request
    Method setLeasePeriod = cluster.getClass()
      .getDeclaredMethod("setLeasePeriod", new Class[]{Long.TYPE, Long.TYPE});
    setLeasePeriod.setAccessible(true);
    setLeasePeriod.invoke(cluster, 1000L, 1000L);
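    // Give the shortened lease a moment to expire before we attempt recovery.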
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      LOG.info(e);
    }

    // Now try recovering the log, like the HMaster would do
    final FileSystem recoveredFs = fs;
    final Configuration rlConf = conf;

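    // Lease recovery can hang forever on a broken HDFS, so run it in a thread
    // we can join with a timeout instead of blocking the whole test.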
    class RecoverLogThread extends Thread {
      public Exception exception = null;
      public void run() {
        try {
          FSUtils.getInstance(fs, rlConf)
            .recoverFileLease(recoveredFs, walPath, rlConf);
        } catch (IOException e) {
          exception = e;
        }
      }
    }

    RecoverLogThread t = new RecoverLogThread();
    t.start();
    // Timeout after 60 sec. Without correct patches, would be an infinite loop
    t.join(60 * 1000);
    if (t.isAlive()) {
      t.interrupt();
      throw new Exception("Timed out waiting for HLog.recoverLog()");
    }

    if (t.exception != null)
      throw t.exception;

    // Make sure you can read all the content
    HLog.Reader reader = HLog.getReader(fs, walPath, conf);
    int count = 0;
    HLog.Entry entry = new HLog.Entry();
    while (reader.next(entry) != null) {
      count++;
      assertTrue("Should be one KeyValue per WALEdit",
                 entry.getEdit().getKeyValues().size() == 1);
    }
    assertEquals(total, count);
    reader.close();

    // Reset the lease period
    setLeasePeriod.invoke(cluster, 60000L, 3600000L);
  }

  /**
   * Tests that we can write out an edit, close, and then read it back in again.
   * @throws IOException
   */
  @Test
  public void testEditAdd() throws IOException {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog.Reader reader = null;
    HLog log = null;
    try {
      log = new HLog(fs, dir, oldLogDir, conf);
      // Write columns named 1, 2, 3, etc. and then values of single byte
      // 1, 2, 3...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo info = new HRegionInfo(tableName,
        row, Bytes.toBytes(Bytes.toString(row) + "1"), false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      log.append(info, tableName, cols, System.currentTimeMillis(), htd);
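      // Fake a memstore flush; completeCacheFlush appends the COMPLETE_CACHE_FLUSH
      // meta edit that the second read loop below checks for.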
      long logSeqId = log.startCacheFlush(info.getEncodedNameAsBytes());
      log.completeCacheFlush(info.getEncodedNameAsBytes(), tableName, logSeqId,
          info.isMetaRegion());
      log.close();
      Path filename = log.computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLog.getReader(fs, filename, conf);
      // Above we added all columns on a single row so we only read one
      // entry in the loop below... that's why we have '1'.
      for (int i = 0; i < 1; i++) {
        HLog.Entry entry = reader.next(null);
        if (entry == null) break;
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(row, kv.getRow()));
        assertEquals((byte)(i + '0'), kv.getValue()[0]);
        System.out.println(key + " " + val);
      }
      HLog.Entry entry = null;
      while ((entry = reader.next(null)) != null) {
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        // Assert only one more row... the meta flushed row.
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(HLog.METAROW, kv.getRow()));
        assertTrue(Bytes.equals(HLog.METAFAMILY, kv.getFamily()));
        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
          val.getKeyValues().get(0).getValue()));
        System.out.println(key + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Tests that a multi-column edit is appended as a single WAL entry and can
   * be read back, along with the cache-flush meta entry.
   * @throws IOException
   */
  @Test
  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      // Write columns named 1, 2, 3, etc. and then values of single byte
      // 1, 2, 3...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      long logSeqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, logSeqId, false);
      log.close();
      Path filename = log.computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLog.getReader(fs, filename, conf);
      HLog.Entry entry = reader.next();
      assertEquals(COL_COUNT, entry.getEdit().size());
      int idx = 0;
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte)(idx + '0'), val.getValue()[0]);
        System.out.println(entry.getKey() + " " + val);
        idx++;
      }

      // Get next row... the meta flushed row.
      entry = reader.next();
      assertEquals(1, entry.getEdit().size());
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
        assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
        assertTrue(Bytes.equals(HLog.METAFAMILY, val.getFamily()));
        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
          val.getValue()));
        System.out.println(entry.getKey() + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Test that we can visit entries before they are appended.
   * @throws Exception
   */
  @Test
  public void testVisitors() throws Exception {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      DumbWALActionsListener visitor = new DumbWALActionsListener();
      log.registerWALActionsListener(visitor);
      long timestamp = System.currentTimeMillis();
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      for (int i = 0; i < COL_COUNT; i++) {
        WALEdit cols = new WALEdit();
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[]{(byte) (i + '0')}));
        log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      }
      assertEquals(COL_COUNT, visitor.increments);
      log.unregisterWALActionsListener(visitor);
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(11)),
          timestamp, new byte[]{(byte) (11 + '0')}));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      // The unregistered listener must not see the extra append.
      assertEquals(COL_COUNT, visitor.increments);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

  @Test
  public void testLogCleaning() throws Exception {
    LOG.info("testLogCleaning");
    final byte [] tableName = Bytes.toBytes("testLogCleaning");
    final byte [] tableName2 = Bytes.toBytes("testLogCleaning2");

    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HRegionInfo hri2 = new HRegionInfo(tableName2,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

      // Add a single edit and make sure that rolling won't remove the file
      // Before HBASE-3198 it used to delete it
      addEdits(log, hri, tableName, 1);
      log.rollWriter();
      assertEquals(2, log.getNumLogFiles());

      // See if there's anything wrong with more than 1 edit
      addEdits(log, hri, tableName, 2);
      log.rollWriter();
      assertEquals(3, log.getNumLogFiles());

      // Now mix edits from 2 regions, still no flushing
      addEdits(log, hri, tableName, 1);
      addEdits(log, hri2, tableName2, 1);
      addEdits(log, hri, tableName, 1);
      addEdits(log, hri2, tableName2, 1);
      log.rollWriter();
      assertEquals(4, log.getNumLogFiles());

      // Flush the first region; we expect to see the first two files getting
      // archived
      long seqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, seqId, false);
      log.rollWriter();
      assertEquals(3, log.getNumLogFiles());

      // Flush the second region, which removes all the remaining output files
      // since the oldest was completely flushed and the two others only contain
      // flush information
      seqId = log.startCacheFlush(hri2.getEncodedNameAsBytes());
      log.completeCacheFlush(hri2.getEncodedNameAsBytes(), tableName2, seqId, false);
      log.rollWriter();
      assertEquals(1, log.getNumLogFiles());
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

  /**
   * A loaded WAL coprocessor won't break existing HLog test cases.
   */
  @Test
  public void testWALCoprocessorLoaded() throws Exception {
    // test to see whether the coprocessor is loaded or not.
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      WALCoprocessorHost host = log.getCoprocessorHost();
      Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
      assertNotNull(c);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

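  /**
   * Appends {@code times} single-KeyValue edits for the given region, each
   * with a fresh timestamp.
   */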
  private void addEdits(HLog log, HRegionInfo hri, byte [] tableName,
                        int times) throws IOException {
    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor("row"));

    final byte [] row = Bytes.toBytes("row");
    for (int i = 0; i < times; i++) {
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, row, row, timestamp, row));
      log.append(hri, tableName, cols, timestamp, htd);
    }
  }

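  /**
   * Minimal listener that counts how many edits it is shown before they are
   * written; every other callback is a no-op.
   */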
  static class DumbWALActionsListener implements WALActionsListener {
    int increments = 0;

    @Override
    public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
                                         WALEdit logEdit) {
      increments++;
    }

    @Override
    public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
      increments++;
    }

    @Override
    public void preLogRoll(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void postLogRoll(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void preLogArchive(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void postLogArchive(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void logRollRequested() {
      // not interested
    }

    @Override
    public void logCloseRequested() {
      // not interested
    }
  }

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}