/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.*;

import java.io.IOException;
import java.lang.reflect.Method;
import java.net.BindException;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/** JUnit test case for HLog */
@Category(LargeTests.class)
@SuppressWarnings("deprecation")
public class TestHLog {
  private static final Log LOG = LogFactory.getLog(TestHLog.class);
  {
    // Uncomment the following lines if more verbosity is needed for
    // debugging (see HBASE-12285 for details).
    //((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
    //((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
    //((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
    //    .getLogger().setLevel(Level.ALL);
    //((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
    //((Log4JLogger)HLog.LOG).getLogger().setLevel(Level.ALL);
  }

  private static Configuration conf;
  private static FileSystem fs;
  private static Path dir;
  private static MiniDFSCluster cluster;
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Path hbaseDir;
  private static Path oldLogDir;

  @Before
  public void setUp() throws Exception {
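    // Wipe the root of the mini-DFS cluster between tests so every test
    // starts against an empty filesystem.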
    FileStatus[] entries = fs.listStatus(new Path("/"));
    for (FileStatus dir : entries) {
      fs.delete(dir.getPath(), true);
    }
  }

  @After
  public void tearDown() throws Exception {
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Make block sizes small.
    TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
    // needed for testAppendClose()
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
    // quicker heartbeat interval for faster DN death notification
    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.socket.timeout", 5000);
    // faster failover with cluster.shutdown();fs.close() idiom
    TEST_UTIL.getConfiguration().setInt("ipc.client.connect.max.retries", 1);
    TEST_UTIL.getConfiguration().setInt("hbase.ipc.client.connect.max.retries", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.client.block.recovery.retries", 1);
    TEST_UTIL.getConfiguration().setInt("ipc.client.connection.maxidletime", 500);
    TEST_UTIL.getConfiguration().setInt("hbase.ipc.client.connection.maxidletime", 500);
    TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
        SampleRegionWALObserver.class.getName());
    TEST_UTIL.startMiniDFSCluster(3);

    conf = TEST_UTIL.getConfiguration();
    cluster = TEST_UTIL.getDFSCluster();
    fs = cluster.getFileSystem();

    hbaseDir = TEST_UTIL.createRootDir();
    oldLogDir = new Path(hbaseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    dir = new Path(hbaseDir, getName());
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  private static String getName() {
    return "TestHLog";
  }

  /**
   * Write to a log file with three concurrent threads and verify all data is written.
   * @throws Exception
   */
  @Test
  public void testConcurrentWrites() throws Exception {
    // Run the HPE tool with three threads writing 3000 edits each concurrently.
    // When done, verify that all edits were written.
    int errCode = HLogPerformanceEvaluation.
      innerMain(new Configuration(TEST_UTIL.getConfiguration()),
        new String [] {"-threads", "3", "-verify", "-noclosefs", "-iterations", "3000"});
    assertEquals(0, errCode);
  }

  /**
   * Just write multiple logs then split.  Before fix for HADOOP-2283, this
   * would fail.
   * @throws IOException
   */
  @Test
  public void testSplit() throws IOException {
    final TableName tableName =
        TableName.valueOf(getName());
    final byte [] rowName = tableName.getName();
    Path logdir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
    HLog log = HLogFactory.createHLog(fs, hbaseDir,
        HConstants.HREGION_LOGDIR_NAME, conf);
    final int howmany = 3;
    HRegionInfo[] infos = new HRegionInfo[howmany];
    Path tabledir = FSUtils.getTableDir(hbaseDir, tableName);
    fs.mkdirs(tabledir);
    for(int i = 0; i < howmany; i++) {
      infos[i] = new HRegionInfo(tableName,
                Bytes.toBytes("" + i), Bytes.toBytes("" + (i+1)), false);
      fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
      LOG.info("Created region directory " +
          new Path(tabledir, infos[i].getEncodedName()).toString());
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("column"));

    // Add edits for three regions.
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
      for (int ii = 0; ii < howmany; ii++) {
        for (int i = 0; i < howmany; i++) {
          for (int j = 0; j < howmany; j++) {
            WALEdit edit = new WALEdit();
            byte [] family = Bytes.toBytes("column");
            byte [] qualifier = Bytes.toBytes(Integer.toString(j));
            byte [] column = Bytes.toBytes("column:" + Integer.toString(j));
            edit.add(new KeyValue(rowName, family, qualifier,
                System.currentTimeMillis(), column));
            LOG.info("Region " + i + ": " + edit);
            log.append(infos[i], tableName, edit,
              System.currentTimeMillis(), htd, sequenceId);
          }
        }
        log.rollWriter();
      }
      log.close();
      List<Path> splits = HLogSplitter.split(
        hbaseDir, logdir, oldLogDir, fs, conf);
      verifySplits(splits, howmany);
      log = null;
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
    }
  }

  /**
   * Test new HDFS-265 sync.
   * @throws Exception
   */
  @Test
  public void Broken_testSync() throws Exception {
    TableName tableName =
        TableName.valueOf(getName());
    // First verify that using streams all works.
    Path p = new Path(dir, getName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(tableName.getName());
    Method syncMethod = null;
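    // Newer Hadoop versions expose Syncable.hflush(); older ones only have
    // Syncable.sync(). Look up whichever this Hadoop provides via reflection.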
    try {
      syncMethod = out.getClass().getMethod("hflush", new Class<?> []{});
    } catch (NoSuchMethodException e) {
      try {
        syncMethod = out.getClass().getMethod("sync", new Class<?> []{});
      } catch (NoSuchMethodException ex) {
        fail("This version of Hadoop supports neither Syncable.sync() " +
            "nor Syncable.hflush().");
      }
    }
    syncMethod.invoke(out, new Object[]{});
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte [] buffer = new byte [1024];
    int read = in.read(buffer);
    assertEquals(tableName.getName().length, read);
    out.close();
    in.close();

    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir", conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    final int total = 20;
    HLog.Reader reader = null;

    try {
      HRegionInfo info = new HRegionInfo(tableName,
                  null, null, false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor(tableName.getName()));

      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        wal.append(info, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
      }
      // Now call sync and try reading.  Opening a Reader before you sync just
      // gives you an EOFException.
      wal.sync();
      // Open a Reader.
      Path walPath = ((FSHLog) wal).computeFilename();
      reader = HLogFactory.createReader(fs, walPath, conf);
      int count = 0;
      HLog.Entry entry = new HLog.Entry();
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total, count);
      reader.close();
      // Add test that checks to see that an open of a Reader works on a file
      // that has had a sync done on it.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        wal.append(info, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
      }
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while((entry = reader.next(entry)) != null) count++;
      assertTrue(count >= total);
      reader.close();
      // If I sync, should see double the edits.
      wal.sync();
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 2, count);
      // Now do a test that ensures stuff works when we go over block boundary,
      // especially that we return good length on file.
      final byte [] value = new byte[1025 * 1024];  // Make a value just over the 1MB block size.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), value));
        wal.append(info, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
      }
      // Now I should have written out lots of blocks.  Sync then read.
      wal.sync();
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
      // Close the WAL and ensure a Reader on the closed file still sees the right length.
      wal.close();
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
    } finally {
      if (wal != null) wal.closeAndDelete();
      if (reader != null) reader.close();
    }
  }

  private void verifySplits(List<Path> splits, final int howmany)
  throws IOException {
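    // testSplit rolled 'howmany' logs, each carrying 'howmany' edits for each
    // of 'howmany' regions, so the splitter should emit howmany * howmany
    // files holding 'howmany' edits apiece.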
    assertEquals(howmany * howmany, splits.size());
    for (int i = 0; i < splits.size(); i++) {
      LOG.info("Verifying=" + splits.get(i));
      HLog.Reader reader = HLogFactory.createReader(fs, splits.get(i), conf);
      try {
        int count = 0;
        String previousRegion = null;
        long seqno = -1;
        HLog.Entry entry = new HLog.Entry();
        while((entry = reader.next(entry)) != null) {
          HLogKey key = entry.getKey();
          String region = Bytes.toString(key.getEncodedRegionName());
          // Assert that all edits are for same region.
          if (previousRegion != null) {
            assertEquals(previousRegion, region);
          }
          LOG.info("oldseqno=" + seqno + ", newseqno=" + key.getLogSeqNum());
          assertTrue(seqno < key.getLogSeqNum());
          seqno = key.getLogSeqNum();
          previousRegion = region;
          count++;
        }
        assertEquals(howmany, count);
      } finally {
        reader.close();
      }
    }
  }

  /*
   * We pass different values to recoverFileLease() so that different code paths are covered.
   *
   * For this test to pass, it requires:
   * 1. HDFS-200 (append support)
   * 2. HDFS-988 (SafeMode should freeze file operations
   *              [FSNamesystem.nextGenerationStampForBlock])
   * 3. HDFS-142 (on restart, maintain pendingCreates)
   */
  @Test (timeout=300000)
  public void testAppendClose() throws Exception {
    TableName tableName =
        TableName.valueOf(getName());
    HRegionInfo regioninfo = new HRegionInfo(tableName,
             HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);

    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir",
        "hlogdir_archive", conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    final int total = 20;

    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor(tableName.getName()));

    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
    }
    // Now call sync to send the data to HDFS datanodes
    wal.sync();
    int namenodePort = cluster.getNameNodePort();
    final Path walPath = ((FSHLog) wal).computeFilename();

    // Stop the cluster.  (ensure restart since we're sharing MiniDFSCluster)
    try {
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
      dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
      TEST_UTIL.shutdownMiniDFSCluster();
      try {
        // wal.writer.close() will throw an exception,
        // but still call this since it closes the LogSyncer thread first
        wal.close();
      } catch (IOException e) {
        LOG.info(e);
      }
      fs.close(); // closing FS last so DFSOutputStream can't call close
      LOG.info("STOPPED first instance of the cluster");
    } finally {
      // Restart the cluster
      while (cluster.isClusterUp()) {
        LOG.error("Waiting for cluster to go down");
        Thread.sleep(1000);
      }
      assertFalse(cluster.isClusterUp());
      cluster = null;
      for (int i = 0; i < 100; i++) {
        try {
          cluster = TEST_UTIL.startMiniDFSClusterForTestHLog(namenodePort);
          break;
        } catch (BindException e) {
          LOG.info("Sleeping.  BindException bringing up new cluster");
          Threads.sleep(1000);
        }
      }
      cluster.waitActive();
      fs = cluster.getFileSystem();
      LOG.info("STARTED second instance.");
    }

    // set the lease period to be 1 second so that the
    // namenode triggers lease recovery upon append request
    Method setLeasePeriod = cluster.getClass()
      .getDeclaredMethod("setLeasePeriod", new Class[]{Long.TYPE, Long.TYPE});
    setLeasePeriod.setAccessible(true);
    setLeasePeriod.invoke(cluster, 1000L, 1000L);
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      LOG.info(e);
    }

    // Now try recovering the log, like the HMaster would do
    final FileSystem recoveredFs = fs;
    final Configuration rlConf = conf;

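    // recoverFileLease() can block indefinitely when recovery never completes,
    // so run it in a thread that we can join with a timeout.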
    class RecoverLogThread extends Thread {
      public Exception exception = null;
      public void run() {
        try {
          FSUtils.getInstance(fs, rlConf)
            .recoverFileLease(recoveredFs, walPath, rlConf, null);
        } catch (IOException e) {
          exception = e;
        }
      }
    }

    RecoverLogThread t = new RecoverLogThread();
    t.start();
    // Timeout after 60 sec. Without correct patches, would be an infinite loop
    t.join(60 * 1000);
    if (t.isAlive()) {
      t.interrupt();
      throw new Exception("Timed out waiting for HLog.recoverLog()");
    }

    if (t.exception != null) {
      throw t.exception;
    }

    // Make sure you can read all the content
    HLog.Reader reader = HLogFactory.createReader(fs, walPath, conf);
    int count = 0;
    HLog.Entry entry = new HLog.Entry();
    while (reader.next(entry) != null) {
      count++;
      assertEquals("Should be one KeyValue per WALEdit",
          1, entry.getEdit().getKeyValues().size());
    }
    assertEquals(total, count);
    reader.close();

    // Reset the lease period
    setLeasePeriod.invoke(cluster, 60000L, 3600000L);
  }

  /**
   * Tests that we can write out an edit, close, and then read it back in again.
   * @throws IOException
   */
  @Test
  public void testEditAdd() throws IOException {
    final int COL_COUNT = 10;
    final TableName tableName =
        TableName.valueOf("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog.Reader reader = null;
    HLog log = null;
    try {
      log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
      final AtomicLong sequenceId = new AtomicLong(1);

      // Write columns named 1, 2, 3, etc. and then values of single byte
      // 1, 2, 3...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo info = new HRegionInfo(tableName,
        row, Bytes.toBytes(Bytes.toString(row) + "1"), false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      log.append(info, tableName, cols, System.currentTimeMillis(), htd, sequenceId);
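      // Mark the region as flushed so the WAL considers its edits persisted
      // before we close the log.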
      log.startCacheFlush(info.getEncodedNameAsBytes());
      log.completeCacheFlush(info.getEncodedNameAsBytes());
      log.close();
      Path filename = ((FSHLog) log).computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLogFactory.createReader(fs, filename, conf);
      // Above we added all columns on a single row so we only read one
      // entry in the below... that's why we have '1'.
      for (int i = 0; i < 1; i++) {
        HLog.Entry entry = reader.next(null);
        if (entry == null) break;
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
        assertTrue(tableName.equals(key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(row, kv.getRow()));
        assertEquals((byte)(i + '0'), kv.getValue()[0]);
        System.out.println(key + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Tests appending a multi-column edit to the log and reading it back.
   * @throws IOException
   */
  @Test
  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final TableName tableName =
        TableName.valueOf("tablename");
    final byte [] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
      // Write columns named 1, 2, 3, etc. and then values of single byte
      // 1, 2, 3...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd, sequenceId);
      log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes());
      log.close();
      Path filename = ((FSHLog) log).computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLogFactory.createReader(fs, filename, conf);
      HLog.Entry entry = reader.next();
      assertEquals(COL_COUNT, entry.getEdit().size());
      int idx = 0;
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(tableName.equals(entry.getKey().getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte)(idx + '0'), val.getValue()[0]);
        System.out.println(entry.getKey() + " " + val);
        idx++;
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Test that we can visit entries before they are appended.
   * @throws Exception
   */
  @Test
  public void testVisitors() throws Exception {
    final int COL_COUNT = 10;
    final TableName tableName =
        TableName.valueOf("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
      DumbWALActionsListener visitor = new DumbWALActionsListener();
      log.registerWALActionsListener(visitor);
      long timestamp = System.currentTimeMillis();
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      for (int i = 0; i < COL_COUNT; i++) {
        WALEdit cols = new WALEdit();
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[]{(byte) (i + '0')}));
        log.append(hri, tableName, cols, System.currentTimeMillis(), htd, sequenceId);
      }
      assertEquals(COL_COUNT, visitor.increments);
      log.unregisterWALActionsListener(visitor);
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(11)),
          timestamp, new byte[]{(byte) (11 + '0')}));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd, sequenceId);
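      // The listener was unregistered above, so this append must not bump the count.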
      assertEquals(COL_COUNT, visitor.increments);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

  @Test
  public void testLogCleaning() throws Exception {
    LOG.info("testLogCleaning");
    final TableName tableName =
        TableName.valueOf("testLogCleaning");
    final TableName tableName2 =
        TableName.valueOf("testLogCleaning2");

    HLog log = HLogFactory.createHLog(fs, hbaseDir,
        getName(), conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HRegionInfo hri2 = new HRegionInfo(tableName2,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

      // Add a single edit and make sure that rolling won't remove the file
      // Before HBASE-3198 it used to delete it
      addEdits(log, hri, tableName, 1, sequenceId);
      log.rollWriter();
      assertEquals(1, ((FSHLog) log).getNumRolledLogFiles());

      // See if there's anything wrong with more than 1 edit
      addEdits(log, hri, tableName, 2, sequenceId);
      log.rollWriter();
      assertEquals(2, ((FSHLog) log).getNumRolledLogFiles());

      // Now mix edits from 2 regions, still no flushing
      addEdits(log, hri, tableName, 1, sequenceId);
      addEdits(log, hri2, tableName2, 1, sequenceId);
      addEdits(log, hri, tableName, 1, sequenceId);
      addEdits(log, hri2, tableName2, 1, sequenceId);
      log.rollWriter();
      assertEquals(3, ((FSHLog) log).getNumRolledLogFiles());

      // Flush the first region, we expect to see the first two files getting
      // archived. We need to append something or writer won't be rolled.
      addEdits(log, hri2, tableName2, 1, sequenceId);
      log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes());
      log.rollWriter();
      assertEquals(2, ((FSHLog) log).getNumRolledLogFiles());

      // Flush the second region, which removes all the remaining output files
      // since the oldest was completely flushed and the two others only contain
      // flush information
      addEdits(log, hri2, tableName2, 1, sequenceId);
      log.startCacheFlush(hri2.getEncodedNameAsBytes());
      log.completeCacheFlush(hri2.getEncodedNameAsBytes());
      log.rollWriter();
      assertEquals(0, ((FSHLog) log).getNumRolledLogFiles());
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

  @Test
  public void testFailedToCreateHLogIfParentRenamed() throws IOException {
    FSHLog log = (FSHLog)HLogFactory.createHLog(
      fs, hbaseDir, "testFailedToCreateHLogIfParentRenamed", conf);
    long filenum = System.currentTimeMillis();
    Path path = log.computeFilename(filenum);
    HLogFactory.createWALWriter(fs, path, conf);
    Path parent = path.getParent();
    path = log.computeFilename(filenum + 1);
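    // Rename the WAL directory to '<dir>-splitting', as the master does when it
    // takes over the logs of a dead regionserver; creating another writer under
    // the renamed directory must then fail.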
    Path newPath = new Path(parent.getParent(), parent.getName() + "-splitting");
    fs.rename(parent, newPath);
    try {
      HLogFactory.createWALWriter(fs, path, conf);
      fail("It should fail to create the new WAL");
    } catch (IOException ioe) {
      // expected, good.
    }
  }

  @Test
  public void testGetServerNameFromHLogDirectoryName() throws IOException {
    ServerName sn = ServerName.valueOf("hn", 450, 1398);
    String hl = FSUtils.getRootDir(conf) + "/" + HLogUtil.getHLogDirectoryName(sn.toString());

    // Must not throw exception
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, null));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf,
        FSUtils.getRootDir(conf).toUri().toString()));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, ""));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, "                  "));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, hl));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, hl + "qdf"));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, "sfqf" + hl + "qdf"));

    final String wals = "/WALs/";
    ServerName parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf,
      FSUtils.getRootDir(conf).toUri().toString() + wals + sn +
      "/localhost%2C32984%2C1343316388997.1343316390417");
    Assert.assertEquals("standard", sn, parsed);

    parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf, hl + "/qdf");
    Assert.assertEquals("subdir", sn, parsed);

    parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf,
      FSUtils.getRootDir(conf).toUri().toString() + wals + sn +
      "-splitting/localhost%3A57020.1340474893931");
    Assert.assertEquals("split", sn, parsed);
  }

  /**
   * A loaded WAL coprocessor won't break existing HLog test cases.
   */
  @Test
  public void testWALCoprocessorLoaded() throws Exception {
    // test to see whether the coprocessor is loaded or not.
    HLog log = HLogFactory.createHLog(fs, hbaseDir,
        getName(), conf);
    try {
      WALCoprocessorHost host = log.getCoprocessorHost();
      Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
      assertNotNull(c);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

  private void addEdits(HLog log, HRegionInfo hri, TableName tableName,
                        int times, AtomicLong sequenceId) throws IOException {
    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor("row"));

    final byte [] row = Bytes.toBytes("row");
    for (int i = 0; i < times; i++) {
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, row, row, timestamp, row));
      log.append(hri, tableName, cols, timestamp, htd, sequenceId);
    }
  }

  /**
   * Tests reading a WAL written in the legacy, pre-protobuf SequenceFile format.
   * @throws IOException
   */
  @Test
  public void testReadLegacyLog() throws IOException {
    final int columnCount = 5;
    final int recordCount = 5;
    final TableName tableName =
        TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    long timestamp = System.currentTimeMillis();
    Path path = new Path(dir, "temphlog");
    SequenceFileLogWriter sflw = null;
    HLog.Reader reader = null;
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor(tableName);
      fs.mkdirs(dir);
      // Write log in pre-PB format.
      sflw = new SequenceFileLogWriter();
      sflw.init(fs, path, conf, false);
      for (int i = 0; i < recordCount; ++i) {
        HLogKey key = new HLogKey(
            hri.getEncodedNameAsBytes(), tableName, i, timestamp, HConstants.DEFAULT_CLUSTER_ID);
        WALEdit edit = new WALEdit();
        for (int j = 0; j < columnCount; ++j) {
          if (i == 0) {
            htd.addFamily(new HColumnDescriptor("column" + j));
          }
          String value = i + "" + j;
          edit.add(new KeyValue(row, row, row, timestamp, Bytes.toBytes(value)));
        }
        sflw.append(new HLog.Entry(key, edit));
      }
      sflw.sync();
      sflw.close();

      // Now read the log using standard means.
      reader = HLogFactory.createReader(fs, path, conf);
      assertTrue(reader instanceof SequenceFileLogReader);
      for (int i = 0; i < recordCount; ++i) {
        HLog.Entry entry = reader.next();
        assertNotNull(entry);
        assertEquals(columnCount, entry.getEdit().size());
        assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
        assertEquals(tableName, entry.getKey().getTablename());
        int idx = 0;
        for (KeyValue val : entry.getEdit().getKeyValues()) {
          assertTrue(Bytes.equals(row, val.getRow()));
          String value = i + "" + idx;
          assertArrayEquals(Bytes.toBytes(value), val.getValue());
          idx++;
        }
      }
      HLog.Entry entry = reader.next();
      assertNull(entry);
    } finally {
      if (sflw != null) {
        sflw.close();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Reads the WAL with and without WALTrailer.
   * @throws IOException
   */
  @Test
  public void testWALTrailer() throws IOException {
    // read with trailer.
    doRead(true);
    // read without trailer
    doRead(false);
  }

  /**
   * Appends entries to the WAL and then reads them back.
   * @param withTrailer If 'withTrailer' is true, it calls a close on the WAL writer before reading
   *          so that a trailer is appended to the WAL. Otherwise, it starts reading after the sync
   *          call. This means that the reader is not aware of the trailer. In this scenario, if the
   *          reader tries to read the trailer in its next() call, it returns false from
   *          ProtoBufLogReader.
   * @throws IOException
   */
  private void doRead(boolean withTrailer) throws IOException {
    final int columnCount = 5;
    final int recordCount = 5;
    final TableName tableName =
        TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    long timestamp = System.currentTimeMillis();
    Path path = new Path(dir, "temphlog");
    // delete the log if already exists, for test only
    fs.delete(path, true);
    HLog.Writer writer = null;
    HLog.Reader reader = null;
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor(tableName);
      fs.mkdirs(dir);
      // Write log in pb format.
      writer = HLogFactory.createWALWriter(fs, path, conf);
      for (int i = 0; i < recordCount; ++i) {
        HLogKey key = new HLogKey(
            hri.getEncodedNameAsBytes(), tableName, i, timestamp, HConstants.DEFAULT_CLUSTER_ID);
        WALEdit edit = new WALEdit();
        for (int j = 0; j < columnCount; ++j) {
          if (i == 0) {
            htd.addFamily(new HColumnDescriptor("column" + j));
          }
          String value = i + "" + j;
          edit.add(new KeyValue(row, row, row, timestamp, Bytes.toBytes(value)));
        }
        writer.append(new HLog.Entry(key, edit));
      }
      writer.sync();
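      // Closing the writer is what appends the WALTrailer; when we skip the
      // close, the file ends without a trailer and the reader must cope.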
      if (withTrailer) writer.close();

      // Now read the log using standard means.
      reader = HLogFactory.createReader(fs, path, conf);
      assertTrue(reader instanceof ProtobufLogReader);
      if (withTrailer) {
        assertNotNull(reader.getWALTrailer());
      } else {
        assertNull(reader.getWALTrailer());
      }
      for (int i = 0; i < recordCount; ++i) {
        HLog.Entry entry = reader.next();
        assertNotNull(entry);
        assertEquals(columnCount, entry.getEdit().size());
        assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
        assertEquals(tableName, entry.getKey().getTablename());
        int idx = 0;
        for (KeyValue val : entry.getEdit().getKeyValues()) {
          assertTrue(Bytes.equals(row, val.getRow()));
          String value = i + "" + idx;
          assertArrayEquals(Bytes.toBytes(value), val.getValue());
          idx++;
        }
      }
      HLog.Entry entry = reader.next();
      assertNull(entry);
    } finally {
      if (writer != null) {
        writer.close();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Tests the log comparator. Ensures that we are not mixing meta logs with non-meta logs (throws
   * an exception if we do). Comparison is based on the timestamp present in the WAL name.
   * @throws Exception
   */
  @Test
  public void testHLogComparator() throws Exception {
    HLog hlog1 = null;
    HLog hlogMeta = null;
    try {
      hlog1 = HLogFactory.createHLog(fs, FSUtils.getRootDir(conf), dir.toString(), conf);
      LOG.debug("Log obtained is: " + hlog1);
      Comparator<Path> comp = ((FSHLog) hlog1).LOG_NAME_COMPARATOR;
      Path p1 = ((FSHLog) hlog1).computeFilename(11);
      Path p2 = ((FSHLog) hlog1).computeFilename(12);
      // comparing with itself returns 0
      assertTrue(comp.compare(p1, p1) == 0);
      // comparing with different filenum.
      assertTrue(comp.compare(p1, p2) < 0);
      hlogMeta = HLogFactory.createMetaHLog(fs, FSUtils.getRootDir(conf), dir.toString(), conf,
        null, null);
      Comparator<Path> compMeta = ((FSHLog) hlogMeta).LOG_NAME_COMPARATOR;

      Path p1WithMeta = ((FSHLog) hlogMeta).computeFilename(11);
      Path p2WithMeta = ((FSHLog) hlogMeta).computeFilename(12);
      assertTrue(compMeta.compare(p1WithMeta, p1WithMeta) == 0);
      assertTrue(compMeta.compare(p1WithMeta, p2WithMeta) < 0);
      // mixing meta and non-meta logs gives error
      boolean ex = false;
      try {
        comp.compare(p1WithMeta, p2);
      } catch (Exception e) {
        ex = true;
      }
      assertTrue("Comparator doesn't complain while checking meta log files", ex);
      boolean exMeta = false;
      try {
        compMeta.compare(p1WithMeta, p2);
      } catch (Exception e) {
        exMeta = true;
      }
      assertTrue("Meta comparator doesn't complain while checking log files", exMeta);
    } finally {
      if (hlog1 != null) hlog1.close();
      if (hlogMeta != null) hlogMeta.close();
    }
  }

  /**
   * Tests WAL archiving by adding data, doing flushing/rolling and checking we archive old logs
   * and also don't archive "live logs" (that is, a log with un-flushed entries).
   * <p>
   * This is what it does:
   * It creates two regions, and does a series of inserts along with log rolling.
   * Whenever a WAL is rolled, FSHLog checks previous WALs for archiving. A WAL is eligible for
   * archiving if all the regions that have entries in that WAL file have flushed past their
   * maximum sequence id in that WAL file.
   * <p>
   * @throws IOException
   */
  @Test
  public void testWALArchiving() throws IOException {
    LOG.debug("testWALArchiving");
    TableName table1 = TableName.valueOf("t1");
    TableName table2 = TableName.valueOf("t2");
    HLog hlog = HLogFactory.createHLog(fs, FSUtils.getRootDir(conf), dir.toString(), conf);
    try {
      assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
      HRegionInfo hri1 = new HRegionInfo(table1, HConstants.EMPTY_START_ROW,
          HConstants.EMPTY_END_ROW);
      HRegionInfo hri2 = new HRegionInfo(table2, HConstants.EMPTY_START_ROW,
          HConstants.EMPTY_END_ROW);
      // ensure that we don't split the regions.
      hri1.setSplit(false);
      hri2.setSplit(false);
      // variables to mock region sequenceIds.
      final AtomicLong sequenceId1 = new AtomicLong(1);
      final AtomicLong sequenceId2 = new AtomicLong(1);
      // start with the testing logic: insert a waledit, and roll writer
      addEdits(hlog, hri1, table1, 1, sequenceId1);
      hlog.rollWriter();
      // assert that the wal is rolled
      assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
      // add edits in the second wal file, and roll writer.
      addEdits(hlog, hri1, table1, 1, sequenceId1);
      hlog.rollWriter();
      // assert that the wal is rolled
      assertEquals(2, ((FSHLog) hlog).getNumRolledLogFiles());
      // add a waledit to table1, and flush the region.
      addEdits(hlog, hri1, table1, 3, sequenceId1);
      flushRegion(hlog, hri1.getEncodedNameAsBytes());
      // roll log; all old logs should be archived.
      hlog.rollWriter();
      assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
      // add an edit to table2, and roll writer
      addEdits(hlog, hri2, table2, 1, sequenceId2);
      hlog.rollWriter();
      assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
      // add edits for table1, and roll writer
      addEdits(hlog, hri1, table1, 2, sequenceId1);
      hlog.rollWriter();
      assertEquals(2, ((FSHLog) hlog).getNumRolledLogFiles());
      // add edits for table2, and flush hri1.
      addEdits(hlog, hri2, table2, 2, sequenceId2);
      flushRegion(hlog, hri1.getEncodedNameAsBytes());
      // the log : region-sequenceId map is
      // log1: region2 (unflushed)
      // log2: region1 (flushed)
      // log3: region2 (unflushed)
      // roll the writer; log2 should be archived.
      hlog.rollWriter();
      assertEquals(2, ((FSHLog) hlog).getNumRolledLogFiles());
      // flush region2, and all logs should be archived.
      addEdits(hlog, hri2, table2, 2, sequenceId2);
      flushRegion(hlog, hri2.getEncodedNameAsBytes());
      hlog.rollWriter();
      assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
    } finally {
      if (hlog != null) hlog.close();
    }
  }

  /**
   * On rolling a wal after reaching the threshold, {@link HLog#rollWriter()} returns the list of
   * regions which should be flushed in order to archive the oldest wal file.
   * <p>
   * This method tests this behavior by inserting edits and rolling the wal enough times to reach
   * the max number of logs threshold. It checks whether we get the "right regions" for flush on
   * rolling the wal.
   * @throws Exception
   */
  @Test
  public void testFindMemStoresEligibleForFlush() throws Exception {
    LOG.debug("testFindMemStoresEligibleForFlush");
    Configuration conf1 = HBaseConfiguration.create(conf);
    conf1.setInt("hbase.regionserver.maxlogs", 1);
    HLog hlog = HLogFactory.createHLog(fs, FSUtils.getRootDir(conf1), dir.toString(), conf1);
    TableName t1 = TableName.valueOf("t1");
    TableName t2 = TableName.valueOf("t2");
    HRegionInfo hri1 = new HRegionInfo(t1, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    HRegionInfo hri2 = new HRegionInfo(t2, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    // variables to mock region sequenceIds
    final AtomicLong sequenceId1 = new AtomicLong(1);
    final AtomicLong sequenceId2 = new AtomicLong(1);
    // add edits and roll the wal
    try {
      addEdits(hlog, hri1, t1, 2, sequenceId1);
      hlog.rollWriter();
      // add some more edits and roll the wal. This would reach the log number threshold
      addEdits(hlog, hri1, t1, 2, sequenceId1);
      hlog.rollWriter();
      // with above rollWriter call, the max logs limit is reached.
      assertEquals(2, ((FSHLog) hlog).getNumRolledLogFiles());

      // get the regions to flush; since there is only one region in the oldest wal, it should
      // return only one region.
      byte[][] regionsToFlush = ((FSHLog) hlog).findRegionsToForceFlush();
      assertEquals(1, regionsToFlush.length);
      assertEquals(hri1.getEncodedNameAsBytes(), regionsToFlush[0]);
      // insert edits in second region
      addEdits(hlog, hri2, t2, 2, sequenceId2);
      // get the regions to flush, it should still read region1.
      regionsToFlush = ((FSHLog) hlog).findRegionsToForceFlush();
      assertEquals(1, regionsToFlush.length);
      assertEquals(hri1.getEncodedNameAsBytes(), regionsToFlush[0]);
      // flush region 1, and roll the wal file. Only the last wal, which has entries
      // for region2, should remain.
      flushRegion(hlog, hri1.getEncodedNameAsBytes());
      hlog.rollWriter();
      // only one wal should remain now (that is for the second region).
      assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
      // flush the second region
      flushRegion(hlog, hri2.getEncodedNameAsBytes());
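      // rollWriter(true) forces a roll even though no new edits arrived since
      // the last roll, letting the fully-flushed log get archived.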
      hlog.rollWriter(true);
      // no wal should remain now.
      assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
      // add edits both to region 1 and region 2, and roll.
      addEdits(hlog, hri1, t1, 2, sequenceId1);
      addEdits(hlog, hri2, t2, 2, sequenceId2);
      hlog.rollWriter();
      assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
      // add edits and roll the writer, to reach the max logs limit.
      addEdits(hlog, hri1, t1, 2, sequenceId1);
      hlog.rollWriter();
      // it should return two regions to flush, as the oldest wal file has entries
      // for both regions.
      regionsToFlush = ((FSHLog) hlog).findRegionsToForceFlush();
      assertEquals(2, regionsToFlush.length);
      // flush both regions
      flushRegion(hlog, hri1.getEncodedNameAsBytes());
      flushRegion(hlog, hri2.getEncodedNameAsBytes());
      hlog.rollWriter(true);
      assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
      // Add an edit to region1, and roll the wal.
      addEdits(hlog, hri1, t1, 2, sequenceId1);
      // tests partial flush: roll on a partial flush, and ensure that wal is not archived.
      hlog.startCacheFlush(hri1.getEncodedNameAsBytes());
      hlog.rollWriter();
      hlog.completeCacheFlush(hri1.getEncodedNameAsBytes());
      assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
    } finally {
      if (hlog != null) hlog.close();
    }
  }

  /**
   * Simulates HLog append ops for a region and tests
   * {@link FSHLog#areAllRegionsFlushed(Map, Map, Map)} API.
   * It compares the region sequenceIds with oldestFlushing and oldestUnFlushed entries.
   * If a region's entries are larger than min of (oldestFlushing, oldestUnFlushed), then the
   * region should be flushed before archiving this WAL.
   */
  @Test
  public void testAllRegionsFlushed() {
    LOG.debug("testAllRegionsFlushed");
    Map<byte[], Long> oldestFlushingSeqNo = new HashMap<byte[], Long>();
    Map<byte[], Long> oldestUnFlushedSeqNo = new HashMap<byte[], Long>();
    Map<byte[], Long> seqNo = new HashMap<byte[], Long>();
    // create a table
    TableName t1 = TableName.valueOf("t1");
    // create a region
    HRegionInfo hri1 = new HRegionInfo(t1, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    // variables to mock region sequenceIds
    final AtomicLong sequenceId1 = new AtomicLong(1);
    // test empty map
    assertTrue(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
    // add entries in the region
    seqNo.put(hri1.getEncodedNameAsBytes(), sequenceId1.incrementAndGet());
    oldestUnFlushedSeqNo.put(hri1.getEncodedNameAsBytes(), sequenceId1.get());
    // should say region1 is not flushed.
    assertFalse(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
    // test with entries in oldestFlushing map.
    oldestUnFlushedSeqNo.clear();
    oldestFlushingSeqNo.put(hri1.getEncodedNameAsBytes(), sequenceId1.get());
    assertFalse(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
    // simulate region flush, i.e., clear oldestFlushing and oldestUnflushed maps
    oldestFlushingSeqNo.clear();
    oldestUnFlushedSeqNo.clear();
    assertTrue(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
    // insert some large values for region1
    oldestUnFlushedSeqNo.put(hri1.getEncodedNameAsBytes(), 1000L);
    seqNo.put(hri1.getEncodedNameAsBytes(), 1500L);
    assertFalse(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));

    // tests when oldestUnFlushed/oldestFlushing contains larger value.
    // It means region is flushed.
    oldestFlushingSeqNo.put(hri1.getEncodedNameAsBytes(), 1200L);
    oldestUnFlushedSeqNo.clear();
    seqNo.put(hri1.getEncodedNameAsBytes(), 1199L);
    assertTrue(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
  }

  /**
   * Helper method to simulate a region flush for a WAL.
   * @param hlog the WAL being flushed against
   * @param regionEncodedName encoded name of the region being flushed
   */
  private void flushRegion(HLog hlog, byte[] regionEncodedName) {
    hlog.startCacheFlush(regionEncodedName);
    hlog.completeCacheFlush(regionEncodedName);
  }

  static class DumbWALActionsListener implements WALActionsListener {
    int increments = 0;

    @Override
    public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
                                         WALEdit logEdit) {
      increments++;
    }

    @Override
    public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
      increments++;
    }

    @Override
    public void preLogRoll(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void postLogRoll(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void preLogArchive(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void postLogArchive(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void logRollRequested() {
      // not interested
    }

    @Override
    public void logCloseRequested() {
      // not interested
    }
  }

}