/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.*;

import java.io.IOException;
import java.lang.reflect.Method;
import java.net.BindException;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/** JUnit test case for HLog */
@Category(LargeTests.class)
@SuppressWarnings("deprecation")
public class TestHLog {
  private static final Log LOG = LogFactory.getLog(TestHLog.class);
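  // Instance initializer: raise the HDFS and WAL loggers to ALL so failures
  // come with verbose namenode, datanode, and log-roll output.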
  {
    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
      .getLogger().setLevel(Level.ALL);
    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)HLog.LOG).getLogger().setLevel(Level.ALL);
  }

  private static Configuration conf;
  private static FileSystem fs;
  private static Path dir;
  private static MiniDFSCluster cluster;
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Path hbaseDir;
  private static Path oldLogDir;

  @Before
  public void setUp() throws Exception {
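    // Wipe everything under the MiniDFS root so each test starts against an
    // empty filesystem.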
    FileStatus[] entries = fs.listStatus(new Path("/"));
    for (FileStatus dir : entries) {
      fs.delete(dir.getPath(), true);
    }
  }

  @After
  public void tearDown() throws Exception {
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Make block sizes small.
    TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
    // needed for testAppendClose()
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
    // quicker heartbeat interval for faster DN death notification
    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.socket.timeout", 5000);
    // faster failover with cluster.shutdown();fs.close() idiom
    TEST_UTIL.getConfiguration()
        .setInt("ipc.client.connect.max.retries", 1);
    TEST_UTIL.getConfiguration().setInt(
        "dfs.client.block.recovery.retries", 1);
    TEST_UTIL.getConfiguration().setInt(
        "ipc.client.connection.maxidletime", 500);
    TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
        SampleRegionWALObserver.class.getName());
    TEST_UTIL.startMiniDFSCluster(3);

    conf = TEST_UTIL.getConfiguration();
    cluster = TEST_UTIL.getDFSCluster();
    fs = cluster.getFileSystem();

    hbaseDir = TEST_UTIL.createRootDir();
    oldLogDir = new Path(hbaseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    dir = new Path(hbaseDir, getName());
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  private static String getName() {
    return "TestHLog";
  }

  /**
   * Write to a log file with three concurrent threads and verify all data is written.
   * @throws Exception
   */
  @Test
  public void testConcurrentWrites() throws Exception {
    // Run the HPE tool with three threads writing 3000 edits each concurrently.
    // When done, verify that all edits were written.
    int errCode = HLogPerformanceEvaluation.
      innerMain(new Configuration(TEST_UTIL.getConfiguration()),
        new String [] {"-threads", "3", "-verify", "-noclosefs", "-iterations", "3000"});
    assertEquals(0, errCode);
  }

  /**
   * Just write multiple logs then split.  Before the fix for HADOOP-2283, this
   * would fail.
   * @throws IOException
   */
  @Test
  public void testSplit() throws IOException {
    final TableName tableName =
        TableName.valueOf(getName());
    final byte [] rowName = tableName.getName();
    Path logdir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
    HLog log = HLogFactory.createHLog(fs, hbaseDir,
        HConstants.HREGION_LOGDIR_NAME, conf);
    final int howmany = 3;
    HRegionInfo[] infos = new HRegionInfo[3];
    Path tabledir = FSUtils.getTableDir(hbaseDir, tableName);
    fs.mkdirs(tabledir);
    for (int i = 0; i < howmany; i++) {
      infos[i] = new HRegionInfo(tableName,
                Bytes.toBytes("" + i), Bytes.toBytes("" + (i+1)), false);
      fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
      LOG.info("allo " + new Path(tabledir, infos[i].getEncodedName()).toString());
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("column"));

    // Add edits for three regions.
    final AtomicLong sequenceId = new AtomicLong(1);
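    // Stands in for the region-level sequence id that HLog#append advances on
    // every edit (the later tests call this "mocking region sequenceIds").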
    try {
      for (int ii = 0; ii < howmany; ii++) {
        for (int i = 0; i < howmany; i++) {
          for (int j = 0; j < howmany; j++) {
            WALEdit edit = new WALEdit();
            byte [] family = Bytes.toBytes("column");
            byte [] qualifier = Bytes.toBytes(Integer.toString(j));
            byte [] column = Bytes.toBytes("column:" + Integer.toString(j));
            edit.add(new KeyValue(rowName, family, qualifier,
                System.currentTimeMillis(), column));
            LOG.info("Region " + i + ": " + edit);
            log.append(infos[i], tableName, edit,
              System.currentTimeMillis(), htd, sequenceId);
          }
        }
        log.rollWriter();
      }
      log.close();
      List<Path> splits = HLogSplitter.split(
        hbaseDir, logdir, oldLogDir, fs, conf);
      verifySplits(splits, howmany);
      log = null;
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
    }
  }

  /**
   * Test new HDFS-265 sync.
   * @throws Exception
   */
  @Test
  public void Broken_testSync() throws Exception {
    TableName tableName =
        TableName.valueOf(getName());
    // First verify that using streams all works.
    Path p = new Path(dir, getName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(tableName.getName());
    Method syncMethod = null;
    try {
      syncMethod = out.getClass().getMethod("hflush", new Class<?> []{});
    } catch (NoSuchMethodException e) {
      try {
        syncMethod = out.getClass().getMethod("sync", new Class<?> []{});
      } catch (NoSuchMethodException ex) {
        fail("This version of Hadoop supports neither Syncable.sync() " +
            "nor Syncable.hflush().");
      }
    }
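    // Prefer hflush() (the HDFS-265 API) and fall back to the older sync() so
    // this check works against both pre- and post-append Hadoop versions.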
    syncMethod.invoke(out, new Object[]{});
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte [] buffer = new byte [1024];
    int read = in.read(buffer);
    assertEquals(tableName.getName().length, read);
    out.close();
    in.close();

    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir", conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    final int total = 20;
    HLog.Reader reader = null;

    try {
      HRegionInfo info = new HRegionInfo(tableName,
          null, null, false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor(tableName.getName()));

      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        wal.append(info, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
      }
      // Now call sync and try reading.  Opening a Reader before you sync just
      // gives you EOFE.
      wal.sync();
      // Open a Reader.
      Path walPath = ((FSHLog) wal).computeFilename();
      reader = HLogFactory.createReader(fs, walPath, conf);
      int count = 0;
      HLog.Entry entry = new HLog.Entry();
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total, count);
      reader.close();
      // Add test that checks to see that an open of a Reader works on a file
      // that has had a sync done on it.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        wal.append(info, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
      }
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertTrue(count >= total);
      reader.close();
      // If I sync, should see double the edits.
      wal.sync();
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 2, count);
      // Now do a test that ensures stuff works when we go over block boundary,
      // especially that we return good length on file.
      final byte [] value = new byte[1025 * 1024];  // Make a value just over 1MB.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), value));
        wal.append(info, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
      }
      // Now I should have written out lots of blocks.  Sync then read.
      wal.sync();
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
      // Close it and ensure that closed, Reader gets right length also.
      wal.close();
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
    } finally {
      if (wal != null) wal.closeAndDelete();
      if (reader != null) reader.close();
    }
  }

  private void verifySplits(List<Path> splits, final int howmany)
  throws IOException {
    assertEquals(howmany * howmany, splits.size());
    for (int i = 0; i < splits.size(); i++) {
      LOG.info("Verifying=" + splits.get(i));
      HLog.Reader reader = HLogFactory.createReader(fs, splits.get(i), conf);
      try {
        int count = 0;
        String previousRegion = null;
        long seqno = -1;
        HLog.Entry entry = new HLog.Entry();
        while ((entry = reader.next(entry)) != null) {
          HLogKey key = entry.getKey();
          String region = Bytes.toString(key.getEncodedRegionName());
          // Assert that all edits are for same region.
          if (previousRegion != null) {
            assertEquals(previousRegion, region);
          }
          LOG.info("oldseqno=" + seqno + ", newseqno=" + key.getLogSeqNum());
          assertTrue(seqno < key.getLogSeqNum());
          seqno = key.getLogSeqNum();
          previousRegion = region;
          count++;
        }
        assertEquals(howmany, count);
      } finally {
        reader.close();
      }
    }
  }

  /*
   * We pass different values to recoverFileLease() so that different code paths are covered
   *
   * For this test to pass, requires:
   * 1. HDFS-200 (append support)
   * 2. HDFS-988 (SafeMode should freeze file operations
   *              [FSNamesystem.nextGenerationStampForBlock])
   * 3. HDFS-142 (on restart, maintain pendingCreates)
   */
  @Test (timeout=300000)
  public void testAppendClose() throws Exception {
    TableName tableName =
        TableName.valueOf(getName());
    HRegionInfo regioninfo = new HRegionInfo(tableName,
             HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);

    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir",
        "hlogdir_archive", conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    final int total = 20;

    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor(tableName.getName()));

    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
    }
    // Now call sync to send the data to HDFS datanodes
    wal.sync();
    int namenodePort = cluster.getNameNodePort();
    final Path walPath = ((FSHLog) wal).computeFilename();

    // Stop the cluster.  (ensure restart since we're sharing MiniDFSCluster)
    try {
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
      dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
      TEST_UTIL.shutdownMiniDFSCluster();
      try {
        // wal.writer.close() will throw an exception,
        // but still call this since it closes the LogSyncer thread first
        wal.close();
      } catch (IOException e) {
        LOG.info(e);
      }
      fs.close(); // closing FS last so DFSOutputStream can't call close
      LOG.info("STOPPED first instance of the cluster");
    } finally {
      // Restart the cluster
      while (cluster.isClusterUp()) {
        LOG.error("Waiting for cluster to go down");
        Thread.sleep(1000);
      }
      assertFalse(cluster.isClusterUp());
      cluster = null;
      for (int i = 0; i < 100; i++) {
        try {
          cluster = TEST_UTIL.startMiniDFSClusterForTestHLog(namenodePort);
          break;
        } catch (BindException e) {
          LOG.info("Sleeping.  BindException bringing up new cluster");
          Threads.sleep(1000);
        }
      }
      cluster.waitActive();
      fs = cluster.getFileSystem();
      LOG.info("STARTED second instance.");
    }

    // set the lease period to be 1 second so that the
    // namenode triggers lease recovery upon append request
    Method setLeasePeriod = cluster.getClass()
      .getDeclaredMethod("setLeasePeriod", new Class[]{Long.TYPE, Long.TYPE});
    setLeasePeriod.setAccessible(true);
    setLeasePeriod.invoke(cluster, 1000L, 1000L);
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      LOG.info(e);
    }

    // Now try recovering the log, like the HMaster would do
    final FileSystem recoveredFs = fs;
    final Configuration rlConf = conf;

    class RecoverLogThread extends Thread {
      public Exception exception = null;
      public void run() {
        try {
          FSUtils.getInstance(fs, rlConf)
            .recoverFileLease(recoveredFs, walPath, rlConf, null);
        } catch (IOException e) {
          exception = e;
        }
      }
    }

    RecoverLogThread t = new RecoverLogThread();
    t.start();
    // Timeout after 60 sec. Without correct patches, would be an infinite loop
    t.join(60 * 1000);
    if (t.isAlive()) {
      t.interrupt();
      throw new Exception("Timed out waiting for HLog.recoverLog()");
    }

    if (t.exception != null) {
      throw t.exception;
    }

    // Make sure you can read all the content
    HLog.Reader reader = HLogFactory.createReader(fs, walPath, conf);
    int count = 0;
    HLog.Entry entry = new HLog.Entry();
    while (reader.next(entry) != null) {
      count++;
      assertTrue("Should be one KeyValue per WALEdit",
                  entry.getEdit().getKeyValues().size() == 1);
    }
    assertEquals(total, count);
    reader.close();

    // Reset the lease period
    setLeasePeriod.invoke(cluster, 60000L, 3600000L);
  }

  /**
   * Tests that we can write out an edit, close, and then read it back in again.
   * @throws IOException
   */
  @Test
  public void testEditAdd() throws IOException {
    final int COL_COUNT = 10;
    final TableName tableName =
        TableName.valueOf("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog.Reader reader = null;
    HLog log = null;
    try {
      log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
      final AtomicLong sequenceId = new AtomicLong(1);

      // Write columns named 1, 2, 3, etc. and then values of single byte
      // 1, 2, 3...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo info = new HRegionInfo(tableName,
        row, Bytes.toBytes(Bytes.toString(row) + "1"), false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      log.append(info, tableName, cols, System.currentTimeMillis(), htd, sequenceId);
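      // Simulate a memstore flush so the log treats this region's edits as
      // persisted before we close the writer.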
      log.startCacheFlush(info.getEncodedNameAsBytes());
      log.completeCacheFlush(info.getEncodedNameAsBytes());
      log.close();
      Path filename = ((FSHLog) log).computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLogFactory.createReader(fs, filename, conf);
      // Above we added all columns on a single row so we only read one
      // entry in the below... that's why we have '1'.
      for (int i = 0; i < 1; i++) {
        HLog.Entry entry = reader.next(null);
        if (entry == null) break;
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
        assertTrue(tableName.equals(key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(row, kv.getRow()));
        assertEquals((byte)(i + '0'), kv.getValue()[0]);
        System.out.println(key + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * @throws IOException
   */
  @Test
  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final TableName tableName =
        TableName.valueOf("tablename");
    final byte [] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
      // Write columns named 1, 2, 3, etc. and then values of single byte
      // 1, 2, 3...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd, sequenceId);
      log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes());
      log.close();
      Path filename = ((FSHLog) log).computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLogFactory.createReader(fs, filename, conf);
      HLog.Entry entry = reader.next();
      assertEquals(COL_COUNT, entry.getEdit().size());
      int idx = 0;
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(tableName.equals(entry.getKey().getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte)(idx + '0'), val.getValue()[0]);
        System.out.println(entry.getKey() + " " + val);
        idx++;
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Test that we can visit entries before they are appended
   * @throws Exception
   */
  @Test
  public void testVisitors() throws Exception {
    final int COL_COUNT = 10;
    final TableName tableName =
        TableName.valueOf("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
      DumbWALActionsListener visitor = new DumbWALActionsListener();
      log.registerWALActionsListener(visitor);
      long timestamp = System.currentTimeMillis();
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      for (int i = 0; i < COL_COUNT; i++) {
        WALEdit cols = new WALEdit();
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[]{(byte) (i + '0')}));
        log.append(hri, tableName, cols, System.currentTimeMillis(), htd, sequenceId);
      }
      assertEquals(COL_COUNT, visitor.increments);
      log.unregisterWALActionsListener(visitor);
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(11)),
          timestamp, new byte[]{(byte) (11 + '0')}));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd, sequenceId);
      assertEquals(COL_COUNT, visitor.increments);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

  @Test
  public void testLogCleaning() throws Exception {
    LOG.info("testLogCleaning");
    final TableName tableName =
        TableName.valueOf("testLogCleaning");
    final TableName tableName2 =
        TableName.valueOf("testLogCleaning2");

    HLog log = HLogFactory.createHLog(fs, hbaseDir,
        getName(), conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HRegionInfo hri2 = new HRegionInfo(tableName2,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

      // Add a single edit and make sure that rolling won't remove the file
      // Before HBASE-3198 it used to delete it
      addEdits(log, hri, tableName, 1, sequenceId);
      log.rollWriter();
      assertEquals(1, ((FSHLog) log).getNumRolledLogFiles());

      // See if there's anything wrong with more than 1 edit
      addEdits(log, hri, tableName, 2, sequenceId);
      log.rollWriter();
      assertEquals(2, ((FSHLog) log).getNumRolledLogFiles());

      // Now mix edits from 2 regions, still no flushing
      addEdits(log, hri, tableName, 1, sequenceId);
      addEdits(log, hri2, tableName2, 1, sequenceId);
      addEdits(log, hri, tableName, 1, sequenceId);
      addEdits(log, hri2, tableName2, 1, sequenceId);
      log.rollWriter();
      assertEquals(3, ((FSHLog) log).getNumRolledLogFiles());

      // Flush the first region, we expect to see the first two files getting
      // archived. We need to append something or writer won't be rolled.
      addEdits(log, hri2, tableName2, 1, sequenceId);
      log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes());
      log.rollWriter();
      assertEquals(2, ((FSHLog) log).getNumRolledLogFiles());

      // Flush the second region, which removes all the remaining output files
      // since the oldest was completely flushed and the two others only contain
      // flush information
      addEdits(log, hri2, tableName2, 1, sequenceId);
      log.startCacheFlush(hri2.getEncodedNameAsBytes());
      log.completeCacheFlush(hri2.getEncodedNameAsBytes());
      log.rollWriter();
      assertEquals(0, ((FSHLog) log).getNumRolledLogFiles());
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

  @Test
  public void testFailedToCreateHLogIfParentRenamed() throws IOException {
    FSHLog log = (FSHLog)HLogFactory.createHLog(
      fs, hbaseDir, "testFailedToCreateHLogIfParentRenamed", conf);
    long filenum = System.currentTimeMillis();
    Path path = log.computeFilename(filenum);
    HLogFactory.createWALWriter(fs, path, conf);
    Path parent = path.getParent();
    path = log.computeFilename(filenum + 1);
    Path newPath = new Path(parent.getParent(), parent.getName() + "-splitting");
    fs.rename(parent, newPath);
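    // Renaming the log directory to "<dir>-splitting" mimics what the master
    // does when it claims a dead server's WALs for splitting; creating a new
    // writer under the old directory name must now fail.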
    try {
      HLogFactory.createWALWriter(fs, path, conf);
      fail("It should fail to create the new WAL");
    } catch (IOException ioe) {
      // expected, good.
    }
  }

  @Test
  public void testGetServerNameFromHLogDirectoryName() throws IOException {
    ServerName sn = ServerName.valueOf("hn", 450, 1398);
    String hl = FSUtils.getRootDir(conf) + "/" + HLogUtil.getHLogDirectoryName(sn.toString());
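
    // A well-formed WAL directory path embeds the owning server's name; probe
    // the parser with malformed inputs first, then with valid ones.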
    // Must not throw exception
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, null));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf,
        FSUtils.getRootDir(conf).toUri().toString()));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, ""));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, "                  "));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, hl));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, hl + "qdf"));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, "sfqf" + hl + "qdf"));

    final String wals = "/WALs/";
    ServerName parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf,
      FSUtils.getRootDir(conf).toUri().toString() + wals + sn +
      "/localhost%2C32984%2C1343316388997.1343316390417");
    Assert.assertEquals("standard", sn, parsed);

    parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf, hl + "/qdf");
    Assert.assertEquals("subdir", sn, parsed);

    parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf,
      FSUtils.getRootDir(conf).toUri().toString() + wals + sn +
      "-splitting/localhost%3A57020.1340474893931");
    Assert.assertEquals("split", sn, parsed);
  }

  /**
   * A loaded WAL coprocessor won't break existing HLog test cases.
   */
  @Test
  public void testWALCoprocessorLoaded() throws Exception {
    // test to see whether the coprocessor is loaded or not.
    HLog log = HLogFactory.createHLog(fs, hbaseDir,
        getName(), conf);
    try {
      WALCoprocessorHost host = log.getCoprocessorHost();
      Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
      assertNotNull(c);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

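  /**
   * Appends {@code times} single-KeyValue edits for the given region to the
   * log, using {@code sequenceId} as the region's sequence id source.
   */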
  private void addEdits(HLog log, HRegionInfo hri, TableName tableName,
                        int times, AtomicLong sequenceId) throws IOException {
    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor("row"));

    final byte [] row = Bytes.toBytes("row");
    for (int i = 0; i < times; i++) {
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, row, row, timestamp, row));
      log.append(hri, tableName, cols, timestamp, htd, sequenceId);
    }
  }

  /**
   * @throws IOException
   */
  @Test
  public void testReadLegacyLog() throws IOException {
    final int columnCount = 5;
    final int recordCount = 5;
    final TableName tableName =
        TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    long timestamp = System.currentTimeMillis();
    Path path = new Path(dir, "temphlog");
    SequenceFileLogWriter sflw = null;
    HLog.Reader reader = null;
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor(tableName);
      fs.mkdirs(dir);
      // Write log in pre-PB format.
      sflw = new SequenceFileLogWriter();
      sflw.init(fs, path, conf, false);
      for (int i = 0; i < recordCount; ++i) {
        HLogKey key = new HLogKey(
            hri.getEncodedNameAsBytes(), tableName, i, timestamp, HConstants.DEFAULT_CLUSTER_ID);
        WALEdit edit = new WALEdit();
        for (int j = 0; j < columnCount; ++j) {
          if (i == 0) {
            htd.addFamily(new HColumnDescriptor("column" + j));
          }
          String value = i + "" + j;
          edit.add(new KeyValue(row, row, row, timestamp, Bytes.toBytes(value)));
        }
        sflw.append(new HLog.Entry(key, edit));
      }
      sflw.sync();
      sflw.close();

      // Now read the log using standard means.
      reader = HLogFactory.createReader(fs, path, conf);
      assertTrue(reader instanceof SequenceFileLogReader);
      for (int i = 0; i < recordCount; ++i) {
        HLog.Entry entry = reader.next();
        assertNotNull(entry);
        assertEquals(columnCount, entry.getEdit().size());
        assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
        assertEquals(tableName, entry.getKey().getTablename());
        int idx = 0;
        for (KeyValue val : entry.getEdit().getKeyValues()) {
          assertTrue(Bytes.equals(row, val.getRow()));
          String value = i + "" + idx;
          assertArrayEquals(Bytes.toBytes(value), val.getValue());
          idx++;
        }
      }
      HLog.Entry entry = reader.next();
      assertNull(entry);
    } finally {
      if (sflw != null) {
        sflw.close();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Reads the WAL with and without WALTrailer.
   * @throws IOException
   */
  @Test
  public void testWALTrailer() throws IOException {
    // read with trailer.
    doRead(true);
    // read without trailer
    doRead(false);
  }

  /**
   * Appends entries in the WAL and reads it.
   * @param withTrailer If 'withTrailer' is true, it calls a close on the WAL writer before reading
   *          so that a trailer is appended to the WAL. Otherwise, it starts reading after the sync
   *          call. This means that the reader is not aware of the trailer. In this scenario, if the
   *          reader tries to read the trailer in its next() call, it returns false from
   *          ProtobufLogReader.
   * @throws IOException
   */
  private void doRead(boolean withTrailer) throws IOException {
    final int columnCount = 5;
    final int recordCount = 5;
    final TableName tableName =
        TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    long timestamp = System.currentTimeMillis();
    Path path = new Path(dir, "temphlog");
    // delete the log if it already exists, for test only
    fs.delete(path, true);
    HLog.Writer writer = null;
    HLog.Reader reader = null;
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor(tableName);
      fs.mkdirs(dir);
      // Write log in pb format.
      writer = HLogFactory.createWALWriter(fs, path, conf);
      for (int i = 0; i < recordCount; ++i) {
        HLogKey key = new HLogKey(
            hri.getEncodedNameAsBytes(), tableName, i, timestamp, HConstants.DEFAULT_CLUSTER_ID);
        WALEdit edit = new WALEdit();
        for (int j = 0; j < columnCount; ++j) {
          if (i == 0) {
            htd.addFamily(new HColumnDescriptor("column" + j));
          }
          String value = i + "" + j;
          edit.add(new KeyValue(row, row, row, timestamp, Bytes.toBytes(value)));
        }
        writer.append(new HLog.Entry(key, edit));
      }
      writer.sync();
      if (withTrailer) writer.close();

      // Now read the log using standard means.
      reader = HLogFactory.createReader(fs, path, conf);
      assertTrue(reader instanceof ProtobufLogReader);
      if (withTrailer) {
        assertNotNull(reader.getWALTrailer());
      } else {
        assertNull(reader.getWALTrailer());
      }
      for (int i = 0; i < recordCount; ++i) {
        HLog.Entry entry = reader.next();
        assertNotNull(entry);
        assertEquals(columnCount, entry.getEdit().size());
        assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
        assertEquals(tableName, entry.getKey().getTablename());
        int idx = 0;
        for (KeyValue val : entry.getEdit().getKeyValues()) {
          assertTrue(Bytes.equals(row, val.getRow()));
          String value = i + "" + idx;
          assertArrayEquals(Bytes.toBytes(value), val.getValue());
          idx++;
        }
      }
      HLog.Entry entry = reader.next();
      assertNull(entry);
    } finally {
      if (writer != null) {
        writer.close();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Tests the log comparator. Ensures that we are not mixing meta logs with non-meta logs (throws
   * an exception if we do). Comparison is based on the timestamp present in the wal name.
   * @throws Exception
   */
  @Test
  public void testHLogComparator() throws Exception {
    HLog hlog1 = null;
    HLog hlogMeta = null;
    try {
      hlog1 = HLogFactory.createHLog(fs, FSUtils.getRootDir(conf), dir.toString(), conf);
      LOG.debug("Log obtained is: " + hlog1);
      Comparator<Path> comp = ((FSHLog) hlog1).LOG_NAME_COMPARATOR;
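      // LOG_NAME_COMPARATOR orders WAL files by the timestamp embedded in the
      // file name, i.e. the filenum passed to computeFilename() below.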
      Path p1 = ((FSHLog) hlog1).computeFilename(11);
      Path p2 = ((FSHLog) hlog1).computeFilename(12);
      // comparing with itself returns 0
      assertTrue(comp.compare(p1, p1) == 0);
      // comparing with different filenum.
      assertTrue(comp.compare(p1, p2) < 0);
      hlogMeta = HLogFactory.createMetaHLog(fs, FSUtils.getRootDir(conf), dir.toString(), conf,
        null, null);
      Comparator<Path> compMeta = ((FSHLog) hlogMeta).LOG_NAME_COMPARATOR;

      Path p1WithMeta = ((FSHLog) hlogMeta).computeFilename(11);
      Path p2WithMeta = ((FSHLog) hlogMeta).computeFilename(12);
      assertTrue(compMeta.compare(p1WithMeta, p1WithMeta) == 0);
      assertTrue(compMeta.compare(p1WithMeta, p2WithMeta) < 0);
      // mixing meta and non-meta logs gives an error
      boolean ex = false;
      try {
        comp.compare(p1WithMeta, p2);
      } catch (Exception e) {
        ex = true;
      }
      assertTrue("Comparator doesn't complain while checking meta log files", ex);
      boolean exMeta = false;
      try {
        compMeta.compare(p1WithMeta, p2);
      } catch (Exception e) {
        exMeta = true;
      }
      assertTrue("Meta comparator doesn't complain while checking log files", exMeta);
    } finally {
      if (hlog1 != null) hlog1.close();
      if (hlogMeta != null) hlogMeta.close();
    }
  }

  /**
   * Tests wal archiving by adding data, doing flushing/rolling and checking we archive old logs
   * and also don't archive "live logs" (that is, a log with un-flushed entries).
   * <p>
   * This is what it does:
   * It creates two regions, and does a series of inserts along with log rolling.
   * Whenever a WAL is rolled, FSHLog checks previous wals for archiving. A wal is eligible for
   * archiving if, for all the regions which have entries in that wal file, those regions have
   * flushed past their maximum sequence id in that wal file.
   * <p>
   * @throws IOException
   */
  @Test
  public void testWALArchiving() throws IOException {
    LOG.debug("testWALArchiving");
    TableName table1 = TableName.valueOf("t1");
    TableName table2 = TableName.valueOf("t2");
    HLog hlog = HLogFactory.createHLog(fs, FSUtils.getRootDir(conf), dir.toString(), conf);
    try {
      assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
      HRegionInfo hri1 = new HRegionInfo(table1, HConstants.EMPTY_START_ROW,
          HConstants.EMPTY_END_ROW);
      HRegionInfo hri2 = new HRegionInfo(table2, HConstants.EMPTY_START_ROW,
          HConstants.EMPTY_END_ROW);
      // ensure that we don't split the regions.
      hri1.setSplit(false);
      hri2.setSplit(false);
      // variables to mock region sequenceIds.
      final AtomicLong sequenceId1 = new AtomicLong(1);
      final AtomicLong sequenceId2 = new AtomicLong(1);
      // start with the testing logic: insert a waledit, and roll writer
      addEdits(hlog, hri1, table1, 1, sequenceId1);
      hlog.rollWriter();
      // assert that the wal is rolled
      assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
      // add edits in the second wal file, and roll writer.
      addEdits(hlog, hri1, table1, 1, sequenceId1);
      hlog.rollWriter();
      // assert that the wal is rolled
      assertEquals(2, ((FSHLog) hlog).getNumRolledLogFiles());
      // add a waledit to table1, and flush the region.
      addEdits(hlog, hri1, table1, 3, sequenceId1);
      flushRegion(hlog, hri1.getEncodedNameAsBytes());
      // roll log; all old logs should be archived.
      hlog.rollWriter();
      assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
      // add an edit to table2, and roll writer
      addEdits(hlog, hri2, table2, 1, sequenceId2);
      hlog.rollWriter();
      assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
      // add edits for table1, and roll writer
      addEdits(hlog, hri1, table1, 2, sequenceId1);
      hlog.rollWriter();
      assertEquals(2, ((FSHLog) hlog).getNumRolledLogFiles());
      // add edits for table2, and flush hri1.
      addEdits(hlog, hri2, table2, 2, sequenceId2);
      flushRegion(hlog, hri1.getEncodedNameAsBytes());
      // the log : region-sequenceId map is
      // log1: region2 (unflushed)
      // log2: region1 (flushed)
      // log3: region2 (unflushed)
      // roll the writer; log2 should be archived.
      hlog.rollWriter();
      assertEquals(2, ((FSHLog) hlog).getNumRolledLogFiles());
      // flush region2, and all logs should be archived.
      addEdits(hlog, hri2, table2, 2, sequenceId2);
      flushRegion(hlog, hri2.getEncodedNameAsBytes());
      hlog.rollWriter();
      assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
    } finally {
      if (hlog != null) hlog.close();
    }
  }

  /**
   * On rolling a wal after reaching the threshold, {@link HLog#rollWriter()} returns the list of
   * regions which should be flushed in order to archive the oldest wal file.
   * <p>
   * This method tests this behavior by inserting edits and rolling the wal enough times to reach
   * the max number of logs threshold. It checks whether we get the "right regions" for flush on
   * rolling the wal.
   * @throws Exception
   */
  @Test
  public void testFindMemStoresEligibleForFlush() throws Exception {
    LOG.debug("testFindMemStoresEligibleForFlush");
    Configuration conf1 = HBaseConfiguration.create(conf);
    conf1.setInt("hbase.regionserver.maxlogs", 1);
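    // With maxlogs=1, holding more than one rolled file exceeds the limit, so
    // each subsequent roll asks for flushes of the regions pinning the oldest wal.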
    HLog hlog = HLogFactory.createHLog(fs, FSUtils.getRootDir(conf1), dir.toString(), conf1);
    TableName t1 = TableName.valueOf("t1");
    TableName t2 = TableName.valueOf("t2");
    HRegionInfo hri1 = new HRegionInfo(t1, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    HRegionInfo hri2 = new HRegionInfo(t2, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    // variables to mock region sequenceIds
    final AtomicLong sequenceId1 = new AtomicLong(1);
    final AtomicLong sequenceId2 = new AtomicLong(1);
    // add edits and roll the wal
    try {
      addEdits(hlog, hri1, t1, 2, sequenceId1);
      hlog.rollWriter();
      // add some more edits and roll the wal. This would reach the log number threshold
      addEdits(hlog, hri1, t1, 2, sequenceId1);
      hlog.rollWriter();
      // with above rollWriter call, the max logs limit is reached.
      assertTrue(((FSHLog) hlog).getNumRolledLogFiles() == 2);

      // get the regions to flush; since there is only one region in the oldest wal, it should
      // return only one region.
      byte[][] regionsToFlush = ((FSHLog) hlog).findRegionsToForceFlush();
      assertEquals(1, regionsToFlush.length);
      assertEquals(hri1.getEncodedNameAsBytes(), regionsToFlush[0]);
      // insert edits in the second region
      addEdits(hlog, hri2, t2, 2, sequenceId2);
      // get the regions to flush, it should still return region1.
      regionsToFlush = ((FSHLog) hlog).findRegionsToForceFlush();
      assertEquals(1, regionsToFlush.length);
      assertEquals(hri1.getEncodedNameAsBytes(), regionsToFlush[0]);
      // flush region 1, and roll the wal file. Only the last wal, which has entries for region2,
      // should remain.
      flushRegion(hlog, hri1.getEncodedNameAsBytes());
      hlog.rollWriter();
      // only one wal should remain now (that is for the second region).
      assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
      // flush the second region
      flushRegion(hlog, hri2.getEncodedNameAsBytes());
      hlog.rollWriter(true);
      // no wal should remain now.
      assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
      // add edits both to region 1 and region 2, and roll.
      addEdits(hlog, hri1, t1, 2, sequenceId1);
      addEdits(hlog, hri2, t2, 2, sequenceId2);
      hlog.rollWriter();
      // add edits and roll the writer, to reach the max logs limit.
      assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
      addEdits(hlog, hri1, t1, 2, sequenceId1);
      hlog.rollWriter();
      // it should return two regions to flush, as the oldest wal file has entries
      // for both regions.
      regionsToFlush = ((FSHLog) hlog).findRegionsToForceFlush();
      assertEquals(2, regionsToFlush.length);
      // flush both regions
      flushRegion(hlog, hri1.getEncodedNameAsBytes());
      flushRegion(hlog, hri2.getEncodedNameAsBytes());
      hlog.rollWriter(true);
      assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
      // Add an edit to region1, and roll the wal.
      addEdits(hlog, hri1, t1, 2, sequenceId1);
      // tests partial flush: roll on a partial flush, and ensure that the wal is not archived.
      hlog.startCacheFlush(hri1.getEncodedNameAsBytes());
      hlog.rollWriter();
      hlog.completeCacheFlush(hri1.getEncodedNameAsBytes());
      assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
    } finally {
      if (hlog != null) hlog.close();
    }
  }

  /**
   * Simulates HLog append ops for a region and tests
   * {@link FSHLog#areAllRegionsFlushed(Map, Map, Map)} API.
   * It compares the region sequenceIds with oldestFlushing and oldestUnFlushed entries.
   * If a region's entries are larger than the min of (oldestFlushing, oldestUnFlushed), then the
   * region should be flushed before archiving this WAL.
   */
  @Test
  public void testAllRegionsFlushed() {
    LOG.debug("testAllRegionsFlushed");
    Map<byte[], Long> oldestFlushingSeqNo = new HashMap<byte[], Long>();
    Map<byte[], Long> oldestUnFlushedSeqNo = new HashMap<byte[], Long>();
    Map<byte[], Long> seqNo = new HashMap<byte[], Long>();
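    // Keys are encoded region names. seqNo holds the latest sequence id written
    // per region; the other two maps mirror FSHLog's accounting of the oldest
    // flushing and oldest unflushed edit per region.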
    // create a table
    TableName t1 = TableName.valueOf("t1");
    // create a region
    HRegionInfo hri1 = new HRegionInfo(t1, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    // variables to mock region sequenceIds
    final AtomicLong sequenceId1 = new AtomicLong(1);
    // test empty map
    assertTrue(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
    // add entries in the region
    seqNo.put(hri1.getEncodedNameAsBytes(), sequenceId1.incrementAndGet());
    oldestUnFlushedSeqNo.put(hri1.getEncodedNameAsBytes(), sequenceId1.get());
    // should say region1 is not flushed.
    assertFalse(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
    // test with entries in the oldestFlushing map.
    oldestUnFlushedSeqNo.clear();
    oldestFlushingSeqNo.put(hri1.getEncodedNameAsBytes(), sequenceId1.get());
    assertFalse(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
    // simulate region flush, i.e., clear oldestFlushing and oldestUnflushed maps
    oldestFlushingSeqNo.clear();
    oldestUnFlushedSeqNo.clear();
    assertTrue(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
    // insert some large values for region1
    oldestUnFlushedSeqNo.put(hri1.getEncodedNameAsBytes(), 1000L);
    seqNo.put(hri1.getEncodedNameAsBytes(), 1500L);
    assertFalse(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));

    // tests when oldestUnFlushed/oldestFlushing contains a larger value.
    // It means the region has been flushed.
    oldestFlushingSeqNo.put(hri1.getEncodedNameAsBytes(), 1200L);
    oldestUnFlushedSeqNo.clear();
    seqNo.put(hri1.getEncodedNameAsBytes(), 1199L);
    assertTrue(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
  }

  /**
   * Helper to simulate a region flush against the given WAL.
   */
  private void flushRegion(HLog hlog, byte[] regionEncodedName) {
    hlog.startCacheFlush(regionEncodedName);
    hlog.completeCacheFlush(regionEncodedName);
  }

  static class DumbWALActionsListener implements WALActionsListener {
    int increments = 0;

    @Override
    public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
                                         WALEdit logEdit) {
      increments++;
    }

    @Override
    public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
      increments++;
    }

    @Override
    public void preLogRoll(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void postLogRoll(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void preLogArchive(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void postLogArchive(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void logRollRequested() {
      // not interested
    }

    @Override
    public void logCloseRequested() {
      // not interested
    }
  }

}