/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.*;

import java.io.IOException;
import java.lang.reflect.Method;
import java.net.BindException;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/** JUnit test case for HLog */
@Category(LargeTests.class)
@SuppressWarnings("deprecation")
public class TestHLog {
  private static final Log LOG = LogFactory.getLog(TestHLog.class);
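  // Instance initializer: crank the HDFS and WAL loggers up to ALL so that
  // failures in these tests come with full client/server context in the logs.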
  {
    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
      .getLogger().setLevel(Level.ALL);
    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)HLog.LOG).getLogger().setLevel(Level.ALL);
  }

  private static Configuration conf;
  private static FileSystem fs;
  private static Path dir;
  private static MiniDFSCluster cluster;
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Path hbaseDir;
  private static Path oldLogDir;

  @Before
  public void setUp() throws Exception {
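    // Wipe everything under the filesystem root so state left behind by a
    // previous test cannot leak into this one.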
    FileStatus[] entries = fs.listStatus(new Path("/"));
    for (FileStatus entry : entries) {
      fs.delete(entry.getPath(), true);
    }
  }

  @After
  public void tearDown() throws Exception {
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Make block sizes small.
    TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
    // needed for testAppendClose()
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
    // quicker heartbeat interval for faster DN death notification
    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.socket.timeout", 5000);
    // faster failover with cluster.shutdown();fs.close() idiom
    TEST_UTIL.getConfiguration().setInt("ipc.client.connect.max.retries", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.client.block.recovery.retries", 1);
    TEST_UTIL.getConfiguration().setInt("ipc.client.connection.maxidletime", 500);
    TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
        SampleRegionWALObserver.class.getName());
    TEST_UTIL.startMiniDFSCluster(3);

    conf = TEST_UTIL.getConfiguration();
    cluster = TEST_UTIL.getDFSCluster();
    fs = cluster.getFileSystem();

    hbaseDir = TEST_UTIL.createRootDir();
    oldLogDir = new Path(hbaseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    dir = new Path(hbaseDir, getName());
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  private static String getName() {
    // Fixed name; used both as a table name and as the WAL directory name.
    return "TestHLog";
  }

  /**
   * Test that, even with three concurrent writer threads, edits are written
   * to the WAL in ascending sequence-id order.
   * @throws Exception
   */
  @Test
  public void testMaintainOrderWithConcurrentWrites() throws Exception {
    // Run the HPE tool with three threads writing 3000 edits each concurrently.
    // When done, verify that all edits were written and that the order in the
    // WALs is of ascending edit sequence ids.
    int errCode = HLogPerformanceEvaluation.innerMain(
      new Configuration(TEST_UTIL.getConfiguration()),
      new String [] {"-threads", "3", "-verify", "-noclosefs", "-iterations", "3000"});
    assertEquals(0, errCode);
  }

  /**
   * Just write multiple logs then split.  Before the fix for HADOOP-2283, this
   * would fail.
   * @throws IOException
   */
  @Test
  public void testSplit() throws IOException {
    final TableName tableName = TableName.valueOf(getName());
    final byte [] rowName = tableName.getName();
    Path logdir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
    HLog log = HLogFactory.createHLog(fs, hbaseDir,
        HConstants.HREGION_LOGDIR_NAME, conf);
    final int howmany = 3;
    HRegionInfo[] infos = new HRegionInfo[howmany];
    Path tabledir = FSUtils.getTableDir(hbaseDir, tableName);
    fs.mkdirs(tabledir);
    for (int i = 0; i < howmany; i++) {
      infos[i] = new HRegionInfo(tableName,
          Bytes.toBytes("" + i), Bytes.toBytes("" + (i + 1)), false);
      fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
      LOG.info("Created region dir " + new Path(tabledir, infos[i].getEncodedName()));
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("column"));

    // Add edits for three regions, rolling the log after each batch.
    try {
      for (int ii = 0; ii < howmany; ii++) {
        for (int i = 0; i < howmany; i++) {
          for (int j = 0; j < howmany; j++) {
            WALEdit edit = new WALEdit();
            byte [] family = Bytes.toBytes("column");
            byte [] qualifier = Bytes.toBytes(Integer.toString(j));
            byte [] column = Bytes.toBytes("column:" + Integer.toString(j));
            edit.add(new KeyValue(rowName, family, qualifier,
                System.currentTimeMillis(), column));
            LOG.info("Region " + i + ": " + edit);
            log.append(infos[i], tableName, edit,
              System.currentTimeMillis(), htd);
          }
        }
        log.rollWriter();
      }
      log.close();
      List<Path> splits = HLogSplitter.split(
        hbaseDir, logdir, oldLogDir, fs, conf);
      verifySplits(splits, howmany);
      log = null;
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
    }
  }

  /**
   * Test new HDFS-265 sync.
   * @throws Exception
   */
  @Test
  public void Broken_testSync() throws Exception {
    TableName tableName = TableName.valueOf(getName());
    // First verify that using streams all works.
    Path p = new Path(dir, getName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(tableName.getName());
    Method syncMethod = null;
    try {
      syncMethod = out.getClass().getMethod("hflush", new Class<?> []{});
    } catch (NoSuchMethodException e) {
      try {
        syncMethod = out.getClass().getMethod("sync", new Class<?> []{});
      } catch (NoSuchMethodException ex) {
        fail("This version of Hadoop supports neither Syncable.sync() " +
            "nor Syncable.hflush().");
      }
    }
    syncMethod.invoke(out, new Object[]{});
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte [] buffer = new byte [1024];
    int read = in.read(buffer);
    assertEquals(tableName.getName().length, read);
    out.close();
    in.close();

    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir", conf);

    final int total = 20;
    HLog.Reader reader = null;

    try {
      HRegionInfo info = new HRegionInfo(tableName, null, null, false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor(tableName.getName()));

      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        wal.append(info, tableName, kvs, System.currentTimeMillis(), htd);
      }
      // Now call sync and try reading.  Opening a Reader before you sync just
      // gives you an EOFException.
      wal.sync();
      // Open a Reader.
      Path walPath = ((FSHLog) wal).computeFilename();
      reader = HLogFactory.createReader(fs, walPath, conf);
      int count = 0;
      HLog.Entry entry = new HLog.Entry();
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total, count);
      reader.close();
      // Check that opening a Reader also works on a file that has had a sync
      // done on it but has not yet been closed.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        wal.append(info, tableName, kvs, System.currentTimeMillis(), htd);
      }
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertTrue(count >= total);
      reader.close();
      // If I sync, should see double the edits.
      wal.sync();
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 2, count);
      // Now do a test that ensures stuff works when we go over block boundary,
      // especially that we return good length on file.
      final byte [] value = new byte[1025 * 1024];  // A value just over 1MB.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), value));
        wal.append(info, tableName, kvs, System.currentTimeMillis(), htd);
      }
      // Now I should have written out lots of blocks.  Sync then read.
      wal.sync();
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
      // Close the WAL and ensure the Reader still gets the right length.
      wal.close();
      reader = HLogFactory.createReader(fs, walPath, conf);
      count = 0;
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
    } finally {
      if (wal != null) wal.closeAndDelete();
      if (reader != null) reader.close();
    }
  }

  /**
   * Test the findMemstoresWithEditsEqualOrOlderThan method.
   * @throws IOException
   */
  @Test
  public void testFindMemstoresWithEditsEqualOrOlderThan() throws IOException {
    Map<byte [], Long> regionsToSeqids = new TreeMap<byte [], Long>(Bytes.BYTES_COMPARATOR);
    for (int i = 0; i < 10; i++) {
      Long l = Long.valueOf(i);
      regionsToSeqids.put(l.toString().getBytes(), l);
    }
    byte [][] regions =
      FSHLog.findMemstoresWithEditsEqualOrOlderThan(1, regionsToSeqids);
    assertEquals(2, regions.length);
    assertTrue(Bytes.equals(regions[0], "0".getBytes()) ||
        Bytes.equals(regions[0], "1".getBytes()));
    regions = FSHLog.findMemstoresWithEditsEqualOrOlderThan(3, regionsToSeqids);
    int count = 4;
    assertEquals(count, regions.length);
    // Regions returned are not ordered.
    for (int i = 0; i < count; i++) {
      assertTrue(Bytes.equals(regions[i], "0".getBytes()) ||
        Bytes.equals(regions[i], "1".getBytes()) ||
        Bytes.equals(regions[i], "2".getBytes()) ||
        Bytes.equals(regions[i], "3".getBytes()));
    }
  }

  private void verifySplits(List<Path> splits, final int howmany)
  throws IOException {
    assertEquals(howmany * howmany, splits.size());
    for (int i = 0; i < splits.size(); i++) {
      LOG.info("Verifying=" + splits.get(i));
      HLog.Reader reader = HLogFactory.createReader(fs, splits.get(i), conf);
      try {
        int count = 0;
        String previousRegion = null;
        long seqno = -1;
        HLog.Entry entry = new HLog.Entry();
        while ((entry = reader.next(entry)) != null) {
          HLogKey key = entry.getKey();
          String region = Bytes.toString(key.getEncodedRegionName());
          // Assert that all edits are for the same region.
          if (previousRegion != null) {
            assertEquals(previousRegion, region);
          }
          LOG.info("oldseqno=" + seqno + ", newseqno=" + key.getLogSeqNum());
          assertTrue(seqno < key.getLogSeqNum());
          seqno = key.getLogSeqNum();
          previousRegion = region;
          count++;
        }
        assertEquals(howmany, count);
      } finally {
        reader.close();
      }
    }
  }

  /*
   * We pass different values to recoverFileLease() so that different code paths are covered.
   *
   * For this test to pass, the following are required:
   * 1. HDFS-200 (append support)
   * 2. HDFS-988 (SafeMode should freeze file operations
   *              [FSNamesystem.nextGenerationStampForBlock])
   * 3. HDFS-142 (on restart, maintain pendingCreates)
   */
  @Test (timeout=300000)
  public void testAppendClose() throws Exception {
    TableName tableName = TableName.valueOf(getName());
    HRegionInfo regioninfo = new HRegionInfo(tableName,
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);

    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir",
        "hlogdir_archive", conf);
    final int total = 20;

    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor(tableName.getName()));

    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd);
    }
    // Now call sync to send the data to HDFS datanodes
    wal.sync();
    int namenodePort = cluster.getNameNodePort();
    final Path walPath = ((FSHLog) wal).computeFilename();

    // Stop the cluster.  (ensure restart since we're sharing MiniDFSCluster)
    try {
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
      dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
      TEST_UTIL.shutdownMiniDFSCluster();
      try {
        // wal.writer.close() will throw an exception,
        // but still call this since it closes the LogSyncer thread first
        wal.close();
      } catch (IOException e) {
        LOG.info(e);
      }
      fs.close(); // closing FS last so DFSOutputStream can't call close
      LOG.info("STOPPED first instance of the cluster");
    } finally {
      // Restart the cluster
      while (cluster.isClusterUp()) {
        LOG.error("Waiting for cluster to go down");
        Thread.sleep(1000);
      }
      assertFalse(cluster.isClusterUp());
      cluster = null;
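      // Bring the new cluster up on the same namenode port so that walPath,
      // created against the old cluster, still resolves against the new one.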
      for (int i = 0; i < 100; i++) {
        try {
          cluster = TEST_UTIL.startMiniDFSClusterForTestHLog(namenodePort);
          break;
        } catch (BindException e) {
          LOG.info("Sleeping.  BindException bringing up new cluster");
          Threads.sleep(1000);
        }
      }
      cluster.waitActive();
      fs = cluster.getFileSystem();
      LOG.info("STARTED second instance.");
    }

    // set the lease period to be 1 second so that the
    // namenode triggers lease recovery upon append request
    Method setLeasePeriod = cluster.getClass()
      .getDeclaredMethod("setLeasePeriod", new Class[]{Long.TYPE, Long.TYPE});
    setLeasePeriod.setAccessible(true);
    setLeasePeriod.invoke(cluster, 1000L, 1000L);
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      LOG.info(e);
    }

    // Now try recovering the log, like the HMaster would do
    final FileSystem recoveredFs = fs;
    final Configuration rlConf = conf;

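    // Run lease recovery on its own thread so the test can enforce a timeout
    // rather than hang forever if recovery never completes.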
    class RecoverLogThread extends Thread {
      public Exception exception = null;
      public void run() {
        try {
          FSUtils.getInstance(fs, rlConf)
            .recoverFileLease(recoveredFs, walPath, rlConf, null);
        } catch (IOException e) {
          exception = e;
        }
      }
    }

    RecoverLogThread t = new RecoverLogThread();
    t.start();
    // Timeout after 60 sec. Without the correct patches, this would be an infinite loop.
    t.join(60 * 1000);
    if (t.isAlive()) {
      t.interrupt();
      throw new Exception("Timed out waiting for HLog.recoverLog()");
    }

    if (t.exception != null) {
      throw t.exception;
    }

    // Make sure you can read all the content
    HLog.Reader reader = HLogFactory.createReader(fs, walPath, conf);
    int count = 0;
    HLog.Entry entry = new HLog.Entry();
    while (reader.next(entry) != null) {
      count++;
      assertTrue("Should be one KeyValue per WALEdit",
          entry.getEdit().getKeyValues().size() == 1);
    }
    assertEquals(total, count);
    reader.close();

    // Reset the lease period
    setLeasePeriod.invoke(cluster, 60000L, 3600000L);
  }

  /**
   * Tests that we can write out an edit, close, and then read it back in again.
   * @throws IOException
   */
  @Test
  public void testEditAdd() throws IOException {
    final int COL_COUNT = 10;
    final TableName tableName = TableName.valueOf("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog.Reader reader = null;
    HLog log = null;
    try {
      log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);

      // Write columns named 1, 2, 3, etc. and then values of single byte
      // 1, 2, 3...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo info = new HRegionInfo(tableName,
          row, Bytes.toBytes(Bytes.toString(row) + "1"), false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      log.append(info, tableName, cols, System.currentTimeMillis(), htd);
      log.startCacheFlush(info.getEncodedNameAsBytes());
      log.completeCacheFlush(info.getEncodedNameAsBytes());
      log.close();
      Path filename = ((FSHLog) log).computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLogFactory.createReader(fs, filename, conf);
      // Above we added all columns on a single row, so we only read one
      // entry below... that's why the loop bound is '1'.
      for (int i = 0; i < 1; i++) {
        HLog.Entry entry = reader.next(null);
        if (entry == null) break;
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
        assertTrue(tableName.equals(key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(row, kv.getRow()));
        assertEquals((byte)(i + '0'), kv.getValue()[0]);
        System.out.println(key + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Tests that a multi-KeyValue edit written via append() can be read back
   * with all of its KeyValues intact.
   * @throws IOException
   */
  @Test
  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final TableName tableName = TableName.valueOf("tablename");
    final byte [] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    try {
      // Write columns named 1, 2, 3, etc. and then values of single byte
      // 1, 2, 3...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes());
      log.close();
      Path filename = ((FSHLog) log).computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLogFactory.createReader(fs, filename, conf);
      HLog.Entry entry = reader.next();
      assertEquals(COL_COUNT, entry.getEdit().size());
      int idx = 0;
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(tableName.equals(entry.getKey().getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte)(idx + '0'), val.getValue()[0]);
        System.out.println(entry.getKey() + " " + val);
        idx++;
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Tests that a registered WALActionsListener visits each entry before it is
   * written, and stops being notified once it is unregistered.
   * @throws Exception
   */
  @Test
  public void testVisitors() throws Exception {
    final int COL_COUNT = 10;
    final TableName tableName = TableName.valueOf("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    try {
      DumbWALActionsListener visitor = new DumbWALActionsListener();
      log.registerWALActionsListener(visitor);
      long timestamp = System.currentTimeMillis();
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));

      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      for (int i = 0; i < COL_COUNT; i++) {
        WALEdit cols = new WALEdit();
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[]{(byte) (i + '0')}));
        log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      }
      assertEquals(COL_COUNT, visitor.increments);
      log.unregisterWALActionsListener(visitor);
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(11)),
          timestamp, new byte[]{(byte) (11 + '0')}));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      assertEquals(COL_COUNT, visitor.increments);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

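  /**
   * Tests that rolling the writer archives WAL files once every region with
   * edits in them has been flushed, checking the rolled-file count at each
   * step.
   */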
  @Test
  public void testLogCleaning() throws Exception {
    LOG.info("testLogCleaning");
    final TableName tableName = TableName.valueOf("testLogCleaning");
    final TableName tableName2 = TableName.valueOf("testLogCleaning2");

    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HRegionInfo hri2 = new HRegionInfo(tableName2,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

      // Add a single edit and make sure that rolling won't remove the file
      // Before HBASE-3198 it used to delete it
      addEdits(log, hri, tableName, 1);
      log.rollWriter();
      assertEquals(1, ((FSHLog) log).getNumRolledLogFiles());

      // See if there's anything wrong with more than 1 edit
      addEdits(log, hri, tableName, 2);
      log.rollWriter();
      assertEquals(2, ((FSHLog) log).getNumRolledLogFiles());

      // Now mix edits from 2 regions, still no flushing
      addEdits(log, hri, tableName, 1);
      addEdits(log, hri2, tableName2, 1);
      addEdits(log, hri, tableName, 1);
      addEdits(log, hri2, tableName2, 1);
      log.rollWriter();
      assertEquals(3, ((FSHLog) log).getNumRolledLogFiles());

      // Flush the first region, we expect to see the first two files getting
      // archived. We need to append something or writer won't be rolled.
      addEdits(log, hri2, tableName2, 1);
      log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes());
      log.rollWriter();
      assertEquals(2, ((FSHLog) log).getNumRolledLogFiles());

      // Flush the second region, which removes all the remaining output files
      // since the oldest was completely flushed and the two others only contain
      // flush information
      addEdits(log, hri2, tableName2, 1);
      log.startCacheFlush(hri2.getEncodedNameAsBytes());
      log.completeCacheFlush(hri2.getEncodedNameAsBytes());
      log.rollWriter();
      assertEquals(0, ((FSHLog) log).getNumRolledLogFiles());
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

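  /**
   * Tests that creating a new WAL writer fails once the WAL's parent directory
   * has been renamed out from under it, as happens when a server's logs are
   * moved to a -splitting directory.
   */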
  @Test
  public void testFailedToCreateHLogIfParentRenamed() throws IOException {
    FSHLog log = (FSHLog)HLogFactory.createHLog(
      fs, hbaseDir, "testFailedToCreateHLogIfParentRenamed", conf);
    long filenum = System.currentTimeMillis();
    Path path = log.computeFilename(filenum);
    HLogFactory.createWALWriter(fs, path, conf);
    Path parent = path.getParent();
    path = log.computeFilename(filenum + 1);
    Path newPath = new Path(parent.getParent(), parent.getName() + "-splitting");
    fs.rename(parent, newPath);
    try {
      HLogFactory.createWALWriter(fs, path, conf);
      fail("It should fail to create the new WAL");
    } catch (IOException ioe) {
      // expected, good.
    }
  }

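  /**
   * Tests extracting the ServerName from a WAL directory path, including the
   * inputs for which no ServerName can be parsed and null must be returned.
   */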
  @Test
  public void testGetServerNameFromHLogDirectoryName() throws IOException {
    ServerName sn = ServerName.valueOf("hn", 450, 1398);
    String hl = FSUtils.getRootDir(conf) + "/" + HLogUtil.getHLogDirectoryName(sn.toString());

    // Must not throw exception
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, null));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf,
        FSUtils.getRootDir(conf).toUri().toString()));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, ""));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, "                  "));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, hl));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, hl + "qdf"));
    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, "sfqf" + hl + "qdf"));

    final String wals = "/WALs/";
    ServerName parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf,
      FSUtils.getRootDir(conf).toUri().toString() + wals + sn +
      "/localhost%2C32984%2C1343316388997.1343316390417");
    Assert.assertEquals("standard", sn, parsed);

    parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf, hl + "/qdf");
    Assert.assertEquals("subdir", sn, parsed);

    parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf,
      FSUtils.getRootDir(conf).toUri().toString() + wals + sn +
      "-splitting/localhost%3A57020.1340474893931");
    Assert.assertEquals("split", sn, parsed);
  }

  /**
   * Tests that a loaded WAL coprocessor is visible through the WAL's
   * coprocessor host and won't break existing HLog test cases.
   */
  @Test
  public void testWALCoprocessorLoaded() throws Exception {
    // test to see whether the coprocessor is loaded or not.
    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    try {
      WALCoprocessorHost host = log.getCoprocessorHost();
      Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
      assertNotNull(c);
    } finally {
      if (log != null) log.closeAndDelete();
    }
  }

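  /**
   * Appends {@code times} single-KeyValue edits for the given region to the
   * log, one append per edit.
   */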
  private void addEdits(HLog log, HRegionInfo hri, TableName tableName,
                        int times) throws IOException {
    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor("row"));

    final byte [] row = Bytes.toBytes("row");
    for (int i = 0; i < times; i++) {
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, row, row, timestamp, row));
      log.append(hri, tableName, cols, timestamp, htd);
    }
  }

  /**
   * Writes a WAL in the legacy, pre-protobuf SequenceFile format and verifies
   * that the standard reader can read it back.
   * @throws IOException
   */
  @Test
  public void testReadLegacyLog() throws IOException {
    final int columnCount = 5;
    final int recordCount = 5;
    final TableName tableName = TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    long timestamp = System.currentTimeMillis();
    Path path = new Path(dir, "temphlog");
    SequenceFileLogWriter sflw = null;
    HLog.Reader reader = null;
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor(tableName);
      fs.mkdirs(dir);
      // Write log in pre-PB format.
      sflw = new SequenceFileLogWriter();
      sflw.init(fs, path, conf, false);
      for (int i = 0; i < recordCount; ++i) {
        HLogKey key = new HLogKey(
            hri.getEncodedNameAsBytes(), tableName, i, timestamp, HConstants.DEFAULT_CLUSTER_ID);
        WALEdit edit = new WALEdit();
        for (int j = 0; j < columnCount; ++j) {
          if (i == 0) {
            htd.addFamily(new HColumnDescriptor("column" + j));
          }
          String value = i + "" + j;
          edit.add(new KeyValue(row, row, row, timestamp, Bytes.toBytes(value)));
        }
        sflw.append(new HLog.Entry(key, edit));
      }
      sflw.sync();
      sflw.close();

      // Now read the log using standard means.
      reader = HLogFactory.createReader(fs, path, conf);
      assertTrue(reader instanceof SequenceFileLogReader);
      for (int i = 0; i < recordCount; ++i) {
        HLog.Entry entry = reader.next();
        assertNotNull(entry);
        assertEquals(columnCount, entry.getEdit().size());
        assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
        assertEquals(tableName, entry.getKey().getTablename());
        int idx = 0;
        for (KeyValue val : entry.getEdit().getKeyValues()) {
          assertTrue(Bytes.equals(row, val.getRow()));
          String value = i + "" + idx;
          assertArrayEquals(Bytes.toBytes(value), val.getValue());
          idx++;
        }
      }
      HLog.Entry entry = reader.next();
      assertNull(entry);
    } finally {
      if (sflw != null) {
        sflw.close();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Reads the WAL with and without a WALTrailer.
   * @throws IOException
   */
  @Test
  public void testWALTrailer() throws IOException {
    // Read with a trailer.
    doRead(true);
    // Read without a trailer.
    doRead(false);
  }

  /**
   * Appends entries to the WAL and then reads them back.
   * @param withTrailer If true, the WAL writer is closed before reading, so a
   *          trailer is appended to the WAL. Otherwise reading starts right
   *          after the sync call, so the reader is not aware of any trailer;
   *          in that scenario, an attempt to read the trailer in next()
   *          returns false from ProtobufLogReader.
   * @throws IOException
   */
  private void doRead(boolean withTrailer) throws IOException {
    final int columnCount = 5;
    final int recordCount = 5;
    final TableName tableName = TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    long timestamp = System.currentTimeMillis();
    Path path = new Path(dir, "temphlog");
    // delete the log if it already exists, for test only
    fs.delete(path, true);
    HLog.Writer writer = null;
    HLog.Reader reader = null;
    try {
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor(tableName);
      fs.mkdirs(dir);
      // Write log in pb format.
      writer = HLogFactory.createWALWriter(fs, path, conf);
      for (int i = 0; i < recordCount; ++i) {
        HLogKey key = new HLogKey(
            hri.getEncodedNameAsBytes(), tableName, i, timestamp, HConstants.DEFAULT_CLUSTER_ID);
        WALEdit edit = new WALEdit();
        for (int j = 0; j < columnCount; ++j) {
          if (i == 0) {
            htd.addFamily(new HColumnDescriptor("column" + j));
          }
          String value = i + "" + j;
          edit.add(new KeyValue(row, row, row, timestamp, Bytes.toBytes(value)));
        }
        writer.append(new HLog.Entry(key, edit));
      }
      writer.sync();
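      // Only close() writes the trailer; skipping it exercises reading a
      // trailer-less WAL.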
      if (withTrailer) writer.close();

      // Now read the log using standard means.
      reader = HLogFactory.createReader(fs, path, conf);
      assertTrue(reader instanceof ProtobufLogReader);
      if (withTrailer) {
        assertNotNull(reader.getWALTrailer());
      } else {
        assertNull(reader.getWALTrailer());
      }
      for (int i = 0; i < recordCount; ++i) {
        HLog.Entry entry = reader.next();
        assertNotNull(entry);
        assertEquals(columnCount, entry.getEdit().size());
        assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
        assertEquals(tableName, entry.getKey().getTablename());
        int idx = 0;
        for (KeyValue val : entry.getEdit().getKeyValues()) {
          assertTrue(Bytes.equals(row, val.getRow()));
          String value = i + "" + idx;
          assertArrayEquals(Bytes.toBytes(value), val.getValue());
          idx++;
        }
      }
      HLog.Entry entry = reader.next();
      assertNull(entry);
    } finally {
      if (writer != null) {
        writer.close();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

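  /**
   * Minimal WALActionsListener that just counts visitLogEntryBeforeWrite
   * notifications; all other callbacks are no-ops.
   */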
  static class DumbWALActionsListener implements WALActionsListener {
    int increments = 0;

    @Override
    public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
                                         WALEdit logEdit) {
      increments++;
    }

    @Override
    public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
      increments++;
    }

    @Override
    public void preLogRoll(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void postLogRoll(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void preLogArchive(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void postLogArchive(Path oldFile, Path newFile) {
      // not interested
    }

    @Override
    public void logRollRequested() {
      // not interested
    }

    @Override
    public void logCloseRequested() {
      // not interested
    }
  }

}