/*
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.replication;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestReplicationSource {

  private static final Log LOG =
      LogFactory.getLog(TestReplicationSource.class);
  private static final HBaseTestingUtility TEST_UTIL =
      new HBaseTestingUtility();
  private static FileSystem FS;
  private static Path oldLogDir;
  private static Path logDir;
  private static Configuration conf = HBaseConfiguration.create();

  /**
   * Starts a mini DFS cluster and makes sure the log and archive directories
   * start out empty.
   * @throws Exception on failure
   */
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniDFSCluster(1);
    FS = TEST_UTIL.getDFSCluster().getFileSystem();
    oldLogDir = new Path(FS.getHomeDirectory(),
        HConstants.HREGION_OLDLOGDIR_NAME);
    if (FS.exists(oldLogDir)) FS.delete(oldLogDir, true);
    logDir = new Path(FS.getHomeDirectory(),
        HConstants.HREGION_LOGDIR_NAME);
    if (FS.exists(logDir)) FS.delete(logDir, true);
  }

  /**
   * Sanity check that we can move logs around while we are reading
   * from them. Should this test fail, ReplicationSource would have a hard
   * time reading logs that are being archived.
   * @throws Exception
   */
  @Test
  public void testLogMoving() throws Exception {
    Path logPath = new Path(logDir, "log");
    if (!FS.exists(logDir)) FS.mkdirs(logDir);
    if (!FS.exists(oldLogDir)) FS.mkdirs(oldLogDir);
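    // Write a few edits to a fresh WAL so there is something to read back.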
    HLog.Writer writer = HLog.createWriter(FS, logPath, conf);
    for (int i = 0; i < 3; i++) {
      byte[] b = Bytes.toBytes(Integer.toString(i));
      KeyValue kv = new KeyValue(b, b, b);
      WALEdit edit = new WALEdit();
      edit.add(kv);
      HLogKey key = new HLogKey(b, b, 0, 0, HConstants.DEFAULT_CLUSTER_ID);
      writer.append(new HLog.Entry(key, edit));
      writer.sync();
    }
    writer.close();
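
    // Open a reader on the log while it still lives in the active log
    // directory and check that the first entry can be read.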
    HLog.Reader reader = HLog.getReader(FS, logPath, conf);
    HLog.Entry entry = reader.next();
    assertNotNull(entry);
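
    // Move the log into the archive directory while the reader still has it
    // open, mimicking what happens when a WAL is archived out from under
    // ReplicationSource.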
    Path oldLogPath = new Path(oldLogDir, "log");
    FS.rename(logPath, oldLogPath);

    entry = reader.next();
    assertNotNull(entry);
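
    // Drain the remaining entries; reading past the last edit should return
    // null rather than fail, even though the file has been moved.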
    entry = reader.next();
    entry = reader.next();
    assertNull(entry);

    // Close the reader so the test does not leak an open file handle.
    reader.close();
  }

  // Sanity-checks that the test does not leak resources such as threads or
  // open file handles.
  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
      new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}