View Javadoc

1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase.master.cleaner;
19  
20  import static org.junit.Assert.assertEquals;
21  import static org.junit.Assert.assertFalse;
22  import static org.junit.Assert.assertTrue;
23  import static org.mockito.Mockito.doThrow;
24  import static org.mockito.Mockito.spy;
25  
26  import java.io.IOException;
27  import java.lang.reflect.Field;
28  import java.net.URLEncoder;
29  import java.util.Iterator;
30  import java.util.LinkedList;
31  import java.util.List;
32  
33  import com.google.common.collect.Lists;
34  import org.apache.hadoop.conf.Configuration;
35  import org.apache.hadoop.fs.FileStatus;
36  import org.apache.hadoop.fs.FileSystem;
37  import org.apache.hadoop.fs.Path;
38  import org.apache.hadoop.hbase.catalog.CatalogTracker;
39  import org.apache.hadoop.hbase.Abortable;
40  import org.apache.hadoop.hbase.HBaseTestingUtility;
41  import org.apache.hadoop.hbase.HConstants;
42  import org.apache.hadoop.hbase.testclassification.MediumTests;
43  import org.apache.hadoop.hbase.Server;
44  import org.apache.hadoop.hbase.ServerName;
45  import org.apache.hadoop.hbase.Waiter;
46  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
47  import org.apache.hadoop.hbase.replication.ReplicationFactory;
48  import org.apache.hadoop.hbase.replication.ReplicationQueues;
49  import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
50  import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
51  import org.apache.hadoop.hbase.replication.regionserver.Replication;
52  import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
53  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
54  import org.apache.zookeeper.KeeperException;
55  import org.apache.zookeeper.data.Stat;
56  import org.junit.AfterClass;
57  import org.junit.BeforeClass;
58  import org.junit.Test;
59  import org.junit.experimental.categories.Category;
60  import org.mockito.Mockito;
61  
@Category(MediumTests.class)
public class TestLogsCleaner {

  // Shared test harness; only a mini ZooKeeper cluster is started (no HDFS/HBase
  // mini cluster) — the filesystem used below is whatever FileSystem.get(conf)
  // resolves to for the test configuration.
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  /**
   * Starts a mini ZooKeeper cluster used by the replication queues and the
   * ReplicationLogCleaner under test.
   * @throws java.lang.Exception
   */
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniZKCluster();
  }

  /**
   * Shuts down the mini ZooKeeper cluster started in {@link #setUpBeforeClass()}.
   * @throws java.lang.Exception
   */
  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniZKCluster();
  }

  /**
   * End-to-end check of the LogCleaner chore over the old-WALs directory.
   * Creates a mix of files and verifies that after one chore() pass exactly
   * five survive: the "current" file, one file dated in the future, and the
   * three old files registered in the replication queues (which the
   * ReplicationLogCleaner must refuse to delete).
   */
  @Test
  public void testLogCleaning() throws Exception{
    Configuration conf = TEST_UTIL.getConfiguration();
    // TTL (ms) below which TimeToLiveLogCleaner considers a file too recent to delete.
    long ttl = 10000;
    conf.setLong("hbase.master.logcleaner.ttl", ttl);
    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    // Adds the ReplicationLogCleaner to the master's log-cleaner plugin chain.
    Replication.decorateMasterConfiguration(conf);
    Server server = new DummyServer();
    ReplicationQueues repQueues =
        ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
    repQueues.init(server.getServerName().toString());
    final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
        HConstants.HREGION_OLDLOGDIR_NAME);
    // WAL file names embed the server name; URL-encode it the same way the
    // real WAL layout does so the cleaner parses the names as genuine logs.
    String fakeMachineName =
      URLEncoder.encode(server.getServerName().toString(), "UTF8");

    final FileSystem fs = FileSystem.get(conf);

    // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
    long now = System.currentTimeMillis();
    fs.delete(oldLogDir, true);
    fs.mkdirs(oldLogDir);
    // Case 1: 2 invalid files (names that don't parse as WALs), which would be
    // deleted directly
    fs.createNewFile(new Path(oldLogDir, "a"));
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
    // Case 2: 1 "recent" file, not even deletable for the first log cleaner
    // (TimeToLiveLogCleaner), so we are not going down the chain
    System.out.println("Now is: " + now);
    for (int i = 1; i < 31; i++) {
      // Case 3: old files which would be deletable for the first log cleaner
      // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
      Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
      fs.createNewFile(fileName);
      // Case 4: put 3 old log files in ZK indicating that they are scheduled
      // for replication so these files would pass the first log cleaner
      // (TimeToLiveLogCleaner) but would be rejected by the second
      // (ReplicationLogCleaner).  i % 10 == 1 picks i = 1, 11, 21.
      if (i % (30/3) == 1) {
        repQueues.addLog(fakeMachineName, fileName.getName());
        System.out.println("Replication log file: " + fileName);
      }
    }

    // Sleep for the full TTL so the files below get a strictly newer
    // modification time than the 30 "old" files created above, keeping them
    // inside the TTL window when the cleaner runs.
    // (Note: comment fixed — was "modifcation".)
    Thread.sleep(ttl);
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

    // Case 2: 1 newer file, not even deletable for the first log cleaner
    // (TimeToLiveLogCleaner), so we are not going down the chain
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

    for (FileStatus stat : fs.listStatus(oldLogDir)) {
      System.out.println(stat.getPath().toString());
    }

    // 30 old + 2 invalid + 1 recent + 1 future-dated = 34 files before cleaning.
    assertEquals(34, fs.listStatus(oldLogDir).length);

    LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

    cleaner.chore();

    // We end up with the current log file, a newer one and the 3 old log
    // files which are scheduled for replication.  Deletion may complete
    // asynchronously, hence the polling wait instead of a direct assert.
    TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return 5 == fs.listStatus(oldLogDir).length;
      }
    });

    for (FileStatus file : fs.listStatus(oldLogDir)) {
      System.out.println("Kept log files: " + file.getPath().getName());
    }
  }

  /**
   * Verifies that getDeletableFiles() terminates when the replication queues
   * znode cversion changes between reads.  The mock returns cversions
   * 1, 2, 3, 4 and then (per Mockito's thenReturn semantics) 4 on every
   * subsequent call, so the cversion eventually stabilizes and the method
   * must return within the test timeout.
   */
  @Test(timeout=5000)
  public void testZnodeCversionChange() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    ReplicationLogCleaner cleaner = new ReplicationLogCleaner();
    cleaner.setConf(conf);

    ReplicationQueuesClient rqcMock = Mockito.mock(ReplicationQueuesClient.class);
    Mockito.when(rqcMock.getQueuesZNodeCversion()).thenReturn(1, 2, 3, 4);

    // Inject the mock into the cleaner's private field via reflection; the
    // field name here must stay in sync with ReplicationLogCleaner.
    Field rqc = ReplicationLogCleaner.class.getDeclaredField("replicationQueues");
    rqc.setAccessible(true);

    rqc.set(cleaner, rqcMock);

    // This should return eventually when cversion stabilizes
    cleaner.getDeletableFiles(new LinkedList<FileStatus>());
  }

  /**
   * ReplicationLogCleaner should be able to ride over ZooKeeper errors without
   * aborting: on a ZK failure it must keep (not delete) every candidate file,
   * and it must not stop itself.
   */
  @Test
  public void testZooKeeperAbort() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    ReplicationLogCleaner cleaner = new ReplicationLogCleaner();

    List<FileStatus> dummyFiles = Lists.newArrayList(
        new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log1")),
        new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log2"))
    );

    FaultyZooKeeperWatcher faultyZK =
        new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null);
    try {
      faultyZK.init();
      cleaner.setConf(conf, faultyZK);
      // should keep all files due to a ConnectionLossException getting the queues znodes
      Iterable<FileStatus> toDelete = cleaner.getDeletableFiles(dummyFiles);
      assertFalse(toDelete.iterator().hasNext());
      assertFalse(cleaner.isStopped());
    } finally {
      faultyZK.close();
    }

    // when zk is working both files should be returned, in input order
    cleaner = new ReplicationLogCleaner();
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testZooKeeperAbort-normal", null);
    try {
      cleaner.setConf(conf, zkw);
      Iterable<FileStatus> filesToDelete = cleaner.getDeletableFiles(dummyFiles);
      Iterator<FileStatus> iter = filesToDelete.iterator();
      assertTrue(iter.hasNext());
      assertEquals(new Path("log1"), iter.next().getPath());
      assertTrue(iter.hasNext());
      assertEquals(new Path("log2"), iter.next().getPath());
      assertFalse(iter.hasNext());
    } finally {
      zkw.close();
    }
  }

  /**
   * Minimal {@link Server} implementation: just enough for the cleaner chore
   * (a configuration, a ZooKeeper connection, and a fixed server name).
   */
  static class DummyServer implements Server {

    @Override
    public Configuration getConfiguration() {
      return TEST_UTIL.getConfiguration();
    }

    @Override
    public ZooKeeperWatcher getZooKeeper() {
      // NOTE(review): opens a NEW watcher (and ZK connection) on every call and
      // never closes it — acceptable leak for a short-lived test, but callers
      // should not assume they get the same watcher twice.
      try {
        return new ZooKeeperWatcher(getConfiguration(), "dummy server", this);
      } catch (IOException e) {
        e.printStackTrace();
      }
      // Only reached if watcher construction failed; callers get null.
      return null;
    }

    @Override
    public CatalogTracker getCatalogTracker() {
      return null;
    }

    @Override
    public ServerName getServerName() {
      return ServerName.valueOf("regionserver,60020,000000");
    }

    @Override
    public void abort(String why, Throwable e) {}

    @Override
    public boolean isAborted() {
      return false;
    }

    @Override
    public void stop(String why) {}

    @Override
    public boolean isStopped() {
      return false;
    }
  }

  /**
   * ZooKeeperWatcher whose underlying RecoverableZooKeeper is a Mockito spy
   * stubbed to throw ConnectionLossException when the replication queues
   * znode is read, simulating a ZK outage for testZooKeeperAbort().
   */
  static class FaultyZooKeeperWatcher extends ZooKeeperWatcher {
    private RecoverableZooKeeper zk;

    public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable abortable)
        throws ZooKeeperConnectionException, IOException {
      super(conf, identifier, abortable);
    }

    // Must be called after construction and before the watcher is used;
    // until then getRecoverableZooKeeper() returns null.
    public void init() throws Exception {
      this.zk = spy(super.getRecoverableZooKeeper());
      // Mockito matches arguments by equals(); this stub relies on the
      // production code also passing a fresh Stat to getData() for
      // "/hbase/replication/rs" — NOTE(review): verify against the cleaner's
      // actual call if this test ever stops throwing as expected.
      doThrow(new KeeperException.ConnectionLossException())
          .when(zk).getData("/hbase/replication/rs", null, new Stat());
    }

    public RecoverableZooKeeper getRecoverableZooKeeper() {
      return zk;
    }
  }
}
284