/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 package org.apache.hadoop.hbase.master.cleaner;
19
20 import static org.junit.Assert.assertEquals;
21 import static org.junit.Assert.assertFalse;
22 import static org.junit.Assert.assertTrue;
23
24 import java.io.IOException;
25
26 import org.apache.commons.logging.Log;
27 import org.apache.commons.logging.LogFactory;
28 import org.apache.hadoop.conf.Configuration;
29 import org.apache.hadoop.fs.FileStatus;
30 import org.apache.hadoop.fs.FileSystem;
31 import org.apache.hadoop.fs.Path;
32 import org.apache.hadoop.hbase.HBaseTestingUtility;
33 import org.apache.hadoop.hbase.HConstants;
34 import org.apache.hadoop.hbase.MediumTests;
35 import org.apache.hadoop.hbase.Server;
36 import org.apache.hadoop.hbase.ServerName;
37 import org.apache.hadoop.hbase.catalog.CatalogTracker;
38 import org.apache.hadoop.hbase.util.EnvironmentEdge;
39 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
40 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
41 import org.junit.AfterClass;
42 import org.junit.BeforeClass;
43 import org.junit.Test;
44 import org.junit.experimental.categories.Category;
45
46 @Category(MediumTests.class)
47 public class TestHFileCleaner {
48 private static final Log LOG = LogFactory.getLog(TestHFileCleaner.class);
49
50 private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
51
52 @BeforeClass
53 public static void setupCluster() throws Exception {
54
55 UTIL.startMiniDFSCluster(1);
56 }
57
58 @AfterClass
59 public static void shutdownCluster() throws Exception {
60 UTIL.shutdownMiniDFSCluster();
61 }
62
63 @Test
64 public void testTTLCleaner() throws IOException, InterruptedException {
65 FileSystem fs = UTIL.getDFSCluster().getFileSystem();
66 Path root = UTIL.getDataTestDir();
67 Path file = new Path(root, "file");
68 fs.createNewFile(file);
69 long createTime = System.currentTimeMillis();
70 assertTrue("Test file not created!", fs.exists(file));
71 TimeToLiveHFileCleaner cleaner = new TimeToLiveHFileCleaner();
72
73 fs.setTimes(file, createTime - 100, -1);
74 Configuration conf = UTIL.getConfiguration();
75 conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 100);
76 cleaner.setConf(conf);
77 assertTrue("File not set deletable - check mod time:" + getFileStats(file, fs)
78 + " with create time:" + createTime, cleaner.isFileDeletable(fs.getFileStatus(file)));
79 }
80
81
82
83
84
85 private String getFileStats(Path file, FileSystem fs) throws IOException {
86 FileStatus status = fs.getFileStatus(file);
87 return "File" + file + ", mtime:" + status.getModificationTime() + ", atime:"
88 + status.getAccessTime();
89 }
90
91 @Test(timeout = 60 *1000)
92 public void testHFileCleaning() throws Exception {
93 final EnvironmentEdge originalEdge = EnvironmentEdgeManager.getDelegate();
94 String prefix = "someHFileThatWouldBeAUUID";
95 Configuration conf = UTIL.getConfiguration();
96
97 long ttl = 2000;
98 conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
99 "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
100 conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
101 Server server = new DummyServer();
102 Path archivedHfileDir = new Path(UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
103 FileSystem fs = FileSystem.get(conf);
104 HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
105
106
107 final long createTime = System.currentTimeMillis();
108 fs.delete(archivedHfileDir, true);
109 fs.mkdirs(archivedHfileDir);
110
111 fs.createNewFile(new Path(archivedHfileDir, "dfd-dfd"));
112
113
114 LOG.debug("Now is: " + createTime);
115 for (int i = 1; i < 32; i++) {
116
117
118 Path fileName = new Path(archivedHfileDir, (prefix + "." + (createTime + i)));
119 fs.createNewFile(fileName);
120
121 fs.setTimes(fileName, createTime - ttl - 1, -1);
122 LOG.debug("Creating " + getFileStats(fileName, fs));
123 }
124
125
126
127 Path saved = new Path(archivedHfileDir, "thisFileShouldBeSaved.00000000000");
128 fs.createNewFile(saved);
129
130 fs.setTimes(saved, createTime + (ttl * 2), -1);
131 LOG.debug("Creating " + getFileStats(saved, fs));
132
133 assertEquals(33, fs.listStatus(archivedHfileDir).length);
134
135
136 EnvironmentEdge setTime = new EnvironmentEdge() {
137 @Override
138 public long currentTimeMillis() {
139 return createTime;
140 }
141 };
142 EnvironmentEdgeManager.injectEdge(setTime);
143
144
145 cleaner.chore();
146
147 for (FileStatus file : fs.listStatus(archivedHfileDir)) {
148 LOG.debug("Kept hfile: " + file.getPath());
149 }
150
151
152 assertEquals("Didn't dev expected number of files in the archive!", 1,
153 fs.listStatus(archivedHfileDir).length);
154
155 cleaner.interrupt();
156
157 EnvironmentEdgeManager.injectEdge(originalEdge);
158 }
159
160 @Test
161 public void testRemovesEmptyDirectories() throws Exception {
162 Configuration conf = UTIL.getConfiguration();
163
164 conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
165 Server server = new DummyServer();
166 Path archivedHfileDir = new Path(UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
167
168
169 FileSystem fs = UTIL.getDFSCluster().getFileSystem();
170 HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
171
172
173 Path table = new Path(archivedHfileDir, "table");
174 Path region = new Path(table, "regionsomthing");
175 Path family = new Path(region, "fam");
176 Path file = new Path(family, "file12345");
177 fs.mkdirs(family);
178 if (!fs.exists(family)) throw new RuntimeException("Couldn't create test family:" + family);
179 fs.create(file).close();
180 if (!fs.exists(file)) throw new RuntimeException("Test file didn't get created:" + file);
181
182
183 cleaner.chore();
184
185
186 assertFalse("family directory not removed for empty directory", fs.exists(family));
187 assertFalse("region directory not removed for empty directory", fs.exists(region));
188 assertFalse("table directory not removed for empty directory", fs.exists(table));
189 assertTrue("archive directory", fs.exists(archivedHfileDir));
190 }
191
192 static class DummyServer implements Server {
193
194 @Override
195 public Configuration getConfiguration() {
196 return UTIL.getConfiguration();
197 }
198
199 @Override
200 public ZooKeeperWatcher getZooKeeper() {
201 try {
202 return new ZooKeeperWatcher(getConfiguration(), "dummy server", this);
203 } catch (IOException e) {
204 e.printStackTrace();
205 }
206 return null;
207 }
208
209 @Override
210 public CatalogTracker getCatalogTracker() {
211 return null;
212 }
213
214 @Override
215 public ServerName getServerName() {
216 return new ServerName("regionserver,60020,000000");
217 }
218
219 @Override
220 public void abort(String why, Throwable e) {
221 }
222
223 @Override
224 public boolean isAborted() {
225 return false;
226 }
227
228 @Override
229 public void stop(String why) {}
230
231 @Override
232 public boolean isStopped() {
233 return false;
234 }
235 }
236
237 @org.junit.Rule
238 public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
239 new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
240 }
241