/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
package org.apache.hadoop.hbase.fs;

import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.BindException;
import java.net.ServerSocket;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;

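/**
 * Tests the block-location reordering hook in {@link HFileSystem}: an interceptor can rewrite
 * the replica order returned by HDFS, and the WAL-specific reordering moves the replica located
 * on the region server that wrote the log to the end of the list.
 */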
@Category(LargeTests.class)
public class TestBlockReorder {
  private static final Log LOG = LogFactory.getLog(TestBlockReorder.class);

  static {
    ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) HFileSystem.LOG).getLogger().setLevel(Level.ALL);
  }

  private Configuration conf;
  private MiniDFSCluster cluster;
  private HBaseTestingUtility htu;
  private DistributedFileSystem dfs;
  private static final String host1 = "host1";
  private static final String host2 = "host2";
  private static final String host3 = "host3";

  @Before
  public void setUp() throws Exception {
    htu = new HBaseTestingUtility();
    htu.getConfiguration().setInt("dfs.block.size", 1024);
    htu.getConfiguration().setBoolean("dfs.support.append", true);
    htu.getConfiguration().setInt("dfs.replication", 3);
    // Three datanodes, each on its own rack and with its own host name.
    htu.startMiniDFSCluster(3,
        new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});

    conf = htu.getConfiguration();
    cluster = htu.getDFSCluster();
    dfs = (DistributedFileSystem) FileSystem.get(conf);
  }

  @After
  public void tearDown() throws Exception {
    htu.shutdownMiniCluster();
  }

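  /**
   * Checks that the reorder interceptor installed through
   * {@link HFileSystem#addLocationsOrderInterceptor} is really used by the HDFS client: the
   * datanode holding the first replica is killed and its ports are taken over by dummy sockets
   * that accept connections but never serve data, so the reads below stay fast only if the
   * interceptor pushes that location to the end of the list.
   */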
  @Test
  public void testBlockLocationReorder() throws Exception {
    Path p = new Path("hello");

    Assert.assertTrue(cluster.getDataNodes().size() > 1);
    final int repCount = 2;

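    // Write a small file with two replicas.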
    FSDataOutputStream fop = dfs.create(p, (short) repCount);
    final double toWrite = 875.5613;
    fop.writeDouble(toWrite);
    fop.close();

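    // Check that the file can be read while all the replicas are still available.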
    long start = System.currentTimeMillis();
    FSDataInputStream fin = dfs.open(p);
    Assert.assertTrue(toWrite == fin.readDouble());
    long end = System.currentTimeMillis();
    LOG.info("readtime= " + (end - start));
    fin.close();
    Assert.assertTrue((end - start) < 30 * 1000);

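    // Get the single block's locations; keep the first location's data-transfer port so its
    // socket can be re-bound once the datanode is gone.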
    FileStatus f = dfs.getFileStatus(p);
    BlockLocation[] lbs;
    do {
      lbs = dfs.getFileBlockLocations(f, 0, 1);
    } while (lbs.length != 1 || lbs[0].getHosts().length != repCount);
    final String name = lbs[0].getNames()[0];
    Assert.assertTrue(name.indexOf(':') > 0);
    String portS = name.substring(name.indexOf(':') + 1);
    final int port = Integer.parseInt(portS);
    LOG.info("port= " + port);
    int ipcPort = -1;

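    // Shut down the datanode that serves the first replica, remembering its IPC port as well.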
    boolean ok = false;
    final String lookup = lbs[0].getHosts()[0];
    StringBuilder sb = new StringBuilder();
    for (DataNode dn : cluster.getDataNodes()) {
      final String dnName = getHostName(dn);
      sb.append(dnName).append(' ');
      if (lookup.equals(dnName)) {
        ok = true;
        LOG.info("killing datanode " + name + " / " + lookup);
        ipcPort = dn.ipcServer.getListenerAddress().getPort();
        dn.shutdown();
        LOG.info("killed datanode " + name + " / " + lookup);
        break;
      }
    }
    Assert.assertTrue(
        "didn't find the server to kill, was looking for " + lookup + " found " + sb, ok);
    LOG.info("ipc port= " + ipcPort);

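    // Install an interceptor that moves the killed host to the end of the location list
    // whenever it is returned first.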
    Assert.assertTrue(HFileSystem.addLocationsOrderInterceptor(conf,
        new HFileSystem.ReorderBlocks() {
          @Override
          public void reorderBlocks(Configuration c, LocatedBlocks lbs, String src) {
            for (LocatedBlock lb : lbs.getLocatedBlocks()) {
              if (lb.getLocations().length > 1) {
                if (lb.getLocations()[0].getHostName().equals(lookup)) {
                  LOG.info("HFileSystem bad host, inverting");
                  DatanodeInfo tmp = lb.getLocations()[0];
                  lb.getLocations()[0] = lb.getLocations()[1];
                  lb.getLocations()[1] = tmp;
                }
              }
            }
          }
        }));

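    // Re-bind the dead datanode's data and IPC ports. A client that still tried the dead
    // replica first would now hang on a socket that accepts connections but never serves data,
    // so the reads below are only fast if the reordering is effective. If someone else grabbed
    // the ports in the meantime, the test cannot run and is skipped.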
    final int retries = 10;
    ServerSocket ss = null;
    ServerSocket ssI;
    try {
      ss = new ServerSocket(port);
      ssI = new ServerSocket(ipcPort);
    } catch (BindException be) {
      LOG.warn("Got a bind exception while setting up the sockets on " + port + " or " + ipcPort +
          "; the datanode has not yet released the port or someone else took it." +
          " This can happen, so the test is skipped this time.", be);
      if (ss != null) {
        ss.close();
      }
      return;
    }

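    // With the interceptor in place, repeated reads must keep completing well under the timeout.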
    for (int i = 0; i < retries; i++) {
      start = System.currentTimeMillis();

      fin = dfs.open(p);
      Assert.assertTrue(toWrite == fin.readDouble());
      fin.close();
      end = System.currentTimeMillis();
      LOG.info("HFileSystem readtime= " + (end - start));
      Assert.assertFalse("We took too much time to read", (end - start) > 60000);
    }

    ss.close();
    ssI.close();
  }

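  /**
   * Returns the host name of a datanode, using getDisplayName() when it is available
   * (newer Hadoop versions) and falling back to getHostName() (older versions).
   */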
  private String getHostName(DataNode dn)
      throws InvocationTargetException, IllegalAccessException {
    Method m;
    try {
      m = DataNode.class.getMethod("getDisplayName");
    } catch (NoSuchMethodException e) {
      try {
        m = DataNode.class.getMethod("getHostName");
      } catch (NoSuchMethodException e1) {
        throw new RuntimeException(e1);
      }
    }

    String res = (String) m.invoke(dn);
    if (res.contains(":")) {
      return res.split(":")[0];
    } else {
      return res;
    }
  }

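  /**
   * Rolls the WAL of a region server that shares its host name with a dedicated datanode and
   * checks the block locations of the resulting log files: whenever the replica written to that
   * datanode shows up, it must be the last location returned, whether the locations are read
   * through the region server's HFileSystem, a plain DFS client, or the master's file system.
   */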
  @Test
  public void testHBaseCluster() throws Exception {
    byte[] sb = "sb".getBytes();
    htu.startMiniZKCluster();

    MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 1);
    hbm.waitForActiveAndReadyMaster();
    hbm.getRegionServer(0).waitForServerOnline();

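    // Start one more datanode, named after the region server, so that the blocks of newly
    // written WAL files get a replica on a datanode carrying the region server's name.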
    String host4 = hbm.getRegionServer(0).getServerName().getHostname();
    LOG.info("Starting a new datanode with the name=" + host4);
    cluster.startDataNodes(conf, 1, true, null, new String[]{"/r4"}, new String[]{host4}, null);
    cluster.waitClusterUp();

    final int repCount = 3;
    HRegionServer targetRs = hbm.getRegionServer(0);

    // Use the region server's configuration and its HFileSystem view of the file system.
    conf = targetRs.getConfiguration();
    HFileSystem rfs = (HFileSystem) targetRs.getFileSystem();
    HTable h = htu.createTable("table".getBytes(), sb);

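    // The WAL directory of the target region server, under the HBase root directory.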
    String rootDir = new Path(FSUtils.getRootDir(conf) + "/" + HConstants.HREGION_LOGDIR_NAME +
        "/" + targetRs.getServerName().toString()).toUri().getPath();

    // The master's view of the same file system.
    DistributedFileSystem mdfs = (DistributedFileSystem)
        hbm.getMaster().getMasterFileSystem().getFileSystem();

    int nbTest = 0;
    while (nbTest < 10) {
      // Roll the WAL so that a fresh log file is created, then write one row so that the new
      // log file gets some content.
      htu.getHBaseAdmin().rollHLogWriter(targetRs.getServerName().toString());

      // Give the roll a moment to complete.
      Thread.sleep(100);

      Put p = new Put(sb);
      p.add(sb, sb, sb);
      h.put(p);

      // List the WAL files currently present in the region server's log directory.
      DirectoryListing dl = dfs.getClient().listPaths(rootDir, HdfsFileStatus.EMPTY_NAME);
      HdfsFileStatus[] hfs = dl.getPartialListing();

      Assert.assertTrue(hfs.length >= 1);
      for (HdfsFileStatus hf : hfs) {
        LOG.info("Log file found: " + hf.getLocalName() + " in " + rootDir);
        String logFile = rootDir + "/" + hf.getLocalName();
        FileStatus fsLog = rfs.getFileStatus(new Path(logFile));

        LOG.info("Checking log file: " + logFile);

        // A freshly rolled file may not have any block yet, so only check files that do.
        BlockLocation[] bls = rfs.getFileBlockLocations(fsLog, 0, 1);
        if (bls.length > 0) {
          BlockLocation bl = bls[0];

          LOG.info(bl.getHosts().length + " replicas for block 0 in " + logFile + " ");
          // The datanode named after the region server must never be returned before the others.
          for (int i = 0; i < bl.getHosts().length - 1; i++) {
            LOG.info(bl.getHosts()[i] + " " + logFile);
            Assert.assertFalse(host4.equals(bl.getHosts()[i]));
          }
          String last = bl.getHosts()[bl.getHosts().length - 1];
          LOG.info(last + " " + logFile);
          if (host4.equals(last)) {
            nbTest++;
            LOG.info(logFile + " is on the new datanode and is ok");
            if (bl.getHosts().length == repCount) {
              // Check the same ordering through a plain DFS client...
              testFromDFS(dfs, logFile, repCount, host4);

              // ...and through the master's file system.
              testFromDFS(mdfs, logFile, repCount, host4);
            }
          }
        }
      }
    }
  }

  private void testFromDFS(DistributedFileSystem dfs, String src, int repCount, String localhost)
      throws Exception {
    // Wait until the namenode reports repCount locations for every block of src, then check
    // that the given host is always the last location returned.
    for (int i = 0; i < 10; i++) {
      LocatedBlocks l;

      final long max = System.currentTimeMillis() + 10000;
      boolean done;
      do {
        Assert.assertTrue("Can't get enough replicas.", System.currentTimeMillis() < max);
        l = getNamenode(dfs.getClient()).getBlockLocations(src, 0, 1);
        Assert.assertNotNull("Can't get block locations for " + src, l);
        Assert.assertNotNull(l.getLocatedBlocks());
        Assert.assertTrue(l.getLocatedBlocks().size() > 0);

        done = true;
        for (int y = 0; y < l.getLocatedBlocks().size() && done; y++) {
          done = (l.get(y).getLocations().length == repCount);
        }
      } while (!done);

      for (int y = 0; y < l.getLocatedBlocks().size(); y++) {
        Assert.assertEquals(localhost, l.get(y).getLocations()[repCount - 1].getHostName());
      }
    }
  }

  // Get the DFSClient's namenode proxy by reflection, as the field is not publicly accessible.
  private static ClientProtocol getNamenode(DFSClient dfsc) throws Exception {
    Field nf = DFSClient.class.getDeclaredField("namenode");
    nf.setAccessible(true);
    return (ClientProtocol) nf.get(dfsc);
  }

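  /**
   * Checks {@link HFileSystem.ReorderWALBlocks} directly: a path that does not look like a WAL
   * file must be left untouched, while a WAL-style path must move the replica hosted on the
   * server named in the path to the last position, and doing it twice must give the same result.
   */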
  @Test
  public void testBlockLocation() throws Exception {
    // Start a mini HBase cluster so that HConstants.HBASE_DIR is set in the configuration.
    htu.startMiniZKCluster();
    MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 1);
    conf = hbm.getConfiguration();

    final String fileName = "/helloWorld";
    Path p = new Path(fileName);

    final int repCount = 3;
    Assert.assertTrue(cluster.getDataNodes().size() >= repCount);

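    // Write a small file with three replicas.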
    FSDataOutputStream fop = dfs.create(p, (short) repCount);
    final double toWrite = 875.5613;
    fop.writeDouble(toWrite);
    fop.close();

    for (int i = 0; i < 10; i++) {
      // Wait until the namenode reports all the replicas for the block.
      LocatedBlocks l;
      final long max = System.currentTimeMillis() + 10000;
      do {
        l = getNamenode(dfs.getClient()).getBlockLocations(fileName, 0, 1);
        Assert.assertNotNull(l.getLocatedBlocks());
        Assert.assertEquals(1, l.getLocatedBlocks().size());
        Assert.assertTrue("Expecting " + repCount + ", got " + l.get(0).getLocations().length,
            System.currentTimeMillis() < max);
      } while (l.get(0).getLocations().length != repCount);

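      // A file whose name does not look like a WAL must be left untouched by the reorderer.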
      Object[] originalList = l.getLocatedBlocks().toArray();
      HFileSystem.ReorderWALBlocks lrb = new HFileSystem.ReorderWALBlocks();
      lrb.reorderBlocks(conf, l, fileName);
      Assert.assertArrayEquals(originalList, l.getLocatedBlocks().toArray());

      // Build a path that looks like a WAL file written by a server named host1.
      Assert.assertNotNull(conf.get(HConstants.HBASE_DIR));
      Assert.assertFalse(conf.get(HConstants.HBASE_DIR).isEmpty());
      String pseudoLogFile = conf.get(HConstants.HBASE_DIR) + "/" +
          HConstants.HREGION_LOGDIR_NAME + "/" + host1 + ",6977,6576" + "/mylogfile";

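      // Sanity check: the pseudo WAL path must parse back to a server name.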
      Assert.assertNotNull("log= " + pseudoLogFile,
          HLogUtil.getServerNameFromHLogDirectoryName(dfs.getConf(), pseudoLogFile));

      // After reordering, the replica hosted on host1, the server that wrote the log, must be
      // the last one.
      lrb.reorderBlocks(conf, l, pseudoLogFile);
      Assert.assertEquals(host1, l.get(0).getLocations()[2].getHostName());

      // Reordering a second time must not change the result.
      lrb.reorderBlocks(conf, l, pseudoLogFile);
      Assert.assertEquals(host1, l.get(0).getLocations()[2].getHostName());
    }
  }

}