/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.fs;

import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.BindException;
import java.net.ServerSocket;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests for the HDFS block-location reordering introduced by HBASE-6435.
 */
@Category(LargeTests.class)
public class TestBlockReorder {
  private static final Log LOG = LogFactory.getLog(TestBlockReorder.class);

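  // Raise DFSClient and HFileSystem logging to ALL so that the block reordering decisions are
  // visible in the test output.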
  static {
    ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) HFileSystem.LOG).getLogger().setLevel(Level.ALL);
  }

  private Configuration conf;
  private MiniDFSCluster cluster;
  private HBaseTestingUtility htu;
  private DistributedFileSystem dfs;
  private static final String host1 = "host1";
  private static final String host2 = "host2";
  private static final String host3 = "host3";

  @Before
  public void setUp() throws Exception {
    htu = new HBaseTestingUtility();
    htu.getConfiguration().setInt("dfs.block.size", 1024); // small blocks, for the multi-block test
    htu.getConfiguration().setBoolean("dfs.support.append", true);
    htu.getConfiguration().setInt("dfs.replication", 3);
    htu.startMiniDFSCluster(3,
        new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});

    conf = htu.getConfiguration();
    cluster = htu.getDFSCluster();
    dfs = (DistributedFileSystem) FileSystem.get(conf);
  }

  @After
  public void tearDown() throws Exception {
    htu.shutdownMiniCluster();
  }

  /**
   * Test that we can add a hook, and that the hook works when we read a file from HDFS.
   */
  @Test
  public void testBlockLocationReorder() throws Exception {
    Path p = new Path("hello");

    Assert.assertTrue(cluster.getDataNodes().size() > 1);
    final int repCount = 2;

    // Let's write the file
    FSDataOutputStream fop = dfs.create(p, (short) repCount);
    final double toWrite = 875.5613;
    fop.writeDouble(toWrite);
    fop.close();

    // Let's check we can read it while all the datanodes are up
    long start = System.currentTimeMillis();
    FSDataInputStream fin = dfs.open(p);
    Assert.assertTrue(toWrite == fin.readDouble());
    long end = System.currentTimeMillis();
    LOG.info("readtime= " + (end - start));
    fin.close();
    Assert.assertTrue((end - start) < 30 * 1000);

    // Let's kill the datanode serving the first location. But the first location returned can
    // change between calls, so we first pin down a location, then extract its port.
    FileStatus f = dfs.getFileStatus(p);
    BlockLocation[] lbs;
    do {
      lbs = dfs.getFileBlockLocations(f, 0, 1);
    } while (lbs.length != 1 || lbs[0].getHosts().length != repCount); // wait for full replication
    final String name = lbs[0].getNames()[0];
    Assert.assertTrue(name.indexOf(':') > 0);
    String portS = name.substring(name.indexOf(':') + 1);
    final int port = Integer.parseInt(portS);
    LOG.info("port= " + port);
    int ipcPort = -1;

    // Let's find the DN to kill. cluster.getDataNodes(int) is not indexed by port, so we
    // iterate over the datanodes ourselves.
    boolean ok = false;
    final String lookup = lbs[0].getHosts()[0];
    StringBuilder sb = new StringBuilder();
    for (DataNode dn : cluster.getDataNodes()) {
      final String dnName = getHostName(dn);
      sb.append(dnName).append(' ');
      if (lookup.equals(dnName)) {
        ok = true;
        LOG.info("killing datanode " + name + " / " + lookup);
        ipcPort = dn.ipcServer.getListenerAddress().getPort();
        dn.shutdown();
        LOG.info("killed datanode " + name + " / " + lookup);
        break;
      }
    }
    Assert.assertTrue(
        "didn't find the server to kill, was looking for " + lookup + " but found " + sb, ok);
    LOG.info("ipc port= " + ipcPort);

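    // addLocationsOrderInterceptor hooks into the DFS client, so that reorderBlocks below is
    // invoked on every block-location lookup before the client picks a replica to read from.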
    // Add the hook, with an implementation that swaps the killed host out of the first slot
    // so that we never try to read from it first.
    Assert.assertTrue(HFileSystem.addLocationsOrderInterceptor(conf,
        new HFileSystem.ReorderBlocks() {
          @Override
          public void reorderBlocks(Configuration c, LocatedBlocks lbs, String src) {
            for (LocatedBlock lb : lbs.getLocatedBlocks()) {
              if (lb.getLocations().length > 1) {
                if (lb.getLocations()[0].getHostName().equals(lookup)) {
                  LOG.info("HFileSystem bad host, inverting");
                  DatanodeInfo tmp = lb.getLocations()[0];
                  lb.getLocations()[0] = lb.getLocations()[1];
                  lb.getLocations()[1] = tmp;
                }
              }
            }
          }
        }));
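    // addLocationsOrderInterceptor returns false when it cannot install the interceptor (for
    // instance, if the underlying FileSystem is not a DistributedFileSystem); the assert above
    // keeps the test from passing vacuously in that case.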

    final int retries = 10;
    ServerSocket ss = null;
    ServerSocket ssI;
    try {
      ss = new ServerSocket(port); // We occupy the data port so that reads hitting it time out later.
      ssI = new ServerSocket(ipcPort);
    } catch (BindException be) {
      LOG.warn("Got bind exception trying to set up socket on " + port + " or " + ipcPort +
          ", this means that the datanode has not closed the socket or" +
          " someone else took it. It may happen; skipping this test this time.", be);
      if (ss != null) {
        ss.close();
      }
      return;
    }

    // Now reads will fail with a timeout if they hit the dead datanode's port. Unfortunately the
    // client does not always pick the same replica first, so we try several times; with the
    // reorder hook in place a read should never take more than a few milliseconds.
    for (int i = 0; i < retries; i++) {
      start = System.currentTimeMillis();

      fin = dfs.open(p);
      Assert.assertTrue(toWrite == fin.readDouble());
      fin.close();
      end = System.currentTimeMillis();
      LOG.info("HFileSystem readtime= " + (end - start));
      Assert.assertFalse("We took too much time to read", (end - start) > 60000);
    }

    ss.close();
    ssI.close();
  }

  /**
   * Returns the datanode hostname, using getDisplayName (Hadoop 2) or getHostName (Hadoop 1),
   * whichever this version of Hadoop provides.
   */
  private String getHostName(DataNode dn) throws InvocationTargetException, IllegalAccessException {
    Method m;
    try {
      m = DataNode.class.getMethod("getDisplayName");
    } catch (NoSuchMethodException e) {
      try {
        m = DataNode.class.getMethod("getHostName");
      } catch (NoSuchMethodException e1) {
        throw new RuntimeException(e1);
      }
    }

    String res = (String) m.invoke(dn);
    if (res.contains(":")) {
      return res.split(":")[0];
    } else {
      return res;
    }
  }

  /**
   * Test that the hook works within HBase, including when there are multiple blocks.
   */
  @Test
  public void testHBaseCluster() throws Exception {
    byte[] sb = "sb".getBytes();
    htu.startMiniZKCluster();

    MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 1);
    hbm.waitForActiveAndReadyMaster();
    hbm.getRegionServer(0).waitForServerOnline();

    // We want a datanode with the same name as the region server, so we get the
    // region server name and start a new datanode under that name.
    String host4 = hbm.getRegionServer(0).getServerName().getHostname();
    LOG.info("Starting a new datanode with the name=" + host4);
    cluster.startDataNodes(conf, 1, true, null, new String[]{"/r4"}, new String[]{host4}, null);
    cluster.waitClusterUp();

    final int repCount = 3;
    HRegionServer targetRs = hbm.getRegionServer(0);

    // We use the region server's file system & conf, as we expect them to carry the hook.
    conf = targetRs.getConfiguration();
    HFileSystem rfs = (HFileSystem) targetRs.getFileSystem();
    HTable h = htu.createTable("table".getBytes(), sb);

    // Now we have 4 datanodes and a replication count of 3, so we don't know whether the
    // datanode with the same name as the region server will be used. We can't safely stop an
    // existing datanode (that would trip nasty HDFS bugs/issues), so we try multiple times.

    // We need to find the log file, get its block locations, and look at them.

    String rootDir = new Path(FSUtils.getRootDir(conf) + "/" + HConstants.HREGION_LOGDIR_NAME +
            "/" + targetRs.getServerName().toString()).toUri().getPath();

    DistributedFileSystem mdfs = (DistributedFileSystem)
        hbm.getMaster().getMasterFileSystem().getFileSystem();

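    // Loop until we have seen, ten times, a WAL block whose replica on host4 was pushed to the
    // end of the location list; each hit shows that the reorder hook fired for that file.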
    int nbTest = 0;
    while (nbTest < 10) {
      htu.getHBaseAdmin().rollHLogWriter(targetRs.getServerName().toString());

      // We need a sleep here because the namenode is informed asynchronously
      Thread.sleep(100);

      // insert one put to ensure a minimal size
      Put p = new Put(sb);
      p.add(sb, sb, sb);
      h.put(p);

      DirectoryListing dl = dfs.getClient().listPaths(rootDir, HdfsFileStatus.EMPTY_NAME);
      HdfsFileStatus[] hfs = dl.getPartialListing();

      // As we wrote a put, we should have at least one log file.
      Assert.assertTrue(hfs.length >= 1);
      for (HdfsFileStatus hf : hfs) {
        LOG.info("Log file found: " + hf.getLocalName() + " in " + rootDir);
        String logFile = rootDir + "/" + hf.getLocalName();
        FileStatus fsLog = rfs.getFileStatus(new Path(logFile));

        LOG.info("Checking log file: " + logFile);
        // Now check that the hook is up and running. We can't call getBlockLocations directly,
        // as it is not exposed by HFileSystem, and we check multiple times to be sure, as the
        // raw order coming back from the namenode is random.

        BlockLocation[] bls = rfs.getFileBlockLocations(fsLog, 0, 1);
        if (bls.length > 0) {
          BlockLocation bl = bls[0];

          LOG.info(bl.getHosts().length + " replicas for block 0 in " + logFile);
          for (int i = 0; i < bl.getHosts().length - 1; i++) {
            LOG.info(bl.getHosts()[i] + "    " + logFile);
            Assert.assertFalse("host4 should only ever be the last location",
                host4.equals(bl.getHosts()[i]));
          }
          String last = bl.getHosts()[bl.getHosts().length - 1];
          LOG.info(last + "    " + logFile);
          if (host4.equals(last)) {
            nbTest++;
            LOG.info(logFile + " is on the new datanode and is ok");
            if (bl.getHosts().length == 3) {
              // We can test this case from the file system as well:
              // check the underlying file system, multiple times as the raw order is random
              testFromDFS(dfs, logFile, repCount, host4);

              // now from the master
              testFromDFS(mdfs, logFile, repCount, host4);
            }
          }
        }
      }
    }
  }

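  /**
   * Checks directly against the namenode that every block of src reports repCount replicas,
   * and that the replica hosted on localhost has been reordered to the end of the list.
   */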
  private void testFromDFS(DistributedFileSystem dfs, String src, int repCount, String localhost)
      throws Exception {
    // Multiple times, as the order is random
    for (int i = 0; i < 10; i++) {
      LocatedBlocks l;
      // The NN gets the block list asynchronously, so we may need multiple tries to get the list
      final long max = System.currentTimeMillis() + 10000;
      boolean done;
      do {
        Assert.assertTrue("Can't get enough replicas.", System.currentTimeMillis() < max);
        l = getNamenode(dfs.getClient()).getBlockLocations(src, 0, 1);
        Assert.assertNotNull("Can't get block locations for " + src, l);
        Assert.assertNotNull(l.getLocatedBlocks());
        Assert.assertTrue(l.getLocatedBlocks().size() > 0);

        done = true;
        for (int y = 0; y < l.getLocatedBlocks().size() && done; y++) {
          done = (l.get(y).getLocations().length == repCount);
        }
      } while (!done);

      for (int y = 0; y < l.getLocatedBlocks().size(); y++) {
        Assert.assertEquals(localhost, l.get(y).getLocations()[repCount - 1].getHostName());
      }
    }
  }

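  /**
   * Returns the namenode proxy hidden inside a DFSClient; there is no public accessor for it
   * in the Hadoop versions we build against, hence the reflection.
   */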
  private static ClientProtocol getNamenode(DFSClient dfsc) throws Exception {
    Field nf = DFSClient.class.getDeclaredField("namenode");
    nf.setAccessible(true);
    return (ClientProtocol) nf.get(dfsc);
  }

  /**
   * Test that the reorder algorithm works as we expect.
   */
  @Test
  public void testBlockLocation() throws Exception {
    // We need to start HBase to get HConstants.HBASE_DIR set in conf
    htu.startMiniZKCluster();
    MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 1);
    conf = hbm.getConfiguration();

    // The leading "/" is mandatory; without it we get a NullPointerException on the namenode
    final String fileName = "/helloWorld";
    Path p = new Path(fileName);

    final int repCount = 3;
    Assert.assertTrue(cluster.getDataNodes().size() >= repCount);

    // Let's write the file
    FSDataOutputStream fop = dfs.create(p, (short) repCount);
    final double toWrite = 875.5613;
    fop.writeDouble(toWrite);
    fop.close();

    for (int i = 0; i < 10; i++) {
      // The interceptor is not set in this test, so we get the raw list at this point
      LocatedBlocks l;
      final long max = System.currentTimeMillis() + 10000;
      do {
        l = getNamenode(dfs.getClient()).getBlockLocations(fileName, 0, 1);
        Assert.assertNotNull(l.getLocatedBlocks());
        Assert.assertEquals(1, l.getLocatedBlocks().size());
        Assert.assertTrue("Expecting " + repCount + " replicas, got " +
            l.get(0).getLocations().length, System.currentTimeMillis() < max);
      } while (l.get(0).getLocations().length != repCount);

      // Should be filtered out: the file name does not match a WAL path, so the order must
      // not change
      Object[] originalList = l.getLocatedBlocks().toArray();
      HFileSystem.ReorderWALBlocks lrb = new HFileSystem.ReorderWALBlocks();
      lrb.reorderBlocks(conf, l, fileName);
      Assert.assertArrayEquals(originalList, l.getLocatedBlocks().toArray());

      // Should be reordered, as we pretend to be a WAL file with a compliant name
      Assert.assertNotNull(conf.get(HConstants.HBASE_DIR));
      Assert.assertFalse(conf.get(HConstants.HBASE_DIR).isEmpty());
      String pseudoLogFile = conf.get(HConstants.HBASE_DIR) + "/" +
          HConstants.HREGION_LOGDIR_NAME + "/" + host1 + ",6977,6576" + "/mylogfile";
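      // "host1,6977,6576" follows the ServerName encoding (hostname,port,startcode). The port
      // and startcode used here are arbitrary; only the hostname matters to the reorderer.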

      // Check that it will be possible to extract a ServerName from our construction
      Assert.assertNotNull("log= " + pseudoLogFile,
          HLogUtil.getServerNameFromHLogDirectoryName(dfs.getConf(), pseudoLogFile));

      // And check we're doing the right reorder.
      lrb.reorderBlocks(conf, l, pseudoLogFile);
      Assert.assertEquals(host1, l.get(0).getLocations()[2].getHostName());

      // Check again, it should remain the same.
      lrb.reorderBlocks(conf, l, pseudoLogFile);
      Assert.assertEquals(host1, l.get(0).getLocations()[2].getHostName());
    }
  }

}