
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.fs;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.BindException;
import java.net.ServerSocket;
import java.util.List;
import java.util.concurrent.CountDownLatch;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests for the HDFS fix from HBASE-6435.
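 * <p>
 * For reference, a minimal sketch (mirroring the registration done in the tests below) of how
 * such a hook is installed; the interceptor may rewrite the replica order of every
 * LocatedBlocks instance handed back to the client:
 * <pre>
 * HFileSystem.addLocationsOrderInterceptor(conf, new HFileSystem.ReorderBlocks() {
 *   public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) {
 *     // swap entries of each lb.getLocations() to put the preferred replica first
 *   }
 * });
 * </pre>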
 */
@Category(LargeTests.class)
public class TestBlockReorder {
  private static final Log LOG = LogFactory.getLog(TestBlockReorder.class);

  static {
    ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) HFileSystem.LOG).getLogger().setLevel(Level.ALL);
  }

  private Configuration conf;
  private MiniDFSCluster cluster;
  private HBaseTestingUtility htu;
  private DistributedFileSystem dfs;
  private static final String host1 = "host1";
  private static final String host2 = "host2";
  private static final String host3 = "host3";

  @Before
  public void setUp() throws Exception {
    htu = new HBaseTestingUtility();
    htu.getConfiguration().setInt("dfs.blocksize", 1024); // for the test with multiple blocks
    htu.getConfiguration().setBoolean("dfs.support.append", true);
    htu.getConfiguration().setInt("dfs.replication", 3);
    htu.startMiniDFSCluster(3,
        new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});

    conf = htu.getConfiguration();
    cluster = htu.getDFSCluster();
    dfs = (DistributedFileSystem) FileSystem.get(conf);
  }

  @After
  public void tearDown() throws Exception {
    htu.shutdownMiniCluster();
  }

  /**
   * Test that we can add a hook, and that the hook works when we read a file from HDFS.
   */
  @Test
  public void testBlockLocationReorder() throws Exception {
    Path p = new Path("hello");

    Assert.assertTrue(cluster.getDataNodes().size() > 1);
    final int repCount = 2;

    // Let's write the file
    FSDataOutputStream fop = dfs.create(p, (short) repCount);
    final double toWrite = 875.5613;
    fop.writeDouble(toWrite);
    fop.close();

    // Let's check we can read it when everybody's there
    long start = System.currentTimeMillis();
    FSDataInputStream fin = dfs.open(p);
    Assert.assertTrue(toWrite == fin.readDouble());
    long end = System.currentTimeMillis();
    LOG.info("readtime= " + (end - start));
    fin.close();
    Assert.assertTrue((end - start) < 30 * 1000);

    // Let's kill the first location. But the first location actually returned may change,
    // so the first thing to do is to get the location, then the port.
    FileStatus f = dfs.getFileStatus(p);
    BlockLocation[] lbs;
    do {
      lbs = dfs.getFileBlockLocations(f, 0, 1);
      // Wait until the single block reports all of its repCount replicas
    } while (lbs.length != 1 || lbs[0].getHosts().length != repCount);
    final String name = lbs[0].getNames()[0];
    Assert.assertTrue(name.indexOf(':') > 0);
    String portS = name.substring(name.indexOf(':') + 1);
    final int port = Integer.parseInt(portS);
    LOG.info("port= " + port);
    int ipcPort = -1;

    // Let's find the DN to kill. cluster.getDataNodes(int) is not indexed by port, so we
    // need to iterate over the datanodes ourselves.
    boolean ok = false;
    final String lookup = lbs[0].getHosts()[0];
    StringBuilder sb = new StringBuilder();
    for (DataNode dn : cluster.getDataNodes()) {
      final String dnName = getHostName(dn);
      sb.append(dnName).append(' ');
      if (lookup.equals(dnName)) {
        ok = true;
        LOG.info("killing datanode " + name + " / " + lookup);
        ipcPort = dn.ipcServer.getListenerAddress().getPort();
        dn.shutdown();
        LOG.info("killed datanode " + name + " / " + lookup);
        break;
      }
    }
    Assert.assertTrue(
        "didn't find the server to kill, was looking for " + lookup + " found " + sb, ok);
    LOG.info("ipc port= " + ipcPort);

    // Add the hook, with an implementation checking that we don't use the datanode we've just
    // killed.
    Assert.assertTrue(HFileSystem.addLocationsOrderInterceptor(conf,
        new HFileSystem.ReorderBlocks() {
          @Override
          public void reorderBlocks(Configuration c, LocatedBlocks lbs, String src) {
            for (LocatedBlock lb : lbs.getLocatedBlocks()) {
              if (lb.getLocations().length > 1) {
                DatanodeInfo[] infos = lb.getLocations();
                if (infos[0].getHostName().equals(lookup)) {
                  LOG.info("HFileSystem bad host, inverting");
                  DatanodeInfo tmp = infos[0];
                  infos[0] = infos[1];
                  infos[1] = tmp;
                }
              }
            }
          }
        }));

    final int retries = 10;
    ServerSocket ss = null;
    ServerSocket ssI;
    try {
      ss = new ServerSocket(port); // We're taking the port so that reads will hit a timeout later.
      ssI = new ServerSocket(ipcPort);
    } catch (BindException be) {
      LOG.warn("Got bind exception trying to set up socket on " + port + " or " + ipcPort +
          ", this means that the datanode has not closed the socket or" +
          " someone else took it. It can happen, so we skip this test for this run.", be);
      if (ss != null) {
        ss.close();
      }
      return;
    }

    // Now reads will fail with a timeout; unfortunately the client does not always connect to
    // the same box, so we retry up to 'retries' times. With the reorder in place a read should
    // never take more than a few milliseconds.
    for (int i = 0; i < retries; i++) {
      start = System.currentTimeMillis();

      fin = dfs.open(p);
      Assert.assertTrue(toWrite == fin.readDouble());
      fin.close();
      end = System.currentTimeMillis();
      LOG.info("HFileSystem readtime= " + (end - start));
      Assert.assertFalse("We took too much time to read", (end - start) > 60000);
    }

    ss.close();
    ssI.close();
  }

  /**
   * Returns the hostname of a datanode, using getDisplayName (hadoop 2) or getHostName (hadoop 1).
   */
  private String getHostName(DataNode dn) throws InvocationTargetException, IllegalAccessException {
    Method m;
    try {
      m = DataNode.class.getMethod("getDisplayName");
    } catch (NoSuchMethodException e) {
      try {
        m = DataNode.class.getMethod("getHostName");
      } catch (NoSuchMethodException e1) {
        throw new RuntimeException(e1);
      }
    }

    String res = (String) m.invoke(dn);
    if (res.contains(":")) {
      return res.split(":")[0];
    } else {
      return res;
    }
  }

  /**
   * Test that the hook works within HBase, including when there are multiple blocks.
   */
  @Test
  public void testHBaseCluster() throws Exception {
    byte[] sb = "sb".getBytes();
    htu.startMiniZKCluster();

    MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 1);
    hbm.waitForActiveAndReadyMaster();
    hbm.getRegionServer(0).waitForServerOnline();
    HRegionServer targetRs = hbm.getRegionServer(0);
    // We want to have a datanode with the same name as the region server, so
    // we're going to get the region server name, and start a new datanode with that name.
    String host4 = targetRs.getServerName().getHostname();
    LOG.info("Starting a new datanode with the name=" + host4);
    cluster.startDataNodes(conf, 1, true, null, new String[]{"/r4"}, new String[]{host4}, null);
    cluster.waitClusterUp();

    final int repCount = 3;

    // We use the region server file system & conf as we expect them to have the hook.
    conf = targetRs.getConfiguration();
    HFileSystem rfs = (HFileSystem) targetRs.getFileSystem();
    Table h = htu.createTable(TableName.valueOf("table"), sb);

    // Now we have 4 datanodes and a replication count of 3, so we don't know whether the
    // datanode with the matching name will be used. We can't really stop an existing datanode,
    // as that would expose us to nasty HDFS bugs/issues, so we're going to try multiple times.

    // Now we need to find the log file, its locations, and look at it.

    String rootDir = new Path(FSUtils.getRootDir(conf) + "/" + HConstants.HREGION_LOGDIR_NAME +
            "/" + targetRs.getServerName().toString()).toUri().getPath();

    DistributedFileSystem mdfs = (DistributedFileSystem)
        hbm.getMaster().getMasterFileSystem().getFileSystem();

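    // Each pass below rolls the WAL and counts a success (nbTest++) only when the last replica
    // of the resulting log file landed on host4; the loop runs until ten such placements were
    // observed, each one exercising the reorder hook.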
    int nbTest = 0;
    while (nbTest < 10) {
      final List<HRegion> regions = targetRs.getOnlineRegions(h.getName());
      final CountDownLatch latch = new CountDownLatch(regions.size());
      // listen for successful log rolls
      final WALActionsListener listener = new WALActionsListener.Base() {
        @Override
        public void postLogRoll(final Path oldPath, final Path newPath) throws IOException {
          latch.countDown();
        }
      };
      for (HRegion region : regions) {
        region.getWAL().registerWALActionsListener(listener);
      }

      htu.getHBaseAdmin().rollWALWriter(targetRs.getServerName());

      // wait
      try {
        latch.await();
      } catch (InterruptedException exception) {
        LOG.warn("Interrupted while waiting for the wal of '" + targetRs + "' to roll. If later " +
            "tests fail, it's probably because we should still be waiting.");
        Thread.currentThread().interrupt();
      }
      for (HRegion region : regions) {
        region.getWAL().unregisterWALActionsListener(listener);
      }

      // We need a sleep as the namenode is informed asynchronously
      Thread.sleep(100);

      // insert one put to ensure a minimal size
      Put p = new Put(sb);
      p.add(sb, sb, sb);
      h.put(p);

      DirectoryListing dl = dfs.getClient().listPaths(rootDir, HdfsFileStatus.EMPTY_NAME);
      HdfsFileStatus[] hfs = dl.getPartialListing();

      // As we wrote a put, we should have at least one log file.
      Assert.assertTrue(hfs.length >= 1);
      for (HdfsFileStatus hf : hfs) {
        // Because this is a live cluster, log files might get archived while we're processing
        try {
          LOG.info("Log file found: " + hf.getLocalName() + " in " + rootDir);
          String logFile = rootDir + "/" + hf.getLocalName();
          FileStatus fsLog = rfs.getFileStatus(new Path(logFile));

          LOG.info("Checking log file: " + logFile);
          // Now checking that the hook is up and running.
          // We can't call getBlockLocations directly, as it's not available in HFileSystem.
          // We're trying multiple times to be sure, as the order is random.

          BlockLocation[] bls = rfs.getFileBlockLocations(fsLog, 0, 1);
          if (bls.length > 0) {
            BlockLocation bl = bls[0];

            LOG.info(bl.getHosts().length + " replicas for block 0 in " + logFile + " ");
            for (int i = 0; i < bl.getHosts().length - 1; i++) {
              LOG.info(bl.getHosts()[i] + "    " + logFile);
              // Compare with equals, not by reference: host4 must not appear anywhere but last.
              Assert.assertFalse(host4.equals(bl.getHosts()[i]));
            }
            String last = bl.getHosts()[bl.getHosts().length - 1];
            LOG.info(last + "    " + logFile);
            if (host4.equals(last)) {
              nbTest++;
              LOG.info(logFile + " is on the new datanode and is ok");
              if (bl.getHosts().length == 3) {
                // We can test this case from the file system as well.
                // Checking the underlying file system; multiple times, as the order is random.
                testFromDFS(dfs, logFile, repCount, host4);

                // now from the master
                testFromDFS(mdfs, logFile, repCount, host4);
              }
            }
          }
        } catch (FileNotFoundException exception) {
          LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was " +
              "archived out from under us so we'll ignore and retry. If this test hangs " +
              "indefinitely you should treat this failure as a symptom.", exception);
        } catch (RemoteException exception) {
          if (exception.unwrapRemoteException() instanceof FileNotFoundException) {
            LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was " +
                "archived out from under us so we'll ignore and retry. If this test hangs " +
                "indefinitely you should treat this failure as a symptom.", exception);
          } else {
            throw exception;
          }
        }
      }
    }
  }

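  /**
   * Queries the namenode of the given DistributedFileSystem directly and checks that every block
   * of 'src' eventually reports repCount replicas, with 'localhost' always last in the list,
   * which is where the reorder interceptor is expected to push the WAL writer's host.
   */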
  private void testFromDFS(DistributedFileSystem dfs, String src, int repCount, String localhost)
      throws Exception {
    // Multiple times as the order is random
    for (int i = 0; i < 10; i++) {
      LocatedBlocks l;
      // The NN gets the block list asynchronously, so we may need multiple tries to get the list
      final long max = System.currentTimeMillis() + 10000;
      boolean done;
      do {
        Assert.assertTrue("Can't get enough replicas.", System.currentTimeMillis() < max);
        l = getNamenode(dfs.getClient()).getBlockLocations(src, 0, 1);
        Assert.assertNotNull("Can't get block locations for " + src, l);
        Assert.assertNotNull(l.getLocatedBlocks());
        Assert.assertTrue(l.getLocatedBlocks().size() > 0);

        done = true;
        for (int y = 0; y < l.getLocatedBlocks().size() && done; y++) {
          done = (l.get(y).getLocations().length == repCount);
        }
      } while (!done);

      for (int y = 0; y < l.getLocatedBlocks().size(); y++) {
        Assert.assertEquals(localhost, l.get(y).getLocations()[repCount - 1].getHostName());
      }
    }
  }

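  /**
   * The namenode proxy is a private field of DFSClient, so we grab it through reflection; this
   * lets the test ask for block locations through the (possibly intercepted) ClientProtocol.
   */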
  private static ClientProtocol getNamenode(DFSClient dfsc) throws Exception {
    Field nf = DFSClient.class.getDeclaredField("namenode");
    nf.setAccessible(true);
    return (ClientProtocol) nf.get(dfsc);
  }

  /**
   * Test that the reorder algorithm works as we expect.
   */
  @Test
  public void testBlockLocation() throws Exception {
    // We need to start HBase to get HConstants.HBASE_DIR set in conf
    htu.startMiniZKCluster();
    MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 1);
    conf = hbm.getConfiguration();

    // The "/" is mandatory: without it we get a NullPointerException on the namenode
    final String fileName = "/helloWorld";
    Path p = new Path(fileName);

    final int repCount = 3;
    Assert.assertTrue(cluster.getDataNodes().size() >= repCount);

    // Let's write the file
    FSDataOutputStream fop = dfs.create(p, (short) repCount);
    final double toWrite = 875.5613;
    fop.writeDouble(toWrite);
    fop.close();

    for (int i = 0; i < 10; i++) {
      // The interceptor is not set in this test, so we get the raw list at this point
      LocatedBlocks l;
      final long max = System.currentTimeMillis() + 10000;
      do {
        l = getNamenode(dfs.getClient()).getBlockLocations(fileName, 0, 1);
        Assert.assertNotNull(l.getLocatedBlocks());
        Assert.assertEquals(1, l.getLocatedBlocks().size());
        Assert.assertTrue("Expecting " + repCount + ", got " + l.get(0).getLocations().length,
            System.currentTimeMillis() < max);
      } while (l.get(0).getLocations().length != repCount);

      // Should be left alone: the name does not look like a WAL file, so the order won't change
      Object[] originalList = l.getLocatedBlocks().toArray();
      HFileSystem.ReorderWALBlocks lrb = new HFileSystem.ReorderWALBlocks();
      lrb.reorderBlocks(conf, l, fileName);
      Assert.assertArrayEquals(originalList, l.getLocatedBlocks().toArray());

      // Should be reordered, as we use a file name that looks like a WAL file
      Assert.assertNotNull(conf.get(HConstants.HBASE_DIR));
      Assert.assertFalse(conf.get(HConstants.HBASE_DIR).isEmpty());
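      // A WAL directory name embeds a ServerName as hostname,port,startcode; the values below
      // are arbitrary but well-formed, so ServerName parsing should succeed.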
      String pseudoLogFile = conf.get(HConstants.HBASE_DIR) + "/" +
          HConstants.HREGION_LOGDIR_NAME + "/" + host1 + ",6977,6576" + "/mylogfile";

      // Check that it will be possible to extract a ServerName from our construction
      Assert.assertNotNull("log= " + pseudoLogFile,
          DefaultWALProvider.getServerNameFromWALDirectoryName(dfs.getConf(), pseudoLogFile));

      // And check we're doing the right reorder.
      lrb.reorderBlocks(conf, l, pseudoLogFile);
      Assert.assertEquals(host1, l.get(0).getLocations()[2].getHostName());

      // Check again, it should remain the same.
      lrb.reorderBlocks(conf, l, pseudoLogFile);
      Assert.assertEquals(host1, l.get(0).getLocations()[2].getHostName());
    }
  }

}