/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.fail;

import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.util.Progressable;
import org.junit.AfterClass;
import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests the ability to specify favored nodes for a region.
 */
@Category(MediumTests.class)
public class TestRegionFavoredNodes {

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static HTable table;
  private static final TableName TABLE_NAME =
      TableName.valueOf("table");
  private static final byte[] COLUMN_FAMILY = Bytes.toBytes("family");
  private static final int FAVORED_NODES_NUM = 3;
  private static final int REGION_SERVERS = 6;
  private static final int FLUSHES = 3;
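  // Handle to the DistributedFileSystem#create overload that accepts favored-node
  // addresses, looked up reflectively; stays null when the running Hadoop version
  // does not provide it, in which case the test is skipped.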
  private static Method createWithFavoredNode = null;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
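    // Probe for the create() overload that takes favored-node addresses. If this
    // Hadoop version does not have it, skip cluster setup; the test method is
    // then skipped via Assume.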
    try {
      createWithFavoredNode = DistributedFileSystem.class.getDeclaredMethod("create", Path.class,
        FsPermission.class, boolean.class, int.class, short.class, long.class,
        Progressable.class, InetSocketAddress[].class);
    } catch (NoSuchMethodException nm) {
      return;
    }
    TEST_UTIL.startMiniCluster(REGION_SERVERS);
    table = TEST_UTIL.createTable(TABLE_NAME, COLUMN_FAMILY);
    TEST_UTIL.createMultiRegions(table, COLUMN_FAMILY);
    TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
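    // Nothing was started (and no table was created) if the favored-nodes
    // overload was never found in setUpBeforeClass.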
    if (createWithFavoredNode == null) {
      return;
    }
    table.close();
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testFavoredNodes() throws Exception {
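    // Skip outright when this Hadoop version has no favored-nodes create() overload.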
    Assume.assumeTrue(createWithFavoredNode != null);
    // Get the addresses of the datanodes in the cluster.
    InetSocketAddress[] nodes = new InetSocketAddress[REGION_SERVERS];
    List<DataNode> datanodes = TEST_UTIL.getDFSCluster().getDataNodes();
    Method selfAddress;
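    // The DataNode accessor for its transfer address was renamed between Hadoop
    // releases (getSelfAddr in older versions, getXferAddress in newer ones),
    // so look up whichever one exists.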
    try {
      selfAddress = DataNode.class.getMethod("getSelfAddr");
    } catch (NoSuchMethodException ne) {
      selfAddress = DataNode.class.getMethod("getXferAddress");
    }
    for (int i = 0; i < REGION_SERVERS; i++) {
      nodes[i] = (InetSocketAddress)selfAddress.invoke(datanodes.get(i));
    }

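    // Pre-compute "ip:port" strings in the same form that BlockLocation#getNames
    // returns, so replica locations can be compared directly later on.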
    String[] nodeNames = new String[REGION_SERVERS];
    for (int i = 0; i < REGION_SERVERS; i++) {
      nodeNames[i] = nodes[i].getAddress().getHostAddress() + ":" +
          nodes[i].getPort();
    }

    // For each region, choose some datanodes as the favored nodes, then register
    // them with the hosting region server.
    for (int i = 0; i < REGION_SERVERS; i++) {
      HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(i);
      List<HRegion> regions = server.getOnlineRegions(TABLE_NAME);
      for (HRegion region : regions) {
        List<HBaseProtos.ServerName> favoredNodes =
            new ArrayList<HBaseProtos.ServerName>(FAVORED_NODES_NUM);
        String encodedRegionName = region.getRegionInfo().getEncodedName();
        for (int j = 0; j < FAVORED_NODES_NUM; j++) {
          HBaseProtos.ServerName.Builder b = HBaseProtos.ServerName.newBuilder();
          b.setHostName(nodes[(i + j) % REGION_SERVERS].getAddress().getHostAddress());
          b.setPort(nodes[(i + j) % REGION_SERVERS].getPort());
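          // A start code of -1 is a placeholder: favored nodes are identified by
          // host and port only, not by a live region server's start code.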
          b.setStartCode(-1);
          favoredNodes.add(b.build());
        }
        server.updateRegionFavoredNodesMapping(encodedRegionName, favoredNodes);
      }
    }

    // Write some data to each region and flush. Repeat some number of times to
    // get multiple files for each region.
    for (int i = 0; i < FLUSHES; i++) {
      TEST_UTIL.loadTable(table, COLUMN_FAMILY, false);
      TEST_UTIL.flush();
    }

    // For each region, check the block locations of each file and ensure that
    // they are consistent with the favored nodes for that region.
    for (int i = 0; i < REGION_SERVERS; i++) {
      HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(i);
      List<HRegion> regions = server.getOnlineRegions(TABLE_NAME);
      for (HRegion region : regions) {
        List<String> files = region.getStoreFileList(new byte[][]{COLUMN_FAMILY});
        for (String file : files) {
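          // Store file names come back as full URIs; reduce them to a bare path
          // before asking HDFS for the file's block locations.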
          FileStatus status = TEST_UTIL.getDFSCluster().getFileSystem().
              getFileStatus(new Path(new URI(file).getPath()));
          BlockLocation[] lbks =
              ((DistributedFileSystem)TEST_UTIL.getDFSCluster().getFileSystem())
              .getFileBlockLocations(status, 0, Long.MAX_VALUE);
          for (BlockLocation lbk : lbks) {
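            // Every replica must live on one of this region's favored nodes; the
            // labeled continue moves to the next replica as soon as a match is found.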
            locations:
              for (String info : lbk.getNames()) {
                for (int j = 0; j < FAVORED_NODES_NUM; j++) {
                  if (info.equals(nodeNames[(i + j) % REGION_SERVERS])) {
                    continue locations;
                  }
                }
                // This replica is on a node that is not one of the favored nodes.
                fail("Block location " + info + " not a favored node");
              }
          }
        }
      }
    }
  }
}