/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test {@link FSUtils}.
 */
@Category(MediumTests.class)
public class TestFSUtils {
  /**
   * Test path compare and prefix checking.
   * @throws IOException if a filesystem operation fails
   */
  @Test
  public void testMatchingTail() throws IOException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final FileSystem fs = htu.getTestFileSystem();
    Path rootdir = htu.getDataTestDir();
    assertTrue(rootdir.depth() > 1);
    Path partPath = new Path("a", "b");
    Path fullPath = new Path(rootdir, partPath);
    Path fullyQualifiedPath = fs.makeQualified(fullPath);
    assertFalse(FSUtils.isMatchingTail(fullPath, partPath));
    assertFalse(FSUtils.isMatchingTail(fullPath, partPath.toString()));
    assertTrue(FSUtils.isStartingWithPath(rootdir, fullPath.toString()));
    assertTrue(FSUtils.isStartingWithPath(fullyQualifiedPath, fullPath.toString()));
    assertFalse(FSUtils.isStartingWithPath(rootdir, partPath.toString()));
    assertFalse(FSUtils.isMatchingTail(fullyQualifiedPath, partPath));
    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fullPath));
    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fullPath.toString()));
    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fs.makeQualified(fullPath)));
    assertTrue(FSUtils.isStartingWithPath(rootdir, fullyQualifiedPath.toString()));
    assertFalse(FSUtils.isMatchingTail(fullPath, new Path("x")));
    assertFalse(FSUtils.isMatchingTail(new Path("x"), fullPath));
  }

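  /**
   * Test writing out the cluster version file and reading it back, including
   * conversion of an old-format version file to the pb format.
   */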
  @Test
  public void testVersion() throws DeserializationException, IOException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final FileSystem fs = htu.getTestFileSystem();
    final Path rootdir = htu.getDataTestDir();
    assertNull(FSUtils.getVersion(fs, rootdir));
    // Write out old format version file.  See if we can read it in and convert.
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    FSDataOutputStream s = fs.create(versionFile);
    final String version = HConstants.FILE_SYSTEM_VERSION;
    s.writeUTF(version);
    s.close();
    assertTrue(fs.exists(versionFile));
    FileStatus[] status = fs.listStatus(versionFile);
    assertNotNull(status);
    assertTrue(status.length > 0);
    String newVersion = FSUtils.getVersion(fs, rootdir);
    assertEquals(version.length(), newVersion.length());
    assertEquals(version, newVersion);
    // The file will have been converted. Exercise the pb format.
    assertEquals(version, FSUtils.getVersion(fs, rootdir));
    FSUtils.checkVersion(fs, rootdir, true);
  }

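  /**
   * Test that isHDFS is false against the local test filesystem and true once
   * a mini DFS cluster is running, and that the cluster supports append.
   */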
  @Test
  public void testIsHDFS() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    htu.getConfiguration().setBoolean("dfs.support.append", false);
    assertFalse(FSUtils.isHDFS(htu.getConfiguration()));
    htu.getConfiguration().setBoolean("dfs.support.append", true);
    MiniDFSCluster cluster = null;
    try {
      cluster = htu.startMiniDFSCluster(1);
      assertTrue(FSUtils.isHDFS(htu.getConfiguration()));
      assertTrue(FSUtils.isAppendSupported(htu.getConfiguration()));
    } finally {
      if (cluster != null) cluster.shutdown();
    }
  }

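  /**
   * Write {@code dataSize} bytes of zeroed data to {@code file} on the given
   * filesystem.
   */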
  private void writeDataToHDFS(FileSystem fs, Path file, int dataSize)
    throws Exception {
    FSDataOutputStream out = fs.create(file);
    byte[] data = new byte[dataSize];
    out.write(data, 0, dataSize);
    out.close();
  }

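  /**
   * Test computeHDFSBlocksDistribution against files of two, three, and one
   * blocks on mini DFS clusters of three and four datanodes.
   */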
  @Test
  public void testComputeHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final int DEFAULT_BLOCK_SIZE = 1024;
    htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    MiniDFSCluster cluster = null;
    Path testFile = null;

    try {
      // set up a cluster with 3 nodes
      String[] hosts = new String[] { "host1", "host2", "host3" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with two blocks
      testFile = new Path("/test1.txt");
      writeDataToHDFS(fs, testFile, 2 * DEFAULT_BLOCK_SIZE);

      // Given that the default replication factor is 3, the same as the number
      // of datanodes, the locality index for each host should be 100%; that is,
      // getWeight for each host should equal getUniqueBlocksTotalWeight.
      final long maxTime = System.currentTimeMillis() + 2000;
      boolean ok;
      do {
        ok = true;
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        long uniqueBlocksTotalWeight =
          blocksDistribution.getUniqueBlocksTotalWeight();
        for (String host : hosts) {
          long weight = blocksDistribution.getWeight(host);
          ok = (ok && uniqueBlocksTotalWeight == weight);
        }
      } while (!ok && System.currentTimeMillis() < maxTime);
      assertTrue(ok);
    } finally {
      htu.shutdownMiniDFSCluster();
    }

    try {
      // set up a cluster with 4 nodes
      String[] hosts = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with three blocks
      testFile = new Path("/test2.txt");
      writeDataToHDFS(fs, testFile, 3 * DEFAULT_BLOCK_SIZE);

      // Given that the default replication factor is 3, there will be a total
      // of 9 block replicas; thus the host with the highest weight should have
      // weight == 3 * DEFAULT_BLOCK_SIZE.
      final long maxTime = System.currentTimeMillis() + 2000;
      long weight;
      long uniqueBlocksTotalWeight;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight();

        String topHost = blocksDistribution.getTopHosts().get(0);
        weight = blocksDistribution.getWeight(topHost);

        // The NameNode is informed asynchronously, so we may have a delay. See HBASE-6175.
      } while (uniqueBlocksTotalWeight != weight && System.currentTimeMillis() < maxTime);
      assertEquals(uniqueBlocksTotalWeight, weight);
    } finally {
      htu.shutdownMiniDFSCluster();
    }

    try {
      // set up a cluster with 4 nodes
      String[] hosts = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with one block
      testFile = new Path("/test3.txt");
      writeDataToHDFS(fs, testFile, DEFAULT_BLOCK_SIZE);

      // Given that the default replication factor is 3, there will be a total
      // of 3 block replicas; thus one of the four hosts will have no weight.
      final long maxTime = System.currentTimeMillis() + 2000;
      HDFSBlocksDistribution blocksDistribution;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        blocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        // The NameNode is informed asynchronously, so we may have a delay. See HBASE-6175.
      } while (blocksDistribution.getTopHosts().size() != 3 && System.currentTimeMillis() < maxTime);
      assertEquals("Wrong number of hosts distributing blocks.", 3,
        blocksDistribution.getTopHosts().size());
    } finally {
      htu.shutdownMiniDFSCluster();
    }
  }

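  /**
   * Test that getFilePermissions falls back to the filesystem default when no
   * umask is configured, and honors the configured umask otherwise.
   */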
  @Test
  public void testPermMask() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
    FileSystem fs = FileSystem.get(conf);
    // first check that we don't crash if we don't have perms set
    FsPermission defaultPerms = FSUtils.getFilePermissions(fs, conf,
        HConstants.DATA_FILE_UMASK_KEY);
    assertEquals(FsPermission.getDefault(), defaultPerms);

    conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077");
    // now check that we get the right perms
    FsPermission filePerm = FSUtils.getFilePermissions(fs, conf,
        HConstants.DATA_FILE_UMASK_KEY);
    assertEquals(new FsPermission("700"), filePerm);

    // then check that the file is created with the correct perms
    Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
    try {
      FSDataOutputStream out = FSUtils.create(fs, p, filePerm, null);
      out.close();
      FileStatus stat = fs.getFileStatus(p);
      assertEquals(new FsPermission("700"), stat.getPermission());
      // and then cleanup
    } finally {
      fs.delete(p, true);
    }
  }

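  /**
   * Test FSUtils.delete and FSUtils.isExists on files, with and without
   * recursion.
   */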
  @Test
  public void testDeleteAndExists() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();
    conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
    FileSystem fs = FileSystem.get(conf);
    FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
    // create two paths under the test data dir
    String file = UUID.randomUUID().toString();
    Path p = new Path(htu.getDataTestDir(), "temptarget" + File.separator + file);
    Path p1 = new Path(htu.getDataTestDir(), "temppath" + File.separator + file);
    try {
      FSDataOutputStream out = FSUtils.create(fs, p, perms, null);
      out.close();
      assertTrue("The created file should be present", FSUtils.isExists(fs, p));
      // delete the file with recursion as false; only the file will be deleted
      FSUtils.delete(fs, p, false);
      // Create another file
      FSDataOutputStream out1 = FSUtils.create(fs, p1, perms, null);
      out1.close();
      // delete the file with recursion as true; still only the file will be deleted
      FSUtils.delete(fs, p1, true);
      assertFalse("The deleted file should not be present", FSUtils.isExists(fs, p1));
      // and then cleanup
    } finally {
      FSUtils.delete(fs, p, true);
      FSUtils.delete(fs, p1, true);
    }
  }

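  /**
   * Test that renameAndSetModifyTime moves the file and stamps the destination
   * with the time from the injected environment edge.
   */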
  @Test
  public void testRenameAndSetModifyTime() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();

    MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
    try {
      assertTrue(FSUtils.isHDFS(conf));

      FileSystem fs = FileSystem.get(conf);
      Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");

      String file = UUID.randomUUID().toString();
      Path p = new Path(testDir, file);

      FSDataOutputStream out = fs.create(p);
      out.close();
      assertTrue("The created file should be present", FSUtils.isExists(fs, p));

      long expect = System.currentTimeMillis() + 1000;
      assertNotEquals(expect, fs.getFileStatus(p).getModificationTime());

      ManualEnvironmentEdge mockEnv = new ManualEnvironmentEdge();
      mockEnv.setValue(expect);
      EnvironmentEdgeManager.injectEdge(mockEnv);

      String dstFile = UUID.randomUUID().toString();
      Path dst = new Path(testDir, dstFile);

      assertTrue(FSUtils.renameAndSetModifyTime(fs, p, dst));
      assertFalse("The moved file should not be present", FSUtils.isExists(fs, p));
      assertTrue("The dst file should be present", FSUtils.isExists(fs, dst));

      assertEquals(expect, fs.getFileStatus(dst).getModificationTime());
    } finally {
      // Restore the default environment edge and shut the cluster down even if
      // an assertion above fails.
      EnvironmentEdgeManager.reset();
      cluster.shutdown();
    }
  }
}