/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test {@link FSUtils}.
 */
@Category(MediumTests.class)
public class TestFSUtils {
  @Test public void testIsHDFS() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    htu.getConfiguration().setBoolean("dfs.support.append", false);
    assertFalse(FSUtils.isHDFS(htu.getConfiguration()));
    htu.getConfiguration().setBoolean("dfs.support.append", true);
    MiniDFSCluster cluster = null;
    try {
      cluster = htu.startMiniDFSCluster(1);
      assertTrue(FSUtils.isHDFS(htu.getConfiguration()));
      assertTrue(FSUtils.isAppendSupported(htu.getConfiguration()));
    } finally {
      if (cluster != null) cluster.shutdown();
    }
  }

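  /**
   * Write {@code dataSize} zero bytes to the given path. Used by the tests
   * below to create files that span a known number of HDFS blocks.
   */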
  private void writeDataToHDFS(FileSystem fs, Path file, int dataSize)
    throws Exception {
    FSDataOutputStream out = fs.create(file);
    byte[] data = new byte[dataSize];
    out.write(data, 0, dataSize);
    out.close();
  }

  @Test public void testComputeHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final int DEFAULT_BLOCK_SIZE = 1024;
    htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    MiniDFSCluster cluster = null;
    Path testFile = null;

    try {
      // set up a cluster with 3 nodes
      String hosts[] = new String[] { "host1", "host2", "host3" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with two blocks
      testFile = new Path("/test1.txt");
      writeDataToHDFS(fs, testFile, 2 * DEFAULT_BLOCK_SIZE);

      // Since the default replication factor is 3, the same as the number of
      // datanodes, the locality index for each host should be 100%; that is,
      // getWeight for each host should equal getUniqueBlocksTotalWeight.
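      // The NameNode learns block locations asynchronously, so poll for up to
      // two seconds before asserting (see HBASE-6175).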
      final long maxTime = System.currentTimeMillis() + 2000;
      boolean ok;
      do {
        ok = true;
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        long uniqueBlocksTotalWeight =
          blocksDistribution.getUniqueBlocksTotalWeight();
        for (String host : hosts) {
          long weight = blocksDistribution.getWeight(host);
          ok = (ok && uniqueBlocksTotalWeight == weight);
        }
      } while (!ok && System.currentTimeMillis() < maxTime);
      assertTrue(ok);
    } finally {
      htu.shutdownMiniDFSCluster();
    }

    try {
      // set up a cluster with 4 nodes
      String hosts[] = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with three blocks
      testFile = new Path("/test2.txt");
      writeDataToHDFS(fs, testFile, 3 * DEFAULT_BLOCK_SIZE);

      // Since the default replication factor is 3, there are nine block
      // replicas in total; the host with the highest weight should therefore
      // have weight == 3 * DEFAULT_BLOCK_SIZE.
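      // (Each block misses exactly one of the four hosts, so at most three
      // hosts can miss a block and at least one host holds all three.)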
      final long maxTime = System.currentTimeMillis() + 2000;
      long weight;
      long uniqueBlocksTotalWeight;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight();

        String tophost = blocksDistribution.getTopHosts().get(0);
        weight = blocksDistribution.getWeight(tophost);

        // NameNode is informed asynchronously, so we may have a delay. See HBASE-6175
      } while (uniqueBlocksTotalWeight != weight && System.currentTimeMillis() < maxTime);
      assertEquals(uniqueBlocksTotalWeight, weight);
    } finally {
      htu.shutdownMiniDFSCluster();
    }

    try {
      // set up a cluster with 4 nodes
      String hosts[] = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with one block
      testFile = new Path("/test3.txt");
      writeDataToHDFS(fs, testFile, DEFAULT_BLOCK_SIZE);

      // Since the default replication factor is 3, there are three block
      // replicas in total, so exactly one of the four hosts carries no weight.
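      // Consequently getTopHosts() should report exactly the three hosts that
      // received a replica.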
      final long maxTime = System.currentTimeMillis() + 2000;
      HDFSBlocksDistribution blocksDistribution;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        blocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        // NameNode is informed asynchronously, so we may have a delay. See HBASE-6175
      } while (blocksDistribution.getTopHosts().size() != 3 && System.currentTimeMillis() < maxTime);
      assertEquals("Wrong number of hosts distributing blocks.", 3,
        blocksDistribution.getTopHosts().size());
    } finally {
      htu.shutdownMiniDFSCluster();
    }
  }

  @Test
  public void testPermMask() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
    FileSystem fs = FileSystem.get(conf);
    // first check that we don't crash if we don't have perms set
    FsPermission defaultPerms = FSUtils.getFilePermissions(fs, conf,
        HConstants.DATA_FILE_UMASK_KEY);
    assertEquals(FsPermission.getDefault(), defaultPerms);

    conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077");
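    // a umask of 077 masks off the group and other permission bits, so the
    // resulting permission should be owner-only (700)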
    // now check that we get the right perms
    FsPermission filePerm = FSUtils.getFilePermissions(fs, conf,
        HConstants.DATA_FILE_UMASK_KEY);
    assertEquals(new FsPermission("700"), filePerm);

    // then check that a file is actually created with those perms
    Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
    try {
      FSDataOutputStream out = FSUtils.create(fs, p, filePerm);
      out.close();
      FileStatus stat = fs.getFileStatus(p);
      assertEquals(new FsPermission("700"), stat.getPermission());
      // and then cleanup
    } finally {
      fs.delete(p, true);
    }
  }

  @Test
  public void testDeleteAndExists() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
    FileSystem fs = FileSystem.get(conf);
    FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
    // create two files with the same name under different temp directories
    String file = UUID.randomUUID().toString();
    Path p = new Path("temptarget" + File.separator + file);
    Path p1 = new Path("temppath" + File.separator + file);
    try {
      FSDataOutputStream out = FSUtils.create(fs, p, perms);
      out.close();
      assertTrue("The created file should be present", FSUtils.isExists(fs, p));
      // delete the file with recursion as false; only the file itself is deleted
      FSUtils.delete(fs, p, false);
      // Create another file
      FSDataOutputStream out1 = FSUtils.create(fs, p1, perms);
      out1.close();
      // delete the file with recursion as true; for a plain file this still
      // deletes just the file
      FSUtils.delete(fs, p1, true);
      assertFalse("The created file should have been deleted", FSUtils.isExists(fs, p1));
      // and then cleanup
    } finally {
      FSUtils.delete(fs, p, true);
      FSUtils.delete(fs, p1, true);
    }
  }

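  // JUnit rule that watches for resources (e.g. threads) left behind by the
  // tests in this class.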
  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}