/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.ResourceCheckerJUnitRule;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test {@link FSUtils}.
 */
@Category(MediumTests.class)
public class TestFSUtils {
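
  // FSUtils.isHDFS should report false for a plain local-filesystem
  // configuration and true once a MiniDFSCluster backs it; with
  // dfs.support.append enabled, isAppendSupported should hold as well.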
  @Test public void testIsHDFS() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    htu.getConfiguration().setBoolean("dfs.support.append", false);
    assertFalse(FSUtils.isHDFS(htu.getConfiguration()));
    htu.getConfiguration().setBoolean("dfs.support.append", true);
    MiniDFSCluster cluster = null;
    try {
      cluster = htu.startMiniDFSCluster(1);
      assertTrue(FSUtils.isHDFS(htu.getConfiguration()));
      assertTrue(FSUtils.isAppendSupported(htu.getConfiguration()));
    } finally {
      if (cluster != null) cluster.shutdown();
    }
  }
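
  /**
   * Writes {@code dataSize} bytes of zeros to {@code file} so tests can
   * create files spanning a known number of HDFS blocks.
   */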
  private void writeDataToHDFS(FileSystem fs, Path file, int dataSize)
      throws Exception {
    FSDataOutputStream out = fs.create(file);
    byte[] data = new byte[dataSize];
    out.write(data, 0, dataSize);
    out.close();
  }
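
  // computeHDFSBlocksDistribution sums, per host, the sizes of the blocks
  // that host stores (its "weight"). Block placement is asynchronous, so
  // each scenario below polls for up to two seconds before asserting.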
  @Test public void testComputeHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final int DEFAULT_BLOCK_SIZE = 1024;
    htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    MiniDFSCluster cluster = null;
    Path testFile = null;

    try {
      // Three datanodes: with the default replication factor of 3, every
      // host ends up holding a replica of every block.
      String[] hosts = new String[] { "host1", "host2", "host3" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Create a file that spans two blocks.
      testFile = new Path("/test1.txt");
      writeDataToHDFS(fs, testFile, 2 * DEFAULT_BLOCK_SIZE);

      // Poll until each host's weight equals the total weight of the
      // unique blocks.
      final long maxTime = System.currentTimeMillis() + 2000;
      boolean ok;
      do {
        ok = true;
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        long uniqueBlocksTotalWeight =
          blocksDistribution.getUniqueBlocksTotalWeight();
        for (String host : hosts) {
          long weight = blocksDistribution.getWeight(host);
          ok = (ok && uniqueBlocksTotalWeight == weight);
        }
      } while (!ok && System.currentTimeMillis() < maxTime);
      assertTrue(ok);
    } finally {
      htu.shutdownMiniDFSCluster();
    }
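
    // Four datanodes, replication factor 3: each block is absent from exactly
    // one host, and three blocks can miss at most three distinct hosts, so at
    // least one host must hold a replica of every block.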
    try {
      String[] hosts = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Create a file that spans three blocks.
      testFile = new Path("/test2.txt");
      writeDataToHDFS(fs, testFile, 3 * DEFAULT_BLOCK_SIZE);

      // Poll until the top host's weight equals the total weight of the
      // unique blocks.
      final long maxTime = System.currentTimeMillis() + 2000;
      long weight;
      long uniqueBlocksTotalWeight;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight();

        String topHost = blocksDistribution.getTopHosts().get(0);
        weight = blocksDistribution.getWeight(topHost);
      } while (uniqueBlocksTotalWeight != weight
          && System.currentTimeMillis() < maxTime);
      assertEquals(uniqueBlocksTotalWeight, weight);
    } finally {
      htu.shutdownMiniDFSCluster();
    }
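
    // A single-block file on four datanodes: with replication factor 3, the
    // block should be reported on exactly three hosts.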
    try {
      String[] hosts = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Create a file that spans a single block.
      testFile = new Path("/test3.txt");
      writeDataToHDFS(fs, testFile, DEFAULT_BLOCK_SIZE);

      // Poll until all three replicas of the block have been placed.
      final long maxTime = System.currentTimeMillis() + 2000;
      HDFSBlocksDistribution blocksDistribution;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
      } while (blocksDistribution.getTopHosts().size() != 3
          && System.currentTimeMillis() < maxTime);
      assertEquals("Wrong number of hosts distributing blocks.", 3,
        blocksDistribution.getTopHosts().size());
    } finally {
      htu.shutdownMiniDFSCluster();
    }
  }
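
  // With ENABLE_DATA_FILE_UMASK set, getFilePermissions should return the
  // default permissions minus the configured umask, and FSUtils.create
  // should apply those permissions to new files.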
  @Test
  public void testPermMask() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
    FileSystem fs = FileSystem.get(conf);

    // No umask configured yet, so the default permissions apply.
    FsPermission defaultPerms = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
    assertEquals(FsPermission.getDefault(), defaultPerms);

    // A umask of 077 masks out all group and other bits, leaving 700.
    conf.set(HConstants.DATA_FILE_UMASK_KEY, "077");
    FsPermission filePerm = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
    assertEquals(new FsPermission("700"), filePerm);

    // A file created through FSUtils.create should carry those permissions.
    Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
    try {
      FSDataOutputStream out = FSUtils.create(fs, p, filePerm);
      out.close();
      FileStatus stat = fs.getFileStatus(p);
      assertEquals(new FsPermission("700"), stat.getPermission());
    } finally {
      fs.delete(p, true);
    }
  }
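
  // FSUtils.delete and FSUtils.isExists should behave like the plain
  // FileSystem calls: a created file exists, a deleted one does not.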
  @Test
  public void testDeleteAndExists() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
    FileSystem fs = FileSystem.get(conf);
    FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);

    String file = UUID.randomUUID().toString();
    Path p = new Path("temptarget" + File.separator + file);
    Path p1 = new Path("temppath" + File.separator + file);
    try {
      FSDataOutputStream out = FSUtils.create(fs, p, perms);
      out.close();
      assertTrue("The created file should be present", FSUtils.isExists(fs, p));

      // Delete with recursive as false; only the file itself is removed.
      FSUtils.delete(fs, p, false);
      assertFalse("The deleted file should not be present", FSUtils.isExists(fs, p));

      FSDataOutputStream out1 = FSUtils.create(fs, p1, perms);
      out1.close();

      // Delete with recursive as true.
      FSUtils.delete(fs, p1, true);
      assertFalse("The deleted file should not be present", FSUtils.isExists(fs, p1));
    } finally {
      FSUtils.delete(fs, p, true);
      FSUtils.delete(fs, p1, true);
    }
  }
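
  // Checks for resources (such as threads) leaked by the individual tests.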
  @Rule
  public ResourceCheckerJUnitRule cu = new ResourceCheckerJUnitRule();
}