/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import junit.framework.TestCase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.io.FileLink;
import org.apache.hadoop.hdfs.MiniDFSCluster;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
45
46
47
48
49
50 @Category(MediumTests.class)
51 public class TestFileLink {
52
53
54
55
56 @Test
57 public void testHDFSLinkReadDuringRename() throws Exception {
58 HBaseTestingUtility testUtil = new HBaseTestingUtility();
59 Configuration conf = testUtil.getConfiguration();
60 conf.setInt("dfs.blocksize", 1024 * 1024);
61 conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);
62
63 testUtil.startMiniDFSCluster(1);
64 MiniDFSCluster cluster = testUtil.getDFSCluster();
65 FileSystem fs = cluster.getFileSystem();
66 assertEquals("hdfs", fs.getUri().getScheme());
67
68 try {
69 testLinkReadDuringRename(fs, testUtil.getDefaultRootDirPath());
70 } finally {
71 testUtil.shutdownMiniCluster();
72 }
73 }
74
75
76
77
78
79 @Test
80 public void testLocalLinkReadDuringRename() throws IOException {
81 HBaseTestingUtility testUtil = new HBaseTestingUtility();
82 FileSystem fs = testUtil.getTestFileSystem();
83 assertEquals("file", fs.getUri().getScheme());
84 testLinkReadDuringRename(fs, testUtil.getDataTestDir());
85 }
86
87
88
89
90 private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws IOException {
91 Path originalPath = new Path(rootDir, "test.file");
92 Path archivedPath = new Path(rootDir, "archived.file");
93
94 writeSomeData(fs, originalPath, 256 << 20, (byte)2);
95
96 List<Path> files = new ArrayList<Path>();
97 files.add(originalPath);
98 files.add(archivedPath);
99
100 FileLink link = new FileLink(files);
101 FSDataInputStream in = link.open(fs);
102 try {
103 byte[] data = new byte[8192];
104 long size = 0;
105
106
107 int n = in.read(data);
108 dataVerify(data, n, (byte)2);
109 size += n;
110
111
112 assertFalse(fs.exists(archivedPath));
113 fs.rename(originalPath, archivedPath);
114 assertFalse(fs.exists(originalPath));
115 assertTrue(fs.exists(archivedPath));
116
117
118 while ((n = in.read(data)) > 0) {
119 dataVerify(data, n, (byte)2);
120 size += n;
121 }
122
123 assertEquals(256 << 20, size);
124 } finally {
125 in.close();
126 if (fs.exists(originalPath)) fs.delete(originalPath);
127 if (fs.exists(archivedPath)) fs.delete(archivedPath);
128 }
129 }
130
131
132
133
134
135
136
137
138
139
140
141
142 @Test
143 public void testHDFSLinkReadDuringDelete() throws Exception {
144 HBaseTestingUtility testUtil = new HBaseTestingUtility();
145 Configuration conf = testUtil.getConfiguration();
146 conf.setInt("dfs.blocksize", 1024 * 1024);
147 conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);
148
149 testUtil.startMiniDFSCluster(1);
150 MiniDFSCluster cluster = testUtil.getDFSCluster();
151 FileSystem fs = cluster.getFileSystem();
152 assertEquals("hdfs", fs.getUri().getScheme());
153
154 try {
155 List<Path> files = new ArrayList<Path>();
156 for (int i = 0; i < 3; i++) {
157 Path path = new Path(String.format("test-data-%d", i));
158 writeSomeData(fs, path, 1 << 20, (byte)i);
159 files.add(path);
160 }
161
162 FileLink link = new FileLink(files);
163 FSDataInputStream in = link.open(fs);
164 try {
165 byte[] data = new byte[8192];
166 int n;
167
168
169 n = in.read(data);
170 dataVerify(data, n, (byte)0);
171 fs.delete(files.get(0));
172 skipBuffer(in, (byte)0);
173
174
175 n = in.read(data);
176 dataVerify(data, n, (byte)1);
177 fs.delete(files.get(1));
178 skipBuffer(in, (byte)1);
179
180
181 n = in.read(data);
182 dataVerify(data, n, (byte)2);
183 fs.delete(files.get(2));
184 skipBuffer(in, (byte)2);
185
186
187 try {
188 n = in.read(data);
189 assert(n <= 0);
190 } catch (FileNotFoundException e) {
191 assertTrue(true);
192 }
193 } finally {
194 in.close();
195 }
196 } finally {
197 testUtil.shutdownMiniCluster();
198 }
199 }
200
201
202
203
204 private void writeSomeData (FileSystem fs, Path path, long size, byte v) throws IOException {
205 byte[] data = new byte[4096];
206 for (int i = 0; i < data.length; i++) {
207 data[i] = v;
208 }
209
210 FSDataOutputStream stream = fs.create(path);
211 try {
212 long written = 0;
213 while (written < size) {
214 stream.write(data, 0, data.length);
215 written += data.length;
216 }
217 } finally {
218 stream.close();
219 }
220 }
221
222
223
224
225 private static void dataVerify(byte[] data, int n, byte v) {
226 for (int i = 0; i < n; ++i) {
227 assertEquals(v, data[i]);
228 }
229 }
230
231 private static void skipBuffer(FSDataInputStream in, byte v) throws IOException {
232 byte[] data = new byte[8192];
233 try {
234 int n;
235 while ((n = in.read(data)) == data.length) {
236 for (int i = 0; i < data.length; ++i) {
237 if (data[i] != v)
238 throw new Exception("File changed");
239 }
240 }
241 } catch (Exception e) {
242 }
243 }
244 }