/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
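
/**
 * Test our recoverLease loop against a mocked-up filesystem.
 */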
@Category(MediumTests.class)
public class TestFSHDFSUtils {
  private static final Log LOG = LogFactory.getLog(TestFSHDFSUtils.class);
  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
  static {
    // Shorten the lease recovery pauses so these tests run quickly.
    Configuration conf = HTU.getConfiguration();
    conf.setInt("hbase.lease.recovery.first.pause", 10);
    conf.setInt("hbase.lease.recovery.pause", 10);
  }
  private FSHDFSUtils fsHDFSUtils = new FSHDFSUtils();
  private static Path FILE = new Path(HTU.getDataTestDir(), "file.txt");
  long startTime = -1;

  @Before
  public void setup() {
    this.startTime = EnvironmentEdgeManager.currentTimeMillis();
  }
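
  /**
   * Test that recoverLease keeps retrying until the lease is recovered.
   */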
  @Test (timeout = 30000)
  public void testRecoverLease() throws IOException {
    HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
    DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class);
    // Fail four recoverLease invocations, then succeed on the fifth.
    Mockito.when(dfs.recoverLease(FILE)).
      thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(true);
    assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration()));
    Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE);
    // Make sure we waited at least three dfs timeouts: the first invocations happen
    // quickly, then we fall into the longer wait loop between retries.
    assertTrue((EnvironmentEdgeManager.currentTimeMillis() - this.startTime) >
      (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
  }
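
  /**
   * Test that a filesystem with isFileClosed lets us short-circuit lease recovery
   * without waiting out the full dfs timeout.
   */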
  @Test (timeout = 30000)
  public void testIsFileClosed() throws IOException {
    // Make this timeout long so it is plain we broke out because of the isFileClosed call.
    HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 100000);
    IsFileClosedDistributedFileSystem dfs = Mockito.mock(IsFileClosedDistributedFileSystem.class);
    // Fail the first two fast recoverLease invocations; isFileClosed then reports the file
    // closed, so recovery succeeds without a third recoverLease call.
    Mockito.when(dfs.recoverLease(FILE)).
      thenReturn(false).thenReturn(false).thenReturn(true);
    Mockito.when(dfs.isFileClosed(FILE)).thenReturn(true);
    assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration()));
    Mockito.verify(dfs, Mockito.times(2)).recoverLease(FILE);
    Mockito.verify(dfs, Mockito.times(1)).isFileClosed(FILE);
  }

  @Test
  public void testIsSameHdfs() throws IOException {
    try {
      // Skip on Hadoop versions that lack HA support (no DFSUtil#getNNServiceRpcAddresses).
      Class<?> dfsUtilClazz = Class.forName("org.apache.hadoop.hdfs.DFSUtil");
      dfsUtilClazz.getMethod("getNNServiceRpcAddresses", Configuration.class);
    } catch (Exception e) {
      LOG.info("Skipping testIsSameHdfs because this Hadoop version has no HA support.");
      return;
    }

    Configuration conf = HBaseConfiguration.create();
    Path srcPath = new Path("hdfs://localhost:8020/");
    Path desPath = new Path("hdfs://127.0.0.1/");
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileSystem desFs = desPath.getFileSystem(conf);

    // localhost:8020 and 127.0.0.1 on the default port are the same filesystem.
    assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));

    // Same host on a different port is a different filesystem.
    desPath = new Path("hdfs://127.0.0.1:8070/");
    desFs = desPath.getFileSystem(conf);
    assertFalse(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));

    // A different host on the same port is a different filesystem.
    desPath = new Path("hdfs://127.0.1.1:8020/");
    desFs = desPath.getFileSystem(conf);
    assertFalse(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));

    // Set up an HA nameservice; nn1 matches the source namenode address.
    conf.set("fs.defaultFS", "hdfs://haosong-hadoop");
    conf.set("dfs.nameservices", "haosong-hadoop");
    conf.set("dfs.federation.nameservices", "haosong-hadoop");
    conf.set("dfs.ha.namenodes.haosong-hadoop", "nn1,nn2");
    conf.set("dfs.client.failover.proxy.provider.haosong-hadoop",
        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");

    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.0.0.1:8020");
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.10.2.1:8000");
    desPath = new Path("/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));

    // Neither namenode address matches the source, so the filesystems differ.
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.10.2.1:8020");
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.0.0.1:8000");
    desPath = new Path("/");
    desFs = desPath.getFileSystem(conf);
    assertFalse(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
  }
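
  /**
   * Version of DistributedFileSystem that has HDFS-4525's isFileClosed in it,
   * so the method can be mocked on Hadoop versions that lack it. No @Override,
   * since older superclasses do not declare the method.
   */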
  class IsFileClosedDistributedFileSystem extends DistributedFileSystem {
    /**
     * Close status of a file; the test mocks this, so the body is never run.
     * @return true if file is already closed
     */
    public boolean isFileClosed(Path f) throws IOException {
      return false;
    }
  }
}