/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertTrue;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;

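/**
 * Test the FSHDFSUtils recoverLease and isSameHdfs utilities against
 * mocked-up filesystems.
 */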
@Category(MediumTests.class)
public class TestFSHDFSUtils {
  private static final Log LOG = LogFactory.getLog(TestFSHDFSUtils.class);
  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
  static {
    // Shorten the pauses between lease recovery retries so these tests run quickly.
    Configuration conf = HTU.getConfiguration();
    conf.setInt("hbase.lease.recovery.first.pause", 10);
    conf.setInt("hbase.lease.recovery.pause", 10);
  }
  private final FSHDFSUtils fsHDFSUtils = new FSHDFSUtils();
  private static final Path FILE = new Path(HTU.getDataTestDir(), "file.txt");
  long startTime = -1;

  @Before
  public void setup() {
    this.startTime = EnvironmentEdgeManager.currentTimeMillis();
  }
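
  /**
   * Test that recoverLease is retried until the lease is eventually recovered.
   */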
  @Test (timeout = 30000)
  public void testRecoverLease() throws IOException {
    HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
    CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
    Mockito.when(reporter.progress()).thenReturn(true);
    DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class);
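    // Fail four recoverLease calls, then succeed on the fifth.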
    Mockito.when(dfs.recoverLease(FILE)).
      thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(true);
    assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter));
    Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE);
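    // The recovery above should have taken at least 3 * hbase.lease.recovery.dfs.timeout:
    // the first invocations return quickly, then we fall into the longer wait loop.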
    assertTrue((EnvironmentEdgeManager.currentTimeMillis() - this.startTime) >
        (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
  }
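
  /**
   * Test that a filesystem with the HDFS-4525 isFileClosed API gets us out of the
   * lease recovery loop faster.
   */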
  @Test (timeout = 30000)
  public void testIsFileClosed() throws IOException {
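    // Make the dfs timeout long so it is plain we broke out of the loop because of
    // the isFileClosed invocation rather than because the timeout elapsed.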
    HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 100000);
    CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
    Mockito.when(reporter.progress()).thenReturn(true);
    IsFileClosedDistributedFileSystem dfs = Mockito.mock(IsFileClosedDistributedFileSystem.class);
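    // Fail the first two recoverLease invocations; once in the wait loop,
    // isFileClosed returning true should break us out.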
    Mockito.when(dfs.recoverLease(FILE)).
      thenReturn(false).thenReturn(false).thenReturn(true);
    Mockito.when(dfs.isFileClosed(FILE)).thenReturn(true);
    assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter));
    Mockito.verify(dfs, Mockito.times(2)).recoverLease(FILE);
    Mockito.verify(dfs, Mockito.times(1)).isFileClosed(FILE);
  }

  @Test
  public void testIsSameHdfs() throws IOException {
    try {
      // Probe for the HA-aware API; older Hadoop versions do not have it.
      Class<?> dfsUtilClazz = Class.forName("org.apache.hadoop.hdfs.DFSUtil");
      dfsUtilClazz.getMethod("getNNServiceRpcAddresses", Configuration.class);
    } catch (Exception e) {
      LOG.info("Skipping testIsSameHdfs because this Hadoop version does not support HA.");
      return;
    }

    Configuration conf = HBaseConfiguration.create();
    Path srcPath = new Path("hdfs://localhost:8020/");
    Path desPath = new Path("hdfs://127.0.0.1/");
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileSystem desFs = desPath.getFileSystem(conf);
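
    // localhost and 127.0.0.1, both on the default port, point at the same HDFS.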
    assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
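
    // Same host but a different port: not the same filesystem.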
    desPath = new Path("hdfs://127.0.0.1:8070/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
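
    // A different host on the same port: also not the same filesystem.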
    desPath = new Path("hdfs://127.0.1.1:8020/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
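
    // Now compare against an HA nameservice; isSameHdfs has to resolve the
    // nameservice to its configured namenode addresses.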
    conf.set("fs.defaultFS", "hdfs://haosong-hadoop");
    conf.set("dfs.nameservices", "haosong-hadoop");
    conf.set("dfs.ha.namenodes.haosong-hadoop", "nn1,nn2");
    conf.set("dfs.client.failover.proxy.provider.haosong-hadoop",
      "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
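
    // nn1 matches the source namenode address, so the nameservice is the same HDFS.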
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.0.0.1:8020");
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.10.2.1:8000");
    desPath = new Path("/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
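
    // Swap the addresses so neither namenode matches the source: a different HDFS.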
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.10.2.1:8020");
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.0.0.1:8000");
    desPath = new Path("/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
  }
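
  /**
   * Version of DistributedFileSystem that has the HDFS-4525 isFileClosed call in it.
   */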
  class IsFileClosedDistributedFileSystem extends DistributedFileSystem {
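    /**
     * Close status of a file. Mirrors the HDFS-4525 API.
     * @return true if file is already closed
     */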
    public boolean isFileClosed(Path f) throws IOException {
      return false;
    }
  }
}