/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertTrue;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;

/**
 * Test our recoverLease loop against a mocked-up filesystem.
 */
@Category(MediumTests.class)
public class TestFSHDFSUtils {
  private static final Log LOG = LogFactory.getLog(TestFSHDFSUtils.class);
  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
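  // Shorten the lease recovery pauses so the retry loops exercised by these
  // tests spin quickly rather than waiting out the production defaults.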
  static {
    Configuration conf = HTU.getConfiguration();
    conf.setInt("hbase.lease.recovery.first.pause", 10);
    conf.setInt("hbase.lease.recovery.pause", 10);
  }
  private FSHDFSUtils fsHDFSUtils = new FSHDFSUtils();
  private static Path FILE = new Path(HTU.getDataTestDir(), "file.txt");
  long startTime = -1;

  @Before
  public void setup() {
    this.startTime = EnvironmentEdgeManager.currentTimeMillis();
  }

  /**
   * Test recover lease eventually succeeding.
   * @throws IOException
   */
  @Test (timeout = 30000)
  public void testRecoverLease() throws IOException {
    HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
    CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
    Mockito.when(reporter.progress()).thenReturn(true);
    DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class);
    // Fail four times and pass on the fifth.
    Mockito.when(dfs.recoverLease(FILE)).
      thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(true);
    assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter));
    Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE);
    // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two
    // invocations happen pretty fast... then we fall into the longer wait loop).
    assertTrue((EnvironmentEdgeManager.currentTimeMillis() - this.startTime) >
      (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
  }

  /**
   * Test that isFileClosed makes us recover lease faster.
   * @throws IOException
   */
  @Test (timeout = 30000)
  public void testIsFileClosed() throws IOException {
    // Make this time long so it is plain we broke out because of the isFileClosed invocation.
    HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 100000);
    CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
    Mockito.when(reporter.progress()).thenReturn(true);
    IsFileClosedDistributedFileSystem dfs = Mockito.mock(IsFileClosedDistributedFileSystem.class);
    // Fail the first two recoverLease calls -- the two fast invocations -- so we fall into
    // the long loop during which isFileClosed is consulted; have it return true so we break
    // out of the loop after the second recoverLease call.
    Mockito.when(dfs.recoverLease(FILE)).
      thenReturn(false).thenReturn(false).thenReturn(true);
    Mockito.when(dfs.isFileClosed(FILE)).thenReturn(true);
    assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter));
    Mockito.verify(dfs, Mockito.times(2)).recoverLease(FILE);
    Mockito.verify(dfs, Mockito.times(1)).isFileClosed(FILE);
  }

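  /**
   * Test that isSameHdfs recognizes equivalent namenode addresses, including
   * addresses published through an HA nameservice.
   */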
  @Test
  public void testIsSameHdfs() throws IOException {
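    // Probe via reflection for DFSUtil.getNNServiceRpcAddresses; only
    // HA-capable Hadoop versions have it, so skip the test when it is absent.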
    try {
      Class<?> dfsUtilClazz = Class.forName("org.apache.hadoop.hdfs.DFSUtil");
      dfsUtilClazz.getMethod("getNNServiceRpcAddresses", Configuration.class);
    } catch (Exception e) {
      LOG.info("Skipping testIsSameHdfs test case because this Hadoop version does not support HA.");
      return;
    }

    Configuration conf = HBaseConfiguration.create();
    Path srcPath = new Path("hdfs://localhost:8020/");
    Path desPath = new Path("hdfs://127.0.0.1/");
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileSystem desFs = desPath.getFileSystem(conf);

    assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));

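    // Same host but a different port is a different HDFS instance.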
    desPath = new Path("hdfs://127.0.0.1:8070/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));

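    // A different host on the same port is a different HDFS instance.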
    desPath = new Path("hdfs://127.0.1.1:8020/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));

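    // Configure an HA nameservice whose nn1 address matches the source
    // namenode; the two filesystems should be treated as the same HDFS.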
    conf.set("fs.defaultFS", "hdfs://haosong-hadoop");
    conf.set("dfs.nameservices", "haosong-hadoop");
    conf.set("dfs.ha.namenodes.haosong-hadoop", "nn1,nn2");
    conf.set("dfs.client.failover.proxy.provider.haosong-hadoop",
            "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");

    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.0.0.1:8020");
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.10.2.1:8000");
    desPath = new Path("/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));

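    // Swap the ports so neither namenode address matches the source namenode;
    // the filesystems should no longer compare as the same HDFS.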
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.10.2.1:8020");
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.0.0.1:8000");
    desPath = new Path("/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
  }

  /**
   * Version of DFS that has HDFS-4525 in it.
   */
  class IsFileClosedDistributedFileSystem extends DistributedFileSystem {
    /**
     * Close status of a file. Copied over from HDFS-4525.
     * @return true if file is already closed
     */
    public boolean isFileClosed(Path f) throws IOException {
      return false;
    }
  }
}