/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test {@link FSUtils}.
 */
@Category(MediumTests.class)
public class TestFSUtils {
  /**
   * Test path compare and prefix checking.
   * @throws IOException
   */
  @Test
  public void testMatchingTail() throws IOException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final FileSystem fs = htu.getTestFileSystem();
    Path rootdir = htu.getDataTestDir();
    assertTrue(rootdir.depth() > 1);
    Path partPath = new Path("a", "b");
    Path fullPath = new Path(rootdir, partPath);
    Path fullyQualifiedPath = fs.makeQualified(fullPath);
    assertFalse(FSUtils.isMatchingTail(fullPath, partPath));
    assertFalse(FSUtils.isMatchingTail(fullPath, partPath.toString()));
    assertTrue(FSUtils.isStartingWithPath(rootdir, fullPath.toString()));
    assertTrue(FSUtils.isStartingWithPath(fullyQualifiedPath, fullPath.toString()));
    assertFalse(FSUtils.isStartingWithPath(rootdir, partPath.toString()));
    assertFalse(FSUtils.isMatchingTail(fullyQualifiedPath, partPath));
    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fullPath));
    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fullPath.toString()));
    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fs.makeQualified(fullPath)));
    assertTrue(FSUtils.isStartingWithPath(rootdir, fullyQualifiedPath.toString()));
    assertFalse(FSUtils.isMatchingTail(fullPath, new Path("x")));
    assertFalse(FSUtils.isMatchingTail(new Path("x"), fullPath));
  }

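  /**
   * Test that a version file written in the old {@code writeUTF} format is read
   * back and converted by {@link FSUtils#getVersion(FileSystem, Path)}, and that
   * the converted pb-format file yields the same version string.
   */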
  @Test
  public void testVersion() throws DeserializationException, IOException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final FileSystem fs = htu.getTestFileSystem();
    final Path rootdir = htu.getDataTestDir();
    assertNull(FSUtils.getVersion(fs, rootdir));
    // Write out old format version file.  See if we can read it in and convert.
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    FSDataOutputStream s = fs.create(versionFile);
    final String version = HConstants.FILE_SYSTEM_VERSION;
    s.writeUTF(version);
    s.close();
    assertTrue(fs.exists(versionFile));
    FileStatus[] status = fs.listStatus(versionFile);
    assertNotNull(status);
    assertTrue(status.length > 0);
    String newVersion = FSUtils.getVersion(fs, rootdir);
    assertEquals(version.length(), newVersion.length());
    assertEquals(version, newVersion);
    // File will have been converted. Exercise the pb format.
    assertEquals(version, FSUtils.getVersion(fs, rootdir));
    FSUtils.checkVersion(fs, rootdir, true);
  }

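  /**
   * Test that {@link FSUtils#isHDFS(Configuration)} is false against the local
   * filesystem and true once a mini DFS cluster is up, and that append support
   * is reported for the cluster.
   */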
  @Test
  public void testIsHDFS() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    htu.getConfiguration().setBoolean("dfs.support.append", false);
    assertFalse(FSUtils.isHDFS(htu.getConfiguration()));
    htu.getConfiguration().setBoolean("dfs.support.append", true);
    MiniDFSCluster cluster = null;
    try {
      cluster = htu.startMiniDFSCluster(1);
      assertTrue(FSUtils.isHDFS(htu.getConfiguration()));
      assertTrue(FSUtils.isAppendSupported(htu.getConfiguration()));
    } finally {
      if (cluster != null) cluster.shutdown();
    }
  }

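  /**
   * Write {@code dataSize} zero bytes to {@code file} on the given filesystem,
   * creating or overwriting the file.
   */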
  private void writeDataToHDFS(FileSystem fs, Path file, int dataSize)
    throws Exception {
    FSDataOutputStream out = fs.create(file);
    byte[] data = new byte[dataSize];
    out.write(data, 0, dataSize);
    out.close();
  }

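  /**
   * Test {@link FSUtils#computeHDFSBlocksDistribution} on mini DFS clusters of
   * three and four datanodes, using files of two, three and one blocks
   * respectively. Each case polls for up to two seconds because the NameNode
   * learns of block locations asynchronously (see HBASE-6175).
   */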
  @Test
  public void testComputeHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final int DEFAULT_BLOCK_SIZE = 1024;
    htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    MiniDFSCluster cluster = null;
    Path testFile = null;

    try {
      // Set up a cluster with three nodes.
      String[] hosts = new String[] { "host1", "host2", "host3" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Create a file with two blocks.
      testFile = new Path("/test1.txt");
      writeDataToHDFS(fs, testFile, 2 * DEFAULT_BLOCK_SIZE);

      // Given the default replication factor is 3, the same as the number of
      // datanodes, the locality index for each host should be 100%; that is,
      // getWeight for each host should equal getUniqueBlocksTotalWeight.
      // The NameNode is informed asynchronously, so poll until the weights
      // agree or we time out. See HBASE-6175.
      final long maxTime = System.currentTimeMillis() + 2000;
      boolean ok;
      do {
        ok = true;
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        long uniqueBlocksTotalWeight =
          blocksDistribution.getUniqueBlocksTotalWeight();
        for (String host : hosts) {
          long weight = blocksDistribution.getWeight(host);
          ok = (ok && uniqueBlocksTotalWeight == weight);
        }
      } while (!ok && System.currentTimeMillis() < maxTime);
      assertTrue(ok);
    } finally {
      htu.shutdownMiniDFSCluster();
    }

    try {
      // Set up a cluster with four nodes.
      String[] hosts = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Create a file with three blocks.
      testFile = new Path("/test2.txt");
      writeDataToHDFS(fs, testFile, 3 * DEFAULT_BLOCK_SIZE);

      // Given the default replication factor is 3, there will be nine block
      // replicas in total; thus the host with the highest weight should have
      // weight == 3 * DEFAULT_BLOCK_SIZE.
      final long maxTime = System.currentTimeMillis() + 2000;
      long weight;
      long uniqueBlocksTotalWeight;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight();

        String tophost = blocksDistribution.getTopHosts().get(0);
        weight = blocksDistribution.getWeight(tophost);

        // The NameNode is informed asynchronously, so we may have a delay. See HBASE-6175.
      } while (uniqueBlocksTotalWeight != weight && System.currentTimeMillis() < maxTime);
      assertEquals(uniqueBlocksTotalWeight, weight);
    } finally {
      htu.shutdownMiniDFSCluster();
    }

    try {
      // Set up a cluster with four nodes.
      String[] hosts = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Create a file with one block.
      testFile = new Path("/test3.txt");
      writeDataToHDFS(fs, testFile, DEFAULT_BLOCK_SIZE);

      // Given the default replication factor is 3, there will be three block
      // replicas in total; thus one of the four hosts will carry no weight.
      final long maxTime = System.currentTimeMillis() + 2000;
      HDFSBlocksDistribution blocksDistribution;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        blocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        // The NameNode is informed asynchronously, so we may have a delay. See HBASE-6175.
      } while (blocksDistribution.getTopHosts().size() != 3 && System.currentTimeMillis() < maxTime);
      assertEquals("Wrong number of hosts distributing blocks.", 3,
        blocksDistribution.getTopHosts().size());
    } finally {
      htu.shutdownMiniDFSCluster();
    }
  }

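  /**
   * Test that {@link FSUtils#getFilePermissions(FileSystem, Configuration, String)}
   * honors 'hbase.data.umask' when 'hbase.data.umask.enable' is set, and that
   * files created via {@link FSUtils#create} carry the resulting permission.
   */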
  @Test
  public void testPermMask() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);

    // 'hbase.data.umask.enable' is false, so we get the default fs permission.
    FsPermission defaultFsPerm = FSUtils.getFilePermissions(fs, conf,
        HConstants.DATA_FILE_UMASK_KEY);
    assertEquals(FSUtils.getFileDefault(), defaultFsPerm);

    conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
    // First check that we don't crash if no umask is set.
    FsPermission defaultStartPerm = FSUtils.getFilePermissions(fs, conf,
        HConstants.DATA_FILE_UMASK_KEY);
    // The default 'hbase.data.umask' is 000, and that umask is applied when
    // 'hbase.data.umask.enable' is true. So we do not get the real fs default
    // here; we get the starting point FULL_RWX_PERMISSIONS instead.
    assertEquals(new FsPermission(FSUtils.FULL_RWX_PERMISSIONS), defaultStartPerm);

    conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077");
    // Now check that we get the right perms.
    FsPermission filePerm = FSUtils.getFilePermissions(fs, conf,
        HConstants.DATA_FILE_UMASK_KEY);
    assertEquals(new FsPermission("700"), filePerm);

    // Then check that the file is created with the correct permission.
    Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
    try {
      FSDataOutputStream out = FSUtils.create(fs, p, filePerm, null);
      out.close();
      FileStatus stat = fs.getFileStatus(p);
      assertEquals(new FsPermission("700"), stat.getPermission());
    } finally {
      // Cleanup.
      fs.delete(p, true);
    }
  }

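  /**
   * Test that {@link FSUtils#delete(FileSystem, Path, boolean)} removes a plain
   * file whether or not the recursive flag is set, and that
   * {@link FSUtils#isExists(FileSystem, Path)} tracks the deletion.
   */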
  @Test
  public void testDeleteAndExists() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();
    conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
    FileSystem fs = FileSystem.get(conf);
    FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
    String file = UUID.randomUUID().toString();
    Path p = new Path(htu.getDataTestDir(), "temptarget" + File.separator + file);
    Path p1 = new Path(htu.getDataTestDir(), "temppath" + File.separator + file);
    try {
      FSDataOutputStream out = FSUtils.create(fs, p, perms, null);
      out.close();
      assertTrue("The created file should be present", FSUtils.isExists(fs, p));
      // Delete the file with recursive set to false. Only the file is deleted.
      FSUtils.delete(fs, p, false);
      // Create another file.
      FSDataOutputStream out1 = FSUtils.create(fs, p1, perms, null);
      out1.close();
      // Delete the file with recursive set to true. Since the path is a plain
      // file, again only the file is deleted.
      FSUtils.delete(fs, p1, true);
      assertFalse("The deleted file should not be present", FSUtils.isExists(fs, p1));
    } finally {
      // Cleanup.
      FSUtils.delete(fs, p, true);
      FSUtils.delete(fs, p1, true);
    }
  }

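  /**
   * Test that {@link FSUtils#renameAndSetModifyTime(FileSystem, Path, Path)}
   * moves the file and stamps the destination with the time of the injected
   * {@link ManualEnvironmentEdge}.
   */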
  @Test
  public void testRenameAndSetModifyTime() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();

    MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
    assertTrue(FSUtils.isHDFS(conf));

    FileSystem fs = FileSystem.get(conf);
    Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");

    String file = UUID.randomUUID().toString();
    Path p = new Path(testDir, file);

    FSDataOutputStream out = fs.create(p);
    out.close();
    assertTrue("The created file should be present", FSUtils.isExists(fs, p));

    long expect = System.currentTimeMillis() + 1000;
    assertNotEquals(expect, fs.getFileStatus(p).getModificationTime());

    ManualEnvironmentEdge mockEnv = new ManualEnvironmentEdge();
    mockEnv.setValue(expect);
    EnvironmentEdgeManager.injectEdge(mockEnv);
    try {
      String dstFile = UUID.randomUUID().toString();
      Path dst = new Path(testDir, dstFile);

      assertTrue(FSUtils.renameAndSetModifyTime(fs, p, dst));
      assertFalse("The moved file should not be present", FSUtils.isExists(fs, p));
      assertTrue("The dst file should be present", FSUtils.isExists(fs, dst));

      assertEquals(expect, fs.getFileStatus(dst).getModificationTime());
    } finally {
      // Restore the real clock so the injected edge does not leak into other tests.
      EnvironmentEdgeManager.reset();
      cluster.shutdown();
    }
  }
}