
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import java.util.UUID;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

/**
 * Test that an HBase cluster can run on top of an existing MiniDFSCluster
 */
@Category(MediumTests.class)
public class TestHBaseOnOtherDfsCluster {

  @Test
  public void testOverlayOnOtherCluster() throws Exception {
    // start just an HDFS mini-cluster
    HBaseTestingUtility util1 = new HBaseTestingUtility();
    MiniDFSCluster dfs = util1.startMiniDFSCluster(1);

    // run HBase on top of that existing HDFS
    HBaseTestingUtility util2 = new HBaseTestingUtility();
    // point util2 at the already-running DFS cluster rather than starting a new one
    util2.setDFSCluster(dfs, false);
    util2.startMiniCluster();

    // ensure that both utilities are pointed at the same filesystem
    FileSystem fs = dfs.getFileSystem();
    FileSystem targetFs = util2.getDFSCluster().getFileSystem();
    assertFsSameUri(fs, targetFs);

    fs = FileSystem.get(util1.getConfiguration());
    targetFs = FileSystem.get(util2.getConfiguration());
    assertFsSameUri(fs, targetFs);

    // a file created through one filesystem handle should be visible through the other
    Path randomFile = new Path("/" + UUID.randomUUID());
    assertTrue(targetFs.createNewFile(randomFile));
    assertTrue(fs.exists(randomFile));

    // do a simple create/write to ensure the cluster works as expected
    byte[] family = Bytes.toBytes("testfamily");
    byte[] tablename = Bytes.toBytes("testtable");
    HTable table = util2.createTable(tablename, family);
    Put p = new Put(new byte[] { 1, 2, 3 });
    p.add(family, null, new byte[] { 1 });
    table.put(p);
    table.flushCommits();

    // shut down both clusters and make sure they stop cleanly
    util2.shutdownMiniCluster();
    util1.shutdownMiniDFSCluster();
  }

  // two FileSystem handles point at the same cluster if they share the same URI
  private void assertFsSameUri(FileSystem sourceFs, FileSystem targetFs) {
    Path source = new Path(sourceFs.getUri());
    Path target = new Path(targetFs.getUri());
    assertEquals(source, target);
  }
}