/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.mapreduce;

import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.IntegrationTestBase;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Before;
import org.junit.experimental.categories.Category;

/**
 * An integration test for {@link TableSnapshotInputFormat}, which enables
 * reading directly from snapshot files without going through the HBase servers.
 *
 * This test creates a table and loads it with rows ranging from 'aaa' to 'zzz',
 * and for each row, sets the columns f1:(null) and f2:(null) to be the same as
 * the row value.
 * <pre>
 * aaa, f1: => aaa
 * aaa, f2: => aaa
 * aab, f1: => aab
 * ....
 * zzz, f2: => zzz
 * </pre>
 *
 * Then the test creates a snapshot from this table, and overrides the values in the original
 * table with values 'after_snapshot_value'. The test then runs a mapreduce job over the snapshot
 * with a scan start row 'bbb' and stop row 'yyy'. The data is saved in a single reduce output
 * file, and inspected later to verify that the MR job has seen all the values from the snapshot.
 *
 * <p> These parameters can be used to configure the job:
 * <br>"IntegrationTestTableSnapshotInputFormat.table" =&gt; the name of the table
 * <br>"IntegrationTestTableSnapshotInputFormat.snapshot" =&gt; the name of the snapshot
 * <br>"IntegrationTestTableSnapshotInputFormat.numRegions" =&gt; number of regions in the table
 * to be created (default: 32).
 * <br>"IntegrationTestTableSnapshotInputFormat.tableDir" =&gt; temporary directory to restore the
 * snapshot files
 *
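 * <p>A minimal example invocation, passing the parameters above as system properties
 * through ToolRunner (the table name "mytable" and region count 16 are illustrative
 * values, not requirements):
 * <pre>
 * hbase org.apache.hadoop.hbase.mapreduce.IntegrationTestTableSnapshotInputFormat \
 *   -DIntegrationTestTableSnapshotInputFormat.table=mytable \
 *   -DIntegrationTestTableSnapshotInputFormat.numRegions=16
 * </pre>
 *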
 */
@Category(IntegrationTests.class)
// Not runnable as a unit test. See TestTableSnapshotInputFormat
public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase {

  private static final Log LOG = LogFactory.getLog(IntegrationTestTableSnapshotInputFormat.class);

  private static final String TABLE_NAME_KEY = "IntegrationTestTableSnapshotInputFormat.table";
  private static final String DEFAULT_TABLE_NAME = "IntegrationTestTableSnapshotInputFormat";

  private static final String SNAPSHOT_NAME_KEY =
      "IntegrationTestTableSnapshotInputFormat.snapshot";
  private static final String NUM_REGIONS_KEY =
      "IntegrationTestTableSnapshotInputFormat.numRegions";

  private static final String MR_IMPLEMENTATION_KEY =
      "IntegrationTestTableSnapshotInputFormat.API";
  private static final String MAPRED_IMPLEMENTATION = "mapred";
  private static final String MAPREDUCE_IMPLEMENTATION = "mapreduce";

  private static final int DEFAULT_NUM_REGIONS = 32;
  private static final String TABLE_DIR_KEY = "IntegrationTestTableSnapshotInputFormat.tableDir";

  private static final byte[] START_ROW = Bytes.toBytes("bbb");
  private static final byte[] END_ROW = Bytes.toBytes("yyy");

  // mapred API missing feature parity with mapreduce. See comments in
  // mapred.TestTableSnapshotInputFormat
  private static final byte[] MAPRED_START_ROW = Bytes.toBytes("aaa");
  private static final byte[] MAPRED_END_ROW = Bytes.toBytes("zz{"); // 'z' + 1 => '{'
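  // The exclusive stop row "zz{" sorts just after "zzz", so the full aaa..zzz range is scanned.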

  private IntegrationTestingUtility util;

  @Override
  public void setConf(Configuration conf) {
    super.setConf(conf);
    util = getTestingUtil(conf);
  }

  @Override
  @Before
  public void setUp() throws Exception {
    super.setUp();
    util = getTestingUtil(getConf());
    util.initializeCluster(1);
    this.setConf(util.getConfiguration());
  }

  @Override
  @After
  public void cleanUp() throws Exception {
    util.restoreCluster();
  }

  @Override
  public void setUpCluster() throws Exception {
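    // Intentionally empty: setUp() above initializes the cluster through
    // IntegrationTestingUtility, so no extra cluster setup is needed here.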
  }

  @Override
  public int runTestFromCommandLine() throws Exception {
    Configuration conf = getConf();
    TableName tableName = TableName.valueOf(conf.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME));
    String snapshotName = conf.get(SNAPSHOT_NAME_KEY, tableName.getQualifierAsString()
      + "_snapshot_" + System.currentTimeMillis());
    int numRegions = conf.getInt(NUM_REGIONS_KEY, DEFAULT_NUM_REGIONS);
    String tableDirStr = conf.get(TABLE_DIR_KEY);
    Path tableDir;
    if (tableDirStr == null) {
      tableDir = util.getDataTestDirOnTestFS(tableName.getQualifierAsString());
    } else {
      tableDir = new Path(tableDirStr);
    }

    final String mr = conf.get(MR_IMPLEMENTATION_KEY, MAPREDUCE_IMPLEMENTATION);
    if (mr.equalsIgnoreCase(MAPREDUCE_IMPLEMENTATION)) {
      /*
       * We create the table using HBaseAdmin#createTable(), which creates the table with
       * the desired number of regions. We pass bbb as startKey and yyy as endKey, so if
       * desiredNumRegions is > 2, we create the boundary regions (empty, bbb) and
       * (yyy, empty), plus numRegions - 2 regions between bbb and yyy. The test uses a
       * Scan with startRow bbb and stopRow yyy, so we expect the first and last regions
       * to be filtered out by the input format, and we expect numRegions - 2 splits
       * between bbb and yyy.
       */
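      // Worked example of the arithmetic above, assuming the default numRegions = 32
      // (chosen here purely for illustration): createTable yields the two boundary
      // regions plus 30 regions covering [bbb, yyy); the scan excludes the boundary
      // regions, so the input format should produce 32 - 2 = 30 splits.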
      LOG.debug("Running job with mapreduce API.");
      int expectedNumSplits = numRegions > 2 ? numRegions - 2 : numRegions;

      org.apache.hadoop.hbase.mapreduce.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
        tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions,
        expectedNumSplits, false);
    } else if (mr.equalsIgnoreCase(MAPRED_IMPLEMENTATION)) {
      /*
       * Similar considerations to the above apply. The difference is that the mapred API
       * does not support specifying start/end rows (or a Scan object at all), so the
       * first and last regions are not omitted. See comments in
       * mapred.TestTableSnapshotInputFormat for details of how that test works around
       * the problem. This feature should be added in follow-on work.
       */
      LOG.debug("Running job with mapred API.");
      int expectedNumSplits = numRegions;

      org.apache.hadoop.hbase.mapred.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
        tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, numRegions,
        expectedNumSplits, false);
    } else {
      throw new IllegalArgumentException("Unrecognized mapreduce implementation: " + mr + ".");
    }

    return 0;
  }

  @Override // Chaos Monkey is not intended to be run with this test
  public TableName getTablename() {
    return null;
  }

  @Override // Chaos Monkey is not intended to be run with this test
  protected Set<String> getColumnFamilies() { return null; }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    int ret = ToolRunner.run(conf, new IntegrationTestTableSnapshotInputFormat(), args);
    System.exit(ret);
  }

}