/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import static java.lang.String.format;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.IntegrationTests;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

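/**
 * Validate ImportTsv + LoadIncrementalHFiles on a distributed cluster.
 */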
@Category(IntegrationTests.class)
public class IntegrationTestImportTsv implements Configurable, Tool {

  private static final String NAME = IntegrationTestImportTsv.class.getSimpleName();
  protected static final Log LOG = LogFactory.getLog(IntegrationTestImportTsv.class);

  protected static final String simple_tsv =
      "row1\t1\tc1\tc2\n" +
      "row2\t1\tc1\tc2\n" +
      "row3\t1\tc1\tc2\n" +
      "row4\t1\tc1\tc2\n" +
      "row5\t1\tc1\tc2\n" +
      "row6\t1\tc1\tc2\n" +
      "row7\t1\tc1\tc2\n" +
      "row8\t1\tc1\tc2\n" +
      "row9\t1\tc1\tc2\n" +
      "row10\t1\tc1\tc2\n";

  protected static final Set<KeyValue> simple_expected =
      new TreeSet<KeyValue>(KeyValue.COMPARATOR) {
        private static final long serialVersionUID = 1L;
        {
          byte[] family = Bytes.toBytes("d");
          for (String line : simple_tsv.split("\n")) {
            String[] row = line.split("\t");
            byte[] key = Bytes.toBytes(row[0]);
            long ts = Long.parseLong(row[1]);
            byte[][] fields = { Bytes.toBytes(row[2]), Bytes.toBytes(row[3]) };
            add(new KeyValue(key, family, fields[0], ts, Type.Put, fields[0]));
            add(new KeyValue(key, family, fields[1], ts, Type.Put, fields[1]));
          }
        }
      };

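  // shared between the JUnit lifecycle methods and the Tool entry point in main()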
  protected static IntegrationTestingUtility util = null;

  public Configuration getConf() {
    return util.getConfiguration();
  }

  public void setConf(Configuration conf) {
    throw new IllegalArgumentException("setConf not supported");
  }

  @BeforeClass
  public static void provisionCluster() throws Exception {
    if (null == util) {
      util = new IntegrationTestingUtility();
    }
    util.initializeCluster(1);
    if (!util.isDistributedCluster()) {
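      // a MapReduce cluster is also needed when running against a minicluster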
      util.startMiniMapReduceCluster();
    }
  }

  @AfterClass
  public static void releaseCluster() throws Exception {
    util.restoreCluster();
    if (!util.isDistributedCluster()) {
      util.shutdownMiniMapReduceCluster();
    }
    util = null;
  }

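  /**
   * Bulk-load the generated HFiles and verify the table contents match
   * <code>simple_expected</code>.
   */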
  protected void doLoadIncrementalHFiles(Path hfiles, String tableName)
      throws Exception {

    String[] args = { hfiles.toString(), tableName };
    LOG.info(format("Running LoadIncrementalHFiles with args: %s", Arrays.asList(args)));
    assertEquals("Loading HFiles failed.",
      0, ToolRunner.run(new LoadIncrementalHFiles(new Configuration(getConf())), args));

    HTable table = null;
    Scan scan = new Scan() {{
      setCacheBlocks(false);
      setCaching(1000);
    }};
    try {
      table = new HTable(getConf(), tableName);
      Iterator<Result> resultsIt = table.getScanner(scan).iterator();
      Iterator<KeyValue> expectedIt = simple_expected.iterator();
      while (resultsIt.hasNext() && expectedIt.hasNext()) {
        Result r = resultsIt.next();
        for (Cell actual : r.rawCells()) {
          assertTrue(
            "Ran out of expected values prematurely!",
            expectedIt.hasNext());
          KeyValue expected = expectedIt.next();
          assertTrue(
            format("Scan produced surprising result. expected: <%s>, actual: %s",
              expected, actual),
            KeyValue.COMPARATOR.compare(expected, actual) == 0);
        }
      }
      assertFalse("Did not consume all expected values.", expectedIt.hasNext());
      assertFalse("Did not consume all scan results.", resultsIt.hasNext());
    } finally {
      if (null != table) table.close();
    }
  }

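  /**
   * Confirm the absence of the {@link TotalOrderPartitioner} partitions file.
   */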
  protected static void validateDeletedPartitionsFile(Configuration conf) throws IOException {
    if (!conf.getBoolean(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER, false))
      return;

    FileSystem fs = FileSystem.get(conf);
    Path partitionsFile = new Path(TotalOrderPartitioner.getPartitionFile(conf));
    assertFalse("Failed to clean up partitions file.", fs.exists(partitionsFile));
  }

  @Test
  public void testGenerateAndLoad() throws Exception {
    LOG.info("Running test testGenerateAndLoad.");
    String table = NAME + "-" + UUID.randomUUID();
    String cf = "d";
    Path hfiles = new Path(util.getDataTestDirOnTestFS(table), "hfiles");

    String[] args = {
      format("-D%s=%s", ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles),
      format("-D%s=HBASE_ROW_KEY,HBASE_TS_KEY,%s:c1,%s:c2",
          ImportTsv.COLUMNS_CONF_KEY, cf, cf),
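      // do not delete the HFiles after the load; doLoadIncrementalHFiles needs them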
      format("-D%s=false", TestImportTsv.DELETE_AFTER_LOAD_CONF),
      table
    };

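    // generate the HFiles, then bulk-load and verify them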
    util.createTable(table, cf);
    Tool t = TestImportTsv.doMROnTableTest(util, cf, simple_tsv, args);
    doLoadIncrementalHFiles(hfiles, table);

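    // validate post-conditions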
    validateDeletedPartitionsFile(t.getConf());

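    // clean up after ourselves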
    util.deleteTable(table);
    util.cleanupDataTestDirOnTestFS(table);
    LOG.info("testGenerateAndLoad completed successfully.");
  }

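  /**
   * An {@link OutputCommitter} that launches an ImportTsv child job from its
   * {@link #commitJob(JobContext)} method.
   */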
  private static class JobLaunchingOutputCommitter extends FileOutputCommitter {

    public JobLaunchingOutputCommitter(Path outputPath, TaskAttemptContext context)
        throws IOException {
      super(outputPath, context);
    }

    @Override
    public void commitJob(JobContext context) throws IOException {
      super.commitJob(context);

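      // carry the parent job's distributed-cache settings over to the child job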
      Configuration conf = HBaseConfiguration.create(context.getConfiguration());
      conf.set("mapred.job.classpath.archives",
        context.getConfiguration().get("mapred.job.classpath.archives", ""));
      conf.set("mapreduce.job.cache.archives.visibilities",
        context.getConfiguration().get("mapreduce.job.cache.archives.visibilities", ""));

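      // the test's IntegrationTestingUtility instance is not available in the
      // JVM running this committer, so create our own from the inherited conf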
      IntegrationTestingUtility util =
          new IntegrationTestingUtility(conf);

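      // launch the child ImportTsv job against a throw-away table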
      final String table = format("%s-%s-child", NAME, context.getJobID());
      final String cf = "FAM";
      String fileLocation = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
      conf.set(ImportTsv.CREDENTIALS_LOCATION, fileLocation);
      String[] args = {
        "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B",
        "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b",
        table
      };

      try {
        util.createTable(table, cf);
        LOG.info("testRunFromOutputCommitter: launching child job.");
        TestImportTsv.doMROnTableTest(util, cf, null, args, 1);
      } catch (Exception e) {
        throw new IOException("Underlying MapReduce job failed. Aborting commit.", e);
      } finally {
        if (util.getHBaseAdmin().tableExists(table)) {
          util.deleteTable(table);
        }
      }
    }
  }

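  /**
   * An {@link OutputFormat} whose {@link OutputCommitter} is a
   * {@link JobLaunchingOutputCommitter}; its {@link RecordWriter} discards
   * all output.
   */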
  public static class JobLaunchingOutputFormat extends FileOutputFormat<LongWritable, Text> {

    private OutputCommitter committer = null;

    @Override
    public RecordWriter<LongWritable, Text> getRecordWriter(TaskAttemptContext job)
        throws IOException, InterruptedException {
      return new RecordWriter<LongWritable, Text>() {
        @Override
        public void write(LongWritable key, Text value) throws IOException,
            InterruptedException {
          // discard output; this format exists only for its committer
        }

        @Override
        public void close(TaskAttemptContext context) throws IOException,
            InterruptedException {
          // nothing to clean up
        }
      };
    }

    @Override
    public synchronized OutputCommitter getOutputCommitter(TaskAttemptContext context)
        throws IOException {
      if (committer == null) {
        Path output = getOutputPath(context);
        LOG.debug("Using JobLaunchingOutputCommitter.");
        committer = new JobLaunchingOutputCommitter(output, context);
      }
      return committer;
    }
  }

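  /**
   * Add the jars the cluster-side tasks need in order to run the test code.
   */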
  public static void addTestDependencyJars(Configuration conf) throws IOException {
    TableMapReduceUtil.addDependencyJars(conf,
      org.apache.hadoop.hbase.BaseConfigurable.class, // hbase-server
      HBaseTestingUtility.class,                      // hbase-server-tests
      HBaseCommonTestingUtility.class,                // hbase-common-tests
      com.google.common.collect.ListMultimap.class,   // Guava
      org.cloudera.htrace.Trace.class);               // HTrace
  }

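  /**
   * Run a trivial parent MapReduce job whose OutputCommitter launches the
   * ImportTsv child job, confirming that credentials and classpath settings
   * propagate to jobs submitted from within another job.
   */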
  @Test
  public void testRunFromOutputCommitter() throws Exception {
    LOG.info("Running test testRunFromOutputCommitter.");

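    // write input for the parent job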
    FileSystem fs = FileSystem.get(getConf());
    Path inputPath = new Path(util.getDataTestDirOnTestFS("parent"), "input.txt");
    Path outputPath = new Path(util.getDataTestDirOnTestFS("parent"), "output");
    FSDataOutputStream fout = null;
    try {
      fout = fs.create(inputPath, true);
      fout.write(Bytes.toBytes("testRunFromOutputCommitter\n"));
      LOG.debug(format("Wrote test data to file: %s", inputPath));
    } finally {
      if (fout != null) {
        fout.close();
      }
    }

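    // build a parent job carrying the HBase dependency jars and the delegation
    // token its OutputCommitter needs to submit the child job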
    Job job = new Job(getConf(), NAME + ".testRunFromOutputCommitter - parent");
    job.setJarByClass(IntegrationTestImportTsv.class);
    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(JobLaunchingOutputFormat.class);
    TextInputFormat.addInputPath(job, inputPath);
    JobLaunchingOutputFormat.setOutputPath(job, outputPath);
    TableMapReduceUtil.addDependencyJars(job);
    addTestDependencyJars(job.getConfiguration());
    TableMapReduceUtil.initCredentials(job);
    JobClient jc = new JobClient(new JobConf(job.getConfiguration()));
    job.getCredentials().addToken(new Text("my_mr_token"),
      jc.getDelegationToken(new Text("renewer")));

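    // the child job launched from commitJob is the real test; the parent job
    // fails if the child job fails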
    LOG.info("testRunFromOutputCommitter: launching parent job.");
    assertTrue(job.waitForCompletion(true));
    LOG.info("testRunFromOutputCommitter completed successfully.");
  }

  public int run(String[] args) throws Exception {
    if (args.length != 0) {
      System.err.println(format("%s [genericOptions]", NAME));
      System.err.println("  Runs ImportTsv integration tests against a distributed cluster.");
      System.err.println();
      GenericOptionsParser.printGenericCommandUsage(System.err);
      return 1;
    }

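    // invoke the test methods directly; JUnit does not drive the Tool interface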
    provisionCluster();
    testGenerateAndLoad();
    testRunFromOutputCommitter();
    releaseCluster();

    return 0;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    util = new IntegrationTestingUtility(conf);

    args = new GenericOptionsParser(conf, args).getRemainingArgs();
    int status = new IntegrationTestImportTsv().run(args);
    System.exit(status);
  }
}