/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import static java.lang.String.format;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.IntegrationTests;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Validate ImportTsv + LoadIncrementalHFiles on a distributed cluster.
 */
@Category(IntegrationTests.class)
public class IntegrationTestImportTsv implements Configurable, Tool {

  private static final String NAME = IntegrationTestImportTsv.class.getSimpleName();
  protected static final Log LOG = LogFactory.getLog(IntegrationTestImportTsv.class);

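  // Input for the happy-path test. Each line is <row key>\t<timestamp>\t<c1 value>\t<c2 value>,
  // matching the HBASE_ROW_KEY,HBASE_TS_KEY,d:c1,d:c2 column mapping used in testGenerateAndLoad.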
  protected static final String simple_tsv =
      "row1\t1\tc1\tc2\n" +
      "row2\t1\tc1\tc2\n" +
      "row3\t1\tc1\tc2\n" +
      "row4\t1\tc1\tc2\n" +
      "row5\t1\tc1\tc2\n" +
      "row6\t1\tc1\tc2\n" +
      "row7\t1\tc1\tc2\n" +
      "row8\t1\tc1\tc2\n" +
      "row9\t1\tc1\tc2\n" +
      "row10\t1\tc1\tc2\n";

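  // Cells we expect to read back after the bulk load: two per input line (one per column),
  // kept in KeyValue.COMPARATOR order so scan results can be compared against them in order.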
  protected static final Set<KeyValue> simple_expected =
      new TreeSet<KeyValue>(KeyValue.COMPARATOR) {
    private static final long serialVersionUID = 1L;
    {
      byte[] family = Bytes.toBytes("d");
      for (String line : simple_tsv.split("\n")) {
        String[] row = line.split("\t");
        byte[] key = Bytes.toBytes(row[0]);
        long ts = Long.parseLong(row[1]);
        byte[][] fields = { Bytes.toBytes(row[2]), Bytes.toBytes(row[3]) };
        add(new KeyValue(key, family, fields[0], ts, Type.Put, fields[0]));
        add(new KeyValue(key, family, fields[1], ts, Type.Put, fields[1]));
      }
    }
  };

  // This instance is initialized by provisionCluster() when the test runs under
  // JUnit/Maven, or by main() when run from the CLI.
  protected static IntegrationTestingUtility util = null;

  public Configuration getConf() {
    return util.getConfiguration();
  }

  public void setConf(Configuration conf) {
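    // conf is owned by the shared IntegrationTestingUtility instance; see main(),
    // which skips ToolRunner precisely to avoid this call.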
    throw new IllegalArgumentException("setConf not supported");
  }

  @BeforeClass
  public static void provisionCluster() throws Exception {
    if (null == util) {
      util = new IntegrationTestingUtility();
    }
    util.initializeCluster(1);
    if (!util.isDistributedCluster()) {
      // also need MR when running without a real cluster
      util.startMiniMapReduceCluster();
    }
  }

  @AfterClass
  public static void releaseCluster() throws Exception {
    util.restoreCluster();
    if (!util.isDistributedCluster()) {
      util.shutdownMiniMapReduceCluster();
    }
    util = null;
  }

  /**
   * Bulk-load the HFiles under <code>hfiles</code> into <code>tableName</code> and
   * verify that the table contents match <code>simple_expected</code>.
   */
  protected void doLoadIncrementalHFiles(Path hfiles, String tableName)
      throws Exception {

    String[] args = { hfiles.toString(), tableName };
    LOG.info(format("Running LoadIncrementalHFiles with args: %s", Arrays.asList(args)));
    assertEquals("Loading HFiles failed.",
      0, ToolRunner.run(new LoadIncrementalHFiles(new Configuration(getConf())), args));

    HTable table = null;
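    // full-table scan: skip the block cache and fetch rows in large batches,
    // since each result is read exactly once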
    Scan scan = new Scan() {{
      setCacheBlocks(false);
      setCaching(1000);
    }};
    try {
      table = new HTable(getConf(), tableName);
      Iterator<Result> resultsIt = table.getScanner(scan).iterator();
      Iterator<KeyValue> expectedIt = simple_expected.iterator();
      while (resultsIt.hasNext() && expectedIt.hasNext()) {
        Result r = resultsIt.next();
        for (Cell actual : r.rawCells()) {
          assertTrue(
            "Ran out of expected values prematurely!",
            expectedIt.hasNext());
          KeyValue expected = expectedIt.next();
          assertTrue(
            format("Scan produced surprising result. expected: <%s>, actual: <%s>",
              expected, actual),
            KeyValue.COMPARATOR.compare(expected, actual) == 0);
        }
      }
      assertFalse("Did not consume all expected values.", expectedIt.hasNext());
      assertFalse("Did not consume all scan results.", resultsIt.hasNext());
    } finally {
      if (null != table) table.close();
    }
  }

  /**
   * Confirm the absence of the {@link TotalOrderPartitioner} partitions file.
   */
  protected static void validateDeletedPartitionsFile(Configuration conf) throws IOException {
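    // only meaningful when running against a distributed cluster; skip otherwise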
    if (!conf.getBoolean(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER, false)) {
      return;
    }

    FileSystem fs = FileSystem.get(conf);
    Path partitionsFile = new Path(TotalOrderPartitioner.getPartitionFile(conf));
    assertFalse("Failed to clean up partitions file.", fs.exists(partitionsFile));
  }

  @Test
  public void testGenerateAndLoad() throws Exception {
    LOG.info("Running test testGenerateAndLoad.");
    String table = NAME + "-" + UUID.randomUUID();
    String cf = "d";
    Path hfiles = new Path(util.getDataTestDirOnTestFS(table), "hfiles");

    String[] args = {
        format("-D%s=%s", ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles),
        format("-D%s=HBASE_ROW_KEY,HBASE_TS_KEY,%s:c1,%s:c2",
          ImportTsv.COLUMNS_CONF_KEY, cf, cf),
        // configure the test harness to NOT delete the HFiles after they're
        // generated. We need those for doLoadIncrementalHFiles
        format("-D%s=false", TestImportTsv.DELETE_AFTER_LOAD_CONF),
        table
    };

    // run the job, complete the load.
    util.createTable(table, cf);
    Tool t = TestImportTsv.doMROnTableTest(util, cf, simple_tsv, args);
    doLoadIncrementalHFiles(hfiles, table);

    // validate post-conditions
    validateDeletedPartitionsFile(t.getConf());

    // clean up after ourselves.
    util.deleteTable(table);
    util.cleanupDataTestDirOnTestFS(table);
    LOG.info("testGenerateAndLoad completed successfully.");
  }

  //
  // helper classes used in the following test.
  //

  /**
   * A {@link FileOutputCommitter} that launches an ImportTsv job through
   * its {@link #commitJob(JobContext)} method.
   */
  private static class JobLaunchingOutputCommitter extends FileOutputCommitter {

    public JobLaunchingOutputCommitter(Path outputPath, TaskAttemptContext context)
        throws IOException {
      super(outputPath, context);
    }

    @Override
    public void commitJob(JobContext context) throws IOException {
      super.commitJob(context);

      // inherit jar dependencies added to distributed cache loaded by parent job
      Configuration conf = HBaseConfiguration.create(context.getConfiguration());
      conf.set("mapred.job.classpath.archives",
        context.getConfiguration().get("mapred.job.classpath.archives", ""));
      conf.set("mapreduce.job.cache.archives.visibilities",
        context.getConfiguration().get("mapreduce.job.cache.archives.visibilities", ""));

      // can't use the enclosing test's instance of util because it hasn't been
      // instantiated on the JVM running this method. Create our own.
      IntegrationTestingUtility util =
          new IntegrationTestingUtility(conf);

      // this is why we're here: launch a child job. The rest of this should
      // look a lot like TestImportTsv#testMROnTable.
      final String table = format("%s-%s-child", NAME, context.getJobID());
      final String cf = "FAM";

      String[] args = {
          "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B",
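          // use the ESC control character (\u001b) as the field separator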
          "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b",
          table
      };

      try {
        util.createTable(table, cf);
        LOG.info("testRunFromOutputCommitter: launching child job.");
        TestImportTsv.doMROnTableTest(util, cf, null, args, 1);
      } catch (Exception e) {
        throw new IOException("Underlying MapReduce job failed. Aborting commit.", e);
      } finally {
        util.deleteTable(table);
      }
    }
  }

  /**
   * An {@link OutputFormat} that exposes the <code>JobLaunchingOutputCommitter</code>.
   */
  public static class JobLaunchingOutputFormat extends FileOutputFormat<LongWritable, Text> {

    private OutputCommitter committer = null;

    @Override
    public RecordWriter<LongWritable, Text> getRecordWriter(TaskAttemptContext job)
        throws IOException, InterruptedException {
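      // the parent job's own output doesn't matter; the interesting work
      // happens in the committer once the job completes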
      return new RecordWriter<LongWritable, Text>() {
        @Override
        public void write(LongWritable key, Text value) throws IOException,
            InterruptedException {
          /* do nothing */
        }

        @Override
        public void close(TaskAttemptContext context) throws IOException,
            InterruptedException {
          /* do nothing */
        }
      };
    }

    @Override
    public synchronized OutputCommitter getOutputCommitter(TaskAttemptContext context)
        throws IOException {
      if (committer == null) {
        Path output = getOutputPath(context);
        LOG.debug("Using JobLaunchingOutputCommitter.");
        committer = new JobLaunchingOutputCommitter(output, context);
      }
      return committer;
    }
  }

  /**
   * Add classes necessary for integration-test jobs.
   */
  public static void addTestDependencyJars(Configuration conf) throws IOException {
    TableMapReduceUtil.addDependencyJars(conf,
      org.apache.hadoop.hbase.BaseConfigurable.class, // hbase-server
      HBaseTestingUtility.class,                      // hbase-server-test
      HBaseCommonTestingUtility.class,                // hbase-common-test
      com.google.common.collect.ListMultimap.class,   // Guava
      org.cloudera.htrace.Trace.class);               // HTrace
  }

  /**
   * {@link TableMapReduceUtil#addDependencyJars(Job)} is used when
   * configuring a mapreduce job to ensure the job's dependencies are shipped
   * to the cluster. Sometimes those dependencies are on the classpath but not
   * packaged as a jar, for instance when this code runs at the end of another
   * mapreduce job. In that case, the dependency jars have already been shipped
   * to the cluster and expanded in the parent job's run folder. This test
   * validates that the child job's classpath is constructed correctly in that
   * scenario.
   */
  @Test
  public void testRunFromOutputCommitter() throws Exception {
    LOG.info("Running test testRunFromOutputCommitter.");

    FileSystem fs = FileSystem.get(getConf());
    Path inputPath = new Path(util.getDataTestDirOnTestFS("parent"), "input.txt");
    Path outputPath = new Path(util.getDataTestDirOnTestFS("parent"), "output");
    FSDataOutputStream fout = null;
    try {
      fout = fs.create(inputPath, true);
      fout.write(Bytes.toBytes("testRunFromOutputCommitter\n"));
      LOG.debug(format("Wrote test data to file: %s", inputPath));
    } finally {
      if (fout != null) {
        fout.close();
      }
    }

    // create a parent job that ships the HBase dependencies. This mirrors
    // the expected calling context.
    Job job = new Job(getConf(), NAME + ".testRunFromOutputCommitter - parent");
    job.setJarByClass(IntegrationTestImportTsv.class);
    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(JobLaunchingOutputFormat.class);
    TextInputFormat.addInputPath(job, inputPath);
    JobLaunchingOutputFormat.setOutputPath(job, outputPath);
    TableMapReduceUtil.addDependencyJars(job);
    addTestDependencyJars(job.getConfiguration());

    // Job launched by the OutputCommitter will fail if dependency jars are
    // not shipped properly.
    LOG.info("testRunFromOutputCommitter: launching parent job.");
    assertTrue(job.waitForCompletion(true));
    LOG.info("testRunFromOutputCommitter completed successfully.");
  }

  public int run(String[] args) throws Exception {
    if (args.length != 0) {
      System.err.println(format("%s [genericOptions]", NAME));
      System.err.println("  Runs ImportTsv integration tests against a distributed cluster.");
      System.err.println();
      GenericOptionsParser.printGenericCommandUsage(System.err);
      return 1;
    }

    // adding more test methods? Don't forget to add them here... or consider
    // doing what IntegrationTestsDriver does.
    provisionCluster();
    testGenerateAndLoad();
    testRunFromOutputCommitter();
    releaseCluster();

    return 0;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    util = new IntegrationTestingUtility(conf);
    // not using ToolRunner to avoid unnecessary call to setConf()
    args = new GenericOptionsParser(conf, args).getRemainingArgs();
    int status = new IntegrationTestImportTsv().run(args);
    System.exit(status);
  }
}