/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.NavigableMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import static org.junit.Assert.fail;
import static org.junit.Assert.assertTrue;

/**
 * Test a Map/Reduce job over HBase tables using MultithreadedTableMapper.
 * The map/reduce process we're testing on our tables is simple: take every
 * row in the table, reverse the value of a particular cell, and write it
 * back to the table.
 */
@Category(LargeTests.class)
public class TestMultithreadedTableMapper {
  private static final Log LOG = LogFactory.getLog(TestMultithreadedTableMapper.class);
  private static final HBaseTestingUtility UTIL =
      new HBaseTestingUtility();
  static final TableName MULTI_REGION_TABLE_NAME = TableName.valueOf("mrtest");
  static final byte[] INPUT_FAMILY = Bytes.toBytes("contents");
  static final byte[] OUTPUT_FAMILY = Bytes.toBytes("text");
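  // Size of the thread pool on which MultithreadedTableMapper runs
  // ProcessContentsMapper instances.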
  static final int NUMBER_OF_THREADS = 10;

  @BeforeClass
  public static void beforeClass() throws Exception {
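    // Bring up an in-process HBase cluster, create a table that spans several
    // regions, load it with rows, and start a mini MapReduce cluster.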
    UTIL.startMiniCluster();
    HTable table = UTIL.createTable(MULTI_REGION_TABLE_NAME,
        new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY });
    UTIL.createMultiRegions(table, INPUT_FAMILY);
    UTIL.loadTable(table, INPUT_FAMILY, false);
    UTIL.startMiniMapReduceCluster();
    UTIL.waitUntilAllRegionsAssigned(MULTI_REGION_TABLE_NAME);
  }

  @AfterClass
  public static void afterClass() throws Exception {
    UTIL.shutdownMiniMapReduceCluster();
    UTIL.shutdownMiniCluster();
  }

  /**
   * Pass the given key and the processed record on to reduce.
   */
  public static class ProcessContentsMapper
      extends TableMapper<ImmutableBytesWritable, Put> {

    /**
     * Pass the key and the reversed value to reduce.
     *
     * @param key the row key
     * @param value the columns fetched for the row
     * @param context the map context records are written to
     * @throws IOException if the row does not hold exactly the expected column
     */
    @Override
    public void map(ImmutableBytesWritable key, Result value,
        Context context)
            throws IOException, InterruptedException {
      if (value.size() != 1) {
        throw new IOException("There should only be one input column");
      }
      Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> cf =
          value.getMap();
      if (!cf.containsKey(INPUT_FAMILY)) {
        throw new IOException("Wrong input columns. Missing: '" +
            Bytes.toString(INPUT_FAMILY) + "'.");
      }
      // Get the original value and reverse it
      String originalValue = Bytes.toString(value.getValue(INPUT_FAMILY, null));
      StringBuilder newValue = new StringBuilder(originalValue);
      newValue.reverse();
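      // e.g. an input value of "abcde" is emitted as "edcba"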
      // Now set the value to be collected
      Put outval = new Put(key.get());
      outval.add(OUTPUT_FAMILY, null, Bytes.toBytes(newValue.toString()));
      context.write(key, outval);
    }
  }

  /**
   * Test MultithreadedTableMapper map/reduce against a multi-region table.
   * @throws IOException
   * @throws ClassNotFoundException
   * @throws InterruptedException
   */
  @Test
  public void testMultithreadedTableMapper()
      throws IOException, InterruptedException, ClassNotFoundException {
    runTestOnTable(new HTable(new Configuration(UTIL.getConfiguration()),
        MULTI_REGION_TABLE_NAME));
  }

  private void runTestOnTable(HTable table)
      throws IOException, InterruptedException, ClassNotFoundException {
    Job job = null;
    try {
      LOG.info("Before map/reduce startup");
      job = new Job(table.getConfiguration(), "process column contents");
      job.setNumReduceTasks(1);
      Scan scan = new Scan();
      scan.addFamily(INPUT_FAMILY);
      TableMapReduceUtil.initTableMapperJob(
          table.getTableName(), scan,
          MultithreadedTableMapper.class, ImmutableBytesWritable.class,
          Put.class, job);
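      // MultithreadedTableMapper is the mapper class the job actually runs;
      // tell it which real mapper to multiplex and how many worker threads
      // to run it on.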
      MultithreadedTableMapper.setMapperClass(job, ProcessContentsMapper.class);
      MultithreadedTableMapper.setNumberOfThreads(job, NUMBER_OF_THREADS);
      TableMapReduceUtil.initTableReducerJob(
          Bytes.toString(table.getTableName()),
          IdentityTableReducer.class, job);
      FileOutputFormat.setOutputPath(job, new Path("test"));
      LOG.info("Started " + table.getName());
      assertTrue(job.waitForCompletion(true));
      LOG.info("After map/reduce completion");
      // verify map-reduce results
      verify(table.getName());
    } finally {
      table.close();
      if (job != null) {
        FileUtil.fullyDelete(
            new File(job.getConfiguration().get("hadoop.tmp.dir")));
      }
    }
  }

  private void verify(TableName tableName) throws IOException {
    Table table = new HTable(new Configuration(UTIL.getConfiguration()), tableName);
    boolean verified = false;
    long pause = UTIL.getConfiguration().getLong("hbase.client.pause", 5 * 1000);
    int numRetries = UTIL.getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
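    // The job's writes may not be visible to a scanner opened immediately
    // afterwards, so retry the verification a few times before giving up.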
    for (int i = 0; i < numRetries; i++) {
      try {
        LOG.info("Verification attempt #" + i);
        verifyAttempt(table);
        verified = true;
        break;
      } catch (NullPointerException e) {
        // If here, a cell was empty.  Presume it's because updates came in
        // after the scanner had been opened.  Wait a while and retry.
        LOG.debug("Verification attempt failed: " + e.getMessage());
      }
      try {
        Thread.sleep(pause);
      } catch (InterruptedException e) {
        // continue
      }
    }
    assertTrue(verified);
    table.close();
  }

  /**
   * Looks at every value of the mapreduce output and verifies that indeed
   * the values have been reversed.
   *
   * @param table Table to scan.
   * @throws IOException
   * @throws NullPointerException if we failed to find a cell value
   */
  private void verifyAttempt(final Table table)
      throws IOException, NullPointerException {
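    // Scan both column families so each row yields the original value and
    // the job's reversed copy side by side.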
    Scan scan = new Scan();
    scan.addFamily(INPUT_FAMILY);
    scan.addFamily(OUTPUT_FAMILY);
    ResultScanner scanner = table.getScanner(scan);
    try {
      Iterator<Result> itr = scanner.iterator();
      assertTrue(itr.hasNext());
      while (itr.hasNext()) {
        Result r = itr.next();
        // Check the result size unconditionally; the original only did so
        // when debug logging was enabled, silently skipping the check otherwise.
        if (r.size() > 2) {
          throw new IOException("Too many results, expected 2 got " +
              r.size());
        }
        byte[] firstValue = null;
        byte[] secondValue = null;
        int count = 0;
        for (Cell kv : r.listCells()) {
          if (count == 0) {
            firstValue = CellUtil.cloneValue(kv);
          } else if (count == 1) {
            secondValue = CellUtil.cloneValue(kv);
          } else if (count == 2) {
            break;
          }
          count++;
        }
        if (firstValue == null) {
          throw new NullPointerException(Bytes.toString(r.getRow()) +
              ": first value is null");
        }
        String first = Bytes.toString(firstValue);
        if (secondValue == null) {
          throw new NullPointerException(Bytes.toString(r.getRow()) +
              ": second value is null");
        }
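        // Cells in a Result sort by family, so the first cell is contents:
        // (the original value) and the second is text: (the job's output);
        // reversing the second should reproduce the first.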
        byte[] secondReversed = new byte[secondValue.length];
        for (int i = 0, j = secondValue.length - 1; j >= 0; j--, i++) {
          secondReversed[i] = secondValue[j];
        }
        String second = Bytes.toString(secondReversed);
        if (!first.equals(second)) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("second value is not the reverse of first. row=" +
                Bytes.toStringBinary(r.getRow()) + ", first value=" + first +
                ", second value=" + second);
          }
          fail("second value is not the reverse of the first for row " +
              Bytes.toStringBinary(r.getRow()));
        }
      }
    } finally {
      scanner.close();
    }
  }

}