/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.StringUtils;

/**
 * Convert HBase tabular data into a format that is consumable by Map/Reduce.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TableInputFormat extends TableInputFormatBase
implements Configurable {

  private static final Log LOG = LogFactory.getLog(TableInputFormat.class);

  /** Job parameter that specifies the input table. */
  public static final String INPUT_TABLE = "hbase.mapreduce.inputtable";
  /** Base-64 encoded scanner. All other SCAN_ confs are ignored if this is specified.
   * See {@link TableMapReduceUtil#convertScanToString(Scan)} for more details.
   */
  public static final String SCAN = "hbase.mapreduce.scan";
  /** Scan start row */
  public static final String SCAN_ROW_START = "hbase.mapreduce.scan.row.start";
  /** Scan stop row */
  public static final String SCAN_ROW_STOP = "hbase.mapreduce.scan.row.stop";
  /** Column Family to Scan */
  public static final String SCAN_COLUMN_FAMILY = "hbase.mapreduce.scan.column.family";
  /** Space delimited list of columns to scan. */
  public static final String SCAN_COLUMNS = "hbase.mapreduce.scan.columns";
  /** The timestamp used to filter columns with a specific timestamp. */
  public static final String SCAN_TIMESTAMP = "hbase.mapreduce.scan.timestamp";
  /** The starting timestamp used to filter columns with a specific range of versions. */
  public static final String SCAN_TIMERANGE_START = "hbase.mapreduce.scan.timerange.start";
  /** The ending timestamp used to filter columns with a specific range of versions. */
  public static final String SCAN_TIMERANGE_END = "hbase.mapreduce.scan.timerange.end";
  /** The maximum number of versions to return. */
  public static final String SCAN_MAXVERSIONS = "hbase.mapreduce.scan.maxversions";
  /** Set to false to disable server-side caching of blocks for this scan. */
  public static final String SCAN_CACHEBLOCKS = "hbase.mapreduce.scan.cacheblocks";
  /** The number of rows for caching that will be passed to scanners. */
  public static final String SCAN_CACHEDROWS = "hbase.mapreduce.scan.cachedrows";

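  // A minimal sketch of how a client job might set these keys directly
  // (table name "mytable" and family "cf" are hypothetical; most jobs go
  // through TableMapReduceUtil instead of setting these by hand):
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   conf.set(TableInputFormat.INPUT_TABLE, "mytable");
  //   conf.set(TableInputFormat.SCAN_COLUMN_FAMILY, "cf");
  //   conf.set(TableInputFormat.SCAN_MAXVERSIONS, "1");
  //   Job job = new Job(conf, "scan-mytable");
  //   job.setInputFormatClass(TableInputFormat.class);
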
  /** The configuration. */
  private Configuration conf = null;

  /**
   * Returns the current configuration.
   *
   * @return The current configuration.
   * @see org.apache.hadoop.conf.Configurable#getConf()
   */
  @Override
  public Configuration getConf() {
    return conf;
  }

  /**
   * Sets the configuration. This is used to set the details for the table to
   * be scanned.
   *
   * @param configuration  The configuration to set.
   * @see org.apache.hadoop.conf.Configurable#setConf(
   *   org.apache.hadoop.conf.Configuration)
   */
  @Override
  public void setConf(Configuration configuration) {
    this.conf = configuration;
    String tableName = conf.get(INPUT_TABLE);
    try {
      setHTable(new HTable(new Configuration(conf), tableName));
    } catch (Exception e) {
      LOG.error(StringUtils.stringifyException(e));
    }

    Scan scan = null;

    if (conf.get(SCAN) != null) {
      try {
        scan = TableMapReduceUtil.convertStringToScan(conf.get(SCAN));
      } catch (IOException e) {
        LOG.error("An error occurred.", e);
      }
    } else {
      try {
        scan = new Scan();

        if (conf.get(SCAN_ROW_START) != null) {
          scan.setStartRow(Bytes.toBytes(conf.get(SCAN_ROW_START)));
        }

        if (conf.get(SCAN_ROW_STOP) != null) {
          scan.setStopRow(Bytes.toBytes(conf.get(SCAN_ROW_STOP)));
        }

        if (conf.get(SCAN_COLUMNS) != null) {
          addColumns(scan, conf.get(SCAN_COLUMNS));
        }

        if (conf.get(SCAN_COLUMN_FAMILY) != null) {
          scan.addFamily(Bytes.toBytes(conf.get(SCAN_COLUMN_FAMILY)));
        }

        if (conf.get(SCAN_TIMESTAMP) != null) {
          scan.setTimeStamp(Long.parseLong(conf.get(SCAN_TIMESTAMP)));
        }

        if (conf.get(SCAN_TIMERANGE_START) != null && conf.get(SCAN_TIMERANGE_END) != null) {
          scan.setTimeRange(
              Long.parseLong(conf.get(SCAN_TIMERANGE_START)),
              Long.parseLong(conf.get(SCAN_TIMERANGE_END)));
        }

        if (conf.get(SCAN_MAXVERSIONS) != null) {
          scan.setMaxVersions(Integer.parseInt(conf.get(SCAN_MAXVERSIONS)));
        }

        if (conf.get(SCAN_CACHEDROWS) != null) {
          scan.setCaching(Integer.parseInt(conf.get(SCAN_CACHEDROWS)));
        }

        // false by default, full table scans generate too much block cache churn
        scan.setCacheBlocks((conf.getBoolean(SCAN_CACHEBLOCKS, false)));
      } catch (Exception e) {
        LOG.error(StringUtils.stringifyException(e));
      }
    }

    setScan(scan);
  }
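
  // For reference, most jobs never populate the SCAN key themselves; a
  // minimal sketch of the usual path, which serializes the Scan into the
  // configuration for this class to pick up (MyMapper and its Text/IntWritable
  // output types are hypothetical):
  //
  //   Scan scan = new Scan();
  //   scan.setCaching(500);
  //   TableMapReduceUtil.initTableMapperJob("mytable", scan, MyMapper.class,
  //       Text.class, IntWritable.class, job);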

  /**
   * Parses a combined family and qualifier and adds either both or just the
   * family in case there is no qualifier. This assumes the older colon
   * divided notation, e.g. "data:contents" or "meta:".
   * <p>
   * Note: It will throw an error when the colon is missing.
   *
   * @param scan The Scan to update.
   * @param familyAndQualifier family and qualifier
   * @throws IllegalArgumentException When the colon is missing.
   */
  private static void addColumn(Scan scan, byte[] familyAndQualifier) {
    byte [][] fq = KeyValue.parseColumn(familyAndQualifier);
    if (fq.length > 1 && fq[1] != null && fq[1].length > 0) {
      scan.addColumn(fq[0], fq[1]);
    } else {
      scan.addFamily(fq[0]);
    }
  }

  /**
   * Adds an array of columns specified using old format, family:qualifier.
   * <p>
   * Overrides previous calls to addFamily for any families in the input.
   *
   * @param scan The Scan to update.
   * @param columns array of columns, formatted as <pre>family:qualifier</pre>
   */
  public static void addColumns(Scan scan, byte [][] columns) {
    for (byte[] column : columns) {
      addColumn(scan, column);
    }
  }
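
  // A minimal sketch of calling the public overload (column names are
  // hypothetical; "meta:" with no qualifier selects the whole family):
  //
  //   Scan scan = new Scan();
  //   TableInputFormat.addColumns(scan, new byte[][] {
  //       Bytes.toBytes("data:contents"), Bytes.toBytes("meta:") });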

  /**
   * Convenience method to parse old-style column definitions, e.g.
   * "data:contents mime:", as typically entered on the command line. The
   * columns must be space delimited and always have a colon (":") to denote
   * family and qualifier.
   *
   * @param scan The Scan to update.
   * @param columns  The columns to parse.
   */
  private static void addColumns(Scan scan, String columns) {
    String[] cols = columns.split(" ");
    for (String col : cols) {
      addColumn(scan, Bytes.toBytes(col));
    }
  }

}