/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;

import com.google.common.annotations.VisibleForTesting;

/**
 * Writes HFiles. Passed Cells must arrive in order.
 * Writes current time as the sequence id for the file. Sets the major compacted
 * attribute on created {@link HFile}s. Calling write(null,null) will forcibly roll
 * all HFiles being written.
 * <p>
 * Using this class as part of a MapReduce job is best done
 * using {@link #configureIncrementalLoad(Job, Table, RegionLocator)}.
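 * <p>
 * A minimal driver sketch (the mapper class, table name, and paths below are
 * illustrative assumptions, not part of this API):
 * <pre>
 * Configuration conf = HBaseConfiguration.create();
 * Job job = Job.getInstance(conf, "hfile-bulk-load");
 * job.setJarByClass(MyMapper.class);            // hypothetical mapper
 * job.setMapperClass(MyMapper.class);
 * job.setMapOutputKeyClass(ImmutableBytesWritable.class);
 * job.setMapOutputValueClass(Put.class);        // or KeyValue / Text
 * FileInputFormat.addInputPath(job, new Path("/input"));
 * FileOutputFormat.setOutputPath(job, new Path("/hfile-output"));
 * try (Connection connection = ConnectionFactory.createConnection(conf)) {
 *   Table table = connection.getTable(TableName.valueOf("mytable"));
 *   RegionLocator regionLocator = connection.getRegionLocator(table.getName());
 *   HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator);
 * }
 * job.waitForCompletion(true);
 * </pre>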
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HFileOutputFormat2
    extends FileOutputFormat<ImmutableBytesWritable, Cell> {
  static Log LOG = LogFactory.getLog(HFileOutputFormat2.class);

  // The following constants are private since these are used by
  // HFileOutputFormat2 to internally transfer data between job setup and
  // reducer run using conf.
  // These should not be changed by the client.
  private static final String COMPRESSION_FAMILIES_CONF_KEY =
      "hbase.hfileoutputformat.families.compression";
  private static final String BLOOM_TYPE_FAMILIES_CONF_KEY =
      "hbase.hfileoutputformat.families.bloomtype";
  private static final String BLOCK_SIZE_FAMILIES_CONF_KEY =
      "hbase.mapreduce.hfileoutputformat.blocksize";
  private static final String DATABLOCK_ENCODING_FAMILIES_CONF_KEY =
      "hbase.mapreduce.hfileoutputformat.families.datablock.encoding";

  // This constant is public since the client can modify this when setting
  // up their conf object and thus refer to this symbol.
  // It is present for backwards compatibility reasons. Use it only to
  // override the auto-detection of datablock encoding.
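  // For example, a minimal sketch (PREFIX is one of the DataBlockEncoding
  // enum values; any valid enum name may be used here):
  //   conf.set(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY, "PREFIX");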
  public static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY =
      "hbase.mapreduce.hfileoutputformat.datablock.encoding";

  @Override
  public RecordWriter<ImmutableBytesWritable, Cell> getRecordWriter(
      final TaskAttemptContext context) throws IOException, InterruptedException {
    return createRecordWriter(context);
  }

  static <V extends Cell> RecordWriter<ImmutableBytesWritable, V>
      createRecordWriter(final TaskAttemptContext context)
          throws IOException {

    // Get the path of the temporary output file
    final Path outputPath = FileOutputFormat.getOutputPath(context);
    final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
    final Configuration conf = context.getConfiguration();
    final FileSystem fs = outputdir.getFileSystem(conf);
    // These configs are from hbase-*.xml
    final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
        HConstants.DEFAULT_MAX_FILE_SIZE);
    // Invented config.  Add to hbase-*.xml if other than default compression.
    final String defaultCompressionStr = conf.get("hfile.compression",
        Compression.Algorithm.NONE.getName());
    final Algorithm defaultCompression = AbstractHFileWriter
        .compressionByName(defaultCompressionStr);
    final boolean compactionExclude = conf.getBoolean(
        "hbase.mapreduce.hfileoutputformat.compaction.exclude", false);

    // create a map from column family to the compression algorithm
    final Map<byte[], Algorithm> compressionMap = createFamilyCompressionMap(conf);
    final Map<byte[], BloomType> bloomTypeMap = createFamilyBloomTypeMap(conf);
    final Map<byte[], Integer> blockSizeMap = createFamilyBlockSizeMap(conf);

    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY);
    final Map<byte[], DataBlockEncoding> datablockEncodingMap
        = createFamilyDataBlockEncodingMap(conf);
    final DataBlockEncoding overriddenEncoding;
    if (dataBlockEncodingStr != null) {
      overriddenEncoding = DataBlockEncoding.valueOf(dataBlockEncodingStr);
    } else {
      overriddenEncoding = null;
    }

    return new RecordWriter<ImmutableBytesWritable, V>() {
      // Map of families to writers and how much has been output on the writer.
      private final Map<byte [], WriterLength> writers =
        new TreeMap<byte [], WriterLength>(Bytes.BYTES_COMPARATOR);
      private byte [] previousRow = HConstants.EMPTY_BYTE_ARRAY;
      private final byte [] now = Bytes.toBytes(System.currentTimeMillis());
      private boolean rollRequested = false;

      @Override
      public void write(ImmutableBytesWritable row, V cell)
          throws IOException {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);

        // null input == user explicitly wants to flush
        if (row == null && kv == null) {
          rollWriters();
          return;
        }

        byte [] rowKey = CellUtil.cloneRow(kv);
        long length = kv.getLength();
        byte [] family = CellUtil.cloneFamily(kv);
        WriterLength wl = this.writers.get(family);

        // If this is a new column family, make sure its output directory exists
        if (wl == null) {
          fs.mkdirs(new Path(outputdir, Bytes.toString(family)));
        }

        // If any of the HFiles for the column families has reached
        // maxsize, we need to roll all the writers
        if (wl != null && wl.written + length >= maxsize) {
          this.rollRequested = true;
        }

        // This can only happen once a row is finished though
        if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
          rollWriters();
        }

        // create a new HFile writer, if necessary
        if (wl == null || wl.writer == null) {
          wl = getNewWriter(family, conf);
        }

        // we now have the proper HFile writer. full steam ahead
        kv.updateLatestStamp(this.now);
        wl.writer.append(kv);
        wl.written += length;

        // Copy the row so we know when a row transitions.
        this.previousRow = rowKey;
      }

      private void rollWriters() throws IOException {
        for (WriterLength wl : this.writers.values()) {
          if (wl.writer != null) {
            LOG.info("Writer=" + wl.writer.getPath() +
                ((wl.written == 0)? "": ", wrote=" + wl.written));
            close(wl.writer);
          }
          wl.writer = null;
          wl.written = 0;
        }
        this.rollRequested = false;
      }

      /* Create a new StoreFile.Writer.
       * @param family
       * @return A WriterLength, containing a new StoreFile.Writer.
       * @throws IOException
       */
      @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BX_UNBOXING_IMMEDIATELY_REBOXED",
          justification="Not important")
      private WriterLength getNewWriter(byte[] family, Configuration conf)
          throws IOException {
        WriterLength wl = new WriterLength();
        Path familydir = new Path(outputdir, Bytes.toString(family));
        Algorithm compression = compressionMap.get(family);
        compression = compression == null ? defaultCompression : compression;
        BloomType bloomType = bloomTypeMap.get(family);
        bloomType = bloomType == null ? BloomType.NONE : bloomType;
        Integer blockSize = blockSizeMap.get(family);
        blockSize = blockSize == null ? HConstants.DEFAULT_BLOCKSIZE : blockSize;
        DataBlockEncoding encoding = overriddenEncoding;
        encoding = encoding == null ? datablockEncodingMap.get(family) : encoding;
        encoding = encoding == null ? DataBlockEncoding.NONE : encoding;
        Configuration tempConf = new Configuration(conf);
        tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
        HFileContextBuilder contextBuilder = new HFileContextBuilder()
            .withCompression(compression)
            .withChecksumType(HStore.getChecksumType(conf))
            .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
            .withBlockSize(blockSize);
        contextBuilder.withDataBlockEncoding(encoding);
        HFileContext hFileContext = contextBuilder.build();

        wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs)
            .withOutputDir(familydir).withBloomType(bloomType)
            .withComparator(KeyValue.COMPARATOR)
            .withFileContext(hFileContext).build();

        this.writers.put(family, wl);
        return wl;
      }

      private void close(final StoreFile.Writer w) throws IOException {
        if (w != null) {
          w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY,
              Bytes.toBytes(System.currentTimeMillis()));
          w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
              Bytes.toBytes(context.getTaskAttemptID().toString()));
          w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY,
              Bytes.toBytes(true));
          w.appendFileInfo(StoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY,
              Bytes.toBytes(compactionExclude));
          w.appendTrackedTimestampsToMetadata();
          w.close();
        }
      }

      @Override
      public void close(TaskAttemptContext c)
      throws IOException, InterruptedException {
        for (WriterLength wl: this.writers.values()) {
          close(wl.writer);
        }
      }
    };
  }

  /*
   * Data structure to hold a Writer and amount of data written on it.
   */
  static class WriterLength {
    long written = 0;
    StoreFile.Writer writer = null;
  }

  /**
   * Return the start keys of all of the regions in this table,
   * as a list of ImmutableBytesWritable.
   */
  private static List<ImmutableBytesWritable> getRegionStartKeys(RegionLocator table)
  throws IOException {
    byte[][] byteKeys = table.getStartKeys();
    ArrayList<ImmutableBytesWritable> ret =
      new ArrayList<ImmutableBytesWritable>(byteKeys.length);
    for (byte[] byteKey : byteKeys) {
      ret.add(new ImmutableBytesWritable(byteKey));
    }
    return ret;
  }

  /**
   * Write out a {@link SequenceFile} that can be read by
   * {@link TotalOrderPartitioner} that contains the split points in startKeys.
   */
  @SuppressWarnings("deprecation")
  private static void writePartitions(Configuration conf, Path partitionsPath,
      List<ImmutableBytesWritable> startKeys) throws IOException {
    LOG.info("Writing partition information to " + partitionsPath);
    if (startKeys.isEmpty()) {
      throw new IllegalArgumentException("No regions passed");
    }

    // We're generating a list of split points, and we don't ever
    // have keys < the first region (which has an empty start key)
    // so we need to remove it. Otherwise we would end up with an
    // empty reducer with index 0.
    TreeSet<ImmutableBytesWritable> sorted =
      new TreeSet<ImmutableBytesWritable>(startKeys);

    ImmutableBytesWritable first = sorted.first();
    if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) {
      throw new IllegalArgumentException(
          "First region of table should have empty start key. Instead has: "
          + Bytes.toStringBinary(first.get()));
    }
    sorted.remove(first);

    // Write the actual file
    FileSystem fs = partitionsPath.getFileSystem(conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(
      fs, conf, partitionsPath, ImmutableBytesWritable.class,
      NullWritable.class);

    try {
      for (ImmutableBytesWritable startKey : sorted) {
        writer.append(startKey, NullWritable.get());
      }
    } finally {
      writer.close();
    }
  }

  /**
   * Configure a MapReduce Job to perform an incremental load into the given
   * table. This
   * <ul>
   *   <li>Inspects the table to configure a total order partitioner</li>
   *   <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
   *   <li>Sets the number of reduce tasks to match the current number of regions</li>
   *   <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
   *   <li>Sets the reducer up to perform the appropriate sorting (KeyValueSortReducer,
   *     PutSortReducer or TextSortReducer)</li>
   * </ul>
   * The user should be sure to set the map output value class to KeyValue, Put or Text before
   * running this function.
   *
   * @deprecated Use {@link #configureIncrementalLoad(Job, Table, RegionLocator)} instead.
   */
  @Deprecated
  public static void configureIncrementalLoad(Job job, HTable table)
      throws IOException {
    configureIncrementalLoad(job, table, table);
  }

  /**
   * Configure a MapReduce Job to perform an incremental load into the given
   * table. This
   * <ul>
   *   <li>Inspects the table to configure a total order partitioner</li>
   *   <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
   *   <li>Sets the number of reduce tasks to match the current number of regions</li>
   *   <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
   *   <li>Sets the reducer up to perform the appropriate sorting (KeyValueSortReducer,
   *     PutSortReducer or TextSortReducer)</li>
   * </ul>
   * The user should be sure to set the map output value class to KeyValue, Put or Text before
   * running this function.
   */
  public static void configureIncrementalLoad(Job job, Table table, RegionLocator regionLocator)
      throws IOException {
    configureIncrementalLoad(job, table, regionLocator, HFileOutputFormat2.class);
  }

  static void configureIncrementalLoad(Job job, Table table, RegionLocator regionLocator,
      Class<? extends OutputFormat<?, ?>> cls) throws IOException {
    Configuration conf = job.getConfiguration();

    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(KeyValue.class);
    job.setOutputFormatClass(cls);

    // Based on the configured map output class, set the correct reducer to properly
    // sort the incoming values.
    // TODO it would be nice to pick one or the other of these formats.
    if (KeyValue.class.equals(job.getMapOutputValueClass())) {
      job.setReducerClass(KeyValueSortReducer.class);
    } else if (Put.class.equals(job.getMapOutputValueClass())) {
      job.setReducerClass(PutSortReducer.class);
    } else if (Text.class.equals(job.getMapOutputValueClass())) {
      job.setReducerClass(TextSortReducer.class);
    } else {
      LOG.warn("Unknown map output value type:" + job.getMapOutputValueClass());
    }

    conf.setStrings("io.serializations", conf.get("io.serializations"),
        MutationSerialization.class.getName(), ResultSerialization.class.getName(),
        KeyValueSerialization.class.getName());

    // Use the table's region boundaries as the TotalOrderPartitioner split points.
    LOG.info("Looking up current regions for table " + table.getName());
    List<ImmutableBytesWritable> startKeys = getRegionStartKeys(regionLocator);
    LOG.info("Configuring " + startKeys.size() + " reduce partitions " +
        "to match current region count");
    job.setNumReduceTasks(startKeys.size());

    configurePartitioner(job, startKeys);
    // Set compression algorithms based on column families
    configureCompression(table, conf);
    configureBloomType(table, conf);
    configureBlockSize(table, conf);
    configureDataBlockEncoding(table, conf);

    TableMapReduceUtil.addDependencyJars(job);
    TableMapReduceUtil.initCredentials(job);
    LOG.info("Incremental table " + table.getName() + " output configured.");
  }

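  /**
   * Configure a map-only MapReduce Job for writing HFiles directly from its
   * mappers. Sets the output key/value classes and the output format, and
   * serializes the table's column family properties (compression, bloom type,
   * block size, data block encoding) into the job configuration. Unlike
   * {@link #configureIncrementalLoad(Job, Table, RegionLocator)}, this does
   * not configure a partitioner, reducer, or reduce task count, so the caller
   * must ensure cells arrive at this format in sorted order.
   */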
  public static void configureIncrementalLoadMap(Job job, Table table) throws IOException {
    Configuration conf = job.getConfiguration();

    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(KeyValue.class);
    job.setOutputFormatClass(HFileOutputFormat2.class);

    // Set compression algorithms based on column families
    configureCompression(table, conf);
    configureBloomType(table, conf);
    configureBlockSize(table, conf);
    configureDataBlockEncoding(table, conf);

    TableMapReduceUtil.addDependencyJars(job);
    TableMapReduceUtil.initCredentials(job);
    LOG.info("Incremental table " + table.getName() + " output configured.");
  }

  /**
   * Runs inside the task to deserialize column family to compression algorithm
   * map from the configuration.
   *
   * @param conf to read the serialized values from
   * @return a map from column family to the configured compression algorithm
   */
  @VisibleForTesting
  static Map<byte[], Algorithm> createFamilyCompressionMap(Configuration conf) {
    Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
        COMPRESSION_FAMILIES_CONF_KEY);
    Map<byte[], Algorithm> compressionMap =
        new TreeMap<byte[], Algorithm>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
      Algorithm algorithm = AbstractHFileWriter.compressionByName(e.getValue());
      compressionMap.put(e.getKey(), algorithm);
    }
    return compressionMap;
  }

  /**
   * Runs inside the task to deserialize column family to bloom filter type
   * map from the configuration.
   *
   * @param conf to read the serialized values from
   * @return a map from column family to the configured bloom filter type
   */
  @VisibleForTesting
  static Map<byte[], BloomType> createFamilyBloomTypeMap(Configuration conf) {
    Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
        BLOOM_TYPE_FAMILIES_CONF_KEY);
    Map<byte[], BloomType> bloomTypeMap =
        new TreeMap<byte[], BloomType>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
      BloomType bloomType = BloomType.valueOf(e.getValue());
      bloomTypeMap.put(e.getKey(), bloomType);
    }
    return bloomTypeMap;
  }

  /**
   * Runs inside the task to deserialize column family to block size
   * map from the configuration.
   *
   * @param conf to read the serialized values from
   * @return a map from column family to the configured block size
   */
  @VisibleForTesting
  static Map<byte[], Integer> createFamilyBlockSizeMap(Configuration conf) {
    Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
        BLOCK_SIZE_FAMILIES_CONF_KEY);
    Map<byte[], Integer> blockSizeMap =
        new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
      Integer blockSize = Integer.parseInt(e.getValue());
      blockSizeMap.put(e.getKey(), blockSize);
    }
    return blockSizeMap;
  }

  /**
   * Runs inside the task to deserialize column family to data block encoding
   * type map from the configuration.
   *
   * @param conf to read the serialized values from
   * @return a map from column family to the configured data block encoding
   */
  @VisibleForTesting
  static Map<byte[], DataBlockEncoding> createFamilyDataBlockEncodingMap(
      Configuration conf) {
    Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
        DATABLOCK_ENCODING_FAMILIES_CONF_KEY);
    Map<byte[], DataBlockEncoding> encoderMap =
        new TreeMap<byte[], DataBlockEncoding>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
      encoderMap.put(e.getKey(), DataBlockEncoding.valueOf(e.getValue()));
    }
    return encoderMap;
  }

  /**
   * Runs inside the task to deserialize a column family to conf value map
   * from the configuration.
   *
   * @param conf to read the serialized values from
   * @param confName conf key to read from the configuration
   * @return a map of column family to the given configuration value
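   * <p>
   * For illustration only (the family names {@code f1} and {@code f2} are
   * hypothetical): a serialized value such as {@code f1=gz&f2=none}, with
   * names and values URL-encoded and pairs joined by {@code &}, decodes to
   * a map of {@code f1} to {@code "gz"} and {@code f2} to {@code "none"}.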
   */
  private static Map<byte[], String> createFamilyConfValueMap(
      Configuration conf, String confName) {
    Map<byte[], String> confValMap = new TreeMap<byte[], String>(Bytes.BYTES_COMPARATOR);
    String confVal = conf.get(confName, "");
    for (String familyConf : confVal.split("&")) {
      String[] familySplit = familyConf.split("=");
      if (familySplit.length != 2) {
        continue;
      }
      try {
        // convert the family name via Bytes.toBytes so it is encoded as
        // UTF-8 rather than the platform default charset
        confValMap.put(Bytes.toBytes(URLDecoder.decode(familySplit[0], "UTF-8")),
            URLDecoder.decode(familySplit[1], "UTF-8"));
      } catch (UnsupportedEncodingException e) {
        // will not happen with UTF-8 encoding
        throw new AssertionError(e);
      }
    }
    return confValMap;
  }

  /**
   * Configure <code>job</code> with a TotalOrderPartitioner, partitioning against
   * <code>splitPoints</code>. Cleans up the partitions file after the job exits.
   */
  static void configurePartitioner(Job job, List<ImmutableBytesWritable> splitPoints)
      throws IOException {
    Configuration conf = job.getConfiguration();
    // create the partitions file
    FileSystem fs = FileSystem.get(conf);
    Path partitionsPath = new Path(conf.get("hadoop.tmp.dir"), "partitions_" + UUID.randomUUID());
    // makeQualified returns a new Path; the result must be assigned to take effect
    partitionsPath = fs.makeQualified(partitionsPath);
    writePartitions(conf, partitionsPath, splitPoints);
    fs.deleteOnExit(partitionsPath);

    // configure job to use it
    job.setPartitionerClass(TotalOrderPartitioner.class);
    TotalOrderPartitioner.setPartitionFile(conf, partitionsPath);
  }

  /**
   * Serialize column family to compression algorithm map to configuration.
   * Invoked while configuring the MR job for incremental load.
   *
   * @param table to read the properties from
   * @param conf to persist serialized values into
   * @throws IOException
   *           on failure to read column family descriptors
   */
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
      value="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE")
  @VisibleForTesting
  static void configureCompression(
      Table table, Configuration conf) throws IOException {
    StringBuilder compressionConfigValue = new StringBuilder();
    HTableDescriptor tableDescriptor = table.getTableDescriptor();
    if (tableDescriptor == null) {
      // could happen with mock table instance
      return;
    }
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
      if (i++ > 0) {
        compressionConfigValue.append('&');
      }
      compressionConfigValue.append(URLEncoder.encode(
        familyDescriptor.getNameAsString(), "UTF-8"));
      compressionConfigValue.append('=');
      compressionConfigValue.append(URLEncoder.encode(
        familyDescriptor.getCompression().getName(), "UTF-8"));
    }
    conf.set(COMPRESSION_FAMILIES_CONF_KEY, compressionConfigValue.toString());
  }

  /**
   * Serialize column family to block size map to configuration.
   * Invoked while configuring the MR job for incremental load.
   *
   * @param table to read the properties from
   * @param conf to persist serialized values into
   * @throws IOException
   *           on failure to read column family descriptors
   */
  @VisibleForTesting
  static void configureBlockSize(
      Table table, Configuration conf) throws IOException {
    StringBuilder blockSizeConfigValue = new StringBuilder();
    HTableDescriptor tableDescriptor = table.getTableDescriptor();
    if (tableDescriptor == null) {
      // could happen with mock table instance
      return;
    }
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
      if (i++ > 0) {
        blockSizeConfigValue.append('&');
      }
      blockSizeConfigValue.append(URLEncoder.encode(
          familyDescriptor.getNameAsString(), "UTF-8"));
      blockSizeConfigValue.append('=');
      blockSizeConfigValue.append(URLEncoder.encode(
          String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
    }
    conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, blockSizeConfigValue.toString());
  }

  /**
   * Serialize column family to bloom type map to configuration.
   * Invoked while configuring the MR job for incremental load.
   *
   * @param table to read the properties from
   * @param conf to persist serialized values into
   * @throws IOException
   *           on failure to read column family descriptors
   */
  @VisibleForTesting
  static void configureBloomType(
      Table table, Configuration conf) throws IOException {
    HTableDescriptor tableDescriptor = table.getTableDescriptor();
    if (tableDescriptor == null) {
      // could happen with mock table instance
      return;
    }
    StringBuilder bloomTypeConfigValue = new StringBuilder();
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
      if (i++ > 0) {
        bloomTypeConfigValue.append('&');
      }
      bloomTypeConfigValue.append(URLEncoder.encode(
        familyDescriptor.getNameAsString(), "UTF-8"));
      bloomTypeConfigValue.append('=');
      // check the enum for null before converting it to a string; toString()
      // on a non-null enum can never return null
      BloomType bloomType = familyDescriptor.getBloomFilterType();
      String bloomTypeName;
      if (bloomType == null) {
        bloomTypeName = HColumnDescriptor.DEFAULT_BLOOMFILTER;
      } else {
        bloomTypeName = bloomType.toString();
      }
      bloomTypeConfigValue.append(URLEncoder.encode(bloomTypeName, "UTF-8"));
    }
    conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, bloomTypeConfigValue.toString());
  }

  /**
   * Serialize column family to data block encoding map to configuration.
   * Invoked while configuring the MR job for incremental load.
   *
   * @param table to read the properties from
   * @param conf to persist serialized values into
   * @throws IOException
   *           on failure to read column family descriptors
   */
  @VisibleForTesting
  static void configureDataBlockEncoding(Table table,
      Configuration conf) throws IOException {
    HTableDescriptor tableDescriptor = table.getTableDescriptor();
    if (tableDescriptor == null) {
      // could happen with mock table instance
      return;
    }
    StringBuilder dataBlockEncodingConfigValue = new StringBuilder();
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
      if (i++ > 0) {
        dataBlockEncodingConfigValue.append('&');
      }
      dataBlockEncodingConfigValue.append(
          URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
      dataBlockEncodingConfigValue.append('=');
      DataBlockEncoding encoding = familyDescriptor.getDataBlockEncoding();
      if (encoding == null) {
        encoding = DataBlockEncoding.NONE;
      }
      dataBlockEncodingConfigValue.append(URLEncoder.encode(encoding.toString(),
          "UTF-8"));
    }
    conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY,
        dataBlockEncodingConfigValue.toString());
  }
}