/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.Callable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HadoopShims;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;

/**
 * Simple test for {@link KeyValueSortReducer} and {@link HFileOutputFormat}.
 * Sets up and runs a mapreduce job that writes hfile output.
 * Creates a few inner classes to implement splits and an inputformat that
 * emits keys and values like those of {@link PerformanceEvaluation}.
 */
@Category(LargeTests.class)
public class TestHFileOutputFormat {
  private static final int ROWSPERSPLIT = 1024;

  private static final byte[][] FAMILIES = {
      Bytes.add(PerformanceEvaluation.FAMILY_NAME, Bytes.toBytes("-A")),
      Bytes.add(PerformanceEvaluation.FAMILY_NAME, Bytes.toBytes("-B")) };
  private static final TableName TABLE_NAME = TableName.valueOf("TestTable");

  private HBaseTestingUtility util = new HBaseTestingUtility();

  private static final Log LOG = LogFactory.getLog(TestHFileOutputFormat.class);

  /**
   * Simple mapper that makes KeyValue output.
   */
  static class RandomKVGeneratingMapper
      extends Mapper<NullWritable, NullWritable, ImmutableBytesWritable, KeyValue> {

    private int keyLength;
    private static final int KEYLEN_DEFAULT = 10;
    private static final String KEYLEN_CONF = "randomkv.key.length";

    private int valLength;
    private static final int VALLEN_DEFAULT = 10;
    private static final String VALLEN_CONF = "randomkv.val.length";

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
      super.setup(context);

      Configuration conf = context.getConfiguration();
      keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT);
      valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT);
    }

    @Override
    protected void map(NullWritable n1, NullWritable n2, Context context)
        throws IOException, InterruptedException {
      byte[] keyBytes = new byte[keyLength];
      byte[] valBytes = new byte[valLength];

      int taskId = context.getTaskAttemptID().getTaskID().getId();
      assert taskId < Byte.MAX_VALUE : "Unit tests don't support > 127 tasks!";

      Random random = new Random();
      for (int i = 0; i < ROWSPERSPLIT; i++) {
        random.nextBytes(keyBytes);
        // Ensure that unique tasks generate unique keys
        keyBytes[keyLength - 1] = (byte) (taskId & 0xFF);
        random.nextBytes(valBytes);
        ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);

        for (byte[] family : TestHFileOutputFormat.FAMILIES) {
          KeyValue kv = new KeyValue(keyBytes, family,
              PerformanceEvaluation.QUALIFIER_NAME, valBytes);
          context.write(key, kv);
        }
      }
    }
  }

  private void setupRandomGeneratorMapper(Job job) {
    job.setInputFormatClass(NMapInputFormat.class);
    job.setMapperClass(RandomKVGeneratingMapper.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(KeyValue.class);
  }

  /**
   * Test that {@link HFileOutputFormat} RecordWriter amends timestamps if
   * passed a keyvalue whose timestamp is {@link HConstants#LATEST_TIMESTAMP}.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-2615">HBASE-2615</a>
   */
  @Test
  public void test_LATEST_TIMESTAMP_isReplaced() throws Exception {
    Configuration conf = new Configuration(this.util.getConfiguration());
    RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
    TaskAttemptContext context = null;
    Path dir = util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced");
    try {
      Job job = new Job(conf);
      FileOutputFormat.setOutputPath(job, dir);
      context = createTestTaskAttemptContext(job);
      HFileOutputFormat hof = new HFileOutputFormat();
      writer = hof.getRecordWriter(context);
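      // The record writer resolves the LATEST_TIMESTAMP sentinel to the
      // task's current time on write; HFiles are immutable once written,
      // so a concrete timestamp has to be fixed here (HBASE-2615).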
      final byte[] b = Bytes.toBytes("b");

      // Test 1. Pass a KV that has a ts of LATEST_TIMESTAMP. It should be
      // changed by the call to write. Check that everything in the kv is
      // the same but the ts.
      KeyValue kv = new KeyValue(b, b, b);
      KeyValue original = kv.clone();
      writer.write(new ImmutableBytesWritable(), kv);
      assertFalse(original.equals(kv));
      assertTrue(Bytes.equals(original.getRow(), kv.getRow()));
      assertTrue(original.matchingColumn(kv.getFamily(), kv.getQualifier()));
      // Compare the long values directly; assertNotSame on autoboxed longs
      // only checks reference inequality, which made the original
      // assertions pass vacuously.
      assertTrue(original.getTimestamp() != kv.getTimestamp());
      assertTrue(HConstants.LATEST_TIMESTAMP != kv.getTimestamp());

      // Test 2. Now test passing a kv that has an explicit ts. It should
      // not be changed by the call to write.
      kv = new KeyValue(b, b, b, kv.getTimestamp() - 1, b);
      original = kv.clone();
      writer.write(new ImmutableBytesWritable(), kv);
      assertTrue(original.equals(kv));
    } finally {
      if (writer != null && context != null) writer.close(context);
      dir.getFileSystem(conf).delete(dir, true);
    }
  }

  private TaskAttemptContext createTestTaskAttemptContext(final Job job)
      throws Exception {
    HadoopShims hadoop = CompatibilitySingletonFactory.getInstance(HadoopShims.class);
    return hadoop.createTestTaskAttemptContext(job,
        "attempt_200707121733_0001_m_000000_0");
  }

  /**
   * Test that {@link HFileOutputFormat} creates an HFile with TIMERANGE
   * metadata used by time-restricted scans.
   */
  @Test
  public void test_TIMERANGE() throws Exception {
    Configuration conf = new Configuration(this.util.getConfiguration());
    RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
    TaskAttemptContext context = null;
    Path dir = util.getDataTestDir("test_TIMERANGE_present");
    LOG.info("Timerange dir writing to dir: " + dir);
    try {
      // build a record writer using HFileOutputFormat
      Job job = new Job(conf);
      FileOutputFormat.setOutputPath(job, dir);
      context = createTestTaskAttemptContext(job);
      HFileOutputFormat hof = new HFileOutputFormat();
      writer = hof.getRecordWriter(context);

      // Pass two key values with explicit timestamps
      final byte[] b = Bytes.toBytes("b");

      // value 1 with timestamp 2000
      KeyValue kv = new KeyValue(b, b, b, 2000, b);
      KeyValue original = kv.clone();
      writer.write(new ImmutableBytesWritable(), kv);
      assertEquals(original, kv);

      // value 2 with timestamp 1000
      kv = new KeyValue(b, b, b, 1000, b);
      original = kv.clone();
      writer.write(new ImmutableBytesWritable(), kv);
      assertEquals(original, kv);

      // verify that the file has the proper FileInfo.
      writer.close(context);

      // the generated file lives 1 directory down from the attempt directory
      // and is the only file, e.g.
      // _attempt__0000_r_000000_0/b/1979617994050536795
      FileSystem fs = FileSystem.get(conf);
      Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
      FileStatus[] sub1 = fs.listStatus(attemptDirectory);
      FileStatus[] file = fs.listStatus(sub1[0].getPath());

      // open as HFile Reader and pull out TIMERANGE FileInfo.
      HFile.Reader rd = HFile.createReader(fs, file[0].getPath(),
          new CacheConfig(conf), conf);
      Map<byte[], byte[]> finfo = rd.loadFileInfo();
      byte[] range = finfo.get("TIMERANGE".getBytes());
      assertNotNull(range);
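      // The TIMERANGE entry records the [min, max] timestamps seen by the
      // writer; time-restricted scans can use it to skip store files whose
      // range cannot overlap the scan.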

      // unmarshall and check values.
      TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
      Writables.copyWritable(range, timeRangeTracker);
      LOG.info(timeRangeTracker.getMinimumTimestamp() + "...." +
          timeRangeTracker.getMaximumTimestamp());
      assertEquals(1000, timeRangeTracker.getMinimumTimestamp());
      assertEquals(2000, timeRangeTracker.getMaximumTimestamp());
      rd.close();
    } finally {
      if (writer != null && context != null) writer.close(context);
      dir.getFileSystem(conf).delete(dir, true);
    }
  }

  /**
   * Run small MR job.
   */
  @Test
  public void testWritingPEData() throws Exception {
    Configuration conf = util.getConfiguration();
    Path testDir = util.getDataTestDirOnTestFS("testWritingPEData");
    FileSystem fs = testDir.getFileSystem(conf);

    // Set down this value or we OOME in eclipse.
    conf.setInt("io.sort.mb", 20);
    // Write a few files.
    conf.setLong(HConstants.HREGION_MAX_FILESIZE, 64 * 1024);

    Job job = new Job(conf, "testWritingPEData");
    setupRandomGeneratorMapper(job);
    // This partitioner doesn't work well for number keys, but it is used
    // here anyway just to demonstrate how to configure it.
    byte[] startKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT];
    byte[] endKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT];

    Arrays.fill(startKey, (byte) 0);
    Arrays.fill(endKey, (byte) 0xff);

    job.setPartitionerClass(SimpleTotalOrderPartitioner.class);
    // Set start and end rows for partitioner.
    SimpleTotalOrderPartitioner.setStartKey(job.getConfiguration(), startKey);
    SimpleTotalOrderPartitioner.setEndKey(job.getConfiguration(), endKey);
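    // SimpleTotalOrderPartitioner splits the [startKey, endKey] range into
    // evenly sized slices, one per reducer, which works when keys are spread
    // evenly through the keyspace, as the mapper's random keys roughly are.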
    job.setReducerClass(KeyValueSortReducer.class);
    job.setOutputFormatClass(HFileOutputFormat.class);
    job.setNumReduceTasks(4);
    job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"),
        MutationSerialization.class.getName(), ResultSerialization.class.getName(),
        KeyValueSerialization.class.getName());

    FileOutputFormat.setOutputPath(job, testDir);
    assertTrue(job.waitForCompletion(false));
    FileStatus[] files = fs.listStatus(testDir);
    assertTrue(files.length > 0);
  }

  @Test
  public void testJobConfiguration() throws Exception {
    Configuration conf = new Configuration(this.util.getConfiguration());
    conf.set("hbase.fs.tmp.dir", util.getDataTestDir("testJobConfiguration").toString());
    Job job = new Job(conf);
    job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration"));
    HTable table = Mockito.mock(HTable.class);
    setupMockStartKeys(table);
    HFileOutputFormat.configureIncrementalLoad(job, table);
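    // configureIncrementalLoad sets one reducer per table region; the four
    // mock start keys above imply four regions.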
    assertEquals(4, job.getNumReduceTasks());
  }

  private byte[][] generateRandomStartKeys(int numKeys) {
    Random random = new Random();
    byte[][] ret = new byte[numKeys][];
    // first region start key is always empty
    ret[0] = HConstants.EMPTY_BYTE_ARRAY;
    for (int i = 1; i < numKeys; i++) {
      ret[i] = PerformanceEvaluation.generateData(random, PerformanceEvaluation.VALUE_LENGTH);
    }
    return ret;
  }

  @Test
  public void testMRIncrementalLoad() throws Exception {
    LOG.info("\nStarting test testMRIncrementalLoad\n");
    doIncrementalLoadTest(false);
  }

  @Test
  public void testMRIncrementalLoadWithSplit() throws Exception {
    LOG.info("\nStarting test testMRIncrementalLoadWithSplit\n");
    doIncrementalLoadTest(true);
  }

  private void doIncrementalLoadTest(boolean shouldChangeRegions) throws Exception {
    util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    byte[][] startKeys = generateRandomStartKeys(5);
    HBaseAdmin admin = null;
    try {
      util.startMiniCluster();
      Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
      admin = new HBaseAdmin(conf);
      HTable table = util.createTable(TABLE_NAME, FAMILIES);
      assertEquals("Should start with empty table", 0, util.countRows(table));
      int numRegions = util.createMultiRegions(
          util.getConfiguration(), table, FAMILIES[0], startKeys);
      assertEquals("Should make 5 regions", 5, numRegions);

      // Generate the bulk load files
      util.startMiniMapReduceCluster();
      runIncrementalPELoad(conf, table, testDir);
      // This doesn't write into the table, just makes files
      assertEquals("HFOF should not touch actual table", 0, util.countRows(table));

      // Make sure that a directory was created for every CF
      int dir = 0;
      for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
        for (byte[] family : FAMILIES) {
          if (Bytes.toString(family).equals(f.getPath().getName())) {
            ++dir;
          }
        }
      }
      assertEquals("Column family not found in FS.", FAMILIES.length, dir);

      // handle the split case
      if (shouldChangeRegions) {
        LOG.info("Changing regions in table");
        admin.disableTable(table.getTableName());
        while (util.getMiniHBaseCluster().getMaster().getAssignmentManager()
            .getRegionStates().isRegionsInTransition()) {
          Threads.sleep(200);
          LOG.info("Waiting on table to finish disabling");
        }
        byte[][] newStartKeys = generateRandomStartKeys(15);
        util.createMultiRegions(
            util.getConfiguration(), table, FAMILIES[0], newStartKeys);
        admin.enableTable(table.getTableName());
        while (table.getRegionLocations().size() != 15 ||
            !admin.isTableAvailable(table.getTableName())) {
          Thread.sleep(200);
          LOG.info("Waiting for new region assignment to happen");
        }
      }
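      // With 15 regions where the HFiles were written against 5,
      // LoadIncrementalHFiles has to split any file that straddles the new
      // region boundaries before it can load it.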

      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

      // Ensure data shows up
      int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals("LoadIncrementalHFiles should put expected data in table",
          expectedRows, util.countRows(table));
      Scan scan = new Scan();
      ResultScanner results = table.getScanner(scan);
      for (Result res : results) {
        assertEquals(FAMILIES.length, res.rawCells().length);
        Cell first = res.rawCells()[0];
        for (Cell kv : res.rawCells()) {
          assertTrue(CellUtil.matchingRow(first, kv));
          assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
        }
      }
      results.close();
      String tableDigestBefore = util.checksumRows(table);

      // Cause regions to reopen
      admin.disableTable(TABLE_NAME);
      while (!admin.isTableDisabled(TABLE_NAME)) {
        Thread.sleep(200);
        LOG.info("Waiting for table to disable");
      }
      admin.enableTable(TABLE_NAME);
      util.waitTableAvailable(TABLE_NAME.getName());
      assertEquals("Data should remain after reopening of regions",
          tableDigestBefore, util.checksumRows(table));
    } finally {
      if (admin != null) admin.close();
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }

  private void runIncrementalPELoad(Configuration conf, HTable table, Path outDir)
      throws Exception {
    Job job = new Job(conf, "testLocalMRIncrementalLoad");
    job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad"));
    job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"),
        MutationSerialization.class.getName(), ResultSerialization.class.getName(),
        KeyValueSerialization.class.getName());
    setupRandomGeneratorMapper(job);
    HFileOutputFormat.configureIncrementalLoad(job, table);
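    // configureIncrementalLoad wires up the total-order partitioner over the
    // table's region boundaries and sets one reducer per region; the
    // reduce-task assertion below relies on that.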
    FileOutputFormat.setOutputPath(job, outDir);

    assertFalse(util.getTestFileSystem().exists(outDir));

    assertEquals(table.getRegionLocations().size(), job.getNumReduceTasks());

    assertTrue(job.waitForCompletion(true));
  }

  /**
   * Test for {@link HFileOutputFormat#configureCompression(HTable, Configuration)}
   * and {@link HFileOutputFormat#createFamilyCompressionMap(Configuration)}.
   * Tests that the compression map is correctly serialized into
   * and deserialized from configuration.
   *
   * @throws IOException
   */
  @Test
  public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
    for (int numCfs = 0; numCfs <= 3; numCfs++) {
      Configuration conf = new Configuration(this.util.getConfiguration());
      Map<String, Compression.Algorithm> familyToCompression =
          getMockColumnFamiliesForCompression(numCfs);
      HTable table = Mockito.mock(HTable.class);
      setupMockColumnFamiliesForCompression(table, familyToCompression);
      HFileOutputFormat.configureCompression(table, conf);

      // read back family specific compression setting from the configuration
      Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat
          .createFamilyCompressionMap(conf);

      // test that we have a value for all column families that matches the
      // mock values used
      for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
        assertEquals("Compression configuration incorrect for column family:"
            + entry.getKey(), entry.getValue(),
            retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
      }
    }
  }

  private void setupMockColumnFamiliesForCompression(HTable table,
      Map<String, Compression.Algorithm> familyToCompression) throws IOException {
    HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
    for (Entry<String, Compression.Algorithm> entry : familyToCompression.entrySet()) {
      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
          .setMaxVersions(1)
          .setCompressionType(entry.getValue())
          .setBlockCacheEnabled(false)
          .setTimeToLive(0));
    }
    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
  }

  /**
   * @return a map from column family names to compression algorithms for
   *         testing column family compression. Column family names have special characters.
   */
  private Map<String, Compression.Algorithm>
      getMockColumnFamiliesForCompression(int numCfs) {
    Map<String, Compression.Algorithm> familyToCompression =
        new HashMap<String, Compression.Algorithm>();
    // use column family names having special characters
    if (numCfs-- > 0) {
      familyToCompression.put("Family1!@#!@#&", Compression.Algorithm.LZO);
    }
    if (numCfs-- > 0) {
      familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.SNAPPY);
    }
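    // Note: the next entry reuses the key above, so when numCfs >= 3 the GZ
    // value overwrites SNAPPY and the map holds one fewer entry than numCfs
    // suggests.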
    if (numCfs-- > 0) {
      familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.GZ);
    }
    if (numCfs-- > 0) {
      familyToCompression.put("Family3", Compression.Algorithm.NONE);
    }
    return familyToCompression;
  }

  /**
   * Test for {@link HFileOutputFormat#configureBloomType(HTable, Configuration)}
   * and {@link HFileOutputFormat#createFamilyBloomTypeMap(Configuration)}.
   * Tests that the bloom type map is correctly serialized into
   * and deserialized from configuration.
   *
   * @throws IOException
   */
  @Test
  public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException {
    for (int numCfs = 0; numCfs <= 2; numCfs++) {
      Configuration conf = new Configuration(this.util.getConfiguration());
      Map<String, BloomType> familyToBloomType =
          getMockColumnFamiliesForBloomType(numCfs);
      HTable table = Mockito.mock(HTable.class);
      setupMockColumnFamiliesForBloomType(table, familyToBloomType);
      HFileOutputFormat.configureBloomType(table, conf);

      // read back family specific bloom type settings from the configuration
      Map<byte[], BloomType> retrievedFamilyToBloomTypeMap =
          HFileOutputFormat.createFamilyBloomTypeMap(conf);

      // test that we have a value for all column families that matches the
      // mock values used
      for (Entry<String, BloomType> entry : familyToBloomType.entrySet()) {
        assertEquals("BloomType configuration incorrect for column family:"
            + entry.getKey(), entry.getValue(),
            retrievedFamilyToBloomTypeMap.get(entry.getKey().getBytes()));
      }
    }
  }

  private void setupMockColumnFamiliesForBloomType(HTable table,
      Map<String, BloomType> familyToBloomType) throws IOException {
    HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
    for (Entry<String, BloomType> entry : familyToBloomType.entrySet()) {
      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
          .setMaxVersions(1)
          .setBloomFilterType(entry.getValue())
          .setBlockCacheEnabled(false)
          .setTimeToLive(0));
    }
    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
  }

  /**
   * @return a map from column family names to bloom filter types for
   *         testing column family bloom filters. Column family names have special characters.
   */
  private Map<String, BloomType>
      getMockColumnFamiliesForBloomType(int numCfs) {
    Map<String, BloomType> familyToBloomType =
        new HashMap<String, BloomType>();
    // use column family names having special characters
    if (numCfs-- > 0) {
      familyToBloomType.put("Family1!@#!@#&", BloomType.ROW);
    }
    if (numCfs-- > 0) {
      familyToBloomType.put("Family2=asdads&!AASD", BloomType.ROWCOL);
    }
    if (numCfs-- > 0) {
      familyToBloomType.put("Family3", BloomType.NONE);
    }
    return familyToBloomType;
  }

  /**
   * Test for {@link HFileOutputFormat#configureBlockSize(HTable, Configuration)}
   * and {@link HFileOutputFormat#createFamilyBlockSizeMap(Configuration)}.
   * Tests that the block size map is correctly serialized into
   * and deserialized from configuration.
   *
   * @throws IOException
   */
  @Test
  public void testSerializeDeserializeFamilyBlockSizeMap() throws IOException {
    for (int numCfs = 0; numCfs <= 3; numCfs++) {
      Configuration conf = new Configuration(this.util.getConfiguration());
      Map<String, Integer> familyToBlockSize =
          getMockColumnFamiliesForBlockSize(numCfs);
      HTable table = Mockito.mock(HTable.class);
      setupMockColumnFamiliesForBlockSize(table, familyToBlockSize);
      HFileOutputFormat.configureBlockSize(table, conf);

      // read back family specific block size settings from the configuration
      Map<byte[], Integer> retrievedFamilyToBlockSizeMap =
          HFileOutputFormat.createFamilyBlockSizeMap(conf);

      // test that we have a value for all column families that matches the
      // mock values used
      for (Entry<String, Integer> entry : familyToBlockSize.entrySet()) {
        assertEquals("BlockSize configuration incorrect for column family:"
            + entry.getKey(), entry.getValue(),
            retrievedFamilyToBlockSizeMap.get(entry.getKey().getBytes()));
      }
    }
  }

  private void setupMockColumnFamiliesForBlockSize(HTable table,
      Map<String, Integer> familyToBlockSize) throws IOException {
    HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
    for (Entry<String, Integer> entry : familyToBlockSize.entrySet()) {
      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
          .setMaxVersions(1)
          .setBlocksize(entry.getValue())
          .setBlockCacheEnabled(false)
          .setTimeToLive(0));
    }
    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
  }

  /**
   * @return a map from column family names to block sizes for
   *         testing column family block sizes. Column family names have special characters.
   */
  private Map<String, Integer>
      getMockColumnFamiliesForBlockSize(int numCfs) {
    Map<String, Integer> familyToBlockSize =
        new HashMap<String, Integer>();
    // use column family names having special characters
    if (numCfs-- > 0) {
      familyToBlockSize.put("Family1!@#!@#&", 1234);
    }
    if (numCfs-- > 0) {
      familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE);
    }
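    // Note: the next entry repeats the key and value above, so the second
    // put simply overwrites the first and the map holds one fewer entry
    // than numCfs suggests.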
    if (numCfs-- > 0) {
      familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE);
    }
    if (numCfs-- > 0) {
      familyToBlockSize.put("Family3", 0);
    }
    return familyToBlockSize;
  }

  /**
   * Test for {@link HFileOutputFormat#configureDataBlockEncoding(HTable, Configuration)}
   * and {@link HFileOutputFormat#createFamilyDataBlockEncodingMap(Configuration)}.
   * Tests that the data block encoding map is correctly serialized into
   * and deserialized from configuration.
   *
   * @throws IOException
   */
  @Test
  public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException {
    for (int numCfs = 0; numCfs <= 3; numCfs++) {
      Configuration conf = new Configuration(this.util.getConfiguration());
      Map<String, DataBlockEncoding> familyToDataBlockEncoding =
          getMockColumnFamiliesForDataBlockEncoding(numCfs);
      HTable table = Mockito.mock(HTable.class);
      setupMockColumnFamiliesForDataBlockEncoding(table, familyToDataBlockEncoding);
      HFileOutputFormat.configureDataBlockEncoding(table, conf);

      // read back family specific data block encoding settings from the
      // configuration
      Map<byte[], DataBlockEncoding> retrievedFamilyToDataBlockEncodingMap =
          HFileOutputFormat.createFamilyDataBlockEncodingMap(conf);

      // test that we have a value for all column families that matches the
      // mock values used
      for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
        assertEquals("DataBlockEncoding configuration incorrect for column family:"
            + entry.getKey(), entry.getValue(),
            retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes()));
      }
    }
  }

  private void setupMockColumnFamiliesForDataBlockEncoding(HTable table,
      Map<String, DataBlockEncoding> familyToDataBlockEncoding) throws IOException {
    HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
    for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
          .setMaxVersions(1)
          .setDataBlockEncoding(entry.getValue())
          .setBlockCacheEnabled(false)
          .setTimeToLive(0));
    }
    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
  }

  /**
   * @return a map from column family names to data block encodings for
   *         testing column family encoding. Column family names have special characters.
   */
  private Map<String, DataBlockEncoding>
      getMockColumnFamiliesForDataBlockEncoding(int numCfs) {
    Map<String, DataBlockEncoding> familyToDataBlockEncoding =
        new HashMap<String, DataBlockEncoding>();
    // use column family names having special characters
    if (numCfs-- > 0) {
      familyToDataBlockEncoding.put("Family1!@#!@#&", DataBlockEncoding.DIFF);
    }
    if (numCfs-- > 0) {
      familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.FAST_DIFF);
    }
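    // Note: the next entry reuses the key above, so when numCfs >= 3 the
    // PREFIX value overwrites FAST_DIFF.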
    if (numCfs-- > 0) {
      familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.PREFIX);
    }
    if (numCfs-- > 0) {
      familyToDataBlockEncoding.put("Family3", DataBlockEncoding.NONE);
    }
    return familyToDataBlockEncoding;
  }

  private void setupMockStartKeys(HTable table) throws IOException {
    byte[][] mockKeys = new byte[][] {
        HConstants.EMPTY_BYTE_ARRAY,
        Bytes.toBytes("aaa"),
        Bytes.toBytes("ggg"),
        Bytes.toBytes("zzz")
    };
    Mockito.doReturn(mockKeys).when(table).getStartKeys();
  }

  /**
   * Test that {@link HFileOutputFormat} RecordWriter uses compression and
   * bloom filter settings from the column family descriptor.
   */
  @Test
  public void testColumnFamilySettings() throws Exception {
    Configuration conf = new Configuration(this.util.getConfiguration());
    RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
    TaskAttemptContext context = null;
    Path dir = util.getDataTestDir("testColumnFamilySettings");

    // Setup table descriptor
    HTable table = Mockito.mock(HTable.class);
    HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
    Mockito.doReturn(htd).when(table).getTableDescriptor();
    for (HColumnDescriptor hcd : this.util.generateColumnDescriptors()) {
      htd.addFamily(hcd);
    }

    // set up the table to return some mock keys
    setupMockStartKeys(table);

    try {
      // partial map red setup to get an operational writer for testing
      // We turn off the sequence file compression, because DefaultCodec
      // pollutes the GZip codec pool with an incompatible compressor.
      conf.set("io.seqfile.compression.type", "NONE");
      conf.set("hbase.fs.tmp.dir", dir.toString());
      Job job = new Job(conf, "testLocalMRIncrementalLoad");
      job.setWorkingDirectory(util.getDataTestDirOnTestFS("testColumnFamilySettings"));
      setupRandomGeneratorMapper(job);
      HFileOutputFormat.configureIncrementalLoad(job, table);
      FileOutputFormat.setOutputPath(job, dir);
      context = createTestTaskAttemptContext(job);
      HFileOutputFormat hof = new HFileOutputFormat();
      writer = hof.getRecordWriter(context);

      // write out random rows
      writeRandomKeyValues(writer, context, htd.getFamiliesKeys(), ROWSPERSPLIT);
      writer.close(context);

      // Make sure that a directory was created for every CF
      FileSystem fs = dir.getFileSystem(conf);

      // commit so that the filesystem has one directory per column family
      hof.getOutputCommitter(context).commitTask(context);
      hof.getOutputCommitter(context).commitJob(context);
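      // The two-phase commit above promotes the task's output from the
      // temporary attempt directory into the job output directory, so the
      // per-family directories only become visible to the checks below now.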
      FileStatus[] families = FSUtils.listStatus(fs, dir, new FSUtils.FamilyDirFilter(fs));
      assertEquals(htd.getFamilies().size(), families.length);
      for (FileStatus f : families) {
        String familyStr = f.getPath().getName();
        HColumnDescriptor hcd = htd.getFamily(Bytes.toBytes(familyStr));
        // verify that the compression on this file matches the configured
        // compression
        Path dataFilePath = fs.listStatus(f.getPath())[0].getPath();
        Reader reader = HFile.createReader(fs, dataFilePath, new CacheConfig(conf), conf);
        Map<byte[], byte[]> fileInfo = reader.loadFileInfo();

        byte[] bloomFilter = fileInfo.get(StoreFile.BLOOM_FILTER_TYPE_KEY);
        if (bloomFilter == null) bloomFilter = Bytes.toBytes("NONE");
        assertEquals("Incorrect bloom filter used for column family " + familyStr +
            " (reader: " + reader + ")",
            hcd.getBloomFilterType(), BloomType.valueOf(Bytes.toString(bloomFilter)));
        assertEquals("Incorrect compression used for column family " + familyStr +
            " (reader: " + reader + ")",
            hcd.getCompression(), reader.getFileContext().getCompression());
      }
    } finally {
      dir.getFileSystem(conf).delete(dir, true);
    }
  }

  /**
   * Write random values to the writer assuming a table created using
   * the given families as column family descriptors.
   */
  private void writeRandomKeyValues(RecordWriter<ImmutableBytesWritable, KeyValue> writer,
      TaskAttemptContext context, Set<byte[]> families, int numRows)
      throws IOException, InterruptedException {
    byte[] keyBytes = new byte[Bytes.SIZEOF_INT];
    int valLength = 10;
    byte[] valBytes = new byte[valLength];

    int taskId = context.getTaskAttemptID().getTaskID().getId();
    assert taskId < Byte.MAX_VALUE : "Unit tests don't support > 127 tasks!";

    Random random = new Random();
    for (int i = 0; i < numRows; i++) {
      Bytes.putInt(keyBytes, 0, i);
      random.nextBytes(valBytes);
      ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);

      for (byte[] family : families) {
        KeyValue kv = new KeyValue(keyBytes, family,
            PerformanceEvaluation.QUALIFIER_NAME, valBytes);
        writer.write(key, kv);
      }
    }
  }

  /**
   * Test for the scenario in HBASE-6901: all files are bulk loaded and
   * excluded from minor compaction. Without the fix for HBASE-6901, an
   * ArrayIndexOutOfBoundsException is thrown.
   */
  @Ignore("Flaky: see HBASE-9051") @Test
  public void testExcludeAllFromMinorCompaction() throws Exception {
    Configuration conf = util.getConfiguration();
    conf.setInt("hbase.hstore.compaction.min", 2);
    generateRandomStartKeys(5);

    try {
      util.startMiniCluster();
      final FileSystem fs = util.getDFSCluster().getFileSystem();
      HBaseAdmin admin = new HBaseAdmin(conf);
      HTable table = util.createTable(TABLE_NAME, FAMILIES);
      assertEquals("Should start with empty table", 0, util.countRows(table));

      // deep inspection: get the StoreFile dir
      final Path storePath = HStore.getStoreHomedir(
          FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
          admin.getTableRegions(TABLE_NAME).get(0),
          FAMILIES[0]);
      assertEquals(0, fs.listStatus(storePath).length);

      // Generate two bulk load files
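      // Setting hbase.mapreduce.hfileoutputformat.compaction.exclude makes
      // HFileOutputFormat tag each HFile with an exclude-from-minor-compaction
      // marker in its file metadata, so minor compactions skip the bulk-loaded
      // files while a major compaction still rewrites them.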
      conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
          true);
      util.startMiniMapReduceCluster();

      for (int i = 0; i < 2; i++) {
        Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
        runIncrementalPELoad(conf, table, testDir);
        // Perform the actual load
        new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
      }

      // Ensure data shows up
      int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals("LoadIncrementalHFiles should put expected data in table",
          expectedRows, util.countRows(table));

      // should have a second StoreFile now
      assertEquals(2, fs.listStatus(storePath).length);

      // minor compactions shouldn't get rid of the file
      admin.compact(TABLE_NAME.getName());
      try {
        quickPoll(new Callable<Boolean>() {
          public Boolean call() throws Exception {
            return fs.listStatus(storePath).length == 1;
          }
        }, 5000);
        throw new IOException("SF# = " + fs.listStatus(storePath).length);
      } catch (AssertionError ae) {
        // this is expected behavior
      }

      // a major compaction should work though
      admin.majorCompact(TABLE_NAME.getName());
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);

    } finally {
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }

  @Test
  public void testExcludeMinorCompaction() throws Exception {
    Configuration conf = util.getConfiguration();
    conf.setInt("hbase.hstore.compaction.min", 2);
    generateRandomStartKeys(5);

    try {
      util.startMiniCluster();
      Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
      final FileSystem fs = util.getDFSCluster().getFileSystem();
      HBaseAdmin admin = new HBaseAdmin(conf);
      HTable table = util.createTable(TABLE_NAME, FAMILIES);
      assertEquals("Should start with empty table", 0, util.countRows(table));

      // deep inspection: get the StoreFile dir
      final Path storePath = HStore.getStoreHomedir(
          FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
          admin.getTableRegions(TABLE_NAME).get(0),
          FAMILIES[0]);
      assertEquals(0, fs.listStatus(storePath).length);

      // put some data in it and flush to create a storefile
      Put p = new Put(Bytes.toBytes("test"));
      p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
      table.put(p);
      admin.flush(TABLE_NAME.getName());
      assertEquals(1, util.countRows(table));
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);

      // Generate a bulk load file with more rows
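      // As in testExcludeAllFromMinorCompaction, this flag marks the
      // bulk-loaded file so that minor compactions leave it alone.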
      conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
          true);
      util.startMiniMapReduceCluster();
      runIncrementalPELoad(conf, table, testDir);

      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

      // Ensure data shows up
      int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals("LoadIncrementalHFiles should put expected data in table",
          expectedRows + 1, util.countRows(table));

      // should have a second StoreFile now
      assertEquals(2, fs.listStatus(storePath).length);

      // minor compactions shouldn't get rid of the file
      admin.compact(TABLE_NAME.getName());
      try {
        quickPoll(new Callable<Boolean>() {
          public Boolean call() throws Exception {
            return fs.listStatus(storePath).length == 1;
          }
        }, 5000);
        throw new IOException("SF# = " + fs.listStatus(storePath).length);
      } catch (AssertionError ae) {
        // this is expected behavior
      }

      // a major compaction should work though
      admin.majorCompact(TABLE_NAME.getName());
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);

    } finally {
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }

  private void quickPoll(Callable<Boolean> c, int waitMs) throws Exception {
    int sleepMs = 10;
    int retries = (int) Math.ceil(((double) waitMs) / sleepMs);
    while (retries-- > 0) {
      if (c.call().booleanValue()) {
        return;
      }
      Thread.sleep(sleepMs);
    }
    fail("Condition not met within " + waitMs + " ms");
  }

  public static void main(String[] args) throws Exception {
    new TestHFileOutputFormat().manualTest(args);
  }

  public void manualTest(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    util = new HBaseTestingUtility(conf);
    if ("newtable".equals(args[0])) {
      byte[] tname = Bytes.toBytes(args[1]);
      HTable table = util.createTable(tname, FAMILIES);
      HBaseAdmin admin = new HBaseAdmin(conf);
      admin.disableTable(tname);
      byte[][] startKeys = generateRandomStartKeys(5);
      util.createMultiRegions(conf, table, FAMILIES[0], startKeys);
      admin.enableTable(tname);
    } else if ("incremental".equals(args[0])) {
      byte[] tname = Bytes.toBytes(args[1]);
      HTable table = new HTable(conf, tname);
      Path outDir = new Path("incremental-out");
      runIncrementalPELoad(conf, table, outDir);
    } else {
      throw new RuntimeException(
          "usage: TestHFileOutputFormat newtable | incremental");
    }
  }

}