1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  
19  package org.apache.hadoop.hbase.test;
20  
21  import java.io.DataInput;
22  import java.io.DataOutput;
23  import java.io.IOException;
24  import java.security.SecureRandom;
25  import java.util.ArrayList;
26  import java.util.Arrays;
27  import java.util.Iterator;
28  import java.util.List;
29  import java.util.Random;
30  import java.util.Set;
31  import java.util.UUID;
32  import java.util.concurrent.atomic.AtomicInteger;
33  
34  import org.apache.commons.cli.CommandLine;
35  import org.apache.commons.cli.GnuParser;
36  import org.apache.commons.cli.HelpFormatter;
37  import org.apache.commons.cli.Options;
38  import org.apache.commons.cli.ParseException;
39  import org.apache.commons.logging.Log;
40  import org.apache.commons.logging.LogFactory;
41  import org.apache.hadoop.conf.Configuration;
42  import org.apache.hadoop.conf.Configured;
43  import org.apache.hadoop.fs.FileSystem;
44  import org.apache.hadoop.fs.Path;
45  import org.apache.hadoop.hbase.HBaseConfiguration;
46  import org.apache.hadoop.hbase.HBaseTestingUtility;
47  import org.apache.hadoop.hbase.HColumnDescriptor;
48  import org.apache.hadoop.hbase.HRegionLocation;
49  import org.apache.hadoop.hbase.HTableDescriptor;
50  import org.apache.hadoop.hbase.IntegrationTestBase;
51  import org.apache.hadoop.hbase.IntegrationTestingUtility;
52  import org.apache.hadoop.hbase.testclassification.IntegrationTests;
53  import org.apache.hadoop.hbase.fs.HFileSystem;
54  import org.apache.hadoop.hbase.MasterNotRunningException;
55  import org.apache.hadoop.hbase.TableName;
56  import org.apache.hadoop.hbase.client.Get;
57  import org.apache.hadoop.hbase.client.HBaseAdmin;
58  import org.apache.hadoop.hbase.client.HConnection;
59  import org.apache.hadoop.hbase.client.HConnectionManager;
60  import org.apache.hadoop.hbase.client.HTable;
61  import org.apache.hadoop.hbase.client.Put;
62  import org.apache.hadoop.hbase.client.Result;
63  import org.apache.hadoop.hbase.client.ResultScanner;
64  import org.apache.hadoop.hbase.client.Scan;
65  import org.apache.hadoop.hbase.client.ScannerCallable;
66  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
67  import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
68  import org.apache.hadoop.hbase.mapreduce.TableMapper;
69  import org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl;
70  import org.apache.hadoop.hbase.util.AbstractHBaseTool;
71  import org.apache.hadoop.hbase.util.Bytes;
72  import org.apache.hadoop.hbase.util.RegionSplitter;
73  import org.apache.hadoop.io.BytesWritable;
74  import org.apache.hadoop.io.NullWritable;
75  import org.apache.hadoop.io.Text;
76  import org.apache.hadoop.io.Writable;
77  import org.apache.hadoop.mapreduce.Counter;
78  import org.apache.hadoop.mapreduce.CounterGroup;
79  import org.apache.hadoop.mapreduce.Counters;
80  import org.apache.hadoop.mapreduce.InputFormat;
81  import org.apache.hadoop.mapreduce.InputSplit;
82  import org.apache.hadoop.mapreduce.Job;
83  import org.apache.hadoop.mapreduce.JobContext;
84  import org.apache.hadoop.mapreduce.Mapper;
85  import org.apache.hadoop.mapreduce.RecordReader;
86  import org.apache.hadoop.mapreduce.Reducer;
87  import org.apache.hadoop.mapreduce.TaskAttemptContext;
88  import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
89  import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
90  import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
91  import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
92  import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
93  import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
94  import org.apache.hadoop.util.Tool;
95  import org.apache.hadoop.util.ToolRunner;
96  import org.junit.Test;
97  import org.junit.experimental.categories.Category;
98  
99  import com.google.common.collect.Sets;
100 
101 /**
102  * This is an integration test borrowed from goraci, written by Keith Turner,
103  * which is in turn inspired by the Accumulo test called continuous ingest (ci).
104  * The original source code can be found here:
105  * https://github.com/keith-turner/goraci
106  * https://github.com/enis/goraci/
107  *
108  * Apache Accumulo [0] has a simple test suite that verifies that data is not
109  * lost at scale. This test suite is called continuous ingest. This test runs
110  * many ingest clients that continually create linked lists containing 25
111  * million nodes. At some point the clients are stopped and a map reduce job is
112  * run to ensure no linked list has a hole. A hole indicates data was lost.
113  *
114  * The nodes in the linked list are random. This causes each linked list to
115  * spread across the table. Therefore if one part of a table loses data, then it
116  * will be detected by references in another part of the table.
117  *
118  * THE ANATOMY OF THE TEST
119  *
120  * Below is a rough sketch of how data is written. For specific details look at
121  * the Generator code.
122  *
123  * 1 Write out 1 million nodes.  2 Flush the client.  3 Write out 1 million that
124  * reference the previous million.  4 If this is the 25th set of 1 million nodes,
125  * then update the 1st million to point to the last.  5 goto 1
126  *
127  * The key is that nodes only reference flushed nodes. Therefore a node should
128  * never reference a missing node, even if the ingest client is killed at any
129  * point in time.
130  *
131  * When running this test suite with Accumulo there is a script running in
132  * parallel called the Agitator that randomly and continuously kills server
133  * processes. The outcome was that many data loss bugs were found in Accumulo
134  * by doing this. This test suite can also help find bugs that impact uptime
135  * and stability when run for days or weeks.
136  *
137  * This test suite consists of the following: a few Java programs, a little
138  * helper script to run the Java programs, and a Maven script to build it.
139  *
140  * When generating data, it's best to have each map task generate a multiple of
141  * 25 million. The reason for this is that circular linked lists are generated
142  * every 25M. Not generating a multiple of 25M will result in some nodes in the
143  * linked list not having references. The loss of an unreferenced node cannot
144  * be detected.
145  *
146  *
147  * Below is a description of the Java programs
148  *
149  * Generator - A map only job that generates data. As stated previously,
150  * it's best to generate data in multiples of 25M.
151  *
152  * Verify - A map reduce job that looks for holes. Look at the counts after
153  * running. REFERENCED and UNREFERENCED are ok, any UNDEFINED counts are bad.
154  * Do not run at the same time as the Generator.
155  *
156  * Walker - A standalone program that starts following a linked list and emits timing info.
157  *
158  * Print - A standalone program that prints nodes in the linked list
159  *
160  * Delete - A standalone program that deletes a single node
161  *
162  * This class can be run as a unit test, as an integration test, or from the command line
163  */
164 @Category(IntegrationTests.class)
165 public class IntegrationTestBigLinkedList extends IntegrationTestBase {
166   protected static final byte[] NO_KEY = new byte[1];
167 
168   protected static String TABLE_NAME_KEY = "IntegrationTestBigLinkedList.table";
169 
170   protected static String DEFAULT_TABLE_NAME = "IntegrationTestBigLinkedList";
171 
172   protected static byte[] FAMILY_NAME = Bytes.toBytes("meta");
173 
174   //link to the id of the prev node in the linked list
175   protected static final byte[] COLUMN_PREV = Bytes.toBytes("prev");
176 
177   //identifier of the mapred task that generated this row
178   protected static final byte[] COLUMN_CLIENT = Bytes.toBytes("client");
179 
180   //the id of the row within the same client.
181   protected static final byte[] COLUMN_COUNT = Bytes.toBytes("count");
182 
183   /** How many rows to write per map task. This has to be a multiple of 25M */
184   private static final String GENERATOR_NUM_ROWS_PER_MAP_KEY
185     = "IntegrationTestBigLinkedList.generator.num_rows";
186 
187   private static final String GENERATOR_NUM_MAPPERS_KEY
188     = "IntegrationTestBigLinkedList.generator.map.tasks";
189 
190   private static final String GENERATOR_WIDTH_KEY
191     = "IntegrationTestBigLinkedList.generator.width";
192 
193   private static final String GENERATOR_WRAP_KEY
194     = "IntegrationTestBigLinkedList.generator.wrap";
195 
196   protected int NUM_SLAVES_BASE = 3; // number of slaves for the cluster
197 
198   private static final int MISSING_ROWS_TO_LOG = 10; // YARN complains when too many counters
199 
200   private static final int WIDTH_DEFAULT = 1000000;
201   private static final int WRAP_DEFAULT = 25;
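      // With the defaults above, each circular linked list wraps after
      // WIDTH_DEFAULT * WRAP_DEFAULT = 25,000,000 nodes, matching the 25M figure in the class javadoc.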
202   private static final int ROWKEY_LENGTH = 16;
203 
204   protected String toRun;
205   protected String[] otherArgs;
206 
207   static class CINode {
208     byte[] key;
209     byte[] prev;
210     String client;
211     long count;
212   }
213 
214   /**
215    * A Map only job that generates random linked list and stores them.
216    */
217   static class Generator extends Configured implements Tool {
218 
219     private static final Log LOG = LogFactory.getLog(Generator.class);
220 
221     static class GeneratorInputFormat extends InputFormat<BytesWritable,NullWritable> {
222       static class GeneratorInputSplit extends InputSplit implements Writable {
223         @Override
224         public long getLength() throws IOException, InterruptedException {
225           return 1;
226         }
227         @Override
228         public String[] getLocations() throws IOException, InterruptedException {
229           return new String[0];
230         }
231         @Override
232         public void readFields(DataInput arg0) throws IOException {
233         }
234         @Override
235         public void write(DataOutput arg0) throws IOException {
236         }
237       }
238 
239       static class GeneratorRecordReader extends RecordReader<BytesWritable,NullWritable> {
240         private long count;
241         private long numNodes;
242         private Random rand;
243 
244         @Override
245         public void close() throws IOException {
246         }
247 
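            // Each record is just a freshly generated random 16-byte row key (the value is unused),
            // so the map input is purely synthetic and its size is set by GENERATOR_NUM_ROWS_PER_MAP_KEY.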
248         @Override
249         public BytesWritable getCurrentKey() throws IOException, InterruptedException {
250           byte[] bytes = new byte[ROWKEY_LENGTH];
251           rand.nextBytes(bytes);
252           return new BytesWritable(bytes);
253         }
254 
255         @Override
256         public NullWritable getCurrentValue() throws IOException, InterruptedException {
257           return NullWritable.get();
258         }
259 
260         @Override
261         public float getProgress() throws IOException, InterruptedException {
262           return (float)(count / (double)numNodes);
263         }
264 
265         @Override
266         public void initialize(InputSplit arg0, TaskAttemptContext context)
267             throws IOException, InterruptedException {
268           numNodes = context.getConfiguration().getLong(GENERATOR_NUM_ROWS_PER_MAP_KEY, 25000000);
269           // Use SecureRandom to avoid issue described in HBASE-13382.
270           rand = new SecureRandom();
271         }
272 
273         @Override
274         public boolean nextKeyValue() throws IOException, InterruptedException {
275           return count++ < numNodes;
276         }
277 
278       }
279 
280       @Override
281       public RecordReader<BytesWritable,NullWritable> createRecordReader(
282           InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
283         GeneratorRecordReader rr = new GeneratorRecordReader();
284         rr.initialize(split, context);
285         return rr;
286       }
287 
288       @Override
289       public List<InputSplit> getSplits(JobContext job) throws IOException, InterruptedException {
290         int numMappers = job.getConfiguration().getInt(GENERATOR_NUM_MAPPERS_KEY, 1);
291 
292         ArrayList<InputSplit> splits = new ArrayList<InputSplit>(numMappers);
293 
294         for (int i = 0; i < numMappers; i++) {
295           splits.add(new GeneratorInputSplit());
296         }
297 
298         return splits;
299       }
300     }
301 
302     /** Ensure each output file of the previous job feeds exactly one mapper of this job, by making the SequenceFiles non-splittable. */
303     static class OneFilePerMapperSFIF<K, V> extends SequenceFileInputFormat<K, V> {
304       @Override
305       protected boolean isSplitable(JobContext context, Path filename) {
306         return false;
307       }
308     }
309 
310     /**
311      * Some ASCII art time:
312      * [ . . . ] represents one batch of random longs of length WIDTH
313      *
314      *                _________________________
315      *               |                  ______ |
316      *               |                 |      ||
317      *             .-+-----------------+-----.||
318      *             | |                 |     |||
319      * first   = [ . . . . . . . . . . . ]   |||
320      *             ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^     |||
321      *             | | | | | | | | | | |     |||
322      * prev    = [ . . . . . . . . . . . ]   |||
323      *             ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^     |||
324      *             | | | | | | | | | | |     |||
325      * current = [ . . . . . . . . . . . ]   |||
326      *                                       |||
327      * ...                                   |||
328      *                                       |||
329      * last    = [ . . . . . . . . . . . ]   |||
330      *             ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^_____|||
331      *             |                 |________||
332      *             |___________________________|
333      */
334     static class GeneratorMapper
335       extends Mapper<BytesWritable, NullWritable, NullWritable, NullWritable> {
336 
337       byte[][] first = null;
338       byte[][] prev = null;
339       byte[][] current = null;
340       byte[] id;
341       long count = 0;
342       int i;
343       HTable table;
344       long numNodes;
345       long wrap;
346       int width;
347 
348       @Override
349       protected void setup(Context context) throws IOException, InterruptedException {
350         id = Bytes.toBytes("Job: "+context.getJobID() + " Task: " + context.getTaskAttemptID());
351         Configuration conf = context.getConfiguration();
352         instantiateHTable(conf);
353         this.width = context.getConfiguration().getInt(GENERATOR_WIDTH_KEY, WIDTH_DEFAULT);
354         current = new byte[this.width][];
355         int wrapMultiplier = context.getConfiguration().getInt(GENERATOR_WRAP_KEY, WRAP_DEFAULT);
356         this.wrap = (long)wrapMultiplier * width;
357         this.numNodes = context.getConfiguration().getLong(
358             GENERATOR_NUM_ROWS_PER_MAP_KEY, (long)WIDTH_DEFAULT * WRAP_DEFAULT);
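            // If this mapper is asked to write fewer nodes than one full wrap, shrink the wrap so
            // that the single, smaller list it produces still closes on itself and can be verified.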
359         if (this.numNodes < this.wrap) {
360           this.wrap = this.numNodes;
361         }
362       }
363 
364       protected void instantiateHTable(Configuration conf) throws IOException {
365         table = new HTable(conf, getTableName(conf));
366         table.setAutoFlush(false, true);
367         table.setWriteBufferSize(4 * 1024 * 1024);
368       }
369 
370       @Override
371       protected void cleanup(Context context) throws IOException, InterruptedException {
372         table.close();
373       }
374 
375       @Override
376       protected void map(BytesWritable key, NullWritable value, Context output) throws IOException {
377         current[i] = new byte[key.getLength()];
378         System.arraycopy(key.getBytes(), 0, current[i], 0, key.getLength());
379         if (++i == current.length) {
380           persist(output, count, prev, current, id);
381           i = 0;
382 
383           if (first == null)
384             first = current;
385           prev = current;
386           current = new byte[this.width][];
387 
388           count += current.length;
389           output.setStatus("Count " + count);
390 
391           if (count % wrap == 0) {
392             // This block of code turns the 1 million linked lists of length 25 into one giant
393             // circular linked list of 25 million nodes.
394             circularLeftShift(first);
395 
396             persist(output, -1, prev, first, null);
397 
398             first = null;
399             prev = null;
400           }
401         }
402       }
403 
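          // Rotate the first-written batch left by one, e.g. [a, b, c, d] -> [b, c, d, a]. Re-persisting
          // the rotated batch with the last batch as its prev column stitches the WIDTH parallel chains
          // into one giant cycle, as sketched in the ASCII diagram above.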
404       private static <T> void circularLeftShift(T[] first) {
405         T ez = first[0];
406         System.arraycopy(first, 1, first, 0, first.length - 1);
407         first[first.length - 1] = ez;
408       }
409 
410       protected void persist(Context output, long count, byte[][] prev, byte[][] current, byte[] id)
411           throws IOException {
412         for (int i = 0; i < current.length; i++) {
413           Put put = new Put(current[i]);
414           put.add(FAMILY_NAME, COLUMN_PREV, prev == null ? NO_KEY : prev[i]);
415 
416           if (count >= 0) {
417             put.add(FAMILY_NAME, COLUMN_COUNT, Bytes.toBytes(count + i));
418           }
419           if (id != null) {
420             put.add(FAMILY_NAME, COLUMN_CLIENT, id);
421           }
422           table.put(put);
423 
424           if (i % 1000 == 0) {
425             // Tickle progress every so often else maprunner will think us hung
426             output.progress();
427           }
428         }
429 
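            // Flushing here enforces the invariant from the class javadoc: a batch is only referenced
            // by later puts after it has been flushed, so a killed client never leaves dangling references.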
430         table.flushCommits();
431       }
432     }
433 
434     @Override
435     public int run(String[] args) throws Exception {
436       if (args.length < 3) {
437         System.out.println("Usage : " + Generator.class.getSimpleName() +
438             " <num mappers> <num nodes per map> <tmp output dir> [<width> <wrap multiplier>]");
439         System.out.println("   where <num nodes per map> should be a multiple of " +
440             " width*wrap multiplier, 25M by default");
441         return 0;
442       }
443 
444       int numMappers = Integer.parseInt(args[0]);
445       long numNodes = Long.parseLong(args[1]);
446       Path tmpOutput = new Path(args[2]);
447       Integer width = (args.length < 4) ? null : Integer.parseInt(args[3]);
448       Integer wrapMultiplier = (args.length < 5) ? null : Integer.parseInt(args[4]);
449       return run(numMappers, numNodes, tmpOutput, width, wrapMultiplier);
450     }
451 
452     protected void createSchema() throws IOException {
453       Configuration conf = getConf();
454       HBaseAdmin admin = new HBaseAdmin(conf);
455       TableName tableName = getTableName(conf);
456       try {
457         if (!admin.tableExists(tableName)) {
458           HTableDescriptor htd = new HTableDescriptor(getTableName(getConf()));
459           htd.addFamily(new HColumnDescriptor(FAMILY_NAME));
460           int numberOfServers = admin.getClusterStatus().getServers().size();
461           if (numberOfServers == 0) {
462             throw new IllegalStateException("No live regionservers");
463           }
464           int regionsPerServer = conf.getInt(HBaseTestingUtility.REGIONS_PER_SERVER_KEY,
465                                 HBaseTestingUtility.DEFAULT_REGIONS_PER_SERVER);
466           int totalNumberOfRegions = numberOfServers * regionsPerServer;
467           LOG.info("Number of live regionservers: " + numberOfServers + ", " +
468               "pre-splitting table into " + totalNumberOfRegions + " regions " +
469               "(default regions per server: " + regionsPerServer + ")");
470 
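              // UniformSplit spreads split points evenly over the whole byte[] key space, which matches
              // the uniformly random row keys written by the Generator and balances load across regions.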
471           byte[][] splits = new RegionSplitter.UniformSplit().split(
472               totalNumberOfRegions);
473 
474           admin.createTable(htd, splits);
475         }
476       } catch (MasterNotRunningException e) {
477         LOG.error("Master not running", e);
478         throw new IOException(e);
479       } finally {
480         admin.close();
481       }
482     }
483 
484     public int runRandomInputGenerator(int numMappers, long numNodes, Path tmpOutput,
485         Integer width, Integer wrapMultiplier) throws Exception {
486       LOG.info("Running RandomInputGenerator with numMappers=" + numMappers
487           + ", numNodes=" + numNodes);
488       Job job = new Job(getConf());
489 
490       job.setJobName("Random Input Generator");
491       job.setNumReduceTasks(0);
492       job.setJarByClass(getClass());
493 
494       job.setInputFormatClass(GeneratorInputFormat.class);
495       job.setOutputKeyClass(BytesWritable.class);
496       job.setOutputValueClass(NullWritable.class);
497 
498       setJobConf(job, numMappers, numNodes, width, wrapMultiplier);
499 
500       job.setMapperClass(Mapper.class); //identity mapper
501 
502       FileOutputFormat.setOutputPath(job, tmpOutput);
503       job.setOutputFormatClass(SequenceFileOutputFormat.class);
504 
505       boolean success = jobCompletion(job);
506 
507       return success ? 0 : 1;
508     }
509 
510     public int runGenerator(int numMappers, long numNodes, Path tmpOutput,
511         Integer width, Integer wrapMultiplier) throws Exception {
512       LOG.info("Running Generator with numMappers=" + numMappers +", numNodes=" + numNodes);
513       createSchema();
514       Job job = new Job(getConf());
515 
516       job.setJobName("Link Generator");
517       job.setNumReduceTasks(0);
518       job.setJarByClass(getClass());
519 
520       FileInputFormat.setInputPaths(job, tmpOutput);
521       job.setInputFormatClass(OneFilePerMapperSFIF.class);
522       job.setOutputKeyClass(NullWritable.class);
523       job.setOutputValueClass(NullWritable.class);
524 
525       setJobConf(job, numMappers, numNodes, width, wrapMultiplier);
526 
527       setMapperForGenerator(job);
528 
529       job.setOutputFormatClass(NullOutputFormat.class);
530 
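          // Speculative execution is disabled because a duplicate map attempt would write duplicate
          // nodes and references, skewing the REFERENCED/UNREFERENCED counts that Verify checks.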
531       job.getConfiguration().setBoolean("mapred.map.tasks.speculative.execution", false);
532       TableMapReduceUtil.addDependencyJars(job);
533       TableMapReduceUtil.addDependencyJars(job.getConfiguration(), AbstractHBaseTool.class);
534       TableMapReduceUtil.initCredentials(job);
535 
536       boolean success = jobCompletion(job);
537 
538       return success ? 0 : 1;
539     }
540 
541     protected boolean jobCompletion(Job job) throws IOException, InterruptedException,
542         ClassNotFoundException {
543       boolean success = job.waitForCompletion(true);
544       return success;
545     }
546 
547     protected void setMapperForGenerator(Job job) {
548       job.setMapperClass(GeneratorMapper.class);
549     }
550 
551     public int run(int numMappers, long numNodes, Path tmpOutput,
552         Integer width, Integer wrapMultiplier) throws Exception {
553       int ret = runRandomInputGenerator(numMappers, numNodes, tmpOutput, width, wrapMultiplier);
554       if (ret > 0) {
555         return ret;
556       }
557       return runGenerator(numMappers, numNodes, tmpOutput, width, wrapMultiplier);
558     }
559   }
560 
561   /**
562    * A Map Reduce job that verifies that the linked lists generated by
563    * {@link Generator} do not have any holes.
564    */
565   static class Verify extends Configured implements Tool {
566 
567     private static final Log LOG = LogFactory.getLog(Verify.class);
568     protected static final BytesWritable DEF = new BytesWritable(NO_KEY);
569 
570     protected Job job;
571 
572     public static class VerifyMapper extends TableMapper<BytesWritable, BytesWritable> {
573       private BytesWritable row = new BytesWritable();
574       private BytesWritable ref = new BytesWritable();
575 
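          // For every row, emit (row, DEF) to mark the node as defined, plus (prev, row) to record that
          // this row references its predecessor; the reducer then matches definitions against references.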
576       @Override
577       protected void map(ImmutableBytesWritable key, Result value, Context context)
578           throws IOException, InterruptedException {
579         byte[] rowKey = key.get();
580         row.set(rowKey, 0, rowKey.length);
581         context.write(row, DEF);
582         byte[] prev = value.getValue(FAMILY_NAME, COLUMN_PREV);
583         if (prev != null && prev.length > 0) {
584           ref.set(prev, 0, prev.length);
585           context.write(ref, row);
586         } else {
587           LOG.warn(String.format("Prev is not set for: %s", Bytes.toStringBinary(rowKey)));
588         }
589       }
590     }
591 
592     public static enum Counts {
593       UNREFERENCED, UNDEFINED, REFERENCED, CORRUPT, EXTRAREFERENCES
594     }
595 
596     public static class VerifyReducer extends Reducer<BytesWritable,BytesWritable,Text,Text> {
597       private ArrayList<byte[]> refs = new ArrayList<byte[]>();
598 
599       private AtomicInteger rows = new AtomicInteger(0);
600 
601       @Override
602       public void reduce(BytesWritable key, Iterable<BytesWritable> values, Context context)
603           throws IOException, InterruptedException {
604 
605         int defCount = 0;
606 
607         refs.clear();
608         for (BytesWritable type : values) {
609           if (type.getLength() == DEF.getLength()) {
610             defCount++;
611           } else {
612             byte[] bytes = new byte[type.getLength()];
613             System.arraycopy(type.getBytes(), 0, bytes, 0, type.getLength());
614             refs.add(bytes);
615           }
616         }
617 
618         // TODO check for more than one def, should not happen
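            // Classification: no DEF but references present -> UNDEFINED (a referenced row is missing,
            // i.e. data loss); DEF but no references -> UNREFERENCED (expected only from duplicate random
            // keys or a non-multiple-of-wrap run); both present -> REFERENCED (healthy).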
619 
620         StringBuilder refsSb = null;
621         String keyString = null;
622         if (defCount == 0 || refs.size() != 1) {
623           refsSb = new StringBuilder();
624           String comma = "";
625           for (byte[] ref : refs) {
626             refsSb.append(comma);
627             comma = ",";
628             refsSb.append(Bytes.toStringBinary(ref));
629           }
630           keyString = Bytes.toStringBinary(key.getBytes(), 0, key.getLength());
631 
632           LOG.error("Linked List error: Key = " + keyString + " References = " + refsSb.toString());
633         }
634 
635         if (defCount == 0 && refs.size() > 0) {
636           // this is bad, found a node that is referenced but not defined. It must have been
637           // lost, emit some info about this node for debugging purposes.
638           context.write(new Text(keyString), new Text(refsSb.toString()));
639           context.getCounter(Counts.UNDEFINED).increment(1);
640           if (rows.addAndGet(1) < MISSING_ROWS_TO_LOG) {
641             context.getCounter("undef", keyString).increment(1);
642           }
643         } else if (defCount > 0 && refs.size() == 0) {
644           // node is defined but not referenced
645           context.write(new Text(keyString), new Text("none"));
646           context.getCounter(Counts.UNREFERENCED).increment(1);
647           if (rows.addAndGet(1) < MISSING_ROWS_TO_LOG) {
648             context.getCounter("unref", keyString).increment(1);
649           }
650         } else {
651           if (refs.size() > 1) {
652             if (refsSb != null) {
653               context.write(new Text(keyString), new Text(refsSb.toString()));
654             }
655             context.getCounter(Counts.EXTRAREFERENCES).increment(refs.size() - 1);
656           }
657           // node is defined and referenced
658           context.getCounter(Counts.REFERENCED).increment(1);
659         }
660 
661       }
662     }
663 
664     @Override
665     public int run(String[] args) throws Exception {
666 
667       if (args.length != 2) {
668         System.out.println("Usage : " + Verify.class.getSimpleName() + " <output dir> <num reducers>");
669         return 0;
670       }
671 
672       String outputDir = args[0];
673       int numReducers = Integer.parseInt(args[1]);
674 
675       return run(outputDir, numReducers);
676     }
677 
678     public int run(String outputDir, int numReducers) throws Exception {
679       return run(new Path(outputDir), numReducers);
680     }
681 
682     public int run(Path outputDir, int numReducers) throws Exception {
683       LOG.info("Running Verify with outputDir=" + outputDir +", numReducers=" + numReducers);
684 
685       job = new Job(getConf());
686 
687       job.setJobName("Link Verifier");
688       job.setNumReduceTasks(numReducers);
689       job.setJarByClass(getClass());
690 
691       setJobScannerConf(job);
692 
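          // A large caching value keeps RPC round trips down for this full-table scan, while disabling
          // block caching avoids evicting hot data on the regionservers for rows read exactly once.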
693       Scan scan = new Scan();
694       scan.addColumn(FAMILY_NAME, COLUMN_PREV);
695       scan.setCaching(10000);
696       scan.setCacheBlocks(false);
697 
698       TableMapReduceUtil.initTableMapperJob(getTableName(getConf()).getName(), scan,
699           VerifyMapper.class, BytesWritable.class, BytesWritable.class, job);
700       TableMapReduceUtil.addDependencyJars(job.getConfiguration(), AbstractHBaseTool.class);
701 
702       job.getConfiguration().setBoolean("mapred.map.tasks.speculative.execution", false);
703 
704       job.setReducerClass(VerifyReducer.class);
705       job.setOutputFormatClass(TextOutputFormat.class);
706       TextOutputFormat.setOutputPath(job, outputDir);
707 
708       boolean success = job.waitForCompletion(true);
709 
710       if (success) {
711         Counters counters = job.getCounters();
712         if (null == counters) {
713           LOG.warn("Counters were null, cannot verify Job completion");
714           // We don't have access to the counters to know if we have "bad" counts
715           return 0;
716         }
717 
718         // If we find no unexpected values, the job didn't outright fail
719         if (verifyUnexpectedValues(counters)) {
720           // We didn't check referenced+unreferenced counts, leave that to visual inspection
721           return 0;
722         }
723       }
724 
725       // We failed
726       return 1;
727     }
728 
729     @SuppressWarnings("deprecation")
730     public boolean verify(long expectedReferenced) throws Exception {
731       if (job == null) {
732         throw new IllegalStateException("You should call run() first");
733       }
734 
735       Counters counters = job.getCounters();
736 
737       // Run through each check, even if we fail one early
738       boolean success = verifyExpectedValues(expectedReferenced, counters);
739 
740       if (!verifyUnexpectedValues(counters)) {
741         // We found counter objects which imply failure
742         success = false;
743       }
744 
745       if (!success) {
746         handleFailure(counters);
747       }
748       return success;
749     }
750 
751     /**
752      * Verify the values in the Counters against the expected number of entries written.
753      *
754      * @param expectedReferenced
755      *          Expected number of referenced entries
756      * @param counters
757      *          The Job's Counters object
758      * @return True if the values match what's expected, false otherwise
759      */
760     protected boolean verifyExpectedValues(long expectedReferenced, Counters counters) {
761       final Counter referenced = counters.findCounter(Counts.REFERENCED);
762       final Counter unreferenced = counters.findCounter(Counts.UNREFERENCED);
763       boolean success = true;
764 
765       if (expectedReferenced != referenced.getValue()) {
766         LOG.error("Expected referenced count does not match with actual referenced count. " +
767             "expected referenced=" + expectedReferenced + " ,actual=" + referenced.getValue());
768         success = false;
769       }
770 
771       if (unreferenced.getValue() > 0) {
772         final Counter multiref = counters.findCounter(Counts.EXTRAREFERENCES);
773         boolean couldBeMultiRef = (multiref.getValue() == unreferenced.getValue());
774         LOG.error("Unreferenced nodes were not expected. Unreferenced count=" + unreferenced.getValue()
775             + (couldBeMultiRef ? "; could be due to duplicate random numbers" : ""));
776         success = false;
777       }
778 
779       return success;
780     }
781 
782     /**
783      * Verify that the Counters don't contain values which indicate an outright failure from the Reducers.
784      *
785      * @param counters
786      *          The Job's counters
787      * @return True if the "bad" counter objects are 0, false otherwise
788      */
789     protected boolean verifyUnexpectedValues(Counters counters) {
790       final Counter undefined = counters.findCounter(Counts.UNDEFINED);
791       boolean success = true;
792 
793       if (undefined.getValue() > 0) {
794         LOG.error("Found an undefined node. Undefined count=" + undefined.getValue());
795         success = false;
796       }
797 
798       return success;
799     }
800 
801     protected void handleFailure(Counters counters) throws IOException {
802       Configuration conf = job.getConfiguration();
803       HConnection conn = HConnectionManager.getConnection(conf);
804       TableName tableName = getTableName(conf);
805       CounterGroup g = counters.getGroup("undef");
806       Iterator<Counter> it = g.iterator();
807       while (it.hasNext()) {
808         String keyString = it.next().getName();
809         byte[] key = Bytes.toBytes(keyString);
810         HRegionLocation loc = conn.relocateRegion(tableName, key);
811         LOG.error("undefined row " + keyString + ", " + loc);
812       }
813       g = counters.getGroup("unref");
814       it = g.iterator();
815       while (it.hasNext()) {
816         String keyString = it.next().getName();
817         byte[] key = Bytes.toBytes(keyString);
818         HRegionLocation loc = conn.relocateRegion(tableName, key);
819         LOG.error("unreferred row " + keyString + ", " + loc);
820       }
821     }
822   }
823 
824   /**
825    * Executes Generator and Verify in a loop. Data is not cleaned between runs, so each iteration
826    * adds more data.
827    */
828   static class Loop extends Configured implements Tool {
829 
830     private static final Log LOG = LogFactory.getLog(Loop.class);
831 
832     IntegrationTestBigLinkedList it;
833 
834     protected void runGenerator(int numMappers, long numNodes,
835         String outputDir, Integer width, Integer wrapMultiplier) throws Exception {
836       Path outputPath = new Path(outputDir);
837       UUID uuid = UUID.randomUUID(); //create a random UUID.
838       Path generatorOutput = new Path(outputPath, uuid.toString());
839 
840       Generator generator = new Generator();
841       generator.setConf(getConf());
842       int retCode = generator.run(numMappers, numNodes, generatorOutput, width, wrapMultiplier);
843       if (retCode > 0) {
844         throw new RuntimeException("Generator failed with return code: " + retCode);
845       }
846     }
847 
848     protected void runVerify(String outputDir,
849         int numReducers, long expectedNumNodes) throws Exception {
850       Path outputPath = new Path(outputDir);
851       UUID uuid = UUID.randomUUID(); //create a random UUID.
852       Path iterationOutput = new Path(outputPath, uuid.toString());
853 
854       Verify verify = new Verify();
855       verify.setConf(getConf());
856       int retCode = verify.run(iterationOutput, numReducers);
857       if (retCode > 0) {
858         throw new RuntimeException("Verify.run failed with return code: " + retCode);
859       }
860 
861       if (!verify.verify(expectedNumNodes)) {
862         throw new RuntimeException("Verify.verify failed");
863       }
864 
865       LOG.info("Verify finished with succees. Total nodes=" + expectedNumNodes);
866     }
867 
868     @Override
869     public int run(String[] args) throws Exception {
870       if (args.length < 5) {
871         System.err.println("Usage: Loop <num iterations> <num mappers> <num nodes per mapper> <output dir> <num reducers> [<width> <wrap multiplier>]");
872         return 1;
873       }
874       LOG.info("Running Loop with args:" + Arrays.deepToString(args));
875 
876       int numIterations = Integer.parseInt(args[0]);
877       int numMappers = Integer.parseInt(args[1]);
878       long numNodes = Long.parseLong(args[2]);
879       String outputDir = args[3];
880       int numReducers = Integer.parseInt(args[4]);
881       Integer width = (args.length < 6) ? null : Integer.parseInt(args[5]);
882       Integer wrapMultiplier = (args.length < 7) ? null : Integer.parseInt(args[6]);
883 
884       long expectedNumNodes = 0;
885 
886       if (numIterations < 0) {
887         numIterations = Integer.MAX_VALUE; //run indefinitely (kind of)
888       }
889 
890       for (int i = 0; i < numIterations; i++) {
891         LOG.info("Starting iteration = " + i);
892         runGenerator(numMappers, numNodes, outputDir, width, wrapMultiplier);
893         expectedNumNodes += numMappers * numNodes;
894 
895         runVerify(outputDir, numReducers, expectedNumNodes);
896       }
897 
898       return 0;
899     }
900   }
901 
902   /**
903    * A stand alone program that prints out portions of a list created by {@link Generator}
904    */
905   private static class Print extends Configured implements Tool {
906     @Override
907     public int run(String[] args) throws Exception {
908       Options options = new Options();
909       options.addOption("s", "start", true, "start key");
910       options.addOption("e", "end", true, "end key");
911       options.addOption("l", "limit", true, "number to print");
912 
913       GnuParser parser = new GnuParser();
914       CommandLine cmd = null;
915       try {
916         cmd = parser.parse(options, args);
917         if (cmd.getArgs().length != 0) {
918           throw new ParseException("Command takes no arguments");
919         }
920       } catch (ParseException e) {
921         System.err.println("Failed to parse command line " + e.getMessage());
922         System.err.println();
923         HelpFormatter formatter = new HelpFormatter();
924         formatter.printHelp(getClass().getSimpleName(), options);
925         System.exit(-1);
926       }
927 
928       HTable table = new HTable(getConf(), getTableName(getConf()));
929 
930       Scan scan = new Scan();
931       scan.setBatch(10000);
932 
933       if (cmd.hasOption("s"))
934         scan.setStartRow(Bytes.toBytesBinary(cmd.getOptionValue("s")));
935 
936       if (cmd.hasOption("e"))
937         scan.setStopRow(Bytes.toBytesBinary(cmd.getOptionValue("e")));
938 
939       int limit = 0;
940       if (cmd.hasOption("l"))
941         limit = Integer.parseInt(cmd.getOptionValue("l"));
942       else
943         limit = 100;
944 
945       ResultScanner scanner = table.getScanner(scan);
946 
947       CINode node = new CINode();
948       Result result = scanner.next();
949       int count = 0;
950       while (result != null && count++ < limit) {
951         node = getCINode(result, node);
952         System.out.printf("%s:%s:%012d:%s\n", Bytes.toStringBinary(node.key),
953             Bytes.toStringBinary(node.prev), node.count, node.client);
954         result = scanner.next();
955       }
956       scanner.close();
957       table.close();
958 
959       return 0;
960     }
961   }
962 
963   /**
964    * A stand alone program that deletes a single node.
965    */
966   private static class Delete extends Configured implements Tool {
967     @Override
968     public int run(String[] args) throws Exception {
969       if (args.length != 1) {
970         System.out.println("Usage : " + Delete.class.getSimpleName() + " <node to delete>");
971         return 0;
972       }
973       byte[] val = Bytes.toBytesBinary(args[0]);
974 
975       org.apache.hadoop.hbase.client.Delete delete
976         = new org.apache.hadoop.hbase.client.Delete(val);
977 
978       HTable table = new HTable(getConf(), getTableName(getConf()));
979 
980       table.delete(delete);
981       table.flushCommits();
982       table.close();
983 
984       System.out.println("Delete successful");
985       return 0;
986     }
987   }
988 
989   /**
990    * A stand alone program that follows a linked list created by {@link Generator} and prints timing info.
991    */
992   private static class Walker extends Configured implements Tool {
993     @Override
994     public int run(String[] args) throws IOException {
995       Options options = new Options();
996       options.addOption("n", "num", true, "number of queries");
997       options.addOption("s", "start", true, "key to start at, binary string");
998       options.addOption("l", "logevery", true, "log every N queries");
999 
1000       GnuParser parser = new GnuParser();
1001       CommandLine cmd = null;
1002       try {
1003         cmd = parser.parse(options, args);
1004         if (cmd.getArgs().length != 0) {
1005           throw new ParseException("Command takes no arguments");
1006         }
1007       } catch (ParseException e) {
1008         System.err.println("Failed to parse command line " + e.getMessage());
1009         System.err.println();
1010         HelpFormatter formatter = new HelpFormatter();
1011         formatter.printHelp(getClass().getSimpleName(), options);
1012         System.exit(-1);
1013       }
1014 
1015       long maxQueries = Long.MAX_VALUE;
1016       if (cmd.hasOption('n')) {
1017         maxQueries = Long.parseLong(cmd.getOptionValue("n"));
1018       }
1019       Random rand = new SecureRandom();
1020       boolean isSpecificStart = cmd.hasOption('s');
1021       byte[] startKey = isSpecificStart ? Bytes.toBytesBinary(cmd.getOptionValue('s')) : null;
1022       int logEvery = cmd.hasOption('l') ? Integer.parseInt(cmd.getOptionValue('l')) : 1;
1023 
1024       HTable table = new HTable(getConf(), getTableName(getConf()));
1025       long numQueries = 0;
1026       // If isSpecificStart is set, only walk one list from that particular node.
1027       // Note that in case of circular (or P-shaped) list it will walk forever, as is
1028       // the case in normal run without startKey.
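           // Output format: "FSR <ms> <key>" is the latency of the scan that finds a start node;
           // "CQ <n>: <ms> <key>" is the latency of each Get while walking backwards along the list.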
1029       while (numQueries < maxQueries && (numQueries == 0 || !isSpecificStart)) {
1030         if (!isSpecificStart) {
1031           startKey = new byte[ROWKEY_LENGTH];
1032           rand.nextBytes(startKey);
1033         }
1034         CINode node = findStartNode(table, startKey);
1035         if (node == null && isSpecificStart) {
1036           System.err.printf("Start node not found: %s \n", Bytes.toStringBinary(startKey));
1037         }
1038         numQueries++;
1039         while (node != null && node.prev.length != NO_KEY.length && numQueries < maxQueries) {
1040           byte[] prev = node.prev;
1041           long t1 = System.currentTimeMillis();
1042           node = getNode(prev, table, node);
1043           long t2 = System.currentTimeMillis();
1044           if (numQueries % logEvery == 0) {
1045             System.out.printf("CQ %d: %d %s \n", numQueries, t2 - t1, Bytes.toStringBinary(prev));
1046           }
1047           numQueries++;
1048           if (node == null) {
1049             System.err.printf("UNDEFINED NODE %s \n", Bytes.toStringBinary(prev));
1050           } else if (node.prev.length == NO_KEY.length) {
1051             System.err.printf("TERMINATING NODE %s \n", Bytes.toStringBinary(node.key));
1052           }
1053         }
1054       }
1055 
1056       table.close();
1057       return 0;
1058     }
1059 
1060     private static CINode findStartNode(HTable table, byte[] startKey) throws IOException {
1061       Scan scan = new Scan();
1062       scan.setStartRow(startKey);
1063       scan.setBatch(1);
1064       scan.addColumn(FAMILY_NAME, COLUMN_PREV);
1065 
1066       long t1 = System.currentTimeMillis();
1067       ResultScanner scanner = table.getScanner(scan);
1068       Result result = scanner.next();
1069       long t2 = System.currentTimeMillis();
1070       scanner.close();
1071 
1072       if (result != null) {
1073         CINode node = getCINode(result, new CINode());
1074         System.out.printf("FSR %d %s\n", t2 - t1, Bytes.toStringBinary(node.key));
1075         return node;
1076       }
1077 
1078       System.out.println("FSR " + (t2 - t1));
1079 
1080       return null;
1081     }
1082 
1083     private CINode getNode(byte[] row, HTable table, CINode node) throws IOException {
1084       Get get = new Get(row);
1085       get.addColumn(FAMILY_NAME, COLUMN_PREV);
1086       Result result = table.get(get);
1087       return getCINode(result, node);
1088     }
1089   }
1090 
1091   private static class Clean extends Configured implements Tool {
1092 
1093     @Override public int run(String[] args) throws Exception {
1094       if (args.length < 1) {
1095         System.err.println("Usage: Clean <output dir>");
1096         return -1;
1097       }
1098 
1099       Path p = new Path(args[0]);
1100       Configuration conf = getConf();
1101       TableName tableName = getTableName(conf);
1102 
1103       FileSystem fs = HFileSystem.get(conf);
1104       HBaseAdmin admin = new HBaseAdmin(conf);
1105       try {
1106         if (admin.tableExists(tableName)) {
1107           admin.disableTable(tableName);
1108           admin.deleteTable(tableName);
1109         }
1110       } finally {
1111         admin.close();
1112       }
1113 
1114       if (fs.exists(p)) {
1115         fs.delete(p, true);
1116       }
1117 
1118       return 0;
1119     }
1120   }
1121 
1122   static TableName getTableName(Configuration conf) {
1123     return TableName.valueOf(conf.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME));
1124   }
1125 
1126   private static CINode getCINode(Result result, CINode node) {
1127     node.key = Bytes.copy(result.getRow());
1128     if (result.containsColumn(FAMILY_NAME, COLUMN_PREV)) {
1129       node.prev = Bytes.copy(result.getValue(FAMILY_NAME, COLUMN_PREV));
1130     } else {
1131       node.prev = NO_KEY;
1132     }
1133     if (result.containsColumn(FAMILY_NAME, COLUMN_COUNT)) {
1134       node.count = Bytes.toLong(result.getValue(FAMILY_NAME, COLUMN_COUNT));
1135     } else {
1136       node.count = -1;
1137     }
1138     if (result.containsColumn(FAMILY_NAME, COLUMN_CLIENT)) {
1139       node.client = Bytes.toString(result.getValue(FAMILY_NAME, COLUMN_CLIENT));
1140     } else {
1141       node.client = "";
1142     }
1143     return node;
1144   }
1145 
1146   protected IntegrationTestingUtility util;
1147 
1148   @Override
1149   public void setUpCluster() throws Exception {
1150     util = getTestingUtil(getConf());
1151     boolean isDistributed = util.isDistributedCluster();
1152     util.initializeCluster(isDistributed ? 1 : this.NUM_SLAVES_BASE);
1153     if (!isDistributed) {
1154       util.startMiniMapReduceCluster();
1155     }
1156     this.setConf(util.getConfiguration());
1157   }
1158 
1159   @Override
1160   public void cleanUpCluster() throws Exception {
1161     super.cleanUpCluster();
1162     if (!util.isDistributedCluster()) {
1163       util.shutdownMiniMapReduceCluster();
1164     }
1165   }
1166 
1167   @Test
1168   public void testContinuousIngest() throws Exception {
1169     //Loop <num iterations> <num mappers> <num nodes per mapper> <output dir> <num reducers>
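         // Roughly the same as this command-line invocation (the output path below is hypothetical):
         //   hbase org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList Loop 1 1 2000000 /tmp/ITBLL 1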
1170     int ret = ToolRunner.run(getTestingUtil(getConf()).getConfiguration(), new Loop(),
1171         new String[] {"1", "1", "2000000",
1172                      util.getDataTestDirOnTestFS("IntegrationTestBigLinkedList").toString(), "1"});
1173     org.junit.Assert.assertEquals(0, ret);
1174   }
1175 
1176   private void usage() {
1177     System.err.println("Usage: " + this.getClass().getSimpleName() + " COMMAND [COMMAND options]");
1178     printCommands();
1179   }
1180 
1181   private void printCommands() {
1182     System.err.println("Commands:");
1183     System.err.println(" Generator  Map only job that generates data.");
1184     System.err.println(" Verify     A map reduce job that looks for holes. Check return code and");
1185     System.err.println("            look at the counts after running. See REFERENCED and");
1186     System.err.println("            UNREFERENCED are ok. Any UNDEFINED counts are bad. Do not run");
1187     System.err.println("            with the Generator.");
1188     System.err.println(" Walker     " +
1189       "Standalong program that starts following a linked list & emits timing info.");
1190     System.err.println(" Print      Standalone program that prints nodes in the linked list.");
1191     System.err.println(" Delete     Standalone program that deletes a·single node.");
1192     System.err.println(" Loop       Program to Loop through Generator and Verify steps");
1193     System.err.println(" Clean      Program to clean all left over detritus.");
1194     System.err.flush();
1195   }
1196 
1197   @Override
1198   protected void processOptions(CommandLine cmd) {
1199     super.processOptions(cmd);
1200     String[] args = cmd.getArgs();
1201     //get the class, run with the conf
1202     if (args.length < 1) {
1203       printUsage(this.getClass().getSimpleName() +
1204         " <general options> COMMAND [<COMMAND options>]", "General options:", "");
1205       printCommands();
1206       throw new RuntimeException("Incorrect Number of args.");
1207     }
1208     toRun = args[0];
1209     otherArgs = Arrays.copyOfRange(args, 1, args.length);
1210   }
1211 
1212   @Override
1213   public int runTestFromCommandLine() throws Exception {
1214 
1215     Tool tool = null;
1216     if (toRun.equals("Generator")) {
1217       tool = new Generator();
1218     } else if (toRun.equalsIgnoreCase("Verify")) {
1219       tool = new Verify();
1220     } else if (toRun.equalsIgnoreCase("Loop")) {
1221       Loop loop = new Loop();
1222       loop.it = this;
1223       tool = loop;
1224     } else if (toRun.equalsIgnoreCase("Walker")) {
1225       tool = new Walker();
1226     } else if (toRun.equalsIgnoreCase("Print")) {
1227       tool = new Print();
1228     } else if (toRun.equalsIgnoreCase("Delete")) {
1229       tool = new Delete();
1230     } else if (toRun.equalsIgnoreCase("Clean")) {
1231       tool = new Clean();
1232     } else {
1233       usage();
1234       throw new RuntimeException("Unknown arg");
1235     }
1236 
1237     return ToolRunner.run(getConf(), tool, otherArgs);
1238   }
1239 
1240   @Override
1241   public String getTablename() {
1242     Configuration c = getConf();
1243     return c.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME);
1244   }
1245 
1246   @Override
1247   protected Set<String> getColumnFamilies() {
1248     return Sets.newHashSet(Bytes.toString(FAMILY_NAME));
1249   }
1250 
1251   private static void setJobConf(Job job, int numMappers, long numNodes,
1252       Integer width, Integer wrapMultiplier) {
1253     job.getConfiguration().setInt(GENERATOR_NUM_MAPPERS_KEY, numMappers);
1254     job.getConfiguration().setLong(GENERATOR_NUM_ROWS_PER_MAP_KEY, numNodes);
1255     if (width != null) {
1256       job.getConfiguration().setInt(GENERATOR_WIDTH_KEY, width);
1257     }
1258     if (wrapMultiplier != null) {
1259       job.getConfiguration().setInt(GENERATOR_WRAP_KEY, wrapMultiplier);
1260     }
1261   }
1262 
1263   public static void setJobScannerConf(Job job) {
1264     // Make sure scanners log something useful to make debugging possible.
1265     job.getConfiguration().setBoolean(ScannerCallable.LOG_SCANNER_ACTIVITY, true);
1266     job.getConfiguration().setInt(TableRecordReaderImpl.LOG_PER_ROW_COUNT, 100000);
1267   }
1268 
1269   public static void main(String[] args) throws Exception {
1270     Configuration conf = HBaseConfiguration.create();
1271     IntegrationTestingUtility.setUseDistributedCluster(conf);
1272     int ret = ToolRunner.run(conf, new IntegrationTestBigLinkedList(), args);
1273     System.exit(ret);
1274   }
1275 }