/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadPoolExecutor;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.exceptions.OperationConflictException;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestMultiParallel {
  private static final Log LOG = LogFactory.getLog(TestMultiParallel.class);

  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private static final byte[] VALUE = Bytes.toBytes("value");
  private static final byte[] QUALIFIER = Bytes.toBytes("qual");
  private static final String FAMILY = "family";
  private static final TableName TEST_TABLE = TableName.valueOf("multi_test_table");
  private static final byte[] BYTES_FAMILY = Bytes.toBytes(FAMILY);
  private static final byte[] ONE_ROW = Bytes.toBytes("xxx");
  private static final byte[][] KEYS = makeKeys();

  private static final int slaves = 5; // also used for testing HTable pool size
  private static Connection CONNECTION;

  @BeforeClass public static void beforeClass() throws Exception {
    // Uncomment the following lines if more verbosity is needed for
    // debugging (see HBASE-12285 for details).
    //((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
    //((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
    //((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
    UTIL.startMiniCluster(slaves);
    HTable t = UTIL.createTable(TEST_TABLE, Bytes.toBytes(FAMILY));
    UTIL.createMultiRegions(t, Bytes.toBytes(FAMILY));
    UTIL.waitTableEnabled(TEST_TABLE);
    t.close();
    CONNECTION = ConnectionFactory.createConnection(UTIL.getConfiguration());
  }

  @AfterClass public static void afterClass() throws Exception {
    CONNECTION.close();
    UTIL.shutdownMiniCluster();
  }

  @Before public void before() throws Exception {
    LOG.info("before");
    if (UTIL.ensureSomeRegionServersAvailable(slaves)) {
      // Distribute regions
      UTIL.getMiniHBaseCluster().getMaster().balance();

      // Wait until the balancing completes
      UTIL.waitFor(15 * 1000, UTIL.predicateNoRegionsInTransition());
    }
    LOG.info("before done");
  }

  private static byte[][] makeKeys() {
    byte[][] starterKeys = HBaseTestingUtility.KEYS;
    // Create a "non-uniform" test set with the following characteristics:
    // a) Unequal number of keys per region

    // Use a non-integer multiplier so that the number of keys is not a
    // multiple of the number of regions
    int numKeys = (int) ((float) starterKeys.length * 10.33F);

    List<byte[]> keys = new ArrayList<byte[]>();
    for (int i = 0; i < numKeys; i++) {
      int kIdx = i % starterKeys.length;
      byte[] k = starterKeys[kIdx];
      byte[] cp = new byte[k.length + 1];
      System.arraycopy(k, 0, cp, 0, k.length);
      cp[k.length] = (byte) (i % 256);
      keys.add(cp);
    }

    // b) Some duplicate keys (showing multiple Gets/Puts to the same row, which
    // should work)
    // c) keys are not in sorted order (within a region), to ensure that the
    // sorting code and index mapping don't break the functionality
    for (int i = 0; i < 100; i++) {
      int kIdx = i % starterKeys.length;
      byte[] k = starterKeys[kIdx];
      byte[] cp = new byte[k.length + 1];
      System.arraycopy(k, 0, cp, 0, k.length);
      cp[k.length] = (byte) (i % 256);
      keys.add(cp);
    }
    return keys.toArray(new byte[0][]);
  }

  /**
   * Tests the number of threads that are active while a batch operation runs.
   * It inserts one row per region via the batch operation, and then checks
   * the number of active threads.
   * For HBASE-3553.
   * @throws Exception
   */
  @Test(timeout=300000)
  public void testActiveThreadsCount() throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration())) {
      ThreadPoolExecutor executor = HTable.getDefaultExecutor(UTIL.getConfiguration());
      try {
        try (Table t = connection.getTable(TEST_TABLE, executor)) {
          List<Put> puts = constructPutRequests(); // creates a Put for every region
          t.batch(puts);
          HashSet<ServerName> regionservers = new HashSet<ServerName>();
          try (RegionLocator locator = connection.getRegionLocator(TEST_TABLE)) {
            for (Row r : puts) {
              HRegionLocation location = locator.getRegionLocation(r.getRow());
              regionservers.add(location.getServerName());
            }
          }
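          // Each distinct regionserver touched by the batch should have been served by
          // its own pool thread, so the pool's high-water mark should equal that count.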
          assertEquals(regionservers.size(), executor.getLargestPoolSize());
        }
      } finally {
        executor.shutdownNow();
      }
    }
  }

  @Test(timeout=300000)
  public void testBatchWithGet() throws Exception {
    LOG.info("test=testBatchWithGet");
    Table table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // load test data
    List<Put> puts = constructPutRequests();
    table.batch(puts);

    // create a list of gets and run it
    List<Row> gets = new ArrayList<Row>();
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      gets.add(get);
    }
    Result[] multiRes = new Result[gets.size()];
    table.batch(gets, multiRes);

    // Same gets using the individual call API
    List<Result> singleRes = new ArrayList<Result>();
    for (Row get : gets) {
      singleRes.add(table.get((Get) get));
    }
    // Compare results
    Assert.assertEquals(singleRes.size(), multiRes.length);
    for (int i = 0; i < singleRes.size(); i++) {
      Assert.assertTrue(singleRes.get(i).containsColumn(BYTES_FAMILY, QUALIFIER));
      Cell[] singleKvs = singleRes.get(i).rawCells();
      Cell[] multiKvs = multiRes[i].rawCells();
      for (int j = 0; j < singleKvs.length; j++) {
        Assert.assertEquals(singleKvs[j], multiKvs[j]);
        Assert.assertEquals(0, Bytes.compareTo(CellUtil.cloneValue(singleKvs[j]),
            CellUtil.cloneValue(multiKvs[j])));
      }
    }
    table.close();
  }

  @Test
  public void testBadFam() throws Exception {
    LOG.info("test=testBadFam");
    Table table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    List<Row> actions = new ArrayList<Row>();
    Put p = new Put(Bytes.toBytes("row1"));
    p.add(Bytes.toBytes("bad_family"), Bytes.toBytes("qual"), Bytes.toBytes("value"));
    actions.add(p);
    p = new Put(Bytes.toBytes("row2"));
    p.add(BYTES_FAMILY, Bytes.toBytes("qual"), Bytes.toBytes("value"));
    actions.add(p);

    // row1 and row2 should be in the same region.

    Object[] r = new Object[actions.size()];
    try {
      table.batch(actions, r);
      fail();
    } catch (RetriesExhaustedWithDetailsException ex) {
      LOG.debug(ex);
      // good!
      assertFalse(ex.mayHaveClusterIssues());
    }
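    // The put against the bad family leaves a Throwable in its result slot, while the
    // valid put still comes back as a normal Result.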
    assertEquals(2, r.length);
    assertTrue(r[0] instanceof Throwable);
    assertTrue(r[1] instanceof Result);
    table.close();
  }

  @Test (timeout=300000)
  public void testFlushCommitsNoAbort() throws Exception {
    LOG.info("test=testFlushCommitsNoAbort");
    doTestFlushCommits(false);
  }

  /**
   * Only run one Multi test with a forced RegionServer abort. Otherwise, the
   * unit tests will take an unnecessarily long time to run.
   *
   * @throws Exception
   */
  @Test (timeout=360000)
  public void testFlushCommitsWithAbort() throws Exception {
    LOG.info("test=testFlushCommitsWithAbort");
    doTestFlushCommits(true);
  }

  /**
   * Load data with a large write buffer and validate that all puts are committed,
   * optionally aborting a regionserver mid-test.
   * @param doAbort true to abort one regionserver during the test
   * @throws Exception
   */
  private void doTestFlushCommits(boolean doAbort) throws Exception {
    // Load the data
    LOG.info("get new table");
    Table table = UTIL.getConnection().getTable(TEST_TABLE);
    table.setWriteBufferSize(10 * 1024 * 1024);

    LOG.info("constructPutRequests");
    List<Put> puts = constructPutRequests();
    table.put(puts);
    LOG.info("puts");
    final int liveRScount = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads()
        .size();
    assert liveRScount > 0;
    JVMClusterUtil.RegionServerThread liveRS = UTIL.getMiniHBaseCluster()
        .getLiveRegionServerThreads().get(0);
    if (doAbort) {
      liveRS.getRegionServer().abort("Aborting for tests",
          new Exception("doTestFlushCommits"));
      // Waiting until the aborted server has no regions online ensures that the
      // master has detected the server's death and re-assigned the regions that
      // were written successfully above.
      while (liveRS.getRegionServer().getNumberOfOnlineRegions() != 0) {
        Thread.sleep(100);
      }
      // Try putting more keys after the abort. Same key/qual... just validating
      // that no exceptions are thrown.
      puts = constructPutRequests();
      table.put(puts);
    }

    LOG.info("validating loaded data");
    validateLoadedData(table);

    // Validate server and region count
    List<JVMClusterUtil.RegionServerThread> liveRSs =
        UTIL.getMiniHBaseCluster().getLiveRegionServerThreads();
    int count = 0;
    for (JVMClusterUtil.RegionServerThread t: liveRSs) {
      count++;
      LOG.info("Count=" + count + ", Alive=" + t.getRegionServer());
    }
    LOG.info("Count=" + count);
    Assert.assertEquals("Server count=" + count + ", abort=" + doAbort,
        (doAbort ? (liveRScount - 1) : liveRScount), count);
    if (doAbort) {
      UTIL.getMiniHBaseCluster().waitOnRegionServer(0);
      UTIL.waitFor(15 * 1000, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          return UTIL.getMiniHBaseCluster().getMaster()
              .getClusterStatus().getServersSize() == (liveRScount - 1);
        }
      });
      UTIL.waitFor(15 * 1000, UTIL.predicateNoRegionsInTransition());
    }

    table.close();
    LOG.info("done");
  }

  @Test (timeout=300000)
  public void testBatchWithPut() throws Exception {
    LOG.info("test=testBatchWithPut");
    Table table = CONNECTION.getTable(TEST_TABLE);
    // put multiple rows using a batch
    List<Put> puts = constructPutRequests();

    Object[] results = table.batch(puts);
    validateSizeAndEmpty(results, KEYS.length);

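    // Kill a regionserver, then redo the puts so the client has to re-locate the
    // moved regions and retry.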
    int liveRScount = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size();
    assert liveRScount > 0;
    JVMClusterUtil.RegionServerThread liveRS =
        UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().get(0);
    liveRS.getRegionServer().abort("Aborting for tests", new Exception("testBatchWithPut"));
    puts = constructPutRequests();
    try {
      results = table.batch(puts);
    } catch (RetriesExhaustedWithDetailsException ree) {
      LOG.info(ree.getExhaustiveDescription());
      table.close();
      throw ree;
    }
    validateSizeAndEmpty(results, KEYS.length);

    validateLoadedData(table);
    table.close();
  }

  @Test(timeout=300000)
  public void testBatchWithDelete() throws Exception {
    LOG.info("test=testBatchWithDelete");
    Table table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // Load some data
    List<Put> puts = constructPutRequests();
    Object[] results = table.batch(puts);
    validateSizeAndEmpty(results, KEYS.length);

    // Deletes
    List<Row> deletes = new ArrayList<Row>();
    for (int i = 0; i < KEYS.length; i++) {
      Delete delete = new Delete(KEYS[i]);
      delete.addFamily(BYTES_FAMILY);
      deletes.add(delete);
    }
    results = table.batch(deletes);
    validateSizeAndEmpty(results, KEYS.length);

    // Get to make sure ...
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      Assert.assertFalse(table.exists(get));
    }
    table.close();
  }

  @Test(timeout=300000)
  public void testHTableDeleteWithList() throws Exception {
    LOG.info("test=testHTableDeleteWithList");
    Table table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // Load some data
    List<Put> puts = constructPutRequests();
    Object[] results = table.batch(puts);
    validateSizeAndEmpty(results, KEYS.length);

    // Deletes
    ArrayList<Delete> deletes = new ArrayList<Delete>();
    for (int i = 0; i < KEYS.length; i++) {
      Delete delete = new Delete(KEYS[i]);
      delete.addFamily(BYTES_FAMILY);
      deletes.add(delete);
    }
    table.delete(deletes);
    Assert.assertTrue(deletes.isEmpty());

    // Get to make sure ...
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      Assert.assertFalse(table.exists(get));
    }
    table.close();
  }

  @Test(timeout=300000)
  public void testBatchWithManyColsInOneRowGetAndPut() throws Exception {
    LOG.info("test=testBatchWithManyColsInOneRowGetAndPut");
    Table table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    List<Row> puts = new ArrayList<Row>();
    for (int i = 0; i < 100; i++) {
      Put put = new Put(ONE_ROW);
      byte[] qual = Bytes.toBytes("column" + i);
      put.add(BYTES_FAMILY, qual, VALUE);
      puts.add(put);
    }
    Object[] results = table.batch(puts);

    // validate
    validateSizeAndEmpty(results, 100);

    // get the data back and validate that it is correct
    List<Row> gets = new ArrayList<Row>();
    for (int i = 0; i < 100; i++) {
      Get get = new Get(ONE_ROW);
      byte[] qual = Bytes.toBytes("column" + i);
      get.addColumn(BYTES_FAMILY, qual);
      gets.add(get);
    }

    Object[] multiRes = table.batch(gets);

    int idx = 0;
    for (Object r : multiRes) {
      byte[] qual = Bytes.toBytes("column" + idx);
      validateResult(r, qual, VALUE);
      idx++;
    }
    table.close();
  }

  @Test(timeout=300000)
  public void testBatchWithIncrementAndAppend() throws Exception {
    LOG.info("test=testBatchWithIncrementAndAppend");
    final byte[] QUAL1 = Bytes.toBytes("qual1");
    final byte[] QUAL2 = Bytes.toBytes("qual2");
    final byte[] QUAL3 = Bytes.toBytes("qual3");
    final byte[] QUAL4 = Bytes.toBytes("qual4");
    Table table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
    Delete d = new Delete(ONE_ROW);
    table.delete(d);
    Put put = new Put(ONE_ROW);
    put.add(BYTES_FAMILY, QUAL1, Bytes.toBytes("abc"));
    put.add(BYTES_FAMILY, QUAL2, Bytes.toBytes(1L));
    table.put(put);

    Increment inc = new Increment(ONE_ROW);
    inc.addColumn(BYTES_FAMILY, QUAL2, 1);
    inc.addColumn(BYTES_FAMILY, QUAL3, 1);

    Append a = new Append(ONE_ROW);
    a.add(BYTES_FAMILY, QUAL1, Bytes.toBytes("def"));
    a.add(BYTES_FAMILY, QUAL4, Bytes.toBytes("xyz"));
    List<Row> actions = new ArrayList<Row>();
    actions.add(inc);
    actions.add(a);

    Object[] multiRes = table.batch(actions);
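    // Results come back in submission order: multiRes[0] holds the Increment result,
    // multiRes[1] the Append result.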
    validateResult(multiRes[1], QUAL1, Bytes.toBytes("abcdef"));
    validateResult(multiRes[1], QUAL4, Bytes.toBytes("xyz"));
    validateResult(multiRes[0], QUAL2, Bytes.toBytes(2L));
    validateResult(multiRes[0], QUAL3, Bytes.toBytes(1L));
    table.close();
  }

  @Test(timeout=300000)
  public void testNonceCollision() throws Exception {
    LOG.info("test=testNonceCollision");
    final Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
    Table table = connection.getTable(TEST_TABLE);
    Put put = new Put(ONE_ROW);
    put.add(BYTES_FAMILY, QUALIFIER, Bytes.toBytes(0L));
    table.put(put); // start the counter at 0 so the expected values below are deterministic

    // Replace the nonce generator with one that returns each nonce twice.
    NonceGenerator cnm = new PerClientRandomNonceGenerator() {
      long lastNonce = -1;
      @Override
      public synchronized long newNonce() {
        long nonce = 0;
        if (lastNonce == -1) {
          lastNonce = nonce = super.newNonce();
        } else {
          nonce = lastNonce;
          lastNonce = -1L;
        }
        return nonce;
      }
    };

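    // Because every nonce is handed out twice, the server-side nonce manager sees the
    // second use of each nonce as a duplicate of an operation it already ran and rejects
    // it with OperationConflictException, so only half of the identical increments apply.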
    NonceGenerator oldCnm =
      ConnectionUtils.injectNonceGeneratorForTesting((ClusterConnection)connection, cnm);

    // First test sequential requests.
    try {
      Increment inc = new Increment(ONE_ROW);
      inc.addColumn(BYTES_FAMILY, QUALIFIER, 1L);
      table.increment(inc);
      inc = new Increment(ONE_ROW);
      inc.addColumn(BYTES_FAMILY, QUALIFIER, 1L);
      try {
        table.increment(inc);
        fail("Should have thrown an exception");
      } catch (OperationConflictException ex) {
        // expected: this increment reuses the previous increment's nonce
      }
      Get get = new Get(ONE_ROW);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      Result result = table.get(get);
      validateResult(result, QUALIFIER, Bytes.toBytes(1L));

      // Now run a bunch of requests in parallel, exactly half should succeed.
      int numRequests = 40;
      final CountDownLatch startedLatch = new CountDownLatch(numRequests);
      final CountDownLatch startLatch = new CountDownLatch(1);
      final CountDownLatch doneLatch = new CountDownLatch(numRequests);
      for (int i = 0; i < numRequests; ++i) {
        Runnable r = new Runnable() {
          @Override
          public void run() {
            Table table = null;
            try {
              table = connection.getTable(TEST_TABLE);
            } catch (IOException e) {
              fail("Not expected");
            }
            Increment inc = new Increment(ONE_ROW);
            inc.addColumn(BYTES_FAMILY, QUALIFIER, 1L);
            startedLatch.countDown();
            try {
              startLatch.await();
            } catch (InterruptedException e) {
              fail("Not expected");
            }
            try {
              table.increment(inc);
            } catch (OperationConflictException ex) { // Some threads are expected to fail.
            } catch (IOException ioEx) {
              fail("Not expected");
            }
            doneLatch.countDown();
          }
        };
        Threads.setDaemonThreadRunning(new Thread(r));
      }
      startedLatch.await(); // Wait until all threads are ready...
      startLatch.countDown(); // ...and unleash the herd!
      doneLatch.await();
      // Now verify
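      // Expect 1 from the successful sequential increment above, plus one per
      // successful parallel increment (numRequests / 2).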
      get = new Get(ONE_ROW);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      result = table.get(get);
      validateResult(result, QUALIFIER, Bytes.toBytes((numRequests / 2) + 1L));
      table.close();
    } finally {
      ConnectionManager.injectNonceGeneratorForTesting((ClusterConnection)connection, oldCnm);
      connection.close();
    }
  }

  @Test(timeout=300000)
  public void testBatchWithMixedActions() throws Exception {
    LOG.info("test=testBatchWithMixedActions");
    Table table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // Load some data to start
    Object[] results = table.batch(constructPutRequests());
    validateSizeAndEmpty(results, KEYS.length);

    // Batch: get, get, put(new col), delete, get, put
    List<Row> actions = new ArrayList<Row>();

    byte[] qual2 = Bytes.toBytes("qual2");
    byte[] val2 = Bytes.toBytes("putvalue2");

    // 0 get
    Get get = new Get(KEYS[10]);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    actions.add(get);

    // 1 get
    get = new Get(KEYS[11]);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    actions.add(get);

    // 2 put of new column
    Put put = new Put(KEYS[10]);
    put.add(BYTES_FAMILY, qual2, val2);
    actions.add(put);

    // 3 delete
    Delete delete = new Delete(KEYS[20]);
    delete.addFamily(BYTES_FAMILY);
    actions.add(delete);

    // 4 get
    get = new Get(KEYS[30]);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    actions.add(get);

    // There used to be a 'get' of a previous put here, but it was removed
    // since this API really cannot guarantee order in terms of mixed
    // get/puts.

    // 5 put of new column
    put = new Put(KEYS[40]);
    put.add(BYTES_FAMILY, qual2, val2);
    actions.add(put);

    results = table.batch(actions);

    // Validation

    validateResult(results[0]);
    validateResult(results[1]);
    validateEmpty(results[2]);
    validateEmpty(results[3]);
    validateResult(results[4]);
    validateEmpty(results[5]);

    // validate the last put, externally from the batch
    get = new Get(KEYS[40]);
    get.addColumn(BYTES_FAMILY, qual2);
    Result r = table.get(get);
    validateResult(r, qual2, val2);

    table.close();
  }

  // Helper methods ////

  private void validateResult(Object r) {
    validateResult(r, QUALIFIER, VALUE);
  }

  private void validateResult(Object r1, byte[] qual, byte[] val) {
    Result r = (Result) r1;
    Assert.assertTrue(r.containsColumn(BYTES_FAMILY, qual));
    byte[] value = r.getValue(BYTES_FAMILY, qual);
    if (0 != Bytes.compareTo(val, value)) {
      fail("Expected [" + Bytes.toStringBinary(val)
          + "] but got [" + Bytes.toStringBinary(value) + "]");
    }
  }

  private List<Put> constructPutRequests() {
    List<Put> puts = new ArrayList<>();
    for (byte[] k : KEYS) {
      Put put = new Put(k);
      put.add(BYTES_FAMILY, QUALIFIER, VALUE);
      puts.add(put);
    }
    return puts;
  }

  private void validateLoadedData(Table table) throws IOException {
    // get the data back and validate that it is correct
    LOG.info("Validating data on " + table);
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      Result r = table.get(get);
      Assert.assertTrue(r.containsColumn(BYTES_FAMILY, QUALIFIER));
      Assert.assertEquals(0, Bytes.compareTo(VALUE, r
          .getValue(BYTES_FAMILY, QUALIFIER)));
    }
  }

  private void validateEmpty(Object r1) {
    Result result = (Result) r1;
    Assert.assertNotNull(result);
    Assert.assertNull(result.getRow());
    Assert.assertEquals(0, result.rawCells().length);
  }

  private void validateSizeAndEmpty(Object[] results, int expectedSize) {
    // Validate that we got back the same number of Result objects, all empty
    Assert.assertEquals(expectedSize, results.length);
    for (Object result : results) {
      validateEmpty(result);
    }
  }
}