/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadPoolExecutor;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestMultiParallel {
  private static final Log LOG = LogFactory.getLog(TestMultiParallel.class);
  {
    ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
  }
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private static final byte[] VALUE = Bytes.toBytes("value");
  private static final byte[] QUALIFIER = Bytes.toBytes("qual");
  private static final String FAMILY = "family";
  private static final String TEST_TABLE = "multi_test_table";
  private static final byte[] BYTES_FAMILY = Bytes.toBytes(FAMILY);
  private static final byte[] ONE_ROW = Bytes.toBytes("xxx");
  private static final byte [][] KEYS = makeKeys();

  private static final int slaves = 2; // also used for testing HTable pool size

  @BeforeClass public static void beforeClass() throws Exception {
    ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
    UTIL.startMiniCluster(slaves);
    HTable t = UTIL.createTable(Bytes.toBytes(TEST_TABLE), Bytes.toBytes(FAMILY));
    UTIL.createMultiRegions(t, Bytes.toBytes(FAMILY));
    UTIL.waitTableEnabled(Bytes.toBytes(TEST_TABLE));
    t.close();
  }

  @AfterClass public static void afterClass() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  @Before public void before() throws Exception {
    LOG.info("before");
    if (UTIL.ensureSomeRegionServersAvailable(slaves)) {
      // Distribute regions
      UTIL.getMiniHBaseCluster().getMaster().balance();

      // Wait until the balance completes
      UTIL.waitFor(15 * 1000, UTIL.predicateNoRegionsInTransition());
    }
    HConnection conn = HConnectionManager.getConnection(UTIL.getConfiguration());
    conn.clearRegionCache();
    conn.close();
    LOG.info("before done");
  }

  private static byte[][] makeKeys() {
    byte [][] starterKeys = HBaseTestingUtility.KEYS;
    // Create a "non-uniform" test set with the following characteristics:
    // a) Unequal number of keys per region

    // Don't use an integer multiple, so that the number of keys is not a
    // multiple of the number of regions
    int numKeys = (int) ((float) starterKeys.length * 10.33F);

    List<byte[]> keys = new ArrayList<byte[]>();
    for (int i = 0; i < numKeys; i++) {
      int kIdx = i % starterKeys.length;
      byte[] k = starterKeys[kIdx];
      byte[] cp = new byte[k.length + 1];
      System.arraycopy(k, 0, cp, 0, k.length);
      cp[k.length] = (byte) (i % 256);
      keys.add(cp);
    }

    // b) Some duplicate keys (multiple Gets/Puts to the same row should work)
    // c) Keys are not in sorted order (within a region), to ensure that the
    // sorting code and index mapping don't break the functionality
    for (int i = 0; i < 100; i++) {
      int kIdx = i % starterKeys.length;
      byte[] k = starterKeys[kIdx];
      byte[] cp = new byte[k.length + 1];
      System.arraycopy(k, 0, cp, 0, k.length);
      cp[k.length] = (byte) (i % 256);
      keys.add(cp);
    }
    return keys.toArray(new byte [][] {new byte [] {}});
  }


  /**
   * Tests the number of threads used by a batch operation. It inserts one row
   * per region via the batch operation, and then checks the largest pool size
   * reached by the HTable's thread pool.
   * For HBASE-3553
   * @throws IOException
   * @throws InterruptedException
   * @throws NoSuchFieldException
   * @throws SecurityException
   */
  @Test(timeout=300000)
  public void testActiveThreadsCount() throws Exception {
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
    List<Row> puts = constructPutRequests(); // creates a Put for every region
    table.batch(puts);
    Field poolField = table.getClass().getDeclaredField("pool");
    poolField.setAccessible(true);
    ThreadPoolExecutor tExecutor = (ThreadPoolExecutor) poolField.get(table);
    assertEquals(slaves, tExecutor.getLargestPoolSize());
    table.close();
  }

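  /**
   * Loads data with a batch of Puts, then issues the same Gets both through a
   * batch call and through individual get() calls, and verifies that the two
   * sets of results match.
   */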
  @Test(timeout=300000)
  public void testBatchWithGet() throws Exception {
    LOG.info("test=testBatchWithGet");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // load test data
    List<Row> puts = constructPutRequests();
    table.batch(puts);

    // create a list of gets and run it
    List<Row> gets = new ArrayList<Row>();
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      gets.add(get);
    }
    Result[] multiRes = new Result[gets.size()];
    table.batch(gets, multiRes);

    // Same gets using individual call API
    List<Result> singleRes = new ArrayList<Result>();
    for (Row get : gets) {
      singleRes.add(table.get((Get) get));
    }

    // Compare results
    Assert.assertEquals(singleRes.size(), multiRes.length);
    for (int i = 0; i < singleRes.size(); i++) {
      Assert.assertTrue(singleRes.get(i).containsColumn(BYTES_FAMILY, QUALIFIER));
      KeyValue[] singleKvs = singleRes.get(i).raw();
      KeyValue[] multiKvs = multiRes[i].raw();
      for (int j = 0; j < singleKvs.length; j++) {
        Assert.assertEquals(singleKvs[j], multiKvs[j]);
        Assert.assertEquals(0, Bytes.compareTo(singleKvs[j].getValue(), multiKvs[j]
            .getValue()));
      }
    }
    table.close();
  }

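  /**
   * Batches a Put against a nonexistent column family together with a valid
   * Put. Expects a RetriesExhaustedWithDetailsException, with a Throwable in
   * the result slot of the bad Put and a Result in the slot of the good one.
   */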
  @Test
  public void testBadFam() throws Exception {
    LOG.info("test=testBadFam");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    List<Row> actions = new ArrayList<Row>();
    Put p = new Put(Bytes.toBytes("row1"));
    p.add(Bytes.toBytes("bad_family"), Bytes.toBytes("qual"), Bytes.toBytes("value"));
    actions.add(p);
    p = new Put(Bytes.toBytes("row2"));
    p.add(BYTES_FAMILY, Bytes.toBytes("qual"), Bytes.toBytes("value"));
    actions.add(p);

    // row1 and row2 should be in the same region.

    Object [] r = new Object[actions.size()];
    try {
      table.batch(actions, r);
      fail();
    } catch (RetriesExhaustedWithDetailsException ex) {
      LOG.debug(ex);
      // good!
      assertFalse(ex.mayHaveClusterIssues());
    }
    assertEquals(2, r.length);
    assertTrue(r[0] instanceof Throwable);
    assertTrue(r[1] instanceof Result);
    table.close();
  }

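  /**
   * Runs the flush-commits scenario without aborting a region server.
   */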
  @Test (timeout=300000)
  public void testFlushCommitsNoAbort() throws Exception {
    LOG.info("test=testFlushCommitsNoAbort");
    doTestFlushCommits(false);
  }

  /**
   * Only run one Multi test with a forced RegionServer abort. Otherwise, the
   * unit tests will take an unnecessarily long time to run.
   *
   * @throws Exception
   */
  @Test (timeout=300000)
  public void testFlushCommitsWithAbort() throws Exception {
    LOG.info("test=testFlushCommitsWithAbort");
    doTestFlushCommits(true);
  }

  /**
   * Sets table auto flush to false and tests flushing commits.
   * @param doAbort true to abort one region server during the test
   * @throws Exception
   */
  private void doTestFlushCommits(boolean doAbort) throws Exception {
    // Load the data
    LOG.info("get new table");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
    table.setAutoFlush(false);
    table.setWriteBufferSize(10 * 1024 * 1024);

    LOG.info("constructPutRequests");
    List<Row> puts = constructPutRequests();
    for (Row put : puts) {
      table.put((Put) put);
    }
    LOG.info("puts");
    table.flushCommits();
    final int liveRScount = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads()
        .size();
    assert liveRScount > 0;
    JVMClusterUtil.RegionServerThread liveRS = UTIL.getMiniHBaseCluster()
        .getLiveRegionServerThreads().get(0);
    if (doAbort) {
      liveRS.getRegionServer().abort("Aborting for tests",
          new Exception("doTestFlushCommits"));
      // Waiting until the aborted server hosts no more regions ensures that the
      // master has detected the dead server and re-assigned the regions that
      // were written successfully.
      while (liveRS.getRegionServer().getNumberOfOnlineRegions() != 0) {
        Thread.sleep(10);
      }
      // Try putting more keys after the abort (same key/qual), just validating
      // that no exceptions are thrown.
      puts = constructPutRequests();
      for (Row put : puts) {
        table.put((Put) put);
      }

      table.flushCommits();
    }

    LOG.info("validating loaded data");
    validateLoadedData(table);

    // Validate server and region count
    List<JVMClusterUtil.RegionServerThread> liveRSs = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads();
    int count = 0;
    for (JVMClusterUtil.RegionServerThread t: liveRSs) {
      count++;
      LOG.info("Count=" + count + ", Alive=" + t.getRegionServer());
    }
    LOG.info("Count=" + count);
    Assert.assertEquals("Server count=" + count + ", abort=" + doAbort,
        (doAbort ? (liveRScount - 1) : liveRScount), count);
    for (JVMClusterUtil.RegionServerThread t: liveRSs) {
      int regions = ProtobufUtil.getOnlineRegions(t.getRegionServer()).size();
      // Assert.assertTrue("Count of regions=" + regions, regions > 10);
    }
    if (doAbort) {
      UTIL.getMiniHBaseCluster().waitOnRegionServer(0);
      UTIL.waitFor(15 * 1000, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          return UTIL.getMiniHBaseCluster().getMaster()
              .getClusterStatus().getServersSize() == (liveRScount - 1);
        }
      });
      UTIL.waitFor(15 * 1000, UTIL.predicateNoRegionsInTransition());
    }

    table.close();
    LOG.info("done");
  }

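  /**
   * Puts all KEYS with one batch call, aborts a region server, then puts the
   * same keys again to verify that the batch succeeds against the re-assigned
   * regions.
   */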
  @Test (timeout=300000)
  public void testBatchWithPut() throws Exception {
    LOG.info("test=testBatchWithPut");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // put multiple rows using a batch
    List<Row> puts = constructPutRequests();

    Object[] results = table.batch(puts);
    validateSizeAndEmpty(results, KEYS.length);

    if (true) {
      int liveRScount = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads()
          .size();
      assert liveRScount > 0;
      JVMClusterUtil.RegionServerThread liveRS = UTIL.getMiniHBaseCluster()
          .getLiveRegionServerThreads().get(0);
      liveRS.getRegionServer().abort("Aborting for tests",
          new Exception("testBatchWithPut"));

      puts = constructPutRequests();
      results = table.batch(puts);
      validateSizeAndEmpty(results, KEYS.length);
    }

    validateLoadedData(table);
    table.close();
  }

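  /**
   * Loads data with a batch of Puts, deletes the family for every key with a
   * batch of Deletes, and then verifies that none of the rows still exist.
   */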
  @Test(timeout=300000)
  public void testBatchWithDelete() throws Exception {
    LOG.info("test=testBatchWithDelete");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // Load some data
    List<Row> puts = constructPutRequests();
    Object[] results = table.batch(puts);
    validateSizeAndEmpty(results, KEYS.length);

    // Deletes
    List<Row> deletes = new ArrayList<Row>();
    for (int i = 0; i < KEYS.length; i++) {
      Delete delete = new Delete(KEYS[i]);
      delete.deleteFamily(BYTES_FAMILY);
      deletes.add(delete);
    }
    results = table.batch(deletes);
    validateSizeAndEmpty(results, KEYS.length);

    // Get to make sure the rows are gone
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      Assert.assertFalse(table.exists(get));
    }
    table.close();
  }

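  /**
   * Same as testBatchWithDelete, but issues the deletes through
   * HTable.delete(List), which also empties the list on success.
   */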
  @Test(timeout=300000)
  public void testHTableDeleteWithList() throws Exception {
    LOG.info("test=testHTableDeleteWithList");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // Load some data
    List<Row> puts = constructPutRequests();
    Object[] results = table.batch(puts);
    validateSizeAndEmpty(results, KEYS.length);

    // Deletes
    ArrayList<Delete> deletes = new ArrayList<Delete>();
    for (int i = 0; i < KEYS.length; i++) {
      Delete delete = new Delete(KEYS[i]);
      delete.deleteFamily(BYTES_FAMILY);
      deletes.add(delete);
    }
    table.delete(deletes);
    Assert.assertTrue(deletes.isEmpty());

    // Get to make sure the rows are gone
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      Assert.assertFalse(table.exists(get));
    }
    table.close();
  }

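  /**
   * Puts 100 columns into a single row with one batch, then reads them back
   * with a batch of Gets and validates each value.
   */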
  @Test(timeout=300000)
  public void testBatchWithManyColsInOneRowGetAndPut() throws Exception {
    LOG.info("test=testBatchWithManyColsInOneRowGetAndPut");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    List<Row> puts = new ArrayList<Row>();
    for (int i = 0; i < 100; i++) {
      Put put = new Put(ONE_ROW);
      byte[] qual = Bytes.toBytes("column" + i);
      put.add(BYTES_FAMILY, qual, VALUE);
      puts.add(put);
    }
    Object[] results = table.batch(puts);

    // validate
    validateSizeAndEmpty(results, 100);

    // get the data back and validate that it is correct
    List<Row> gets = new ArrayList<Row>();
    for (int i = 0; i < 100; i++) {
      Get get = new Get(ONE_ROW);
      byte[] qual = Bytes.toBytes("column" + i);
      get.addColumn(BYTES_FAMILY, qual);
      gets.add(get);
    }

    Object[] multiRes = table.batch(gets);

    int idx = 0;
    for (Object r : multiRes) {
      byte[] qual = Bytes.toBytes("column" + idx);
      validateResult(r, qual, VALUE);
      idx++;
    }
    table.close();
  }

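  /**
   * Runs an Increment and an Append against the same row in one batch and
   * checks the resulting counter and appended values.
   */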
  @Test(timeout=300000)
  public void testBatchWithIncrementAndAppend() throws Exception {
    LOG.info("test=testBatchWithIncrementAndAppend");
    final byte[] QUAL1 = Bytes.toBytes("qual1");
    final byte[] QUAL2 = Bytes.toBytes("qual2");
    final byte[] QUAL3 = Bytes.toBytes("qual3");
    final byte[] QUAL4 = Bytes.toBytes("qual4");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
    Delete d = new Delete(ONE_ROW);
    table.delete(d);
    Put put = new Put(ONE_ROW);
    put.add(BYTES_FAMILY, QUAL1, Bytes.toBytes("abc"));
    put.add(BYTES_FAMILY, QUAL2, Bytes.toBytes(1L));
    table.put(put);

    Increment inc = new Increment(ONE_ROW);
    inc.addColumn(BYTES_FAMILY, QUAL2, 1);
    inc.addColumn(BYTES_FAMILY, QUAL3, 1);

    Append a = new Append(ONE_ROW);
    a.add(BYTES_FAMILY, QUAL1, Bytes.toBytes("def"));
    a.add(BYTES_FAMILY, QUAL4, Bytes.toBytes("xyz"));
    List<Row> actions = new ArrayList<Row>();
    actions.add(inc);
    actions.add(a);

    Object[] multiRes = table.batch(actions);
    validateResult(multiRes[1], QUAL1, Bytes.toBytes("abcdef"));
    validateResult(multiRes[1], QUAL4, Bytes.toBytes("xyz"));
    validateResult(multiRes[0], QUAL2, Bytes.toBytes(2L));
    validateResult(multiRes[0], QUAL3, Bytes.toBytes(1L));
    table.close();
  }

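  /**
   * Mixes Gets, Puts, and a Delete in a single batch and validates the result
   * slot for each action.
   */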
  @Test(timeout=300000)
  public void testBatchWithMixedActions() throws Exception {
    LOG.info("test=testBatchWithMixedActions");
    HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // Load some data to start
    Object[] results = table.batch(constructPutRequests());
    validateSizeAndEmpty(results, KEYS.length);

    // Batch: get, get, put(new col), delete, get, get of put, get of deleted,
    // put
    List<Row> actions = new ArrayList<Row>();

    byte[] qual2 = Bytes.toBytes("qual2");
    byte[] val2 = Bytes.toBytes("putvalue2");

    // 0 get
    Get get = new Get(KEYS[10]);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    actions.add(get);

    // 1 get
    get = new Get(KEYS[11]);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    actions.add(get);

    // 2 put of new column
    Put put = new Put(KEYS[10]);
    put.add(BYTES_FAMILY, qual2, val2);
    actions.add(put);

    // 3 delete
    Delete delete = new Delete(KEYS[20]);
    delete.deleteFamily(BYTES_FAMILY);
    actions.add(delete);

    // 4 get
    get = new Get(KEYS[30]);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    actions.add(get);

    // There used to be a 'get' of a previous put here, but it was removed
    // since this API cannot guarantee ordering among mixed gets and puts.

    // 5 put of new column
    put = new Put(KEYS[40]);
    put.add(BYTES_FAMILY, qual2, val2);
    actions.add(put);

    results = table.batch(actions);

    // Validation

    validateResult(results[0]);
    validateResult(results[1]);
    validateEmpty(results[2]);
    validateEmpty(results[3]);
    validateResult(results[4]);
    validateEmpty(results[5]);

    // validate last put, externally from the batch
    get = new Get(KEYS[40]);
    get.addColumn(BYTES_FAMILY, qual2);
    Result r = table.get(get);
    validateResult(r, qual2, val2);

    table.close();
  }

  //// Helper methods ////

  private void validateResult(Object r) {
    validateResult(r, QUALIFIER, VALUE);
  }

  private void validateResult(Object r1, byte[] qual, byte[] val) {
    // TODO provide nice assert here or something.
    Result r = (Result)r1;
    Assert.assertTrue(r.containsColumn(BYTES_FAMILY, qual));
    Assert.assertEquals(0, Bytes.compareTo(val, r.getValue(BYTES_FAMILY, qual)));
  }

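  /** Builds one Put per entry in KEYS, writing VALUE into FAMILY:QUALIFIER. */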
  private List<Row> constructPutRequests() {
    List<Row> puts = new ArrayList<Row>();
    for (byte[] k : KEYS) {
      Put put = new Put(k);
      put.add(BYTES_FAMILY, QUALIFIER, VALUE);
      puts.add(put);
    }
    return puts;
  }

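  /** Verifies that every key in KEYS holds VALUE under FAMILY:QUALIFIER. */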
  private void validateLoadedData(HTable table) throws IOException {
    // get the data back and validate that it is correct
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      Result r = table.get(get);
      Assert.assertTrue(r.containsColumn(BYTES_FAMILY, QUALIFIER));
      Assert.assertEquals(0, Bytes.compareTo(VALUE, r
          .getValue(BYTES_FAMILY, QUALIFIER)));
    }
  }

  private void validateEmpty(Object r1) {
    Result result = (Result)r1;
    Assert.assertTrue(result != null);
    Assert.assertTrue(result.getRow() == null);
    Assert.assertEquals(0, result.raw().length);
  }

  private void validateSizeAndEmpty(Object[] results, int expectedSize) {
    // Validate that we got back the same number of Result objects, all empty
    Assert.assertEquals(expectedSize, results.length);
    for (Object result : results) {
      validateEmpty(result);
    }
  }

}