/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MultithreadedTestUtil;
import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

/**
 * Testing of HRegion.incrementColumnValue, HRegion.increment,
 * and HRegion.append
 */
@Category(MediumTests.class) // Starts 100 threads
public class TestAtomicOperation {
  static final Log LOG = LogFactory.getLog(TestAtomicOperation.class);
  @Rule public TestName name = new TestName();

  HRegion region = null;
  private HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();

  // Test names
  static byte[] tableName;
  static final byte[] qual1 = Bytes.toBytes("qual1");
  static final byte[] qual2 = Bytes.toBytes("qual2");
  static final byte[] qual3 = Bytes.toBytes("qual3");
  static final byte[] value1 = Bytes.toBytes("value1");
  static final byte[] value2 = Bytes.toBytes("value2");
  static final byte[] row = Bytes.toBytes("rowA");
  static final byte[] row2 = Bytes.toBytes("rowB");

  @Before
  public void setup() {
    tableName = Bytes.toBytes(name.getMethodName());
  }

  //////////////////////////////////////////////////////////////////////////////
  // New tests that don't spin up a mini cluster but rather just test the
  // individual code pieces in the HRegion.
  //////////////////////////////////////////////////////////////////////////////

  /**
   * Test basic append operation.
   * More tests in
   * @see org.apache.hadoop.hbase.client.TestFromClientSide#testAppend()
   */
  @Test
  public void testAppend() throws IOException {
    initHRegion(tableName, name.getMethodName(), fam1);
    String v1 = "Ultimate Answer to the Ultimate Question of Life," +
        " The Universe, and Everything";
    String v2 = " is... 42.";
    Append a = new Append(row);
    a.setReturnResults(false);
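    // Because results are not returned for this append, region.append(a) below is
    // expected to return null (asserted with assertNull); the second append, which
    // does return results, is then used to verify the concatenated values.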
    a.add(fam1, qual1, Bytes.toBytes(v1));
    a.add(fam1, qual2, Bytes.toBytes(v2));
    assertNull(region.append(a));
    a = new Append(row);
    a.add(fam1, qual1, Bytes.toBytes(v2));
    a.add(fam1, qual2, Bytes.toBytes(v1));
    Result result = region.append(a);
    assertEquals(0, Bytes.compareTo(Bytes.toBytes(v1 + v2), result.getValue(fam1, qual1)));
    assertEquals(0, Bytes.compareTo(Bytes.toBytes(v2 + v1), result.getValue(fam1, qual2)));
  }

  /**
   * Test multi-threaded increments.
   */
  @Test
  public void testIncrementMultiThreads() throws IOException {

    LOG.info("Starting test testIncrementMultiThreads");
    // run with mixed column families (1 and 3 versions)
    initHRegion(tableName, name.getMethodName(), new int[] {1,3}, fam1, fam2);

    // create 100 threads, each will increment by its own quantity
    int numThreads = 100;
    int incrementsPerThread = 1000;
    Incrementer[] all = new Incrementer[numThreads];
    int expectedTotal = 0;

    // create all threads
    for (int i = 0; i < numThreads; i++) {
      all[i] = new Incrementer(region, i, i, incrementsPerThread);
      expectedTotal += (i * incrementsPerThread);
    }
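    // expectedTotal = incrementsPerThread * (0 + 1 + ... + 99) = 1000 * 4950 = 4,950,000.
    // Each thread also increments qual2 by 2x and qual3 by 3x its amount, so those
    // columns are verified against 2 * expectedTotal and 3 * expectedTotal below.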

    // run all threads
    for (int i = 0; i < numThreads; i++) {
      all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
      try {
        all[i].join();
      } catch (InterruptedException e) {
      }
    }
    assertICV(row, fam1, qual1, expectedTotal);
    assertICV(row, fam1, qual2, expectedTotal*2);
    assertICV(row, fam2, qual3, expectedTotal*3);
    LOG.info("testIncrementMultiThreads successfully verified that total is " +
             expectedTotal);
  }

  private void assertICV(byte[] row,
                         byte[] family,
                         byte[] qualifier,
                         long amount) throws IOException {
    // run a get and verify the stored counter value
    Get get = new Get(row);
    get.addColumn(family, qualifier);
    Result result = region.get(get);
    assertEquals(1, result.size());

    Cell kv = result.rawCells()[0];
    long r = Bytes.toLong(CellUtil.cloneValue(kv));
    assertEquals(amount, r);
  }

  private void initHRegion(byte[] tableName, String callingMethod,
      byte[]... families) throws IOException {
    initHRegion(tableName, callingMethod, null, families);
  }

  private void initHRegion(byte[] tableName, String callingMethod, int[] maxVersions,
      byte[]... families) throws IOException {
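    // maxVersions, when supplied, is applied positionally: maxVersions[i] becomes the
    // max-versions setting for families[i].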
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setMaxVersions(maxVersions != null ? maxVersions[i++] : 1);
      htd.addFamily(hcd);
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    region = TEST_UTIL.createLocalHRegion(info, htd);
  }

  /**
   * A thread that makes a few increment calls
   */
  public static class Incrementer extends Thread {

    private final HRegion region;
    private final int numIncrements;
    private final int amount;

    public Incrementer(HRegion region,
        int threadNumber, int amount, int numIncrements) {
      this.region = region;
      this.numIncrements = numIncrements;
      this.amount = amount;
      setDaemon(true);
    }

    @Override
    public void run() {
      for (int i = 0; i < numIncrements; i++) {
        try {
          Increment inc = new Increment(row);
          inc.addColumn(fam1, qual1, amount);
          inc.addColumn(fam1, qual2, amount*2);
          inc.addColumn(fam2, qual3, amount*3);
          region.increment(inc);

          // verify: Make sure we only see completed increments
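          // Because all three columns are incremented by a single atomic Increment,
          // any read must observe qual2 == 2 * qual1 and qual3 == 3 * qual1; seeing
          // anything else would mean a partially applied increment became visible.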
          Get g = new Get(row);
          Result result = region.get(g);
          assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*2, Bytes.toLong(result.getValue(fam1, qual2)));
          assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*3, Bytes.toLong(result.getValue(fam2, qual3)));
        } catch (IOException e) {
          e.printStackTrace();
        }
      }
    }
  }

  @Test
  public void testAppendMultiThreads() throws IOException {
    LOG.info("Starting test testAppendMultiThreads");
    // run with mixed column families (1 and 3 versions)
    initHRegion(tableName, name.getMethodName(), new int[] {1,3}, fam1, fam2);

    int numThreads = 100;
    int opsPerThread = 100;
    AtomicOperation[] all = new AtomicOperation[numThreads];
    final byte[] val = new byte[]{1};
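    // Each append adds this single byte to three qualifiers, so after
    // 100 threads * 100 appends each, every qualifier should be exactly
    // 10,000 bytes long (verified at the end of the test).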

    AtomicInteger failures = new AtomicInteger(0);
    // create all threads
    for (int i = 0; i < numThreads; i++) {
      all[i] = new AtomicOperation(region, opsPerThread, null, failures) {
        @Override
        public void run() {
          for (int i = 0; i < numOps; i++) {
            try {
              Append a = new Append(row);
              a.add(fam1, qual1, val);
              a.add(fam1, qual2, val);
              a.add(fam2, qual3, val);
              region.append(a);

              Get g = new Get(row);
              Result result = region.get(g);
              assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam1, qual2).length);
              assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam2, qual3).length);
            } catch (IOException e) {
              e.printStackTrace();
              failures.incrementAndGet();
              fail();
            }
          }
        }
      };
    }

    // run all threads
    for (int i = 0; i < numThreads; i++) {
      all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
      try {
        all[i].join();
      } catch (InterruptedException e) {
      }
    }
    assertEquals(0, failures.get());
    Get g = new Get(row);
    Result result = region.get(g);
    assertEquals(10000, result.getValue(fam1, qual1).length);
    assertEquals(10000, result.getValue(fam1, qual2).length);
    assertEquals(10000, result.getValue(fam2, qual3).length);
  }

  /**
   * Test multi-threaded row mutations.
   */
  @Test
  public void testRowMutationMultiThreads() throws IOException {

    LOG.info("Starting test testRowMutationMultiThreads");
    initHRegion(tableName, name.getMethodName(), fam1);

    // create 10 threads, each will alternate between adding and
    // removing a column
    int numThreads = 10;
    int opsPerThread = 500;
    AtomicOperation[] all = new AtomicOperation[numThreads];

    AtomicLong timeStamps = new AtomicLong(0);
    AtomicInteger failures = new AtomicInteger(0);
    // create all threads
    for (int i = 0; i < numThreads; i++) {
      all[i] = new AtomicOperation(region, opsPerThread, timeStamps, failures) {
        @Override
        public void run() {
          boolean op = true;
          for (int i = 0; i < numOps; i++) {
            try {
              // throw in some flushes
              if (i % 10 == 0) {
                synchronized (region) {
                  LOG.debug("flushing");
                  region.flushcache();
                  if (i % 100 == 0) {
                    region.compactStores();
                  }
                }
              }
              long ts = timeStamps.incrementAndGet();
              RowMutations rm = new RowMutations(row);
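              // The Put and Delete are grouped into one RowMutations so that
              // region.mutateRow() applies them atomically to this row; a reader
              // should therefore always see exactly one of qual1/qual2.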
              if (op) {
                Put p = new Put(row, ts);
                p.add(fam1, qual1, value1);
                rm.add(p);
                Delete d = new Delete(row);
                d.deleteColumns(fam1, qual2, ts);
                rm.add(d);
              } else {
                Delete d = new Delete(row);
                d.deleteColumns(fam1, qual1, ts);
                rm.add(d);
                Put p = new Put(row, ts);
                p.add(fam1, qual2, value2);
                rm.add(p);
              }
              region.mutateRow(rm);
              op ^= true;
              // check: should always see exactly one column
              Get g = new Get(row);
              Result r = region.get(g);
              if (r.size() != 1) {
                LOG.debug(r);
                failures.incrementAndGet();
                fail();
              }
            } catch (IOException e) {
              e.printStackTrace();
              failures.incrementAndGet();
              fail();
            }
          }
        }
      };
    }

    // run all threads
    for (int i = 0; i < numThreads; i++) {
      all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
      try {
        all[i].join();
      } catch (InterruptedException e) {
      }
    }
    assertEquals(0, failures.get());
  }


  /**
   * Test multi-threaded multi-row mutations.
   */
  @Test
  public void testMultiRowMutationMultiThreads() throws IOException {

    LOG.info("Starting test testMultiRowMutationMultiThreads");
    initHRegion(tableName, name.getMethodName(), fam1);

    // create 10 threads, each will alternate between adding a column to one row
    // and removing it from the other
    int numThreads = 10;
    int opsPerThread = 500;
    AtomicOperation[] all = new AtomicOperation[numThreads];

    AtomicLong timeStamps = new AtomicLong(0);
    AtomicInteger failures = new AtomicInteger(0);
    final List<byte[]> rowsToLock = Arrays.asList(row, row2);
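    // Both rows are passed to mutateRowsWithLocks() below so the Put on one row and
    // the Delete on the other are applied under the same set of row locks, i.e.
    // atomically with respect to readers of either row.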
    // create all threads
    for (int i = 0; i < numThreads; i++) {
      all[i] = new AtomicOperation(region, opsPerThread, timeStamps, failures) {
        @Override
        public void run() {
          boolean op = true;
          for (int i = 0; i < numOps; i++) {
            try {
              // throw in some flushes
              if (i % 10 == 0) {
                synchronized (region) {
                  LOG.debug("flushing");
                  region.flushcache();
                  if (i % 100 == 0) {
                    region.compactStores();
                  }
                }
              }
              long ts = timeStamps.incrementAndGet();
              List<Mutation> mrm = new ArrayList<Mutation>();
              if (op) {
                Put p = new Put(row2, ts);
                p.add(fam1, qual1, value1);
                mrm.add(p);
                Delete d = new Delete(row);
                d.deleteColumns(fam1, qual1, ts);
                mrm.add(d);
              } else {
                Delete d = new Delete(row2);
                d.deleteColumns(fam1, qual1, ts);
                mrm.add(d);
                Put p = new Put(row, ts);
                p.add(fam1, qual1, value2);
                mrm.add(p);
              }
              region.mutateRowsWithLocks(mrm, rowsToLock);
              op ^= true;
              // check: should always see exactly one column
              Scan s = new Scan(row);
              RegionScanner rs = region.getScanner(s);
              List<Cell> r = new ArrayList<Cell>();
              while (rs.next(r));
              rs.close();
              if (r.size() != 1) {
                LOG.debug(r);
                failures.incrementAndGet();
                fail();
              }
            } catch (IOException e) {
              e.printStackTrace();
              failures.incrementAndGet();
              fail();
            }
          }
        }
      };
    }

    // run all threads
    for (int i = 0; i < numThreads; i++) {
      all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
      try {
        all[i].join();
      } catch (InterruptedException e) {
      }
    }
    assertEquals(0, failures.get());
  }

  public static class AtomicOperation extends Thread {
    protected final HRegion region;
    protected final int numOps;
    protected final AtomicLong timeStamps;
    protected final AtomicInteger failures;
    protected final Random r = new Random();

    public AtomicOperation(HRegion region, int numOps, AtomicLong timeStamps,
        AtomicInteger failures) {
      this.region = region;
      this.numOps = numOps;
      this.timeStamps = timeStamps;
      this.failures = failures;
    }
  }

  private static CountDownLatch latch = new CountDownLatch(1);
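  // The latch and TestStep below coordinate the PutThread, the CheckAndPutThread and
  // MockHRegion so that the HBASE-7051 interleaving (the put releases its row lock
  // before its MVCC transaction completes, then the checkAndPut reads) is reproduced
  // deterministically.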
  private enum TestStep {
    INIT,                  // initial put of 10 to set value of the cell
    PUT_STARTED,           // began doing a put of 50 to cell
    PUT_COMPLETED,         // put complete (released RowLock, but may not have advanced MVCC).
    CHECKANDPUT_STARTED,   // began checkAndPut: if 10 -> 11
    CHECKANDPUT_COMPLETED  // completed checkAndPut
    // NOTE: at the end of these steps, the value of the cell should be 50, not 11!
  }
  private static volatile TestStep testStep = TestStep.INIT;
  private final String family = "f1";

  /**
   * Test written as a verifier for HBASE-7051: checkAndPut should properly read MVCC.
   *
   * Moved into TestAtomicOperation from its original location, TestHBase7051.
   */
  @Test
  public void testPutAndCheckAndPutInParallel() throws Exception {

    final String tableName = "testPutAndCheckAndPut";
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
    final MockHRegion region = (MockHRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
        null, null, tableName, conf, false, Durability.SYNC_WAL, null, Bytes.toBytes(family));

    Put[] puts = new Put[1];
    Put put = new Put(Bytes.toBytes("r1"));
    put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
    puts[0] = put;

    region.batchMutate(puts);
    MultithreadedTestUtil.TestContext ctx =
      new MultithreadedTestUtil.TestContext(conf);
    ctx.addThread(new PutThread(ctx, region));
    ctx.addThread(new CheckAndPutThread(ctx, region));
    ctx.startThreads();
    while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
      Thread.sleep(100);
    }
    ctx.stop();
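    // The checkAndPut of 11 compares against "10" but must read the MVCC-consistent
    // value written by the concurrent put of 50, so the check must fail and the cell
    // must end up holding "50" (see the NOTE on TestStep above).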
    Scan s = new Scan();
    RegionScanner scanner = region.getScanner(s);
    List<Cell> results = new ArrayList<Cell>();
    scanner.next(results, 2);
    for (Cell keyValue : results) {
      assertEquals("50", Bytes.toString(CellUtil.cloneValue(keyValue)));
    }
  }

  private class PutThread extends TestThread {
    private MockHRegion region;
    PutThread(TestContext ctx, MockHRegion region) {
      super(ctx);
      this.region = region;
    }

    @Override
    public void doWork() throws Exception {
      Put[] puts = new Put[1];
      Put put = new Put(Bytes.toBytes("r1"));
      put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("50"));
      puts[0] = put;
      testStep = TestStep.PUT_STARTED;
      region.batchMutate(puts);
    }
  }

  private class CheckAndPutThread extends TestThread {
    private MockHRegion region;
    CheckAndPutThread(TestContext ctx, MockHRegion region) {
      super(ctx);
      this.region = region;
    }

    @Override
    public void doWork() throws Exception {
      Put[] puts = new Put[1];
      Put put = new Put(Bytes.toBytes("r1"));
      put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("11"));
      puts[0] = put;
      while (testStep != TestStep.PUT_COMPLETED) {
        Thread.sleep(100);
      }
      testStep = TestStep.CHECKANDPUT_STARTED;
      region.checkAndMutate(Bytes.toBytes("r1"), Bytes.toBytes(family), Bytes.toBytes("q1"),
        CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("10")), put, true);
      testStep = TestStep.CHECKANDPUT_COMPLETED;
    }
  }

  public static class MockHRegion extends HRegion {

    public MockHRegion(Path tableDir, HLog log, FileSystem fs, Configuration conf,
        final HRegionInfo regionInfo, final HTableDescriptor htd, RegionServerServices rsServices) {
      super(tableDir, log, fs, conf, regionInfo, htd, rsServices);
    }

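    // Row locks are wrapped so that release() can be intercepted and the hand-off to
    // the checkAndPut thread staged at exactly the point needed to reproduce the
    // HBASE-7051 race (see WrappedRowLock below).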
    @Override
    public RowLock getRowLock(final byte[] row, boolean waitForLock) throws IOException {
      if (testStep == TestStep.CHECKANDPUT_STARTED) {
        latch.countDown();
      }
      return new WrappedRowLock(super.getRowLock(row, waitForLock));
    }

    public class WrappedRowLock extends RowLock {

      private WrappedRowLock(RowLock rowLock) {
        super(rowLock.context);
      }

      @Override
      public void release() {
        if (testStep == TestStep.INIT) {
          super.release();
          return;
        }

        if (testStep == TestStep.PUT_STARTED) {
          try {
            testStep = TestStep.PUT_COMPLETED;
            super.release();
            // put has been written to the memstore and the row lock has been released, but the
            // MVCC has not been advanced.  Prior to fixing HBASE-7051, the following order of
            // operations would cause the non-atomicity to show up:
            // 1) Put releases row lock (where we are now)
            // 2) CheckAndPut grabs row lock and reads the value prior to the put (10)
            //    because the MVCC has not advanced
            // 3) Put advances MVCC
            // So, in order to recreate this order, we wait for the checkAndPut to grab the rowLock
            // (see below), and then wait some more to give the checkAndPut time to read the old
            // value.
            latch.await();
            Thread.sleep(1000);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        } else if (testStep == TestStep.CHECKANDPUT_STARTED) {
          super.release();
        }
      }
    }
  }
}