1   /**
2    * Copyright 2010 The Apache Software Foundation
3    *
4    * Licensed to the Apache Software Foundation (ASF) under one
5    * or more contributor license agreements.  See the NOTICE file
6    * distributed with this work for additional information
7    * regarding copyright ownership.  The ASF licenses this file
8    * to you under the Apache License, Version 2.0 (the
9    * "License"); you may not use this file except in compliance
10   * with the License.  You may obtain a copy of the License at
11   *
12   *     http://www.apache.org/licenses/LICENSE-2.0
13   *
14   * Unless required by applicable law or agreed to in writing, software
15   * distributed under the License is distributed on an "AS IS" BASIS,
16   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17   * See the License for the specific language governing permissions and
18   * limitations under the License.
19   */
20  package org.apache.hadoop.hbase;
21  
22  import java.io.IOException;
23  import java.util.List;
24  import java.util.Random;
25  import java.util.concurrent.atomic.AtomicLong;
26  
27  import org.apache.commons.logging.Log;
28  import org.apache.commons.logging.LogFactory;
29  import org.apache.hadoop.conf.Configuration;
30  import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
31  import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
32  import org.apache.hadoop.hbase.client.Get;
33  import org.apache.hadoop.hbase.client.HBaseAdmin;
34  import org.apache.hadoop.hbase.client.HTable;
35  import org.apache.hadoop.hbase.client.Put;
36  import org.apache.hadoop.hbase.client.Result;
37  import org.apache.hadoop.hbase.client.ResultScanner;
38  import org.apache.hadoop.hbase.client.Scan;
39  import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
40  import org.apache.hadoop.hbase.util.Bytes;
41  import org.apache.hadoop.util.Tool;
42  import org.apache.hadoop.util.ToolRunner;
43  import org.junit.Test;
44  import org.junit.experimental.categories.Category;
45  
46  import com.google.common.collect.Lists;
47  
48  /**
49   * Test case that uses multiple threads to read and write multifamily rows
50   * into a table, verifying that reads never see partially-complete writes.
51   * 
 * This can run as a JUnit test, or with a main() function which runs against
 * a real cluster (e.g. for testing with failures, region movement, etc.)
54   */
55  @Category(MediumTests.class)
56  public class TestAcidGuarantees implements Tool {
57    protected static final Log LOG = LogFactory.getLog(TestAcidGuarantees.class);
58    public static final byte [] TABLE_NAME = Bytes.toBytes("TestAcidGuarantees");
59    public static final byte [] FAMILY_A = Bytes.toBytes("A");
60    public static final byte [] FAMILY_B = Bytes.toBytes("B");
61    public static final byte [] FAMILY_C = Bytes.toBytes("C");
62    public static final byte [] QUALIFIER_NAME = Bytes.toBytes("data");
63  
64    public static final byte[][] FAMILIES = new byte[][] {
65      FAMILY_A, FAMILY_B, FAMILY_C };
66  
67    private HBaseTestingUtility util;
68  
69    public static int NUM_COLS_TO_CHECK = 50;
70  
71    // when run as main
72    private Configuration conf;
73  
74    private void createTableIfMissing()
75      throws IOException {
76      try {
77        util.createTable(TABLE_NAME, FAMILIES);
78      } catch (TableExistsException tee) {
79      }
80    }
81  
82    public TestAcidGuarantees() {
83      // Set small flush size for minicluster so we exercise reseeking scanners
84      Configuration conf = HBaseConfiguration.create();
85      conf.set(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, String.valueOf(128*1024));
86      // prevent aggressive region split
87      conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
88        ConstantSizeRegionSplitPolicy.class.getName());
89      util = new HBaseTestingUtility(conf);
90    }
91    
92    /**
93     * Thread that does random full-row writes into a table.
94     */
95    public static class AtomicityWriter extends RepeatingTestThread {
96      Random rand = new Random();
97      byte data[] = new byte[10];
98      byte targetRows[][];
99      byte targetFamilies[][];
100     HTable table;
101     AtomicLong numWritten = new AtomicLong();
102     
103     public AtomicityWriter(TestContext ctx, byte targetRows[][],
104                            byte targetFamilies[][]) throws IOException {
105       super(ctx);
106       this.targetRows = targetRows;
107       this.targetFamilies = targetFamilies;
108       table = new HTable(ctx.getConf(), TABLE_NAME);
109     }
110     public void doAnAction() throws Exception {
111       // Pick a random row to write into
112       byte[] targetRow = targetRows[rand.nextInt(targetRows.length)];
113       Put p = new Put(targetRow); 
114       rand.nextBytes(data);
115 
116       for (byte[] family : targetFamilies) {
117         for (int i = 0; i < NUM_COLS_TO_CHECK; i++) {
118           byte qualifier[] = Bytes.toBytes("col" + i);
119           p.add(family, qualifier, data);
120         }
121       }
122       table.put(p);
123       numWritten.getAndIncrement();
124     }
125   }
126   
127   /**
128    * Thread that does single-row reads in a table, looking for partially
129    * completed rows.
130    */
  /**
   * Thread that does single-row reads in a table, looking for partially
   * completed rows.
   */
  public static class AtomicGetReader extends RepeatingTestThread {
    byte targetRow[];          // the single row this reader verifies
    byte targetFamilies[][];   // families expected to hold identical values
    HTable table;
    int numVerified = 0;       // cells checked so far; used in the failure message
    AtomicLong numRead = new AtomicLong();  // successful row reads, read by the driver

    public AtomicGetReader(TestContext ctx, byte targetRow[],
                           byte targetFamilies[][]) throws IOException {
      super(ctx);
      this.targetRow = targetRow;
      this.targetFamilies = targetFamilies;
      table = new HTable(ctx.getConf(), TABLE_NAME);
    }

    // Performs one Get of the target row and verifies every cell matches
    // every other cell; the writer always puts a single value row-wide.
    public void doAnAction() throws Exception {
      Get g = new Get(targetRow);
      Result res = table.get(g);
      byte[] gotValue = null;
      if (res.getRow() == null) {
        // Trying to verify but we didn't find the row - the writing
        // thread probably just hasn't started writing yet, so we can
        // ignore this action
        return;
      }

      // Chain comparison: each cell is compared to the previous one, so any
      // mismatch anywhere in the row means a partially-applied write was seen.
      for (byte[] family : targetFamilies) {
        for (int i = 0; i < NUM_COLS_TO_CHECK; i++) {
          byte qualifier[] = Bytes.toBytes("col" + i);
          byte thisValue[] = res.getValue(family, qualifier);
          if (gotValue != null && !Bytes.equals(gotValue, thisValue)) {
            gotFailure(gotValue, res);
          }
          numVerified++;
          gotValue = thisValue;
        }
      }
      numRead.getAndIncrement();
    }

    // Builds a diagnostic message listing every KeyValue in the offending
    // row, then aborts this test thread with a RuntimeException.
    private void gotFailure(byte[] expected, Result res) {
      StringBuilder msg = new StringBuilder();
      msg.append("Failed after ").append(numVerified).append("!");
      msg.append("Expected=").append(Bytes.toStringBinary(expected));
      msg.append("Got:\n");
      for (KeyValue kv : res.list()) {
        msg.append(kv.toString());
        msg.append(" val= ");
        msg.append(Bytes.toStringBinary(kv.getValue()));
        msg.append("\n");
      }
      throw new RuntimeException(msg.toString());
    }
  }
185   
186   /**
187    * Thread that does full scans of the table looking for any partially completed
188    * rows.
189    */
190   public static class AtomicScanReader extends RepeatingTestThread {
191     byte targetFamilies[][];
192     HTable table;
193     AtomicLong numScans = new AtomicLong();
194     AtomicLong numRowsScanned = new AtomicLong();
195 
196     public AtomicScanReader(TestContext ctx,
197                            byte targetFamilies[][]) throws IOException {
198       super(ctx);
199       this.targetFamilies = targetFamilies;
200       table = new HTable(ctx.getConf(), TABLE_NAME);
201     }
202 
203     public void doAnAction() throws Exception {
204       Scan s = new Scan();
205       for (byte[] family : targetFamilies) {
206         s.addFamily(family);
207       }
208       ResultScanner scanner = table.getScanner(s);
209       
210       for (Result res : scanner) {
211         byte[] gotValue = null;
212   
213         for (byte[] family : targetFamilies) {
214           for (int i = 0; i < NUM_COLS_TO_CHECK; i++) {
215             byte qualifier[] = Bytes.toBytes("col" + i);
216             byte thisValue[] = res.getValue(family, qualifier);
217             if (gotValue != null && !Bytes.equals(gotValue, thisValue)) {
218               gotFailure(gotValue, res);
219             }
220             gotValue = thisValue;
221           }
222         }
223         numRowsScanned.getAndIncrement();
224       }
225       numScans.getAndIncrement();
226     }
227 
228     private void gotFailure(byte[] expected, Result res) {
229       StringBuilder msg = new StringBuilder();
230       msg.append("Failed after ").append(numRowsScanned).append("!");
231       msg.append("Expected=").append(Bytes.toStringBinary(expected));
232       msg.append("Got:\n");
233       for (KeyValue kv : res.list()) {
234         msg.append(kv.toString());
235         msg.append(" val= ");
236         msg.append(Bytes.toStringBinary(kv.getValue()));
237         msg.append("\n");
238       }
239       throw new RuntimeException(msg.toString());
240     }
241   }
242 
243 
  /**
   * Drives the concurrent workload: starts writer, flusher, getter and
   * scanner threads against the shared table, lets them run for the given
   * duration, then stops them and logs per-thread throughput. Any atomicity
   * violation surfaces as a RuntimeException from a reader thread, which the
   * TestContext propagates as a test failure.
   *
   * @param millisToRun   how long to let the worker threads run
   * @param numWriters    number of concurrent full-row writer threads
   * @param numGetters    number of concurrent single-row Get verifier threads
   * @param numScanners   number of concurrent full-table Scan verifier threads
   * @param numUniqueRows number of distinct rows written to and verified
   */
  public void runTestAtomicity(long millisToRun,
      int numWriters,
      int numGetters,
      int numScanners,
      int numUniqueRows) throws Exception {
    createTableIfMissing();
    TestContext ctx = new TestContext(util.getConfiguration());

    byte rows[][] = new byte[numUniqueRows][];
    for (int i = 0; i < numUniqueRows; i++) {
      rows[i] = Bytes.toBytes("test_row_" + i);
    }

    // All writers share the same row set, so multiple writers can race on
    // the same row.
    List<AtomicityWriter> writers = Lists.newArrayList();
    for (int i = 0; i < numWriters; i++) {
      AtomicityWriter writer = new AtomicityWriter(
          ctx, rows, FAMILIES);
      writers.add(writer);
      ctx.addThread(writer);
    }
    // Add a flusher
    // Constant flushing forces readers across memstore/storefile boundaries,
    // which is where reseek-related atomicity bugs tend to hide.
    ctx.addThread(new RepeatingTestThread(ctx) {
      HBaseAdmin admin = new HBaseAdmin(util.getConfiguration());
      public void doAnAction() throws Exception {
        admin.flush(TABLE_NAME);
      }
    });

    // Getters are assigned round-robin across the unique rows.
    List<AtomicGetReader> getters = Lists.newArrayList();
    for (int i = 0; i < numGetters; i++) {
      AtomicGetReader getter = new AtomicGetReader(
          ctx, rows[i % numUniqueRows], FAMILIES);
      getters.add(getter);
      ctx.addThread(getter);
    }

    List<AtomicScanReader> scanners = Lists.newArrayList();
    for (int i = 0; i < numScanners; i++) {
      AtomicScanReader scanner = new AtomicScanReader(ctx, FAMILIES);
      scanners.add(scanner);
      ctx.addThread(scanner);
    }

    ctx.startThreads();
    ctx.waitFor(millisToRun);
    ctx.stop();

    // Throughput summary -- mainly useful when run against a real cluster.
    LOG.info("Finished test. Writers:");
    for (AtomicityWriter writer : writers) {
      LOG.info("  wrote " + writer.numWritten.get());
    }
    LOG.info("Readers:");
    for (AtomicGetReader reader : getters) {
      LOG.info("  read " + reader.numRead.get());
    }
    LOG.info("Scanners:");
    for (AtomicScanReader scanner : scanners) {
      LOG.info("  scanned " + scanner.numScans.get());
      LOG.info("  verified " + scanner.numRowsScanned.get() + " rows");
    }
  }
305 
306   @Test
307   public void testGetAtomicity() throws Exception {
308     util.startMiniCluster(1);
309     try {
310       runTestAtomicity(20000, 5, 5, 0, 3);
311     } finally {
312       util.shutdownMiniCluster();
313     }    
314   }
315 
316   @Test
317   public void testScanAtomicity() throws Exception {
318     util.startMiniCluster(1);
319     try {
320       runTestAtomicity(20000, 5, 0, 5, 3);
321     } finally {
322       util.shutdownMiniCluster();
323     }    
324   }
325 
326   @Test
327   public void testMixedAtomicity() throws Exception {
328     util.startMiniCluster(1);
329     try {
330       runTestAtomicity(20000, 5, 2, 2, 3);
331     } finally {
332       util.shutdownMiniCluster();
333     }    
334   }
335 
336   ////////////////////////////////////////////////////////////////////////////
337   // Tool interface
338   ////////////////////////////////////////////////////////////////////////////
  @Override
  public Configuration getConf() {
    // Tool interface: the configuration injected via setConf(). Left null
    // when the class runs as a plain JUnit test (setConf is never called).
    return conf;
  }
343 
  @Override
  public void setConf(Configuration c) {
    // Tool interface: replace the default minicluster-tuned utility with one
    // built from the supplied (command-line) configuration.
    this.conf = c;
    this.util = new HBaseTestingUtility(c);
  }
349 
350   @Override
351   public int run(String[] arg0) throws Exception {
352     Configuration c = getConf();
353     int millis = c.getInt("millis", 5000);
354     int numWriters = c.getInt("numWriters", 50);
355     int numGetters = c.getInt("numGetters", 2);
356     int numScanners = c.getInt("numScanners", 2);
357     int numUniqueRows = c.getInt("numUniqueRows", 3);
358     runTestAtomicity(millis, numWriters, numGetters, numScanners, numUniqueRows);
359     return 0;
360   }
361 
362   public static void main(String args[]) throws Exception {
363     Configuration c = HBaseConfiguration.create();
364     int status;
365     try {
366       TestAcidGuarantees test = new TestAcidGuarantees();
367       status = ToolRunner.run(c, test, args);
368     } catch (Exception e) {
369       LOG.error("Exiting due to error", e);
370       status = -1;
371     }
372     System.exit(status);
373   }
374 
375 
376   @org.junit.Rule
377   public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
378     new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
379 }
380