/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import java.io.IOException;
import java.util.List;
import java.util.Random;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import com.google.common.collect.Lists;
50  
51  /**
52   * Test case that uses multiple threads to read and write multifamily rows
53   * into a table, verifying that reads never see partially-complete writes.
54   *
55   * This can run as a junit test, or with a main() function which runs against
56   * a real cluster (eg for testing with failures, region movement, etc)
57   */
58  @Category(MediumTests.class)
59  public class TestAcidGuarantees implements Tool {
60    protected static final Log LOG = LogFactory.getLog(TestAcidGuarantees.class);
61    public static final TableName TABLE_NAME = TableName.valueOf("TestAcidGuarantees");
62    public static final byte [] FAMILY_A = Bytes.toBytes("A");
63    public static final byte [] FAMILY_B = Bytes.toBytes("B");
64    public static final byte [] FAMILY_C = Bytes.toBytes("C");
65    public static final byte [] QUALIFIER_NAME = Bytes.toBytes("data");
66  
67    public static final byte[][] FAMILIES = new byte[][] {
68      FAMILY_A, FAMILY_B, FAMILY_C };
69  
70    private HBaseTestingUtility util;
71  
72    public static int NUM_COLS_TO_CHECK = 50;
73  
74    // when run as main
75    private Configuration conf;
76  
77    private void createTableIfMissing()
78      throws IOException {
79      try {
80        util.createTable(TABLE_NAME, FAMILIES);
81      } catch (TableExistsException tee) {
82      }
83    }
84  
85    public TestAcidGuarantees() {
86      // Set small flush size for minicluster so we exercise reseeking scanners
87      Configuration conf = HBaseConfiguration.create();
88      conf.set(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, String.valueOf(128*1024));
89      // prevent aggressive region split
90      conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
91              ConstantSizeRegionSplitPolicy.class.getName());
92      util = new HBaseTestingUtility(conf);
93    }
94  
95    public void setHBaseTestingUtil(HBaseTestingUtility util) {
96      this.util = util;
97    }
98  
99    /**
100    * Thread that does random full-row writes into a table.
101    */
102   public static class AtomicityWriter extends RepeatingTestThread {
103     Random rand = new Random();
104     byte data[] = new byte[10];
105     byte targetRows[][];
106     byte targetFamilies[][];
107     Table table;
108     AtomicLong numWritten = new AtomicLong();
109 
110     public AtomicityWriter(TestContext ctx, byte targetRows[][],
111                            byte targetFamilies[][]) throws IOException {
112       super(ctx);
113       this.targetRows = targetRows;
114       this.targetFamilies = targetFamilies;
115       table = new HTable(ctx.getConf(), TABLE_NAME);
116     }
117     public void doAnAction() throws Exception {
118       // Pick a random row to write into
119       byte[] targetRow = targetRows[rand.nextInt(targetRows.length)];
120       Put p = new Put(targetRow);
121       rand.nextBytes(data);
122 
123       for (byte[] family : targetFamilies) {
124         for (int i = 0; i < NUM_COLS_TO_CHECK; i++) {
125           byte qualifier[] = Bytes.toBytes("col" + i);
126           p.add(family, qualifier, data);
127         }
128       }
129       table.put(p);
130       numWritten.getAndIncrement();
131     }
132   }
133 
134   /**
135    * Thread that does single-row reads in a table, looking for partially
136    * completed rows.
137    */
138   public static class AtomicGetReader extends RepeatingTestThread {
139     byte targetRow[];
140     byte targetFamilies[][];
141     Table table;
142     int numVerified = 0;
143     AtomicLong numRead = new AtomicLong();
144 
145     public AtomicGetReader(TestContext ctx, byte targetRow[],
146                            byte targetFamilies[][]) throws IOException {
147       super(ctx);
148       this.targetRow = targetRow;
149       this.targetFamilies = targetFamilies;
150       table = new HTable(ctx.getConf(), TABLE_NAME);
151     }
152 
153     public void doAnAction() throws Exception {
154       Get g = new Get(targetRow);
155       Result res = table.get(g);
156       byte[] gotValue = null;
157       if (res.getRow() == null) {
158         // Trying to verify but we didn't find the row - the writing
159         // thread probably just hasn't started writing yet, so we can
160         // ignore this action
161         return;
162       }
163 
164       for (byte[] family : targetFamilies) {
165         for (int i = 0; i < NUM_COLS_TO_CHECK; i++) {
166           byte qualifier[] = Bytes.toBytes("col" + i);
167           byte thisValue[] = res.getValue(family, qualifier);
168           if (gotValue != null && !Bytes.equals(gotValue, thisValue)) {
169             gotFailure(gotValue, res);
170           }
171           numVerified++;
172           gotValue = thisValue;
173         }
174       }
175       numRead.getAndIncrement();
176     }
177 
178     private void gotFailure(byte[] expected, Result res) {
179       StringBuilder msg = new StringBuilder();
180       msg.append("Failed after ").append(numVerified).append("!");
181       msg.append("Expected=").append(Bytes.toStringBinary(expected));
182       msg.append("Got:\n");
183       for (Cell kv : res.listCells()) {
184         msg.append(kv.toString());
185         msg.append(" val= ");
186         msg.append(Bytes.toStringBinary(CellUtil.cloneValue(kv)));
187         msg.append("\n");
188       }
189       throw new RuntimeException(msg.toString());
190     }
191   }
192 
193   /**
194    * Thread that does full scans of the table looking for any partially completed
195    * rows.
196    */
197   public static class AtomicScanReader extends RepeatingTestThread {
198     byte targetFamilies[][];
199     Table table;
200     AtomicLong numScans = new AtomicLong();
201     AtomicLong numRowsScanned = new AtomicLong();
202 
203     public AtomicScanReader(TestContext ctx,
204                            byte targetFamilies[][]) throws IOException {
205       super(ctx);
206       this.targetFamilies = targetFamilies;
207       table = new HTable(ctx.getConf(), TABLE_NAME);
208     }
209 
210     public void doAnAction() throws Exception {
211       Scan s = new Scan();
212       for (byte[] family : targetFamilies) {
213         s.addFamily(family);
214       }
215       ResultScanner scanner = table.getScanner(s);
216 
217       for (Result res : scanner) {
218         byte[] gotValue = null;
219 
220         for (byte[] family : targetFamilies) {
221           for (int i = 0; i < NUM_COLS_TO_CHECK; i++) {
222             byte qualifier[] = Bytes.toBytes("col" + i);
223             byte thisValue[] = res.getValue(family, qualifier);
224             if (gotValue != null && !Bytes.equals(gotValue, thisValue)) {
225               gotFailure(gotValue, res);
226             }
227             gotValue = thisValue;
228           }
229         }
230         numRowsScanned.getAndIncrement();
231       }
232       numScans.getAndIncrement();
233     }
234 
235     private void gotFailure(byte[] expected, Result res) {
236       StringBuilder msg = new StringBuilder();
237       msg.append("Failed after ").append(numRowsScanned).append("!");
238       msg.append("Expected=").append(Bytes.toStringBinary(expected));
239       msg.append("Got:\n");
240       for (Cell kv : res.listCells()) {
241         msg.append(kv.toString());
242         msg.append(" val= ");
243         msg.append(Bytes.toStringBinary(CellUtil.cloneValue(kv)));
244         msg.append("\n");
245       }
246       throw new RuntimeException(msg.toString());
247     }
248   }
249 
250   public void runTestAtomicity(long millisToRun,
251       int numWriters,
252       int numGetters,
253       int numScanners,
254       int numUniqueRows) throws Exception {
255     runTestAtomicity(millisToRun, numWriters, numGetters, numScanners, numUniqueRows, false);
256   }
257 
258   public void runTestAtomicity(long millisToRun,
259       int numWriters,
260       int numGetters,
261       int numScanners,
262       int numUniqueRows,
263       final boolean systemTest) throws Exception {
264     createTableIfMissing();
265     TestContext ctx = new TestContext(util.getConfiguration());
266 
267     byte rows[][] = new byte[numUniqueRows][];
268     for (int i = 0; i < numUniqueRows; i++) {
269       rows[i] = Bytes.toBytes("test_row_" + i);
270     }
271 
272     List<AtomicityWriter> writers = Lists.newArrayList();
273     for (int i = 0; i < numWriters; i++) {
274       AtomicityWriter writer = new AtomicityWriter(
275           ctx, rows, FAMILIES);
276       writers.add(writer);
277       ctx.addThread(writer);
278     }
279     // Add a flusher
280     ctx.addThread(new RepeatingTestThread(ctx) {
281       HBaseAdmin admin = util.getHBaseAdmin();
282       public void doAnAction() throws Exception {
283         try {
284           admin.flush(TABLE_NAME);
285         } catch(IOException ioe) {
286           LOG.warn("Ignoring exception while flushing: " + StringUtils.stringifyException(ioe));
287         }
288         // Flushing has been a source of ACID violations previously (see HBASE-2856), so ideally,
289         // we would flush as often as possible.  On a running cluster, this isn't practical:
290         // (1) we will cause a lot of load due to all the flushing and compacting
291         // (2) we cannot change the flushing/compacting related Configuration options to try to
292         // alleviate this
293         // (3) it is an unrealistic workload, since no one would actually flush that often.
294         // Therefore, let's flush every minute to have more flushes than usual, but not overload
295         // the running cluster.
296         if (systemTest) Thread.sleep(60000);
297       }
298     });
299 
300     List<AtomicGetReader> getters = Lists.newArrayList();
301     for (int i = 0; i < numGetters; i++) {
302       AtomicGetReader getter = new AtomicGetReader(
303           ctx, rows[i % numUniqueRows], FAMILIES);
304       getters.add(getter);
305       ctx.addThread(getter);
306     }
307 
308     List<AtomicScanReader> scanners = Lists.newArrayList();
309     for (int i = 0; i < numScanners; i++) {
310       AtomicScanReader scanner = new AtomicScanReader(ctx, FAMILIES);
311       scanners.add(scanner);
312       ctx.addThread(scanner);
313     }
314 
315     ctx.startThreads();
316     ctx.waitFor(millisToRun);
317     ctx.stop();
318 
319     LOG.info("Finished test. Writers:");
320     for (AtomicityWriter writer : writers) {
321       LOG.info("  wrote " + writer.numWritten.get());
322     }
323     LOG.info("Readers:");
324     for (AtomicGetReader reader : getters) {
325       LOG.info("  read " + reader.numRead.get());
326     }
327     LOG.info("Scanners:");
328     for (AtomicScanReader scanner : scanners) {
329       LOG.info("  scanned " + scanner.numScans.get());
330       LOG.info("  verified " + scanner.numRowsScanned.get() + " rows");
331     }
332   }
333 
334   @Test
335   public void testGetAtomicity() throws Exception {
336     util.startMiniCluster(1);
337     try {
338       runTestAtomicity(20000, 5, 5, 0, 3);
339     } finally {
340       util.shutdownMiniCluster();
341     }
342   }
343 
344   @Test
345   public void testScanAtomicity() throws Exception {
346     util.startMiniCluster(1);
347     try {
348       runTestAtomicity(20000, 5, 0, 5, 3);
349     } finally {
350       util.shutdownMiniCluster();
351     }
352   }
353 
354   @Test
355   public void testMixedAtomicity() throws Exception {
356     util.startMiniCluster(1);
357     try {
358       runTestAtomicity(20000, 5, 2, 2, 3);
359     } finally {
360       util.shutdownMiniCluster();
361     }
362   }
363 
364   ////////////////////////////////////////////////////////////////////////////
365   // Tool interface
366   ////////////////////////////////////////////////////////////////////////////
367   @Override
368   public Configuration getConf() {
369     return conf;
370   }
371 
372   @Override
373   public void setConf(Configuration c) {
374     this.conf = c;
375     this.util = new HBaseTestingUtility(c);
376   }
377 
378   @Override
379   public int run(String[] arg0) throws Exception {
380     Configuration c = getConf();
381     int millis = c.getInt("millis", 5000);
382     int numWriters = c.getInt("numWriters", 50);
383     int numGetters = c.getInt("numGetters", 2);
384     int numScanners = c.getInt("numScanners", 2);
385     int numUniqueRows = c.getInt("numUniqueRows", 3);
386     runTestAtomicity(millis, numWriters, numGetters, numScanners, numUniqueRows, true);
387     return 0;
388   }
389 
390   public static void main(String args[]) throws Exception {
391     Configuration c = HBaseConfiguration.create();
392     int status;
393     try {
394       TestAcidGuarantees test = new TestAcidGuarantees();
395       status = ToolRunner.run(c, test, args);
396     } catch (Exception e) {
397       LOG.error("Exiting due to error", e);
398       status = -1;
399     }
400     System.exit(status);
401   }
402 
403 
404 }
405