/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.RegionServerCallable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.RpcRetryingCaller;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import com.google.common.collect.Lists;

/**
 * Tests bulk loading of HFiles, demonstrating the atomicity (or lack
 * thereof) of the region server's bulkLoad functionality.
 */
@Category(LargeTests.class)
public class TestHRegionServerBulkLoad {
  final static Log LOG = LogFactory.getLog(TestHRegionServerBulkLoad.class);
  private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private final static Configuration conf = UTIL.getConfiguration();
  private final static byte[] QUAL = Bytes.toBytes("qual");
  private final static int NUM_CFS = 10;
  public static int BLOCKSIZE = 64 * 1024;
  public static String COMPRESSION = Compression.Algorithm.NONE.getName();

  private final static byte[][] families = new byte[NUM_CFS][];
  static {
    for (int i = 0; i < NUM_CFS; i++) {
      families[i] = Bytes.toBytes(family(i));
    }
  }

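  /** Row keys are zero-padded so that lexicographic order matches numeric order. */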
  static byte[] rowkey(int i) {
    return Bytes.toBytes(String.format("row_%08d", i));
  }

  static String family(int i) {
    return String.format("family_%04d", i);
  }

  /**
   * Create an HFile with the given number of rows with a specified value.
   */
  public static void createHFile(FileSystem fs, Path path, byte[] family,
      byte[] qualifier, byte[] value, int numRows) throws IOException {
    HFile.Writer writer = HFile
        .getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withBlockSize(BLOCKSIZE)
        .withCompression(COMPRESSION)
        .create();
    long now = System.currentTimeMillis();
    try {
      // rowkey(i) generates keys in ascending order, as HFile.Writer
      // requires appends to be sorted
      for (int i = 0; i < numRows; i++) {
        KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
        writer.append(kv);
      }
    } finally {
      writer.close();
    }
  }

  /**
   * Thread that continually bulk loads new HFiles into the table.
   *
   * Each iteration loads 10 HFiles (one per column family), which occupy
   * about 5 open file handles apiece. So every 10 iterations (500 file
   * handles) it requests a region compaction to reduce the number of open
   * file handles.
   */
  public static class AtomicHFileLoader extends RepeatingTestThread {
    final AtomicLong numBulkLoads = new AtomicLong();
    final AtomicLong numCompactions = new AtomicLong();
    private String tableName;

    public AtomicHFileLoader(String tableName, TestContext ctx,
        byte targetFamilies[][]) throws IOException {
      super(ctx);
      this.tableName = tableName;
    }

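    /**
     * Bulk loads one new HFile per column family (all containing the same
     * value), then requests a region compaction every tenth iteration.
     */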
    public void doAnAction() throws Exception {
      long iteration = numBulkLoads.getAndIncrement();
      Path dir = UTIL.getDataTestDirOnTestFS(String.format("bulkLoad_%08d",
          iteration));

      // create HFiles for different column families
      FileSystem fs = UTIL.getTestFileSystem();
      byte[] val = Bytes.toBytes(String.format("%010d", iteration));
      final List<Pair<byte[], String>> famPaths = new ArrayList<Pair<byte[], String>>(
          NUM_CFS);
      for (int i = 0; i < NUM_CFS; i++) {
        Path hfile = new Path(dir, family(i));
        byte[] fam = Bytes.toBytes(family(i));
        createHFile(fs, hfile, fam, QUAL, val, 1000);
        famPaths.add(new Pair<byte[], String>(fam, hfile.toString()));
      }

      // bulk load HFiles
      final HConnection conn = UTIL.getHBaseAdmin().getConnection();
      TableName tbl = TableName.valueOf(tableName);
      RegionServerCallable<Void> callable =
          new RegionServerCallable<Void>(conn, tbl, Bytes.toBytes("aaa")) {
        @Override
        public Void call() throws Exception {
          LOG.debug("Going to connect to server " + getLocation() + " for row "
              + Bytes.toStringBinary(getRow()));
          byte[] regionName = getLocation().getRegionInfo().getRegionName();
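          // the trailing 'true' (assignSeqNum) asks the server to assign a
          // sequence id to the bulk load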
          BulkLoadHFileRequest request =
            RequestConverter.buildBulkLoadHFileRequest(famPaths, regionName, true);
          getStub().bulkLoadHFile(null, request);
          return null;
        }
      };
      RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(conf);
      RpcRetryingCaller<Void> caller = factory.<Void> newCaller();
      caller.callWithRetries(callable);

      // Periodically do compaction to reduce the number of open file handles.
      if (numBulkLoads.get() % 10 == 0) {
        // 10 * 50 = 500 open file handles!
167         callable = new RegionServerCallable<Void>(conn, tbl, Bytes.toBytes("aaa")) {
168           @Override
169           public Void call() throws Exception {
170             LOG.debug("compacting " + getLocation() + " for row "
171                 + Bytes.toStringBinary(getRow()));
172             AdminProtos.AdminService.BlockingInterface server =
173               conn.getAdmin(getLocation().getServerName());
174             CompactRegionRequest request =
175               RequestConverter.buildCompactRegionRequest(
176                 getLocation().getRegionInfo().getRegionName(), true, null);
177             server.compactRegion(null, request);
178             numCompactions.incrementAndGet();
179             return null;
180           }
181         };
182         caller.callWithRetries(callable);
183       }
184     }
185   }
186 
  /**
   * Thread that does full scans of the table looking for any partially
   * completed rows.
   */
  public static class AtomicScanReader extends RepeatingTestThread {
    byte targetFamilies[][];
    HTable table;
    AtomicLong numScans = new AtomicLong();
    AtomicLong numRowsScanned = new AtomicLong();
    String tableName;

    public AtomicScanReader(String tableName, TestContext ctx,
        byte targetFamilies[][]) throws IOException {
      super(ctx);
      this.tableName = tableName;
      this.targetFamilies = targetFamilies;
      table = new HTable(conf, tableName);
    }

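    /**
     * Scans the whole table and verifies that, within each row, every
     * scanned column family holds the same value.
     */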
    public void doAnAction() throws Exception {
      Scan s = new Scan();
      for (byte[] family : targetFamilies) {
        s.addFamily(family);
      }
      ResultScanner scanner = table.getScanner(s);

      for (Result res : scanner) {
        byte[] lastRow = null, lastFam = null, lastQual = null;
        byte[] gotValue = null;
        for (byte[] family : targetFamilies) {
          byte qualifier[] = QUAL;
          byte thisValue[] = res.getValue(family, qualifier);
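          // Each bulk load writes a single value across all families of a
          // row, so two differing non-null values in the same row mean the
          // scanner observed a bulk load that was only partially applied.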
219           if (gotValue != null && thisValue != null
220               && !Bytes.equals(gotValue, thisValue)) {
221 
222             StringBuilder msg = new StringBuilder();
223             msg.append("Failed on scan ").append(numScans)
224                 .append(" after scanning ").append(numRowsScanned)
225                 .append(" rows!\n");
226             msg.append("Current  was " + Bytes.toString(res.getRow()) + "/"
227                 + Bytes.toString(family) + ":" + Bytes.toString(qualifier)
228                 + " = " + Bytes.toString(thisValue) + "\n");
229             msg.append("Previous  was " + Bytes.toString(lastRow) + "/"
230                 + Bytes.toString(lastFam) + ":" + Bytes.toString(lastQual)
231                 + " = " + Bytes.toString(gotValue));
232             throw new RuntimeException(msg.toString());
233           }
234 
235           lastFam = family;
236           lastQual = qualifier;
237           lastRow = res.getRow();
238           gotValue = thisValue;
239         }
240         numRowsScanned.getAndIncrement();
241       }
242       numScans.getAndIncrement();
243     }
244   }
245 
  /**
   * Creates a table with given table name and specified number of column
   * families if the table does not already exist.
   */
  private void setupTable(String table, int cfs) throws IOException {
    try {
      LOG.info("Creating table " + table);
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
      for (int i = 0; i < cfs; i++) {
        htd.addFamily(new HColumnDescriptor(family(i)));
      }

      UTIL.getHBaseAdmin().createTable(htd);
    } catch (TableExistsException tee) {
      LOG.info("Table " + table + " already exists");
    }
  }

  /**
   * Atomic bulk load.
   */
  @Test
  public void testAtomicBulkLoad() throws Exception {
    String tableName = "atomicBulkLoad";

    int millisToRun = 30000;
    int numScanners = 50;

    UTIL.startMiniCluster(1);
    try {
      runAtomicBulkloadTest(tableName, millisToRun, numScanners);
    } finally {
      UTIL.shutdownMiniCluster();
    }
  }

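  /**
   * Runs one bulk-loading thread and {@code numScanners} scanning threads
   * against {@code tableName} for {@code millisToRun} milliseconds.
   */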
  void runAtomicBulkloadTest(String tableName, int millisToRun, int numScanners)
      throws Exception {
    setupTable(tableName, 10);

    TestContext ctx = new TestContext(UTIL.getConfiguration());

    AtomicHFileLoader loader = new AtomicHFileLoader(tableName, ctx, null);
    ctx.addThread(loader);

    List<AtomicScanReader> scanners = Lists.newArrayList();
    for (int i = 0; i < numScanners; i++) {
      AtomicScanReader scanner = new AtomicScanReader(tableName, ctx, families);
      scanners.add(scanner);
      ctx.addThread(scanner);
    }

    ctx.startThreads();
    ctx.waitFor(millisToRun);
    ctx.stop();

    LOG.info("Loaders:");
    LOG.info("  loaded " + loader.numBulkLoads.get());
    LOG.info("  compactions " + loader.numCompactions.get());

    LOG.info("Scanners:");
    for (AtomicScanReader scanner : scanners) {
      LOG.info("  scanned " + scanner.numScans.get());
      LOG.info("  verified " + scanner.numRowsScanned.get() + " rows");
    }
  }

  /**
   * Run test on an HBase instance for 5 minutes. This assumes that the table
   * under test only has a single region.
   */
  public static void main(String args[]) throws Exception {
    try {
      Configuration c = HBaseConfiguration.create();
      TestHRegionServerBulkLoad test = new TestHRegionServerBulkLoad();
      test.setConf(c);
      test.runAtomicBulkloadTest("atomicTableTest", 5 * 60 * 1000, 50);
    } finally {
      System.exit(0); // something hangs (believed to be the LRU threadpool)
    }
  }

  private void setConf(Configuration c) {
    UTIL = new HBaseTestingUtility(c);
  }

}