/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import java.io.IOException;

import junit.framework.Assert;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.LoadTestTool;

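/**
 * A base class for integration tests that ingest data into a live HBase cluster with
 * {@link LoadTestTool}: within a configurable time budget, each iteration writes a batch
 * of keys and then reads the same batch back to verify it. Subclasses drive the test via
 * {@link #setUp(int)}, {@link #runIngestTest(long, int, int, int, int)} and {@link #tearDown()}.
 */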
public abstract class IngestIntegrationTestBase {
  private static String tableName = null;

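  /** Soft bound on total run time; read from "hbase.&lt;test class name&gt;.runtime" if set. */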
  private static final String RUN_TIME_KEY = "hbase.%s.runtime";

  protected static final Log LOG = LogFactory.getLog(IngestIntegrationTestBase.class);
  protected IntegrationTestingUtility util;
  protected HBaseCluster cluster;
  private LoadTestTool loadTool;

  protected void setUp(int numSlavesBase) throws Exception {
    tableName = this.getClass().getSimpleName();
    util = new IntegrationTestingUtility();
    LOG.info("Initializing cluster with " + numSlavesBase + " servers");
    util.initializeCluster(numSlavesBase);
    LOG.info("Done initializing cluster");
    cluster = util.getHBaseClusterInterface();
    deleteTableIfNecessary();
    loadTool = new LoadTestTool();
    loadTool.setConf(util.getConfiguration());

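    // Create the test table once up front (-init_only); the write/read iterations below
    // run with -skip_init against this table.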
    int ret = loadTool.run(new String[] { "-tn", tableName, "-init_only" });
    Assert.assertEquals("Failed to initialize LoadTestTool", 0, ret);
  }

  protected void tearDown() throws Exception {
    LOG.info("Restoring the cluster");
    util.restoreCluster();
    LOG.info("Done restoring the cluster");
  }

  private void deleteTableIfNecessary() throws IOException {
    if (util.getHBaseAdmin().tableExists(tableName)) {
      util.deleteTable(Bytes.toBytes(tableName));
    }
  }

  protected void runIngestTest(long defaultRunTime, int keysPerServerPerIter,
      int colsPerKey, int recordSize, int writeThreads) throws Exception {
    LOG.info("Running ingest");
    LOG.info("Cluster size:" + util.getHBaseClusterInterface().getClusterStatus().getServersSize());

    long start = System.currentTimeMillis();
    String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
    long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
    long startKey = 0;

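    // Size each iteration's key batch to the current number of live region servers.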
    long numKeys = getNumKeys(keysPerServerPerIter);
    while (System.currentTimeMillis() - start < 0.9 * runtime) {
      LOG.info("Intended run time: " + (runtime/60000) + " min, left:" +
          ((runtime - (System.currentTimeMillis() - start))/60000) + " min");

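      // Write phase: load keys [startKey, startKey + numKeys) with colsPerKey columns of
      // roughly recordSize bytes each, using writeThreads writer threads.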
      int ret = loadTool.run(new String[] {
          "-tn", tableName,
          "-write", String.format("%d:%d:%d", colsPerKey, recordSize, writeThreads),
          "-start_key", String.valueOf(startKey),
          "-num_keys", String.valueOf(numKeys),
          "-skip_init"
      });
      if (0 != ret) {
        String errorMsg = "Load failed with error code " + ret;
        LOG.error(errorMsg);
        Assert.fail(errorMsg);
      }

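      // Read phase: read the same key range back and verify what was just written.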
      ret = loadTool.run(new String[] {
          "-tn", tableName,
          "-read", "100:20",
          "-start_key", String.valueOf(startKey),
          "-num_keys", String.valueOf(numKeys),
          "-skip_init"
      });
      if (0 != ret) {
        String errorMsg = "Verification failed with error code " + ret;
        LOG.error(errorMsg);
        Assert.fail(errorMsg);
      }
      startKey += numKeys;
    }
  }

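  /** Scales the per-iteration key count with the number of live region servers. */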
  private long getNumKeys(int keysPerServer) throws IOException {
    int numRegionServers = cluster.getClusterStatus().getServersSize();
    // Widen to long before multiplying so large configurations do not overflow int arithmetic.
    return ((long) keysPerServer) * numRegionServers;
  }
}