/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase;

import java.io.IOException;
import java.util.Set;

import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.LoadTestTool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * A base class for tests that do something with the cluster while running
 * {@link LoadTestTool} to write and verify some data.
 */
@Category(IntegrationTests.class)
public class IntegrationTestIngest extends IntegrationTestBase {
  private static final int SERVER_COUNT = 4; // number of slaves for the smallest cluster
  private static final long DEFAULT_RUN_TIME = 20 * 60 * 1000;

  protected static String tableName = null;

  /** A soft limit on how long we should run */
  private static final String RUN_TIME_KEY = "hbase.%s.runtime";

  protected static final Log LOG = LogFactory.getLog(IntegrationTestIngest.class);
  protected IntegrationTestingUtility util;
  protected HBaseCluster cluster;
  private LoadTestTool loadTool;

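  /**
   * Brings up (or checks) a cluster with at least the given number of slave servers and
   * initializes the {@link LoadTestTool} table before the test starts manipulating the cluster.
   */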
  protected void setUp(int numSlavesBase) throws Exception {
    tableName = this.getClass().getSimpleName();
    util = getTestingUtil(null);
    LOG.debug("Initializing/checking cluster has " + numSlavesBase + " servers");
    util.initializeCluster(numSlavesBase);
    LOG.debug("Done initializing/checking cluster");
    cluster = util.getHBaseClusterInterface();
    deleteTableIfNecessary();
    loadTool = new LoadTestTool();
    loadTool.setConf(util.getConfiguration());
    // Initialize load test tool before we start breaking things;
    // LoadTestTool init, even when it is a no-op, is very fragile.
    int ret = loadTool.run(new String[] { "-tn", tableName, "-init_only" });
    Assert.assertEquals("Failed to initialize LoadTestTool", 0, ret);
  }

  @Override
  public void setUp() throws Exception {
    setUp(SERVER_COUNT);
  }

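  /** Gives the cluster back, restoring it to the state it was in before the test ran. */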
  @Override
  public void cleanUp() throws Exception {
    LOG.debug("Restoring the cluster");
    util.restoreCluster();
    LOG.debug("Done restoring the cluster");
  }

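  /** Entry point used when the test is launched as a command-line tool via {@link ToolRunner}. */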
  @Override
  public int runTestFromCommandLine() throws Exception {
    internalRunIngestTest();
    return 0;
  }

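  /**
   * Runs the default ingest workload: 2500 keys per server per iteration, 10 columns per key,
   * 1024-byte values and 10 writer threads, for the default run time of 20 minutes
   * (unless overridden through configuration).
   */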
  @Test
  public void internalRunIngestTest() throws Exception {
    runIngestTest(DEFAULT_RUN_TIME, 2500, 10, 1024, 10);
  }

  @Override
  public String getTablename() {
    return tableName;
  }

  @Override
  protected Set<String> getColumnFamilies() {
    return Sets.newHashSet(Bytes.toString(LoadTestTool.COLUMN_FAMILY));
  }

  private void deleteTableIfNecessary() throws IOException {
    if (util.getHBaseAdmin().tableExists(tableName)) {
      util.deleteTable(Bytes.toBytes(tableName));
    }
  }

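  /**
   * Runs write, update and read/verify passes of {@link LoadTestTool} in a loop until roughly
   * 90% of the allotted run time has elapsed.
   *
   * @param defaultRunTime       run time in milliseconds, used unless overridden through the
   *                             "hbase.&lt;test class name&gt;.runtime" configuration key
   * @param keysPerServerPerIter number of keys loaded per region server in each iteration
   * @param colsPerKey           average number of columns written per key
   * @param recordSize           average value size in bytes
   * @param writeThreads         number of threads used for the write and update passes
   */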
  protected void runIngestTest(long defaultRunTime, int keysPerServerPerIter,
      int colsPerKey, int recordSize, int writeThreads) throws Exception {
    LOG.info("Running ingest");
    LOG.info("Cluster size:" + util.getHBaseClusterInterface().getClusterStatus().getServersSize());

    long start = System.currentTimeMillis();
    String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
    long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
    long startKey = 0;

    long numKeys = getNumKeys(keysPerServerPerIter);
    while (System.currentTimeMillis() - start < 0.9 * runtime) {
      LOG.info("Intended run time: " + (runtime/60000) + " min, left:" +
          ((runtime - (System.currentTimeMillis() - start))/60000) + " min");

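      // Write pass: the -write argument is <columns per key>:<record size>:<writer threads>.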
      int ret = loadTool.run(new String[] {
          "-tn", tableName,
          "-write", String.format("%d:%d:%d", colsPerKey, recordSize, writeThreads),
          "-start_key", String.valueOf(startKey),
          "-num_keys", String.valueOf(numKeys),
          "-skip_init"
      });
      if (0 != ret) {
        String errorMsg = "Load failed with error code " + ret;
        LOG.error(errorMsg);
        Assert.fail(errorMsg);
      }

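      // Update pass: per LoadTestTool usage, -update takes <update percent>:<threads>,
      // so roughly 60% of the keys written above are updated using the same thread count.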
      ret = loadTool.run(new String[] {
          "-tn", tableName,
          "-update", String.format("60:%d", writeThreads),
          "-start_key", String.valueOf(startKey),
          "-num_keys", String.valueOf(numKeys),
          "-skip_init"
      });
      if (0 != ret) {
        String errorMsg = "Update failed with error code " + ret;
        LOG.error(errorMsg);
        Assert.fail(errorMsg);
      }

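      // Read/verify pass: -read takes <verify percent>:<threads>, so every key in the range
      // is read back and verified with 20 reader threads.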
      ret = loadTool.run(new String[] {
          "-tn", tableName,
          "-read", "100:20",
          "-start_key", String.valueOf(startKey),
          "-num_keys", String.valueOf(numKeys),
          "-skip_init"
      });
      if (0 != ret) {
        String errorMsg = "Verification failed with error code " + ret;
        LOG.error(errorMsg);
        Assert.fail(errorMsg);
      }
      startKey += numKeys;
    }
  }

  /** Estimates the number of keys to load, based on the cluster size. */
  private long getNumKeys(int keysPerServer)
      throws IOException {
    int numRegionServers = cluster.getClusterStatus().getServersSize();
    // Widen before multiplying so large key counts cannot overflow the int math.
    return (long) keysPerServer * numRegionServers;
  }

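  /** Allows running this test standalone against an already-deployed (distributed) cluster. */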
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    int ret = ToolRunner.run(conf, new IntegrationTestIngest(), args);
    System.exit(ret);
  }
}