/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase;

import java.io.IOException;
import java.util.Set;

import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.LoadTestTool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * A base class for tests that do something with the cluster while running
 * {@link LoadTestTool} to write and verify some data.
 */
@Category(IntegrationTests.class)
public class IntegrationTestIngest extends IntegrationTestBase {
  private static final int SERVER_COUNT = 4; // number of slaves for the smallest cluster
  private static final long DEFAULT_RUN_TIME = 20 * 60 * 1000;
  private static final long JUNIT_RUN_TIME = 10 * 60 * 1000;

  /** A soft limit on how long we should run */
  private static final String RUN_TIME_KEY = "hbase.%s.runtime";
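  // The %s is filled with the concrete test class's simple name, so for this class the
  // property is "hbase.IntegrationTestIngest.runtime" (value in milliseconds).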

  protected static final Log LOG = LogFactory.getLog(IntegrationTestIngest.class);
  protected IntegrationTestingUtility util;
  protected HBaseCluster cluster;
  private LoadTestTool loadTool;

  @Override
  public void setUpCluster() throws Exception {
    util = getTestingUtil(null);
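    // Starts an in-process mini cluster with SERVER_COUNT slaves, or, when configured to run
    // against an already-deployed distributed cluster, checks the existing cluster instead.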
    LOG.debug("Initializing/checking cluster has " + SERVER_COUNT + " servers");
    util.initializeCluster(SERVER_COUNT);
    LOG.debug("Done initializing/checking cluster");
    cluster = util.getHBaseClusterInterface();
    deleteTableIfNecessary();
    loadTool = new LoadTestTool();
    loadTool.setConf(util.getConfiguration());
    // Initialize load test tool before we start breaking things;
    // LoadTestTool init, even when it is a no-op, is very fragile.
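    // -init_only makes LoadTestTool create the table (and its column family) and exit without
    // loading any data; the actual write/update/read phases run later in runIngestTest().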
    int ret = loadTool.run(new String[] { "-tn", getTablename(), "-init_only" });
    Assert.assertEquals("Failed to initialize LoadTestTool", 0, ret);
  }

  @Override
  public int runTestFromCommandLine() throws Exception {
    internalRunIngestTest(DEFAULT_RUN_TIME);
    return 0;
  }

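  // JUnit entry point: same load parameters as the command-line run, but capped at
  // JUNIT_RUN_TIME (10 minutes) rather than DEFAULT_RUN_TIME (20 minutes).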
  @Test
  public void testIngest() throws Exception {
    runIngestTest(JUNIT_RUN_TIME, 2500, 10, 1024, 10);
  }

  private void internalRunIngestTest(long runTime) throws Exception {
    runIngestTest(runTime, 2500, 10, 1024, 10);
  }

  @Override
  public String getTablename() {
    return this.getClass().getSimpleName();
  }

  @Override
  protected Set<String> getColumnFamilies() {
    return Sets.newHashSet(Bytes.toString(LoadTestTool.COLUMN_FAMILY));
  }

  private void deleteTableIfNecessary() throws IOException {
    if (util.getHBaseAdmin().tableExists(getTablename())) {
      util.deleteTable(Bytes.toBytes(getTablename()));
    }
  }

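  /**
   * Writes, updates and then reads back/verifies data with {@link LoadTestTool}, looping over
   * fresh key ranges until roughly 90% of the configured run time has elapsed. Each iteration
   * covers keysPerServerPerIter keys per live region server.
   */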
  protected void runIngestTest(long defaultRunTime, int keysPerServerPerIter,
      int colsPerKey, int recordSize, int writeThreads) throws Exception {
    LOG.info("Running ingest");
    LOG.info("Cluster size: " + util.getHBaseClusterInterface().getClusterStatus().getServersSize());

    long start = System.currentTimeMillis();
    String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
    long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
    long startKey = 0;

    long numKeys = getNumKeys(keysPerServerPerIter);
    while (System.currentTimeMillis() - start < 0.9 * runtime) {
      LOG.info("Intended run time: " + (runtime / 60000) + " min, left: " +
          ((runtime - (System.currentTimeMillis() - start)) / 60000) + " min");

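      // Write phase: colsPerKey columns of recordSize bytes each, per key, using writeThreads
      // writer threads over the key range [startKey, startKey + numKeys).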
      int ret = loadTool.run(new String[] {
          "-tn", getTablename(),
          "-write", String.format("%d:%d:%d", colsPerKey, recordSize, writeThreads),
          "-start_key", String.valueOf(startKey),
          "-num_keys", String.valueOf(numKeys),
          "-skip_init"
      });
      if (0 != ret) {
        String errorMsg = "Load failed with error code " + ret;
        LOG.error(errorMsg);
        Assert.fail(errorMsg);
      }

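      // Update phase: the "60:<threads>" argument asks LoadTestTool to update roughly 60% of
      // the keys in the same range, reusing the writer thread count.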
      ret = loadTool.run(new String[] {
          "-tn", getTablename(),
          "-update", String.format("60:%d", writeThreads),
          "-start_key", String.valueOf(startKey),
          "-num_keys", String.valueOf(numKeys),
          "-skip_init"
      });
      if (0 != ret) {
        String errorMsg = "Update failed with error code " + ret;
        LOG.error(errorMsg);
        Assert.fail(errorMsg);
      }

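      // Verification phase: "100:20" reads back and verifies 100% of the keys with 20 reader threads.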
      ret = loadTool.run(new String[] {
          "-tn", getTablename(),
          "-read", "100:20",
          "-start_key", String.valueOf(startKey),
          "-num_keys", String.valueOf(numKeys),
          "-skip_init"
      });
      if (0 != ret) {
        String errorMsg = "Verification failed with error code " + ret;
        LOG.error(errorMsg);
        Assert.fail(errorMsg);
      }
      startKey += numKeys;
    }
  }

  /** Estimates the number of keys to load, based on the cluster size. */
  private long getNumKeys(int keysPerServer) throws IOException {
    int numRegionServers = cluster.getClusterStatus().getServersSize();
    return keysPerServer * numRegionServers;
  }

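  /**
   * Command-line entry point: runs the test against the configured (typically distributed)
   * cluster via {@link ToolRunner} instead of the JUnit runner.
   */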
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    int ret = ToolRunner.run(conf, new IntegrationTestIngest(), args);
    System.exit(ret);
  }
}