/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

/**
 * A write/read/verify load test on a mini HBase cluster. Writes all keys first,
 * then reads them back and verifies them.
 */
@Category(LargeTests.class)
@RunWith(Parameterized.class)
public class TestMiniClusterLoadSequential {

  private static final Log LOG = LogFactory.getLog(
      TestMiniClusterLoadSequential.class);

  protected static final byte[] TABLE = Bytes.toBytes("load_test_tbl");
  protected static final byte[] CF = Bytes.toBytes("load_test_cf");
  protected static final int NUM_THREADS = 8;
  protected static final int NUM_RS = 2;
  protected static final int TIMEOUT_MS = 180000;
  protected static final HBaseTestingUtility TEST_UTIL =
      new HBaseTestingUtility();

  protected final Configuration conf = TEST_UTIL.getConfiguration();
  protected final boolean isMultiPut;
  protected final DataBlockEncoding dataBlockEncoding;

  protected MultiThreadedWriter writerThreads;
  protected MultiThreadedReader readerThreads;
  protected int numKeys;

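  /** Compression applied to the test column family; NONE unless changed before table creation. */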
  protected Compression.Algorithm compression = Compression.Algorithm.NONE;

  public TestMiniClusterLoadSequential(boolean isMultiPut,
      DataBlockEncoding dataBlockEncoding) {
    this.isMultiPut = isMultiPut;
    this.dataBlockEncoding = dataBlockEncoding;
    conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
  }

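  /**
   * Runs the test for every combination of single-put vs. multi-put writes and
   * NONE vs. PREFIX data block encoding.
   */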
  @Parameters
  public static Collection<Object[]> parameters() {
    List<Object[]> parameters = new ArrayList<Object[]>();
    for (boolean multiPut : new boolean[]{false, true}) {
      for (DataBlockEncoding dataBlockEncoding : new DataBlockEncoding[] {
          DataBlockEncoding.NONE, DataBlockEncoding.PREFIX }) {
        parameters.add(new Object[]{multiPut, dataBlockEncoding});
      }
    }
    return parameters;
  }

  @Before
  public void setUp() throws Exception {
    LOG.debug("Test setup: isMultiPut=" + isMultiPut);
    TEST_UTIL.startMiniCluster(1, NUM_RS);
  }

  @After
  public void tearDown() throws Exception {
    LOG.debug("Test teardown: isMultiPut=" + isMultiPut);
    TEST_UTIL.shutdownMiniCluster();
  }

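  /**
   * Creates the reader pool that reads keys back and verifies them; protected
   * so subclasses can supply a different reader implementation.
   */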
  protected MultiThreadedReader prepareReaderThreads(LoadTestDataGenerator dataGen,
      Configuration conf, byte[] tableName, double verifyPercent) {
    MultiThreadedReader reader = new MultiThreadedReader(dataGen, conf, tableName, verifyPercent);
    return reader;
  }

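  /**
   * Creates the writer pool, configured for single-put or multi-put writes
   * according to the test parameter.
   */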
  protected MultiThreadedWriter prepareWriterThreads(LoadTestDataGenerator dataGen,
      Configuration conf, byte[] tableName) {
    MultiThreadedWriter writer = new MultiThreadedWriter(dataGen, conf, tableName);
    writer.setMultiPut(isMultiPut);
    return writer;
  }

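  /** Sets up the table and worker threads, then runs the write/read/verify load. */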
  @Test(timeout=TIMEOUT_MS)
  public void loadTest() throws Exception {
    prepareForLoadTest();
    runLoadTestOnExistingTable();
  }

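  /**
   * Writes all keys, then reads them back, asserting that there were no write
   * failures, read failures, or read errors and that every key was verified.
   */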
  protected void runLoadTestOnExistingTable() throws IOException {
    writerThreads.start(0, numKeys, NUM_THREADS);
    writerThreads.waitForFinish();
    assertEquals(0, writerThreads.getNumWriteFailures());

    readerThreads.start(0, numKeys, NUM_THREADS);
    readerThreads.waitForFinish();
    assertEquals(0, readerThreads.getNumReadFailures());
    assertEquals(0, readerThreads.getNumReadErrors());
    assertEquals(numKeys, readerThreads.getNumKeysVerified());
  }

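  /** Creates the pre-split load test table and waits until all of its regions are assigned. */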
  protected void createPreSplitLoadTestTable(HTableDescriptor htd, HColumnDescriptor hcd)
      throws IOException {
    HBaseTestingUtility.createPreSplitLoadTestTable(conf, htd, hcd);
    TEST_UTIL.waitUntilAllRegionsAssigned(htd.getName());
  }

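  /**
   * Waits for all region servers to come online, creates the test table with the
   * configured compression and data block encoding, and initializes the reader
   * and writer threads.
   */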
  protected void prepareForLoadTest() throws IOException {
    LOG.info("Starting load test: dataBlockEncoding=" + dataBlockEncoding +
        ", isMultiPut=" + isMultiPut);
    numKeys = numKeys();
    HBaseAdmin admin = new HBaseAdmin(conf);
    while (admin.getClusterStatus().getServers().size() < NUM_RS) {
      LOG.info("Sleeping until " + NUM_RS + " RSs are online");
      Threads.sleepWithoutInterrupt(1000);
    }
    admin.close();

    HTableDescriptor htd = new HTableDescriptor(TABLE);
    HColumnDescriptor hcd = new HColumnDescriptor(CF)
        .setCompressionType(compression)
        .setDataBlockEncoding(dataBlockEncoding);
    createPreSplitLoadTestTable(htd, hcd);

    LoadTestDataGenerator dataGen = new MultiThreadedAction.DefaultDataGenerator(CF);
    writerThreads = prepareWriterThreads(dataGen, conf, TABLE);
    readerThreads = prepareReaderThreads(dataGen, conf, TABLE, 100);
  }

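  /** Number of keys to load; protected so subclasses can use a different key count. */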
  protected int numKeys() {
    return 10000;
  }

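  /** Retrieves the current column family descriptor of the test table via the given admin. */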
  protected HColumnDescriptor getColumnDesc(HBaseAdmin admin)
      throws TableNotFoundException, IOException {
    return admin.getTableDescriptor(TABLE).getFamily(CF);
  }

}