View Javadoc

1   /*
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.regionserver;
20  
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTableUtil;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
46  
47  /**
48   * Tests the block cache summary functionality in StoreFile, 
49   * which contains the BlockCache
50   *
51   */
52  @Category(MediumTests.class)
53  public class TestStoreFileBlockCacheSummary {
54    final Log LOG = LogFactory.getLog(getClass());
55    private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();  
56    private static final String TEST_TABLE = "testTable";
57    private static final String TEST_TABLE2 = "testTable2";
58    private static final String TEST_CF = "testFamily";
59    private static byte [] FAMILY = Bytes.toBytes(TEST_CF);
60    private static byte [] QUALIFIER = Bytes.toBytes("testQualifier");
61    private static byte [] VALUE = Bytes.toBytes("testValue");
62  
63    private final int TOTAL_ROWS = 4;
64    
65    /**
66     * @throws java.lang.Exception exception
67     */
68    @BeforeClass
69    public static void setUpBeforeClass() throws Exception {
70      TEST_UTIL.startMiniCluster();
71    }
72  
73    /**
74     * @throws java.lang.Exception exception
75     */
76    @AfterClass
77    public static void tearDownAfterClass() throws Exception {
78      TEST_UTIL.shutdownMiniCluster();
79    }
80    
81  
82    private Put createPut(byte[] family, String row) {
83      Put put = new Put( Bytes.toBytes(row));
84      put.add(family, QUALIFIER, VALUE);
85      return put;
86    }
87    
88    /**
89    * This test inserts data into multiple tables and then reads both tables to ensure
90    * they are in the block cache.
91    *
92    * @throws Exception exception
93    */
94   @Test
95   public void testBlockCacheSummary() throws Exception {
96     HTable ht = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE), FAMILY);
97     addRows(ht, FAMILY);
98  
99     HTable ht2 = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE2), FAMILY);
100    addRows(ht2, FAMILY);
101 
102    TEST_UTIL.flush();
103    
104    scan(ht, FAMILY);
105    scan(ht2, FAMILY);
106       
107    BlockCache bc =
108      new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
109    List<BlockCacheColumnFamilySummary> bcs = 
110      bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
111    LOG.info("blockCacheSummary: " + bcs);
112 
113    assertTrue("blockCache summary has " + bcs.size() + " entries", bcs.size() >= 2);
114 
115    BlockCacheColumnFamilySummary e = bcs.get(bcs.size()-2);
116    assertEquals("table", TEST_TABLE, e.getTable());
117    assertEquals("cf", TEST_CF, e.getColumnFamily());
118 
119    e = bcs.get(bcs.size()-1);
120    assertEquals("table", TEST_TABLE2, e.getTable());
121    assertEquals("cf", TEST_CF, e.getColumnFamily());
122 
123    ht.close();
124    ht2.close();
125  }
126 
127  private void addRows(HTable ht, byte[] family) throws IOException {
128  
129    List<Row> rows = new ArrayList<Row>();
130    for (int i = 0; i < TOTAL_ROWS;i++) {
131      rows.add(createPut(family, "row" + i));
132    }
133    
134    HTableUtil.bucketRsBatch( ht, rows);
135  }
136 
137  private void scan(HTable ht, byte[] family) throws IOException {
138    Scan scan = new Scan();
139    scan.addColumn(family, QUALIFIER);
140    
141    int count = 0;
142    for(@SuppressWarnings("unused") Result result : ht.getScanner(scan)) {
143      count++;
144    }
145    if (TOTAL_ROWS != count) {
146      throw new IOException("Incorrect number of rows!");
147    }
148  }
149 
150 }
151