/*
 * Copyright 2011 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTableUtil;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests the block cache summary functionality in StoreFile,
 * which holds a reference to the BlockCache.
 */
@Category(MediumTests.class)
public class TestStoreFileBlockCacheSummary {
  private final Log LOG = LogFactory.getLog(getClass());
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final String TEST_TABLE = "testTable";
  private static final String TEST_TABLE2 = "testTable2";
  private static final String TEST_CF = "testFamily";
  private static final byte [] FAMILY = Bytes.toBytes(TEST_CF);
  private static final byte [] QUALIFIER = Bytes.toBytes("testQualifier");
  private static final byte [] VALUE = Bytes.toBytes("testValue");

  private static final int TOTAL_ROWS = 4;

  /**
   * Starts the mini cluster shared by all tests in this class.
   * @throws Exception if the mini cluster fails to start
   */
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster();
  }

  /**
   * Shuts down the mini cluster.
   * @throws Exception if the mini cluster fails to shut down
   */
  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  private Put createPut(byte[] family, String row) {
    Put put = new Put(Bytes.toBytes(row));
    put.add(family, QUALIFIER, VALUE);
    return put;
  }

  /**
   * This test inserts data into two tables and then scans both
   * to ensure their blocks are loaded into the block cache.
   *
   * @throws Exception on failure
   */
  @Test
  public void testBlockCacheSummary() throws Exception {
    HTable ht = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE), FAMILY);
    addRows(ht, FAMILY);

    HTable ht2 = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE2), FAMILY);
    addRows(ht2, FAMILY);

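    // Flush the memstores so both tables are persisted to StoreFiles (HFiles);
    // the scans below will then read those blocks through the block cache.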
    TEST_UTIL.flush();

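    // Scanning each table pulls its HFile blocks into the block cache.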
    scan(ht, FAMILY);
    scan(ht2, FAMILY);

    BlockCache bc =
      new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
    List<BlockCacheColumnFamilySummary> bcs =
      bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
    LOG.info("blockCacheSummary: " + bcs);

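    // Three summaries are expected, in sorted order: the -ROOT- catalog
    // table's "info" family (its blocks end up cached as the client looks
    // up region locations), followed by each test table's column family.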
    assertEquals("blockCache summary has entries", 3, bcs.size());

    BlockCacheColumnFamilySummary e = bcs.get(0);
    assertEquals("table", "-ROOT-", e.getTable());
    assertEquals("cf", "info", e.getColumnFamily());

    e = bcs.get(1);
    assertEquals("table", TEST_TABLE, e.getTable());
    assertEquals("cf", TEST_CF, e.getColumnFamily());

    e = bcs.get(2);
    assertEquals("table", TEST_TABLE2, e.getTable());
    assertEquals("cf", TEST_CF, e.getColumnFamily());

    ht.close();
    ht2.close();
  }

  private void addRows(HTable ht, byte[] family) throws IOException {
    List<Row> rows = new ArrayList<Row>();
    for (int i = 0; i < TOTAL_ROWS; i++) {
      rows.add(createPut(family, "row" + i));
    }

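    // HTableUtil.bucketRsBatch() buckets the rows by the region server
    // hosting them and submits each bucket as a single batch.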
    HTableUtil.bucketRsBatch(ht, rows);
  }

  private void scan(HTable ht, byte[] family) throws IOException {
    Scan scan = new Scan();
    scan.addColumn(family, QUALIFIER);

    int count = 0;
    ResultScanner scanner = ht.getScanner(scan);
    try {
      for (@SuppressWarnings("unused") Result result : scanner) {
        count++;
      }
    } finally {
      scanner.close();
    }
    if (TOTAL_ROWS != count) {
      throw new IOException("Expected " + TOTAL_ROWS + " rows, got " + count);
    }
  }

  // JUnit rule that tracks resource usage (e.g. thread count) around
  // each test to catch leaks.
  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}