/*
 * Copyright The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.StoreMetricType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test metrics incremented on region server operations.
 */
@Category(MediumTests.class)
public class TestRegionServerMetrics {

  private static final Log LOG =
      LogFactory.getLog(TestRegionServerMetrics.class.getName());

  private static final String TABLE_NAME =
      TestRegionServerMetrics.class.getSimpleName() + "Table";
  private static final String[] FAMILIES = new String[] { "cf1", "cf2", "anotherCF" };
  private static final int MAX_VERSIONS = 1;
  private static final int NUM_COLS_PER_ROW = 15;
  private static final int NUM_FLUSHES = 3;
  private static final int NUM_REGIONS = 4;

  private static final SchemaMetrics ALL_METRICS =
      SchemaMetrics.ALL_SCHEMA_METRICS;

  private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private Map<String, Long> startingMetrics;

  private static final int META_AND_ROOT = 2;

  @Before
  public void setUp() throws Exception {
    SchemaMetrics.setUseTableNameInTest(true);
    startingMetrics = SchemaMetrics.getMetricsSnapshot();
    TEST_UTIL.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
    SchemaMetrics.validateMetricChanges(startingMetrics);
  }

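  /**
   * Asserts that the time-varying metric identified by {@code metricPrefix} has been
   * incremented the expected number of times, checking the per-CF key when {@code cf} is
   * non-null and the per-region key when {@code regionName} is non-null.
   */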
  private void assertTimeVaryingMetricCount(int expectedCount, String table, String cf,
      String regionName, String metricPrefix) {

    Integer expectedCountInteger = Integer.valueOf(expectedCount);

    if (cf != null) {
      String cfKey =
          SchemaMetrics.TABLE_PREFIX + table + "." + SchemaMetrics.CF_PREFIX + cf + "."
              + metricPrefix;
      Pair<Long, Integer> cfPair = RegionMetricsStorage.getTimeVaryingMetric(cfKey);
      assertEquals(expectedCountInteger, cfPair.getSecond());
    }

    if (regionName != null) {
      String rKey =
          SchemaMetrics.TABLE_PREFIX + table + "." + SchemaMetrics.REGION_PREFIX + regionName + "."
              + metricPrefix;

      Pair<Long, Integer> regionPair = RegionMetricsStorage.getTimeVaryingMetric(rKey);
      assertEquals(expectedCountInteger, regionPair.getSecond());
    }
  }

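  /**
   * Asserts that the given store metric has increased by {@code expected} relative to the
   * snapshot of metrics taken in {@link #setUp()}.
   */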
  private void assertStoreMetricEquals(long expected,
      SchemaMetrics schemaMetrics, StoreMetricType storeMetricType) {
    final String storeMetricName =
        schemaMetrics.getStoreMetricName(storeMetricType);
    Long startValue = startingMetrics.get(storeMetricName);
    assertEquals("Invalid value for store metric " + storeMetricName
        + " (type " + storeMetricType + ")", expected,
        RegionMetricsStorage.getNumericMetric(storeMetricName)
            - (startValue != null ? startValue : 0));
  }

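  /**
   * Verifies that multiput, incrementColumnValue, increment, append, delete, and get
   * operations update the expected per-CF and per-region time-varying metrics.
   */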
  @Test
  public void testOperationMetrics() throws IOException {
    String cf = "OPCF";
    String otherCf = "otherCF";
    String rk = "testRK";
    String icvCol = "icvCol";
    String appendCol = "appendCol";
    String regionName = null;
    HTable hTable =
        TEST_UTIL.createTable(TABLE_NAME.getBytes(),
            new byte[][] { cf.getBytes(), otherCf.getBytes() });
    Set<HRegionInfo> regionInfos = hTable.getRegionLocations().keySet();

    regionName = regionInfos.toArray(new HRegionInfo[regionInfos.size()])[0].getEncodedName();

    // Do a multiput that touches a single column family. Since the puts use different row
    // keys, the row locks can still be obtained and everything is applied in one multiput.
    Put pOne = new Put(rk.getBytes());
    pOne.add(cf.getBytes(), icvCol.getBytes(), Bytes.toBytes(0L));
    Put pTwo = new Put("ignored1RK".getBytes());
    pTwo.add(cf.getBytes(), "ignored".getBytes(), Bytes.toBytes(0L));

    hTable.put(Arrays.asList(new Put[] { pOne, pTwo }));

    // Do a multiput where the column family is not consistent across the puts.
    Put pThree = new Put("ignored2RK".getBytes());
    pThree.add(cf.getBytes(), "ignored".getBytes(), Bytes.toBytes("TEST1"));
    Put pFour = new Put("ignored3RK".getBytes());
    pFour.add(otherCf.getBytes(), "ignored".getBytes(), Bytes.toBytes(0L));

    hTable.put(Arrays.asList(new Put[] { pThree, pFour }));

    hTable.incrementColumnValue(rk.getBytes(), cf.getBytes(), icvCol.getBytes(), 1L);

    Increment i = new Increment(rk.getBytes());
    i.addColumn(cf.getBytes(), icvCol.getBytes(), 1L);
    hTable.increment(i);

    Get g = new Get(rk.getBytes());
    g.addColumn(cf.getBytes(), appendCol.getBytes());
    hTable.get(g);

    Append a = new Append(rk.getBytes());
    a.add(cf.getBytes(), appendCol.getBytes(), Bytes.toBytes("-APPEND"));
    hTable.append(a);

    Delete dOne = new Delete(rk.getBytes());
    dOne.deleteFamily(cf.getBytes());
    hTable.delete(dOne);

    Delete dTwo = new Delete(rk.getBytes());
    hTable.delete(dTwo);

    // There should be one multiput where the cf is consistent.
    assertTimeVaryingMetricCount(1, TABLE_NAME, cf, null, "multiput_");

    // There were two multiputs to the region.
    assertTimeVaryingMetricCount(2, TABLE_NAME, null, regionName, "multiput_");

    // There was one multiput where the cf was not consistent.
    assertTimeVaryingMetricCount(1, TABLE_NAME, "__unknown", null, "multiput_");

    // One incrementColumnValue, one increment, and one append.
    assertTimeVaryingMetricCount(1, TABLE_NAME, cf, regionName, "incrementColumnValue_");
    assertTimeVaryingMetricCount(1, TABLE_NAME, cf, regionName, "increment_");
    assertTimeVaryingMetricCount(1, TABLE_NAME, cf, regionName, "append_");

    // One delete where the cf is specified.
    assertTimeVaryingMetricCount(1, TABLE_NAME, cf, null, "delete_");

    // Two deletes in the region.
    assertTimeVaryingMetricCount(2, TABLE_NAME, null, regionName, "delete_");

    // Four gets: the explicit get, plus the reads done by incrementColumnValue, increment,
    // and append.
    assertTimeVaryingMetricCount(4, TABLE_NAME, cf, regionName, "get_");

    hTable.close();
  }

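  /**
   * Verifies that the per-CF and per-region metrics for a table go back to zero after the
   * table is disabled and deleted.
   */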
  @Test
  public void testRemoveRegionMetrics() throws IOException, InterruptedException {
    String cf = "REMOVECF";
    HTable hTable = TEST_UTIL.createTable(TABLE_NAME.getBytes(), cf.getBytes());
    HRegionInfo[] regionInfos =
        hTable.getRegionLocations().keySet()
            .toArray(new HRegionInfo[hTable.getRegionLocations().keySet().size()]);

    String regionName = regionInfos[0].getEncodedName();

    // Do some operations so there are metrics.
    Put pOne = new Put("TEST".getBytes());
    pOne.add(cf.getBytes(), "test".getBytes(), "test".getBytes());
    hTable.put(pOne);

    Get g = new Get("TEST".getBytes());
    g.addFamily(cf.getBytes());
    hTable.get(g);
    assertTimeVaryingMetricCount(1, TABLE_NAME, cf, regionName, "get_");

    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    admin.disableTable(TABLE_NAME.getBytes());
    admin.deleteTable(TABLE_NAME.getBytes());

    // The metrics for the deleted table's region should have been removed.
    assertTimeVaryingMetricCount(0, TABLE_NAME, cf, regionName, "get_");

    hTable.close();
  }

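  /**
   * Verifies the store file count metrics, both per column family and aggregated across all
   * schemas, after creating a multi-region table that is flushed several times.
   */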
  @Test
  public void testMultipleRegions() throws IOException, InterruptedException {
    TEST_UTIL.createRandomTable(
        TABLE_NAME,
        Arrays.asList(FAMILIES),
        MAX_VERSIONS, NUM_COLS_PER_ROW, NUM_FLUSHES, NUM_REGIONS, 1000);

    final HRegionServer rs =
        TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);

    assertEquals(NUM_REGIONS + META_AND_ROOT, rs.getOnlineRegions().size());

    rs.doMetrics();
    for (HRegion r : TEST_UTIL.getMiniHBaseCluster().getRegions(
        Bytes.toBytes(TABLE_NAME))) {
      for (Map.Entry<byte[], Store> storeEntry : r.getStores().entrySet()) {
        LOG.info("For region " + r.getRegionNameAsString() + ", CF " +
            Bytes.toStringBinary(storeEntry.getKey()) + " found store files: " +
            storeEntry.getValue().getStorefiles());
      }
    }

    assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS * FAMILIES.length
        + META_AND_ROOT, ALL_METRICS, StoreMetricType.STORE_FILE_COUNT);

    for (String cf : FAMILIES) {
      SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE_NAME, cf);
      assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS, schemaMetrics,
          StoreMetricType.STORE_FILE_COUNT);
    }

    // Ensure that the max value is also maintained.
    final String storeMetricName = ALL_METRICS
        .getStoreMetricNameMax(StoreMetricType.STORE_FILE_COUNT);
    assertEquals("Invalid value for store metric " + storeMetricName,
        NUM_FLUSHES, RegionMetricsStorage.getNumericMetric(storeMetricName));
  }

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
      new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();

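  /**
   * Asserts the cumulative getsize and nextsize metrics for each column family. The
   * {@code metrics} array holds the expected getsize values for all CFs, followed by the
   * expected nextsize values in the same CF order.
   */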
  private void assertSizeMetric(String table, String[] cfs, int[] metrics) {
    // We have getsize and nextsize entries for each column family.
    assertEquals(cfs.length * 2, metrics.length);

    for (int i = 0; i < cfs.length; ++i) {
      String prefix = SchemaMetrics.generateSchemaMetricsPrefix(table, cfs[i]);
      String getMetric = prefix + SchemaMetrics.METRIC_GETSIZE;
      String nextMetric = prefix + SchemaMetrics.METRIC_NEXTSIZE;

      // Verify that getsize and nextsize match the expected values.
      int getSize = RegionMetricsStorage.getNumericMetrics().containsKey(getMetric) ?
          RegionMetricsStorage.getNumericMetrics().get(getMetric).intValue() : 0;
      int nextSize = RegionMetricsStorage.getNumericMetrics().containsKey(nextMetric) ?
          RegionMetricsStorage.getNumericMetrics().get(nextMetric).intValue() : 0;

      assertEquals(metrics[i], getSize);
      assertEquals(metrics[cfs.length + i], nextSize);
    }
  }

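  /**
   * Verifies that gets update the getsize metric, scans update the nextsize metric, and
   * flushes and compactions change neither.
   */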
  @Test
  public void testGetNextSize() throws IOException, InterruptedException {
    String rowName = "row1";
    byte[] ROW = Bytes.toBytes(rowName);
    String tableName = "SizeMetricTest";
    byte[] TABLE = Bytes.toBytes(tableName);
    String cf1Name = "cf1";
    String cf2Name = "cf2";
    String[] cfs = new String[] { cf1Name, cf2Name };
    byte[] CF1 = Bytes.toBytes(cf1Name);
    byte[] CF2 = Bytes.toBytes(cf2Name);

    long ts = 1234;
    HTable hTable = TEST_UTIL.createTable(TABLE, new byte[][] { CF1, CF2 });
    HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());

    Put p = new Put(ROW);
    p.add(CF1, CF1, ts, CF1);
    p.add(CF2, CF2, ts, CF2);
    hTable.put(p);

    KeyValue kv1 = new KeyValue(ROW, CF1, CF1, ts, CF1);
    KeyValue kv2 = new KeyValue(ROW, CF2, CF2, ts, CF2);
    int kvLength = kv1.getLength();
    assertEquals(kvLength, kv2.getLength());

    // Only cf1.getsize is set on this Get.
    hTable.get(new Get(ROW).addFamily(CF1));
    assertSizeMetric(tableName, cfs, new int[] { kvLength, 0, 0, 0 });

    // Only cf2.getsize is set on this Get.
    hTable.get(new Get(ROW).addFamily(CF2));
    assertSizeMetric(tableName, cfs, new int[] { kvLength, kvLength, 0, 0 });

    // Only cf2.nextsize is set; drain the scanner so the metric is updated.
    for (Result res : hTable.getScanner(CF2)) {
    }
    assertSizeMetric(tableName, cfs,
        new int[] { kvLength, kvLength, 0, kvLength });

    // Only cf1.nextsize is set; drain the scanner so the metric is updated.
    for (Result res : hTable.getScanner(CF1)) {
    }
    assertSizeMetric(tableName, cfs,
        new int[] { kvLength, kvLength, kvLength, kvLength });

    // getsize/nextsize should not be changed by flush or compaction.
    for (HRegion hr : TEST_UTIL.getMiniHBaseCluster().getRegions(TABLE)) {
      hr.flushcache();
      hr.compactStores();
    }
    assertSizeMetric(tableName, cfs,
        new int[] { kvLength, kvLength, kvLength, kvLength });

    admin.close();
    hTable.close();
  }
}