
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
 * agreements. See the NOTICE file distributed with this work for additional information regarding
 * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License. You may obtain a
 * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
 * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
 * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
 * for the specific language governing permissions and limitations under the License.
 */

package org.apache.hadoop.hbase.coprocessor;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CoprocessorHConnection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

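/**
 * Test that a region coprocessor can obtain an HConnection via
 * {@link CoprocessorHConnection#getConnectionForEnvironment(CoprocessorEnvironment)} and that a
 * Get issued through that connection returns the same data as one issued through a regular
 * client connection.
 */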
@Category(MediumTests.class)
public class TestCoprocessorHConnection {

  static final Log LOG = LogFactory.getLog(TestCoprocessorHConnection.class);

  public final static byte[] A = Bytes.toBytes("a");
  private static final int ROWSIZE = 20;
  private static final byte[] rowSeparator1 = Bytes.toBytes(5);
  private static final byte[] rowSeparator2 = Bytes.toBytes(12);

  private static HBaseTestingUtility util = new HBaseTestingUtility();
  private static MiniHBaseCluster cluster = null;

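  /**
   * Region observer that keeps a reference to its region and coprocessor environment so the test
   * can read data back through a connection obtained from that environment.
   */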
  public static class FooCoprocessor extends BaseRegionObserver {
    private HRegion region;
    private CoprocessorEnvironment env;

    @Override
    public void start(CoprocessorEnvironment e) {
      region = ((RegionCoprocessorEnvironment) e).getRegion();
      env = e;
    }

    @Override
    public void stop(CoprocessorEnvironment e) {
      region = null;
    }

    public byte[] getRegionStartKey() {
      return region.getStartKey();
    }

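    /**
     * Fetch the value for the given key through an HConnection obtained from the coprocessor
     * environment, asserting that the connection handed back is a CoprocessorHConnection.
     */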
    public Result getOnCoprocessorHConnection(TableName tableName, byte[] key)
        throws IOException {
      HConnection conn = CoprocessorHConnection.getConnectionForEnvironment(env);
      // The connection returned by getConnectionForEnvironment should be a CoprocessorHConnection
      assertTrue(conn instanceof CoprocessorHConnection);
      HTableInterface hTable = conn.getTable(tableName);
      Get get = new Get(key);
      Result result = hTable.get(get);
      return result;
    }
  }

  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    util.startMiniCluster();
    cluster = util.getMiniHBaseCluster();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    util.shutdownMiniCluster();
  }

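  /**
   * Create a table with {@link FooCoprocessor} attached, load some rows, then read the first
   * region's start key both through a regular client table and through the coprocessor's
   * connection, and verify that both reads return the same value.
   */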
  @Test
  public void testHConnection() throws Exception {
    HBaseAdmin admin = util.getHBaseAdmin();
    TableName testTable = TableName.valueOf("TestHConnection");

    try {
      // Drop the table if it already exists
      if (admin.tableExists(testTable)) {
        admin.disableTable(testTable);
        admin.deleteTable(testTable);
      }

      HTableDescriptor htd = new HTableDescriptor(testTable);
      htd.addFamily(new HColumnDescriptor(A));

      // Register FooCoprocessor as a table coprocessor
      htd.addCoprocessor(FooCoprocessor.class.getName());

      // Create a table with 3 regions
      admin.createTable(htd, new byte[][] { rowSeparator1, rowSeparator2 });
      util.waitUntilAllRegionsAssigned(testTable);
    } finally {
      admin.close();
    }

    // Get a client-side table
    HTable table = new HTable(util.getConfiguration(), testTable);

    try {
      // Put some data
      for (long i = 0; i < ROWSIZE; i++) {
        byte[] iBytes = Bytes.toBytes(i);
        Put put = new Put(iBytes);
        put.add(A, A, iBytes);
        table.put(put);
      }

      // Get the table's first region
      HRegion firstRegion = cluster.getRegions(testTable).get(0);

      // Look up the coprocessor instance loaded on that region
      Coprocessor cp =
          firstRegion.getCoprocessorHost().findCoprocessor(FooCoprocessor.class.getName());
      assertNotNull("FooCoprocessor coprocessor should be loaded", cp);
      FooCoprocessor fc = (FooCoprocessor) cp;

      // Find the start key for the region that FooCoprocessor is running on.
      byte[] regionStartKey = fc.getRegionStartKey();

      if (regionStartKey == null || regionStartKey.length <= 0) {
        // It's the first region. Can't ask for a null row, so ask for a minimal key instead.
        regionStartKey = new byte[] { 0 };
      }

      // Get the key's data through the regular client connection
      Get get = new Get(regionStartKey);
      Result keyData = table.get(get);

      // Get the same key's data through the CoprocessorHConnection
      Result cpData = fc.getOnCoprocessorHConnection(testTable, regionStartKey);
      // Check that both reads returned the same value
      assertArrayEquals(keyData.getValue(A, A), cpData.getValue(A, A));
    } finally {
      table.close();
    }
  }
}