/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.coprocessor;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test that a coprocessor can open a connection and write to another table from inside a hook.
 */
@Category(MediumTests.class)
public class TestOpenTableInCoprocessor {

  private static final byte[] otherTable = Bytes.toBytes("otherTable");
  private static final byte[] family = new byte[] { 'f' };

  private static boolean completed = false;

  /**
   * Custom coprocessor that just copies the write to another table.
   */
  public static class SendToOtherTableCoprocessor extends BaseRegionObserver {

    @Override
    public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit,
        boolean writeToWAL) throws IOException {
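      // open a handle on the other table through the coprocessor environment; this connects to
      // the same cluster that is hosting this region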
      HTableInterface table = e.getEnvironment().getTable(otherTable);
      // forward the incoming write to the other table
      table.put(put);
      table.flushCommits();
      completed = true;
      table.close();
    }

  }

  @Test
  public void testCoprocessorCanCreateConnectionToRemoteTable() throws Throwable {
    HBaseTestingUtility UTIL = new HBaseTestingUtility();
    HTableDescriptor primary = new HTableDescriptor("primary");
    primary.addFamily(new HColumnDescriptor(family));
    // add our coprocessor
    primary.addCoprocessor(SendToOtherTableCoprocessor.class.getName());

    HTableDescriptor other = new HTableDescriptor(otherTable);
    other.addFamily(new HColumnDescriptor(family));
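    // spin up a mini cluster and create both the primary and the target table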
    UTIL.startMiniCluster();

    HBaseAdmin admin = UTIL.getHBaseAdmin();
    admin.createTable(primary);
    admin.createTable(other);
    admin.close();

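    // write a single row to the primary table; the prePut hook should copy it into the other table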
    HTable table = new HTable(UTIL.getConfiguration(), "primary");
    Put p = new Put(new byte[] { 'a' });
    p.add(family, null, new byte[] { 'a' });
    table.put(p);
    table.flushCommits();
    table.close();

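    // make sure the coprocessor actually ran and copied the row into the target table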
    HTable target = new HTable(UTIL.getConfiguration(), otherTable);
    assertTrue("Didn't complete update to target table!", completed);
    assertEquals("Didn't find inserted row", 1, getKeyValueCount(target));
    target.close();

    UTIL.shutdownMiniCluster();
  }

  /**
   * Count the number of keyvalues in the table. Scans all possible versions.
   * @param table table to scan
   * @return number of keyvalues over all rows in the table
   * @throws IOException if the scan fails
   */
  private int getKeyValueCount(HTable table) throws IOException {
    Scan scan = new Scan();
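    // read back all versions so every keyvalue is counted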
    scan.setMaxVersions(Integer.MAX_VALUE - 1);

    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (Result res : results) {
      count += res.list().size();
      System.out.println(count + ") " + res);
    }
    results.close();

    return count;
  }
}