/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.coprocessor;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Collections;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test that a coprocessor can open a connection and write to another table, inside a hook.
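 * <p>
 * The pattern under test, sketched from the hooks below:
 * <pre>
 * HTableInterface table = e.getEnvironment().getTable(otherTable);
 * table.put(put); // copy the incoming edit to the second table
 * table.close();
 * </pre>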
 */
@Category(MediumTests.class)
public class TestOpenTableInCoprocessor {

  private static final byte[] otherTable = Bytes.toBytes("otherTable");
  private static final byte[] primaryTable = Bytes.toBytes("primary");
  private static final byte[] family = new byte[] { 'f' };

  // Flag flipped by the coprocessor to signal that the copy completed. This works because the
  // mini cluster's region servers run in the same JVM as the test.
  private static boolean[] completed = new boolean[1];

  /**
   * Custom coprocessor that copies the incoming write to another table, using the environment's
   * default connection and pool.
   */
  public static class SendToOtherTableCoprocessor extends BaseRegionObserver {

    @Override
    public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit,
        boolean writeToWAL) throws IOException {
      HTableInterface table = e.getEnvironment().getTable(otherTable);
      // Copy the incoming write verbatim to the other table.
      table.put(put);
      table.flushCommits();
      completed[0] = true;
      table.close();
    }

  }

  // Same signal as above, for the variant that supplies its own thread pool.
  private static boolean[] completedWithPool = new boolean[1];

  public static class CustomThreadPoolCoprocessor extends BaseRegionObserver {

    /**
     * Get a pool that only ever has one thread. Because the backing SynchronousQueue cannot hold
     * tasks, a second action submitted while the first is still running is rejected with an
     * exception rather than queued.
     * @return a single-threaded pool backed by daemon threads
     */
    private ExecutorService getPool() {
      int maxThreads = 1;
      long keepAliveTime = 60;
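      // A SynchronousQueue has no capacity: tasks are handed straight to a thread, so with
      // maxThreads == 1 a concurrent second submission is rejected outright.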
      ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime,
          TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
          Threads.newDaemonThreadFactory("hbase-table"));
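      // Let the lone core thread time out so the pool does not pin the JVM after the test.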
      pool.allowCoreThreadTimeOut(true);
      return pool;
    }

    @Override
    public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit,
        boolean writeToWAL) throws IOException {
      HTableInterface table = e.getEnvironment().getTable(otherTable, getPool());
      // Copy the incoming write verbatim to the other table via a batch call.
      try {
        table.batch(Collections.singletonList(put));
      } catch (InterruptedException e1) {
        // Restore the interrupt flag before converting to an IOException.
        Thread.currentThread().interrupt();
        throw new IOException(e1);
      }
      completedWithPool[0] = true;
      table.close();
    }
  }

  private static HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setupCluster() throws Exception {
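    // A plain mini cluster is enough; region servers share the test JVM, so the static flags set
    // by the coprocessors are visible to the assertions below.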
    UTIL.startMiniCluster();
  }

  @After
  public void cleanupTestTable() throws Exception {
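    // Drop both tables between tests so each run starts from a clean slate.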
    UTIL.getHBaseAdmin().disableTable(primaryTable);
    UTIL.getHBaseAdmin().deleteTable(primaryTable);

    UTIL.getHBaseAdmin().disableTable(otherTable);
    UTIL.getHBaseAdmin().deleteTable(otherTable);
  }

  @AfterClass
  public static void teardownCluster() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void testCoprocessorCanCreateConnectionToRemoteTable() throws Throwable {
    runCoprocessorConnectionToRemoteTable(SendToOtherTableCoprocessor.class, completed);
  }

  @Test
  public void testCoprocessorCanCreateConnectionToRemoteTableWithCustomPool() throws Throwable {
    runCoprocessorConnectionToRemoteTable(CustomThreadPoolCoprocessor.class, completedWithPool);
  }

  private void runCoprocessorConnectionToRemoteTable(Class<? extends BaseRegionObserver> clazz,
      boolean[] completeCheck) throws Throwable {
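    // Build the observed table (with the coprocessor attached) and the target table it writes to.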
    HTableDescriptor primary = new HTableDescriptor(primaryTable);
    primary.addFamily(new HColumnDescriptor(family));
    // Install the coprocessor by class name; in the mini cluster the test classes are already on
    // the region server's classpath, so no extra jar needs to be shipped.
    primary.addCoprocessor(clazz.getName());

    HTableDescriptor other = new HTableDescriptor(otherTable);
    other.addFamily(new HColumnDescriptor(family));

    HBaseAdmin admin = UTIL.getHBaseAdmin();
    admin.createTable(primary);
    admin.createTable(other);

    // Write a single row to the primary table; the synchronous prePut hook copies it to the
    // other table before this put returns.
    HTable table = new HTable(UTIL.getConfiguration(), primaryTable);
    Put p = new Put(new byte[] { 'a' });
    p.add(family, null, new byte[] { 'a' });
    table.put(p);
    table.flushCommits();
    table.close();

    HTable target = new HTable(UTIL.getConfiguration(), otherTable);
    // Verify both that the hook ran and that exactly one KeyValue landed in the target table.
    assertTrue("Didn't complete update to target table!", completeCheck[0]);
    assertEquals("Didn't find inserted row", 1, getKeyValueCount(target));
    target.close();
  }

  /**
   * Count the number of KeyValues in the table, scanning all versions of every row.
   * @param table table to scan
   * @return number of KeyValues over all rows in the table
   * @throws IOException if the scan fails
   */
  private int getKeyValueCount(HTable table) throws IOException {
    Scan scan = new Scan();
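    // Request (nearly) all versions so every KeyValue, not just the latest, is counted.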
    scan.setMaxVersions(Integer.MAX_VALUE - 1);

    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (Result res : results) {
      count += res.list().size();
      System.out.println(count + ") " + res);
    }
    results.close();

    return count;
  }
}