
/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.coprocessor;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Collections;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test that a coprocessor can open a connection and write to another table from inside a hook.
 */
@Category(MediumTests.class)
public class TestOpenTableInCoprocessor {

  private static final TableName otherTable = TableName.valueOf("otherTable");
  private static final TableName primaryTable = TableName.valueOf("primary");
  private static final byte[] family = new byte[] { 'f' };

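  // set to true by SendToOtherTableCoprocessor once its cross-table write has completed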
  private static boolean[] completed = new boolean[1];

  /**
   * Custom coprocessor that just copies the write to another table.
   */
  public static class SendToOtherTableCoprocessor extends BaseRegionObserver {

    @Override
    public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
        final WALEdit edit, final Durability durability) throws IOException {
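      // grab a handle to the other table through the coprocessor's environment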
      HTableInterface table = e.getEnvironment().getTable(otherTable);
      Put p = new Put(new byte[] { 'a' });
      p.add(family, null, new byte[] { 'a' });
      // write the copy, not the incoming Put, to the other table
      table.put(p);
      table.flushCommits();
      completed[0] = true;
      table.close();
    }

  }

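  // set to true by CustomThreadPoolCoprocessor once its write via the custom pool has completed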
  private static boolean[] completedWithPool = new boolean[1];

  /**
   * Coprocessor that creates an HTable with a custom thread pool to write to another table.
   */
  public static class CustomThreadPoolCoprocessor extends BaseRegionObserver {

    /**
     * Get a pool that has only one thread. A second task submitted to the pool while the first
     * is still running will be rejected with an exception.
     * @return a single-threaded pool that rejects concurrent submissions
     */
    private ExecutorService getPool() {
      int maxThreads = 1;
      long keepAliveTime = 60;
      ThreadPoolExecutor pool =
          new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS,
              new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("hbase-table"));
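      // let the lone core thread time out when idle so the pool does not outlive the write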
      pool.allowCoreThreadTimeOut(true);
      return pool;
    }

    @Override
    public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
        final WALEdit edit, final Durability durability) throws IOException {
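      // as above, but hand the table our own pool to run its operations on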
      HTableInterface table = e.getEnvironment().getTable(otherTable, getPool());
      Put p = new Put(new byte[] { 'a' });
      p.add(family, null, new byte[] { 'a' });
      try {
        // write the copy, not the incoming Put, to the other table
        table.batch(Collections.singletonList(p));
      } catch (InterruptedException e1) {
        throw new IOException(e1);
      }
      completedWithPool[0] = true;
      table.close();
    }
  }

  private static HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setupCluster() throws Exception {
    UTIL.startMiniCluster();
  }

  @After
  public void cleanupTestTable() throws Exception {
    UTIL.getHBaseAdmin().disableTable(primaryTable);
    UTIL.getHBaseAdmin().deleteTable(primaryTable);

    UTIL.getHBaseAdmin().disableTable(otherTable);
    UTIL.getHBaseAdmin().deleteTable(otherTable);
  }

  @AfterClass
  public static void teardownCluster() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void testCoprocessorCanCreateConnectionToRemoteTable() throws Throwable {
    runCoprocessorConnectionToRemoteTable(SendToOtherTableCoprocessor.class, completed);
  }

  @Test
  public void testCoprocessorCanCreateConnectionToRemoteTableWithCustomPool() throws Throwable {
    runCoprocessorConnectionToRemoteTable(CustomThreadPoolCoprocessor.class, completedWithPool);
  }

  private void runCoprocessorConnectionToRemoteTable(Class<? extends BaseRegionObserver> clazz,
      boolean[] completeCheck) throws Throwable {
    HTableDescriptor primary = new HTableDescriptor(primaryTable);
    primary.addFamily(new HColumnDescriptor(family));
    // add our coprocessor
    primary.addCoprocessor(clazz.getName());

    HTableDescriptor other = new HTableDescriptor(otherTable);
    other.addFamily(new HColumnDescriptor(family));

    HBaseAdmin admin = UTIL.getHBaseAdmin();
    admin.createTable(primary);
    admin.createTable(other);

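    // write a single row to the primary table; the coprocessor should mirror it to the other table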
    HTable table = new HTable(UTIL.getConfiguration(), primaryTable);
    Put p = new Put(new byte[] { 'a' });
    p.add(family, null, new byte[] { 'a' });
    table.put(p);
    table.flushCommits();
    table.close();

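    // flushCommits() has returned, so the prePut hook has already run on the server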
    HTable target = new HTable(UTIL.getConfiguration(), otherTable);
    assertTrue("Didn't complete update to target table!", completeCheck[0]);
    assertEquals("Didn't find inserted row", 1, getKeyValueCount(target));
    target.close();
  }

  /**
   * Count the number of KeyValues in the table, scanning all possible versions.
   * @param table table to scan
   * @return number of KeyValues over all rows in the table
   * @throws IOException if the scan fails
   */
  private int getKeyValueCount(HTable table) throws IOException {
    Scan scan = new Scan();
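    // request (effectively) unlimited versions so every cell, not just the latest, is counted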
    scan.setMaxVersions(Integer.MAX_VALUE - 1);

    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (Result res : results) {
      count += res.listCells().size();
      System.out.println(count + ") " + res);
    }
    results.close();

    return count;
  }
}