/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.coprocessor;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.NavigableSet;
import java.util.concurrent.CountDownLatch;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterBase;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreScanner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestRegionObserverScannerOpenHook {
  private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
  static final Path DIR = UTIL.getDataTestDir();

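  /**
   * Filter that skips every cell and filters out every row, so any scanner
   * driven by it returns no data.
   */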
  public static class NoDataFilter extends FilterBase {

    @Override
    public ReturnCode filterKeyValue(Cell ignored) throws IOException {
      return ReturnCode.SKIP;
    }

    @Override
    public boolean filterAllRemaining() throws IOException {
      return true;
    }

    @Override
    public boolean filterRow() throws IOException {
      return true;
    }
  }

  /**
   * A no-op observer with the same behavior as {@link BaseRegionObserver}. Needed since
   * {@link BaseRegionObserver} is an abstract class.
   */
  public static class EmptyRegionObserver extends BaseRegionObserver {
  }

  /**
   * Don't return any data from a scan by creating a custom {@link StoreScanner}.
   */
  public static class NoDataFromScan extends BaseRegionObserver {
    @Override
    public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s)
        throws IOException {
      scan.setFilter(new NoDataFilter());
      return new StoreScanner(store, store.getScanInfo(), scan, targetCols,
        ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
    }
  }

  /**
   * Don't allow any data in a flush by creating a custom {@link StoreScanner}.
   */
  public static class NoDataFromFlush extends BaseRegionObserver {
    @Override
    public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
      Scan scan = new Scan();
      scan.setFilter(new NoDataFilter());
      return new StoreScanner(store, store.getScanInfo(), scan,
          Collections.singletonList(memstoreScanner), ScanType.COMPACT_RETAIN_DELETES,
          store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
    }
  }

  /**
   * Don't allow any data to be written out in the compaction by creating a custom
   * {@link StoreScanner}.
   */
  public static class NoDataFromCompaction extends BaseRegionObserver {
    @Override
    public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
        long earliestPutTs, InternalScanner s) throws IOException {
      Scan scan = new Scan();
      scan.setFilter(new NoDataFilter());
      return new StoreScanner(store, store.getScanInfo(), scan, scanners,
          ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
          HConstants.OLDEST_TIMESTAMP);
    }
  }

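  /**
   * Creates a standalone region with the given families and manually attaches a
   * {@link RegionCoprocessorHost}, since no region server is started for these tests.
   */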
  HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf,
      byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    Path path = new Path(DIR + callingMethod);
    HRegion r = HRegion.createHRegion(info, path, conf, htd);
    // The following is a hack: a coprocessor host is normally loaded for us in
    // OpenRegionHandler. Since we don't actually start a region server here, we
    // create the coprocessor host manually and set it on the region.
    RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
    r.setCoprocessorHost(host);
    return r;
  }

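  /**
   * Loads {@link NoDataFromScan} at the highest priority alongside a lower-priority
   * no-op observer and verifies that the custom scan-time scanner suppresses all data.
   */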
  @Test
  public void testRegionObserverScanTimeStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(getClass().getName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] { A };

    Configuration conf = HBaseConfiguration.create();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(EmptyRegionObserver.class, Coprocessor.PRIORITY_USER, conf);

    Put put = new Put(ROW);
    put.add(A, A, A);
    region.put(put);

    Get get = new Get(ROW);
    Result r = region.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
          + r, r.listCells());
  }

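  /**
   * Loads {@link NoDataFromFlush} at the highest priority and verifies that nothing
   * written to the memstore survives a flush.
   */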
  @Test
  public void testRegionObserverFlushTimeStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(getClass().getName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] { A };

    Configuration conf = HBaseConfiguration.create();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(EmptyRegionObserver.class, Coprocessor.PRIORITY_USER, conf);

    // put a row and flush it to disk
    Put put = new Put(ROW);
    put.add(A, A, A);
    region.put(put);
    region.flushcache();
    Get get = new Get(ROW);
    Result r = region.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
          + r, r.listCells());
  }

  /**
   * Custom HRegion which uses a CountDownLatch to signal the completion of compaction.
   */
  public static class CompactionCompletionNotifyingRegion extends HRegion {
    private static volatile CountDownLatch compactionStateChangeLatch = null;

    @SuppressWarnings("deprecation")
    public CompactionCompletionNotifyingRegion(Path tableDir, HLog log,
        FileSystem fs, Configuration confParam, HRegionInfo info,
        HTableDescriptor htd, RegionServerServices rsServices) {
      super(tableDir, log, fs, confParam, info, htd, rsServices);
    }

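    // Lazily creates the shared latch; the test fetches it before kicking off the
    // compaction it waits on.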
    public CountDownLatch getCompactionStateChangeLatch() {
      if (compactionStateChangeLatch == null) compactionStateChangeLatch = new CountDownLatch(1);
      return compactionStateChangeLatch;
    }

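    // Both compact overrides count the latch down once a compaction has actually
    // completed, so a waiting test thread can proceed.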
    @Override
    public boolean compact(CompactionContext compaction, Store store,
        CompactionThroughputController throughputController) throws IOException {
      boolean ret = super.compact(compaction, store, throughputController);
      if (ret) compactionStateChangeLatch.countDown();
      return ret;
    }

    @Override
    public boolean compact(CompactionContext compaction, Store store,
        CompactionThroughputController throughputController, User user) throws IOException {
      boolean ret = super.compact(compaction, store, throughputController, user);
      if (ret) compactionStateChangeLatch.countDown();
      return ret;
    }
  }

  /**
   * Unfortunately, the easiest way to test this is to spin up a mini-cluster, since we want to
   * exercise the usual compaction mechanism on the region rather than going through the backdoor
   * to the region.
   */
  @Test
  public void testRegionObserverCompactionTimeStacking() throws Exception {
    // setup a mini cluster so we can do a real compaction on a region
    Configuration conf = UTIL.getConfiguration();
    conf.setClass(HConstants.REGION_IMPL, CompactionCompletionNotifyingRegion.class, HRegion.class);
    conf.setInt("hbase.hstore.compaction.min", 2);
    UTIL.startMiniCluster();
    String tableName = "testRegionObserverCompactionTimeStacking";
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] A = Bytes.toBytes("A");
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    desc.addFamily(new HColumnDescriptor(A));
    desc.addCoprocessor(EmptyRegionObserver.class.getName(), null, Coprocessor.PRIORITY_USER, null);
    desc.addCoprocessor(NoDataFromCompaction.class.getName(), null, Coprocessor.PRIORITY_HIGHEST,
      null);

    HBaseAdmin admin = UTIL.getHBaseAdmin();
    admin.createTable(desc);

    HTable table = new HTable(conf, desc.getTableName());

    // put a row and flush it to disk
    Put put = new Put(ROW);
    put.add(A, A, A);
    table.put(put);
    table.flushCommits();

    HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getTableName());
    List<HRegion> regions = rs.getOnlineRegions(desc.getTableName());
    assertEquals("More than 1 region serving test table with 1 row", 1, regions.size());
    HRegion region = regions.get(0);
    admin.flush(region.getRegionName());
    CountDownLatch latch = ((CompactionCompletionNotifyingRegion)region)
        .getCompactionStateChangeLatch();

    // put another row and flush that too
    put = new Put(Bytes.toBytes("anotherrow"));
    put.add(A, A, A);
    table.put(put);
    table.flushCommits();
    admin.flush(region.getRegionName());

    // run a compaction, which normally should get rid of the data,
    // and wait for the compaction to complete
    latch.await();
    // check both rows to ensure that they aren't there
    Get get = new Get(ROW);
    Result r = table.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromCompaction coprocessor. Found: "
          + r, r.listCells());

    get = new Get(Bytes.toBytes("anotherrow"));
    r = table.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromCompaction coprocessor. Found: "
          + r, r.listCells());

    table.close();
    UTIL.shutdownMiniCluster();
  }
}