/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.coprocessor;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.NavigableSet;
import java.util.concurrent.CountDownLatch;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterBase;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreScanner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(SmallTests.class)
public class TestRegionObserverScannerOpenHook {
  private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
  static final Path DIR = UTIL.getDataTestDir();

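  /**
   * Filter that skips every {@link KeyValue} and filters out all remaining rows, so any scanner
   * built on top of it returns no data.
   */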
  public static class NoDataFilter extends FilterBase {

    @Override
    public ReturnCode filterKeyValue(KeyValue ignored) {
      return ReturnCode.SKIP;
    }

    @Override
    public boolean filterAllRemaining() {
      return true;
    }

    @Override
    public boolean filterRow() {
      return true;
    }

    @Override
    public void readFields(DataInput arg0) throws IOException {
      // noop
    }

    @Override
    public void write(DataOutput arg0) throws IOException {
      // noop
    }
  }

  /**
   * A no-op observer with the same behavior as {@link BaseRegionObserver}. Needed since
   * {@link BaseRegionObserver} is an abstract class and cannot be loaded directly.
   */
  public static class EmptyRegionObserver extends BaseRegionObserver {
  }

  /**
   * Don't return any data from a scan by creating a custom {@link StoreScanner}.
   */
  public static class NoDataFromScan extends BaseRegionObserver {
    @Override
    public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s)
        throws IOException {
      scan.setFilter(new NoDataFilter());
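      // A non-null scanner returned from this hook is used in place of the default
      // StoreScanner the Store would otherwise create.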
      return new StoreScanner(store, store.getScanInfo(), scan, targetCols);
    }
  }

  /**
   * Don't allow any data in a flush by creating a custom {@link StoreScanner}.
   */
  public static class NoDataFromFlush extends BaseRegionObserver {
    @Override
    public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
      Scan scan = new Scan();
      scan.setFilter(new NoDataFilter());
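      // Build the flush scanner over just the memstore; since NoDataFilter drops every
      // KeyValue, nothing should survive the flush.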
      return new StoreScanner(store, store.getScanInfo(), scan,
          Collections.singletonList(memstoreScanner), ScanType.MINOR_COMPACT, store.getHRegion()
              .getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
    }
  }

  /**
   * Don't allow any data to be written out in the compaction by creating a custom
   * {@link StoreScanner}.
   */
  public static class NoDataFromCompaction extends BaseRegionObserver {
    @Override
    public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
        long earliestPutTs, InternalScanner s) throws IOException {
      Scan scan = new Scan();
      scan.setFilter(new NoDataFilter());
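      // The compaction writes out whatever this scanner returns; with NoDataFilter installed,
      // the compacted store should end up with no data.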
      return new StoreScanner(store, store.getScanInfo(), scan, scanners, ScanType.MINOR_COMPACT,
          store.getHRegion().getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
    }
  }

  HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf,
      byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
    Path path = new Path(DIR + callingMethod);
    HRegion r = HRegion.createHRegion(info, path, conf, htd);
    // The following is a hack: a coprocessor host is normally loaded behind the scenes by
    // OpenRegionHandler. Since we don't start a region server here, manually create the
    // coprocessor host and set it on the region.
    RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
    r.setCoprocessorHost(host);
    return r;
  }

  @Test
  public void testRegionObserverScanTimeStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(getClass().getName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] { A };

    Configuration conf = HBaseConfiguration.create();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(EmptyRegionObserver.class, Coprocessor.PRIORITY_USER, conf);
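    // Load the no-data observer at the highest priority and a no-op observer at user priority;
    // the hooks should stack, and the scanner from the highest-priority observer should be the
    // one the region ends up using.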

    Put put = new Put(ROW);
    put.add(A, A, A);
    region.put(put);

    Get get = new Get(ROW);
    Result r = region.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
          + r, r.list());
  }

  @Test
  public void testRegionObserverFlushTimeStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(getClass().getName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] { A };

    Configuration conf = HBaseConfiguration.create();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(EmptyRegionObserver.class, Coprocessor.PRIORITY_USER, conf);
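    // As in the scan test, a no-op user-priority observer is stacked on top of the flush-time
    // observer to check that the custom flush scanner still takes effect.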

    // put a row and flush it to disk
    Put put = new Put(ROW);
    put.add(A, A, A);
    region.put(put);
    region.flushcache();
    Get get = new Get(ROW);
    Result r = region.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
          + r, r.list());
  }

  /**
   * Unfortunately, the easiest way to test this is to spin up a mini-cluster, since we want the
   * usual compaction mechanism to run on the region rather than going through the region's
   * backdoor.
   */
  @Test
  @Category(MediumTests.class)
  public void testRegionObserverCompactionTimeStacking() throws Exception {
    // setup a mini cluster so we can do a real compaction on a region
    Configuration conf = UTIL.getConfiguration();
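    // Allow a compaction to be selected with as few as two store files, since the test only
    // flushes twice before requesting the compaction.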
    conf.setInt("hbase.hstore.compaction.min", 2);
    UTIL.startMiniCluster();
    String tableName = "testRegionObserverCompactionTimeStacking";
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] A = Bytes.toBytes("A");
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(A));
    desc.addCoprocessor(EmptyRegionObserver.class.getName(), null, Coprocessor.PRIORITY_USER, null);
    desc.addCoprocessor(NoDataFromCompaction.class.getName(), null, Coprocessor.PRIORITY_HIGHEST,
      null);
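    // Coprocessors named in the table descriptor are loaded automatically when the region opens
    // on the region server, so no manual RegionCoprocessorHost setup is needed here.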

    HBaseAdmin admin = UTIL.getHBaseAdmin();
    admin.createTable(desc);

    HTable table = new HTable(conf, desc.getName());

    // put a row and flush it to disk
    Put put = new Put(ROW);
    put.add(A, A, A);
    table.put(put);
    table.flushCommits();

    HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getName());
    List<HRegion> regions = rs.getOnlineRegions(desc.getName());
    assertEquals("More than 1 region serving test table with 1 row", 1, regions.size());
    HRegion region = regions.get(0);
    admin.flush(region.getRegionName());

    // put another row and flush that too
    put = new Put(Bytes.toBytes("anotherrow"));
    put.add(A, A, A);
    table.put(put);
    table.flushCommits();
    admin.flush(region.getRegionName());

    // run a compaction, which (because of the coprocessor) should get rid of all the data
    Store s = region.getStores().get(A);
    CountDownLatch latch = new CountDownLatch(1);
    WaitableCompactionRequest request = new WaitableCompactionRequest(region, s, latch);
    rs.compactSplitThread.requestCompaction(region, s,
      "compact for testRegionObserverCompactionTimeStacking", Store.PRIORITY_USER, request);
    // wait for the compaction to complete
    latch.await();

    // check both rows to ensure that they aren't there
    Get get = new Get(ROW);
    Result r = table.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromCompaction coprocessor. Found: "
          + r, r.list());

    get = new Get(Bytes.toBytes("anotherrow"));
    r = table.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromCompaction coprocessor. Found: "
          + r, r.list());

    table.close();
    UTIL.shutdownMiniCluster();
  }

  /**
   * A compaction request that counts down the given latch when the compaction finishes, whether
   * it succeeded or failed, so callers can wait on the latch for completion.
   */
  public static class WaitableCompactionRequest extends CompactionRequest {
    private CountDownLatch done;

    /**
     * Constructor for a custom compaction request. Use the setXXX methods to update the state of
     * the compaction before it is used.
     */
    public WaitableCompactionRequest(HRegion region, Store store, CountDownLatch finished) {
      super(region, store, Store.PRIORITY_USER);
      this.done = finished;
    }

    @Override
    public void finishRequest() {
      super.finishRequest();
      this.done.countDown();
    }
  }
}