1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.coprocessor;
21  
22  import static org.junit.Assert.assertEquals;
23  import static org.junit.Assert.assertNull;
24  
25  import java.io.DataInput;
26  import java.io.DataOutput;
27  import java.io.IOException;
28  import java.util.Collections;
29  import java.util.List;
30  import java.util.NavigableSet;
31  import java.util.concurrent.CountDownLatch;
32  
33  import org.apache.hadoop.conf.Configuration;
34  import org.apache.hadoop.fs.FileSystem;
35  import org.apache.hadoop.fs.Path;
36  import org.apache.hadoop.hbase.Coprocessor;
37  import org.apache.hadoop.hbase.HBaseConfiguration;
38  import org.apache.hadoop.hbase.HBaseTestingUtility;
39  import org.apache.hadoop.hbase.HColumnDescriptor;
40  import org.apache.hadoop.hbase.HConstants;
41  import org.apache.hadoop.hbase.HRegionInfo;
42  import org.apache.hadoop.hbase.HTableDescriptor;
43  import org.apache.hadoop.hbase.KeyValue;
44  import org.apache.hadoop.hbase.MediumTests;
45  import org.apache.hadoop.hbase.SmallTests;
46  import org.apache.hadoop.hbase.client.Get;
47  import org.apache.hadoop.hbase.client.HBaseAdmin;
48  import org.apache.hadoop.hbase.client.HTable;
49  import org.apache.hadoop.hbase.client.Put;
50  import org.apache.hadoop.hbase.client.Result;
51  import org.apache.hadoop.hbase.client.Scan;
52  import org.apache.hadoop.hbase.filter.FilterBase;
53  import org.apache.hadoop.hbase.regionserver.HRegion;
54  import org.apache.hadoop.hbase.regionserver.HRegionServer;
55  import org.apache.hadoop.hbase.regionserver.InternalScanner;
56  import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
57  import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
58  import org.apache.hadoop.hbase.regionserver.RegionServerServices;
59  import org.apache.hadoop.hbase.regionserver.ScanType;
60  import org.apache.hadoop.hbase.regionserver.Store;
61  import org.apache.hadoop.hbase.regionserver.StoreScanner;
62  import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
63  import org.apache.hadoop.hbase.regionserver.wal.HLog;
64  import org.apache.hadoop.hbase.util.Bytes;
65  import org.junit.Test;
66  import org.junit.experimental.categories.Category;
67  
68  @Category(SmallTests.class)
69  public class TestRegionObserverScannerOpenHook {
70    private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
71    static final Path DIR = UTIL.getDataTestDir();
72  
73    public static class NoDataFilter extends FilterBase {
74  
75      @Override
76      public ReturnCode filterKeyValue(KeyValue ignored) {
77        return ReturnCode.SKIP;
78      }
79  
80      @Override
81      public boolean filterAllRemaining() {
82        return true;
83      }
84  
85      @Override
86      public boolean filterRow() {
87        return true;
88      }
89  
90      @Override
91      public void readFields(DataInput arg0) throws IOException {
92        // noop
93      }
94  
95      @Override
96      public void write(DataOutput arg0) throws IOException {
97        // noop
98      }
99    }
100 
  /**
   * A no-op region observer, loaded alongside the data-dropping observers to
   * verify that stacking an additional coprocessor at a different priority
   * does not clobber the scanner installed by the higher-priority one.
   * Needed since {@link BaseRegionObserver} is an abstract class and cannot be
   * instantiated directly by the coprocessor host.
   */
  public static class EmptyRegionObsever extends BaseRegionObserver {
  }
107 
108   /**
109    * Don't return any data from a scan by creating a custom {@link StoreScanner}.
110    */
111   public static class NoDataFromScan extends BaseRegionObserver {
112     @Override
113     public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
114         Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s)
115         throws IOException {
116       scan.setFilter(new NoDataFilter());
117       return new StoreScanner(store, store.getScanInfo(), scan, targetCols);
118     }
119   }
120 
121   /**
122    * Don't allow any data in a flush by creating a custom {@link StoreScanner}.
123    */
124   public static class NoDataFromFlush extends BaseRegionObserver {
125     @Override
126     public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
127         Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
128       Scan scan = new Scan();
129       scan.setFilter(new NoDataFilter());
130       return new StoreScanner(store, store.getScanInfo(), scan,
131           Collections.singletonList(memstoreScanner), ScanType.MINOR_COMPACT, store.getHRegion()
132               .getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
133     }
134   }
135 
136   /**
137    * Don't allow any data to be written out in the compaction by creating a custom
138    * {@link StoreScanner}.
139    */
140   public static class NoDataFromCompaction extends BaseRegionObserver {
141     @Override
142     public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
143         Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
144         long earliestPutTs, InternalScanner s) throws IOException {
145       Scan scan = new Scan();
146       scan.setFilter(new NoDataFilter());
147       return new StoreScanner(store, store.getScanInfo(), scan, scanners, ScanType.MINOR_COMPACT,
148           store.getHRegion().getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
149     }
150   }
151 
152 
153   HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf,
154       byte[]... families) throws IOException {
155     HTableDescriptor htd = new HTableDescriptor(tableName);
156     for (byte[] family : families) {
157       htd.addFamily(new HColumnDescriptor(family));
158     }
159     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
160     Path path = new Path(DIR + callingMethod);
161     HRegion r = HRegion.createHRegion(info, path, conf, htd);
162     // this following piece is a hack. currently a coprocessorHost
163     // is secretly loaded at OpenRegionHandler. we don't really
164     // start a region server here, so just manually create cphost
165     // and set it to region.
166     RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
167     r.setCoprocessorHost(host);
168     return r;
169   }
170 
171   @Test
172   public void testRegionObserverScanTimeStacking() throws Exception {
173     byte[] ROW = Bytes.toBytes("testRow");
174     byte[] TABLE = Bytes.toBytes(getClass().getName());
175     byte[] A = Bytes.toBytes("A");
176     byte[][] FAMILIES = new byte[][] { A };
177 
178     Configuration conf = HBaseConfiguration.create();
179     HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
180     RegionCoprocessorHost h = region.getCoprocessorHost();
181     h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
182     h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
183 
184     Put put = new Put(ROW);
185     put.add(A, A, A);
186     region.put(put);
187 
188     Get get = new Get(ROW);
189     Result r = region.get(get);
190     assertNull(
191       "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
192           + r, r.list());
193   }
194 
195   @Test
196   public void testRegionObserverFlushTimeStacking() throws Exception {
197     byte[] ROW = Bytes.toBytes("testRow");
198     byte[] TABLE = Bytes.toBytes(getClass().getName());
199     byte[] A = Bytes.toBytes("A");
200     byte[][] FAMILIES = new byte[][] { A };
201 
202     Configuration conf = HBaseConfiguration.create();
203     HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
204     RegionCoprocessorHost h = region.getCoprocessorHost();
205     h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
206     h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
207 
208     // put a row and flush it to disk
209     Put put = new Put(ROW);
210     put.add(A, A, A);
211     region.put(put);
212     region.flushcache();
213     Get get = new Get(ROW);
214     Result r = region.get(get);
215     assertNull(
216       "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
217           + r, r.list());
218   }
219 
220   /*
221    * Custom HRegion which uses CountDownLatch to signal the completion of compaction
222    */
223   public static class CompactionCompletionNotifyingRegion extends HRegion {
224     private static volatile CountDownLatch compactionStateChangeLatch = null;
225 
226     public CompactionCompletionNotifyingRegion(Path tableDir, HLog log, FileSystem fs,
227         Configuration confParam, HRegionInfo info, HTableDescriptor htd,
228         RegionServerServices rsServices) {
229       super(tableDir, log, fs, confParam, info, htd, rsServices);
230     }
231 
232     public CountDownLatch getCompactionStateChangeLatch() {
233       if (compactionStateChangeLatch == null) compactionStateChangeLatch = new CountDownLatch(1);
234       return compactionStateChangeLatch;
235     }
236 
237     @Override
238     public boolean compact(CompactionRequest cr) throws IOException {
239       boolean ret = super.compact(cr);
240       if (ret) compactionStateChangeLatch.countDown();
241       return ret;
242     }
243   }
244 
245   /**
246    * Unfortunately, the easiest way to test this is to spin up a mini-cluster since we want to do
247    * the usual compaction mechanism on the region, rather than going through the backdoor to the
248    * region
249    */
250   @Test
251   @Category(MediumTests.class)
252   public void testRegionObserverCompactionTimeStacking() throws Exception {
253     // setup a mini cluster so we can do a real compaction on a region
254     Configuration conf = UTIL.getConfiguration();
255     conf.setClass(HConstants.REGION_IMPL, CompactionCompletionNotifyingRegion.class, HRegion.class);
256     conf.setInt("hbase.hstore.compaction.min", 2);
257     UTIL.startMiniCluster();
258     String tableName = "testRegionObserverCompactionTimeStacking";
259     byte[] ROW = Bytes.toBytes("testRow");
260     byte[] A = Bytes.toBytes("A");
261     HTableDescriptor desc = new HTableDescriptor(tableName);
262     desc.addFamily(new HColumnDescriptor(A));
263     desc.addCoprocessor(EmptyRegionObsever.class.getName(), null, Coprocessor.PRIORITY_USER, null);
264     desc.addCoprocessor(NoDataFromCompaction.class.getName(), null, Coprocessor.PRIORITY_HIGHEST,
265       null);
266 
267     HBaseAdmin admin = UTIL.getHBaseAdmin();
268     admin.createTable(desc);
269 
270     HTable table = new HTable(conf, desc.getName());
271 
272     // put a row and flush it to disk
273     Put put = new Put(ROW);
274     put.add(A, A, A);
275     table.put(put);
276     table.flushCommits();
277 
278     HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getName());
279     List<HRegion> regions = rs.getOnlineRegions(desc.getName());
280     assertEquals("More than 1 region serving test table with 1 row", 1, regions.size());
281     HRegion region = regions.get(0);
282     admin.flush(region.getRegionName());
283     CountDownLatch latch = ((CompactionCompletionNotifyingRegion)region)
284         .getCompactionStateChangeLatch();
285 
286     // put another row and flush that too
287     put = new Put(Bytes.toBytes("anotherrow"));
288     put.add(A, A, A);
289     table.put(put);
290     table.flushCommits();
291     admin.flush(region.getRegionName());
292 
293     // run a compaction, which normally would should get rid of the data
294     Store s = region.getStores().get(A);
295     CompactionRequest request = new CompactionRequest(region, s, Store.PRIORITY_USER);
296     rs.compactSplitThread.requestCompaction(region, s,
297       "compact for testRegionObserverCompactionTimeStacking", Store.PRIORITY_USER, request);
298     // wait for the compaction to complete
299     latch.await();
300 
301     // check both rows to ensure that they aren't there
302     Get get = new Get(ROW);
303     Result r = table.get(get);
304     assertNull(
305       "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
306           + r, r.list());
307 
308     get = new Get(Bytes.toBytes("anotherrow"));
309     r = table.get(get);
310     assertNull(
311       "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor Found: "
312           + r, r.list());
313 
314     table.close();
315     UTIL.shutdownMiniCluster();
316   }
317 }