/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.coprocessor;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.NavigableSet;
import java.util.concurrent.CountDownLatch;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterBase;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreScanner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(SmallTests.class)
public class TestRegionObserverScannerOpenHook {
  private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
  static final Path DIR = UTIL.getDataTestDir();
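
  /** Filter that skips every KeyValue and ends the scan, so no data is ever returned. */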
  public static class NoDataFilter extends FilterBase {

    @Override
    public ReturnCode filterKeyValue(KeyValue ignored) throws IOException {
      return ReturnCode.SKIP;
    }

    @Override
    public boolean filterAllRemaining() throws IOException {
      return true;
    }

    @Override
    public boolean filterRow() throws IOException {
      return true;
    }
  }
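
  /**
   * Observer that keeps all the default BaseRegionObserver behavior. It is loaded alongside the
   * NoData* observers below so the tests cover hook results passing through a stack of
   * coprocessors rather than a single one.
   */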
  public static class EmptyRegionObsever extends BaseRegionObserver {
  }
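
  /**
   * Don't return any data from a scan by opening a custom StoreScanner whose Scan is wrapped with
   * a NoDataFilter.
   */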
  public static class NoDataFromScan extends BaseRegionObserver {
    @Override
    public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s)
        throws IOException {
      scan.setFilter(new NoDataFilter());
      return new StoreScanner(store, store.getScanInfo(), scan, targetCols);
    }
  }
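
  /**
   * Don't let any data reach the flushed file by opening a custom StoreScanner over the memstore
   * scanner with a NoDataFilter.
   */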
  public static class NoDataFromFlush extends BaseRegionObserver {
    @Override
    public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
      Scan scan = new Scan();
      scan.setFilter(new NoDataFilter());
      return new StoreScanner(store, store.getScanInfo(), scan,
          Collections.singletonList(memstoreScanner), ScanType.COMPACT_RETAIN_DELETES,
          store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
    }
  }
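
  /**
   * Don't write any data out of a compaction by opening a custom StoreScanner over the compaction
   * scanners with a NoDataFilter.
   */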
  public static class NoDataFromCompaction extends BaseRegionObserver {
    @Override
    public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
        long earliestPutTs, InternalScanner s) throws IOException {
      Scan scan = new Scan();
      scan.setFilter(new NoDataFilter());
      return new StoreScanner(store, store.getScanInfo(), scan, scanners,
          ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
          HConstants.OLDEST_TIMESTAMP);
    }
  }

  HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf,
      byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    Path path = new Path(DIR + callingMethod);
    HRegion r = HRegion.createHRegion(info, path, conf, htd);
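
    // We never start a region server here, so the coprocessor host that would normally be created
    // when the region is opened does not exist. Create one manually and attach it to the region so
    // the tests can load observers onto it.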
    RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
    r.setCoprocessorHost(host);
    return r;
  }

  @Test
  public void testRegionObserverScanTimeStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(getClass().getName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] { A };

    Configuration conf = HBaseConfiguration.create();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

    Put put = new Put(ROW);
    put.add(A, A, A);
    region.put(put);
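
    // even though the put succeeded, the NoDataFromScan observer should keep the get from
    // returning anything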
    Get get = new Get(ROW);
    Result r = region.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
          + r, r.list());
  }

  @Test
  public void testRegionObserverFlushTimeStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(getClass().getName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] { A };

    Configuration conf = HBaseConfiguration.create();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
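
    // put a row and flush it to disk; the NoDataFromFlush observer should drop the data on flush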
    Put put = new Put(ROW);
    put.add(A, A, A);
    region.put(put);
    region.flushcache();
    Get get = new Get(ROW);
    Result r = region.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
          + r, r.list());
  }
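
  /**
   * The easiest way to exercise the compaction hook is to spin up a mini-cluster, so the
   * compaction goes through the region server's normal CompactSplitThread machinery instead of a
   * backdoor call into the region.
   */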
  @Test
  @Category(MediumTests.class)
  public void testRegionObserverCompactionTimeStacking() throws Exception {
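    // set up a mini cluster so we can run a real compaction on a region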
    Configuration conf = UTIL.getConfiguration();
    conf.setInt("hbase.hstore.compaction.min", 2);
    UTIL.startMiniCluster();
    String tableName = "testRegionObserverCompactionTimeStacking";
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] A = Bytes.toBytes("A");
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    desc.addFamily(new HColumnDescriptor(A));
    desc.addCoprocessor(EmptyRegionObsever.class.getName(), null, Coprocessor.PRIORITY_USER, null);
    desc.addCoprocessor(NoDataFromCompaction.class.getName(), null, Coprocessor.PRIORITY_HIGHEST,
      null);

    HBaseAdmin admin = UTIL.getHBaseAdmin();
    admin.createTable(desc);

    HTable table = new HTable(conf, desc.getTableName());
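
    // put a row and flush it to disk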
    Put put = new Put(ROW);
    put.add(A, A, A);
    table.put(put);
    table.flushCommits();

    HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getTableName());
    List<HRegion> regions = rs.getOnlineRegions(desc.getTableName());
    assertEquals("More than 1 region serving test table with 1 row", 1, regions.size());
    HRegion region = regions.get(0);
    admin.flush(region.getRegionName());
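
    // put another row and flush that too, so there are at least two store files to compact
    // (hbase.hstore.compaction.min is set to 2 above)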
    put = new Put(Bytes.toBytes("anotherrow"));
    put.add(A, A, A);
    table.put(put);
    table.flushCommits();
    admin.flush(region.getRegionName());
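
    // run a compaction; the NoDataFromCompaction observer should keep any data from being written
    // out, and the latch lets us wait until the compaction request has finished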
    Store s = region.getStores().get(A);
    CountDownLatch latch = new CountDownLatch(1);
    WaitableCompactionRequest request = new WaitableCompactionRequest(s.getStorefiles(), latch);
    rs.compactSplitThread.requestCompaction(region, s,
      "compact for testRegionObserverCompactionTimeStacking", Store.PRIORITY_USER, request);

    latch.await();
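
    // check both rows to make sure neither comes back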
    Get get = new Get(ROW);
    Result r = table.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromCompaction coprocessor. Found: "
          + r, r.list());

    get = new Get(Bytes.toBytes("anotherrow"));
    r = table.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromCompaction coprocessor. Found: "
          + r, r.list());

    table.close();
    UTIL.shutdownMiniCluster();
  }
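
  /**
   * A CompactionRequest that counts down the given latch when the compaction finishes, so a test
   * can block until the request has been executed.
   */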
  public static class WaitableCompactionRequest extends CompactionRequest {
    private CountDownLatch done;
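
    /**
     * Constructor for a custom compaction request. The latch is counted down once this request
     * finishes executing.
     */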
    public WaitableCompactionRequest(Collection<StoreFile> files, CountDownLatch finished) {
      super(files);
      this.done = finished;
    }

    @Override
    public void afterExecute() {
      this.done.countDown();
    }
  }
}