/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing permissions
 * and limitations under the License.
 */
package org.apache.hadoop.hbase.coprocessor;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.NavigableSet;
import java.util.concurrent.CountDownLatch;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterBase;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreScanner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import org.junit.experimental.categories.Category;

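/**
 * Tests that the scanner-opening hooks on {@link RegionObserver} (scan, flush and compaction) can
 * completely replace the scanner a region uses, here by swapping in scanners that return no data.
 */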
@Category(MediumTests.class)
public class TestRegionObserverScannerOpenHook {
  private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
  static final Path DIR = UTIL.getDataTestDir();

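  /**
   * Filter that lets nothing through: every cell is skipped and the scan stops immediately.
   */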
  public static class NoDataFilter extends FilterBase {

    @Override
    public ReturnCode filterKeyValue(Cell ignored) throws IOException {
      return ReturnCode.SKIP;
    }

    @Override
    public boolean filterAllRemaining() throws IOException {
      return true;
    }

    @Override
    public boolean filterRow() throws IOException {
      return true;
    }
  }
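  /**
   * Observer that overrides nothing, so every hook keeps the default {@link BaseRegionObserver}
   * behavior.
   */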
  public static class EmptyRegionObserver extends BaseRegionObserver {
  }
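  /**
   * Don't return any data from a scan, by replacing the store scanner with one driven by
   * {@link NoDataFilter}.
   */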
  public static class NoDataFromScan extends BaseRegionObserver {
    @Override
    public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s)
        throws IOException {
      scan.setFilter(new NoDataFilter());
      return new StoreScanner(store, store.getScanInfo(), scan, targetCols,
          ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
    }
  }
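  /**
   * Don't let any data reach disk during a flush, by replacing the flush scanner with one driven
   * by {@link NoDataFilter}.
   */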
  public static class NoDataFromFlush extends BaseRegionObserver {
    @Override
    public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
      Scan scan = new Scan();
      scan.setFilter(new NoDataFilter());
      return new StoreScanner(store, store.getScanInfo(), scan,
          Collections.singletonList(memstoreScanner), ScanType.COMPACT_RETAIN_DELETES,
          store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
    }
  }
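  /**
   * Don't let any data survive a compaction, by replacing the compaction scanner with one driven
   * by {@link NoDataFilter}.
   */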
  public static class NoDataFromCompaction extends BaseRegionObserver {
    @Override
    public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
        long earliestPutTs, InternalScanner s) throws IOException {
      Scan scan = new Scan();
      scan.setFilter(new NoDataFilter());
      return new StoreScanner(store, store.getScanInfo(), scan, scanners,
          ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
          HConstants.OLDEST_TIMESTAMP);
    }
  }

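  /**
   * Creates a standalone region for the given column families and attaches a
   * {@link RegionCoprocessorHost} so observers can be loaded onto it directly.
   */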
  HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf,
      byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    Path path = new Path(DIR + callingMethod);
    HRegion r = HRegion.createHRegion(info, path, conf, htd);

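    // No region server is running in this test, so the coprocessor host that would normally be
    // created when the region is opened has to be built and attached by hand.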
    RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
    r.setCoprocessorHost(host);
    return r;
  }

  @Test
  public void testRegionObserverScanTimeStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(getClass().getName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] { A };

    Configuration conf = HBaseConfiguration.create();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(EmptyRegionObserver.class, Coprocessor.PRIORITY_USER, conf);

    Put put = new Put(ROW);
    put.add(A, A, A);
    region.put(put);

    Get get = new Get(ROW);
    Result r = region.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
          + r, r.listCells());
  }

  @Test
  public void testRegionObserverFlushTimeStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(getClass().getName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] { A };

    Configuration conf = HBaseConfiguration.create();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(EmptyRegionObserver.class, Coprocessor.PRIORITY_USER, conf);

    Put put = new Put(ROW);
    put.add(A, A, A);
    region.put(put);
    region.flushcache();
    Get get = new Get(ROW);
    Result r = region.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
          + r, r.listCells());
  }
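  /**
   * {@link HRegion} subclass that counts down a latch whenever a compaction completes, so the
   * test can wait for a compaction to finish before checking its effect.
   */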
  public static class CompactionCompletionNotifyingRegion extends HRegion {
    private static volatile CountDownLatch compactionStateChangeLatch = null;

    @SuppressWarnings("deprecation")
    public CompactionCompletionNotifyingRegion(Path tableDir, HLog log,
        FileSystem fs, Configuration confParam, HRegionInfo info,
        HTableDescriptor htd, RegionServerServices rsServices) {
      super(tableDir, log, fs, confParam, info, htd, rsServices);
    }

    public CountDownLatch getCompactionStateChangeLatch() {
      if (compactionStateChangeLatch == null) compactionStateChangeLatch = new CountDownLatch(1);
      return compactionStateChangeLatch;
    }

    @Override
    public boolean compact(CompactionContext compaction, Store store,
        CompactionThroughputController throughputController) throws IOException {
      boolean ret = super.compact(compaction, store, throughputController);
      if (ret) compactionStateChangeLatch.countDown();
      return ret;
    }

    @Override
    public boolean compact(CompactionContext compaction, Store store,
        CompactionThroughputController throughputController, User user) throws IOException {
      boolean ret = super.compact(compaction, store, throughputController, user);
      if (ret) compactionStateChangeLatch.countDown();
      return ret;
    }
  }
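  /**
   * Verifies that the compaction scanner hook controls what a compaction writes out. This runs
   * against a mini-cluster so the compaction goes through the normal region server code path
   * instead of being driven directly on a standalone region.
   */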
  @Test
  public void testRegionObserverCompactionTimeStacking() throws Exception {
    // Use the latch-notifying region and compact once two store files exist, so a single extra
    // flush is enough to trigger a compaction.
    Configuration conf = UTIL.getConfiguration();
    conf.setClass(HConstants.REGION_IMPL, CompactionCompletionNotifyingRegion.class, HRegion.class);
    conf.setInt("hbase.hstore.compaction.min", 2);
    UTIL.startMiniCluster();
    String tableName = "testRegionObserverCompactionTimeStacking";
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] A = Bytes.toBytes("A");
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    desc.addFamily(new HColumnDescriptor(A));
    desc.addCoprocessor(EmptyRegionObserver.class.getName(), null, Coprocessor.PRIORITY_USER, null);
    desc.addCoprocessor(NoDataFromCompaction.class.getName(), null, Coprocessor.PRIORITY_HIGHEST,
      null);

    HBaseAdmin admin = UTIL.getHBaseAdmin();
    admin.createTable(desc);

    HTable table = new HTable(conf, desc.getTableName());

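    // Write one row; the region flush below persists it into the first store file.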
    Put put = new Put(ROW);
    put.add(A, A, A);
    table.put(put);
    table.flushCommits();

    HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getTableName());
    List<HRegion> regions = rs.getOnlineRegions(desc.getTableName());
    assertEquals("More than 1 region serving test table with 1 row", 1, regions.size());
    HRegion region = regions.get(0);
    admin.flush(region.getRegionName());
    CountDownLatch latch = ((CompactionCompletionNotifyingRegion) region)
        .getCompactionStateChangeLatch();

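    // Write a second row and flush again; with hbase.hstore.compaction.min = 2 the second store
    // file makes the region eligible for compaction.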
    put = new Put(Bytes.toBytes("anotherrow"));
    put.add(A, A, A);
    table.put(put);
    table.flushCommits();
    admin.flush(region.getRegionName());

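    // Wait for the compaction (and with it the NoDataFromCompaction hook) to complete.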
    latch.await();

    Get get = new Get(ROW);
    Result r = table.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromCompaction coprocessor. Found: "
          + r, r.listCells());

    get = new Get(Bytes.toBytes("anotherrow"));
    r = table.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromCompaction coprocessor. Found: "
          + r, r.listCells());

    table.close();
    UTIL.shutdownMiniCluster();
  }
}