/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.coprocessor;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestRegionObserverInterface {
  static final Log LOG = LogFactory.getLog(TestRegionObserverInterface.class);

  public static final TableName TEST_TABLE = TableName.valueOf("TestTable");
  public final static byte[] A = Bytes.toBytes("a");
  public final static byte[] B = Bytes.toBytes("b");
  public final static byte[] C = Bytes.toBytes("c");
  public final static byte[] ROW = Bytes.toBytes("testrow");

  private static HBaseTestingUtility util = new HBaseTestingUtility();
  private static MiniHBaseCluster cluster = null;

  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    // Set the configuration to indicate which coprocessor should be loaded
    // on every region.
    Configuration conf = util.getConfiguration();
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver");

    util.startMiniCluster();
    cluster = util.getMiniHBaseCluster();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    util.shutdownMiniCluster();
  }

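  /**
   * Exercises the basic get/put/delete observer hooks, recreating the table so
   * the coprocessor's flags and counters start from a clean state.
   */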
  @Test
  public void testRegionObserver() throws IOException {
    TableName tableName = TEST_TABLE;
    // Recreate the table every time in order to reset the status of the
    // coprocessor.
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
            "hadDelete"},
        TEST_TABLE,
        new Boolean[] {false, false, false, false, false});

    Put put = new Put(ROW);
    put.add(A, A, A);
    put.add(B, B, B);
    put.add(C, C, C);
    table.put(put);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
            "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete"},
        TEST_TABLE,
        new Boolean[] {false, false, true, true, true, true, false});

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose"},
        TEST_TABLE,
        new Integer[] {1, 1, 0, 0});

    Get get = new Get(ROW);
    get.addColumn(A, A);
    get.addColumn(B, B);
    get.addColumn(C, C);
    table.get(get);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
            "hadDelete"},
        TEST_TABLE,
        new Boolean[] {true, true, true, true, false});

    Delete delete = new Delete(ROW);
    delete.deleteColumn(A, A);
    delete.deleteColumn(B, B);
    delete.deleteColumn(C, C);
    table.delete(delete);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
            "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete"},
        TEST_TABLE,
        new Boolean[] {true, true, true, true, true, true, true});
    util.deleteTable(tableName);
    table.close();

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose"},
        TEST_TABLE,
        new Integer[] {1, 1, 1, 1});
  }

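  /**
   * Verifies that a RowMutations batch (a put and a delete on the same row)
   * triggers the put and delete hooks but not the get hooks.
   */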
  @Test
  public void testRowMutation() throws IOException {
    TableName tableName = TEST_TABLE;
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
            "hadDeleted"},
        TEST_TABLE,
        new Boolean[] {false, false, false, false, false});

    Put put = new Put(ROW);
    put.add(A, A, A);
    put.add(B, B, B);
    put.add(C, C, C);

    Delete delete = new Delete(ROW);
    delete.deleteColumn(A, A);
    delete.deleteColumn(B, B);
    delete.deleteColumn(C, C);

    RowMutations arm = new RowMutations(ROW);
    arm.add(put);
    arm.add(delete);
    table.mutateRow(arm);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
            "hadDeleted"},
        TEST_TABLE,
        new Boolean[] {false, false, true, true, true});
    util.deleteTable(tableName);
    table.close();
  }

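  /**
   * Verifies that the preIncrement/postIncrement hooks fire around
   * HTable.increment().
   */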
  @Test
  public void testIncrementHook() throws IOException {
    TableName tableName = TEST_TABLE;

    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    Increment inc = new Increment(Bytes.toBytes(0));
    inc.addColumn(A, A, 1);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement"},
        tableName,
        new Boolean[] {false, false});

    table.increment(inc);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement"},
        tableName,
        new Boolean[] {true, true});
    util.deleteTable(tableName);
    table.close();
  }

  /**
   * Verifies that the scanner hooks are not invoked for a plain Get, only for an
   * explicit Scan. See HBASE-3583.
   */
  @Test
  public void testHBase3583() throws IOException {
    TableName tableName = TableName.valueOf("testHBase3583");
    util.createTable(tableName, new byte[][] {A, B, C});

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "wasScannerNextCalled",
            "wasScannerCloseCalled"},
        tableName,
        new Boolean[] {false, false, false, false});

    HTable table = new HTable(util.getConfiguration(), tableName);
    Put put = new Put(ROW);
    put.add(A, A, A);
    table.put(put);

    Get get = new Get(ROW);
    get.addColumn(A, A);
    table.get(get);

    // Verify that the scannerNext and scannerClose upcalls are not invoked
    // when we perform a get().
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "wasScannerNextCalled",
            "wasScannerCloseCalled"},
        tableName,
        new Boolean[] {true, true, false, false});

    Scan s = new Scan();
    ResultScanner scanner = table.getScanner(s);
    try {
      for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
        // Drain the scanner; we only care that the hooks fire.
      }
    } finally {
      scanner.close();
    }

    // Now the scanner hooks should have been invoked.
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"wasScannerNextCalled", "wasScannerCloseCalled"},
        tableName,
        new Boolean[] {true, true});
    util.deleteTable(tableName);
    table.close();
  }

  /**
   * Verifies that a row Delete triggers the delete hook without invoking the
   * scanner-open hook, while an explicit Scan does invoke it. See HBASE-3758.
   */
  @Test
  public void testHBase3758() throws IOException {
    TableName tableName = TableName.valueOf("testHBase3758");
    util.createTable(tableName, new byte[][] {A, B, C});

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadDeleted", "wasScannerOpenCalled"},
        tableName,
        new Boolean[] {false, false});

    HTable table = new HTable(util.getConfiguration(), tableName);
    Put put = new Put(ROW);
    put.add(A, A, A);
    table.put(put);

    Delete delete = new Delete(ROW);
    table.delete(delete);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadDeleted", "wasScannerOpenCalled"},
        tableName,
        new Boolean[] {true, false});

    Scan s = new Scan();
    ResultScanner scanner = table.getScanner(s);
    try {
      for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
        // Drain the scanner; we only care that the hooks fire.
      }
    } finally {
      scanner.close();
    }

    // Now the scanner-open hook should have been invoked.
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"wasScannerOpenCalled"},
        tableName,
        new Boolean[] {true});
    util.deleteTable(tableName);
    table.close();
  }

  /** Overrides compaction to only output rows with keys that are even numbers. */
  public static class EvenOnlyCompactor extends BaseRegionObserver {
    // Timestamps of the most recent compaction and flush, recorded by the
    // post-compact/post-flush hooks so the test can wait for those events.
    long lastCompaction;
    long lastFlush;

    @Override
    public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
        Store store, final InternalScanner scanner, final ScanType scanType) {
      // Wrap the compaction scanner and drop every row whose key, read as a
      // long, is odd.
      return new InternalScanner() {
        @Override
        public boolean next(List<KeyValue> results) throws IOException {
          return next(results, -1);
        }

        @Override
        public boolean next(List<KeyValue> results, int limit)
            throws IOException {
          List<KeyValue> internalResults = new ArrayList<KeyValue>();
          boolean hasMore;
          do {
            hasMore = scanner.next(internalResults, limit);
            if (!internalResults.isEmpty()) {
              long row = Bytes.toLong(internalResults.get(0).getRow());
              if (row % 2 == 0) {
                // return this row
                break;
              }
              // clear and continue
              internalResults.clear();
            }
          } while (hasMore);

          if (!internalResults.isEmpty()) {
            results.addAll(internalResults);
          }
          return hasMore;
        }

        @Override
        public void close() throws IOException {
          scanner.close();
        }
      };
    }

    @Override
    public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e,
        Store store, StoreFile resultFile) {
      lastCompaction = EnvironmentEdgeManager.currentTimeMillis();
    }

    @Override
    public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e) {
      lastFlush = EnvironmentEdgeManager.currentTimeMillis();
    }
  }

  /**
   * Tests overriding compaction handling via coprocessor hooks.
   */
  @Test
  public void testCompactionOverride() throws Exception {
    byte[] compactTable = Bytes.toBytes("TestCompactionOverride");
    HBaseAdmin admin = util.getHBaseAdmin();
    if (admin.tableExists(compactTable)) {
      admin.disableTable(compactTable);
      admin.deleteTable(compactTable);
    }

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(compactTable));
    htd.addFamily(new HColumnDescriptor(A));
    htd.addCoprocessor(EvenOnlyCompactor.class.getName());
    admin.createTable(htd);

    HTable table = new HTable(util.getConfiguration(), compactTable);
    for (long i = 1; i <= 10; i++) {
      byte[] iBytes = Bytes.toBytes(i);
      Put put = new Put(iBytes);
      put.setDurability(Durability.SKIP_WAL);
      put.add(A, A, iBytes);
      table.put(put);
    }

    HRegion firstRegion = cluster.getRegions(compactTable).get(0);
    Coprocessor cp = firstRegion.getCoprocessorHost().findCoprocessor(
        EvenOnlyCompactor.class.getName());
    assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp);
    EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp;

    // Force a flush, then a major compaction.
    long ts = System.currentTimeMillis();
    admin.flush(compactTable);
    // wait for the flush to complete
    for (int i = 0; i < 10; i++) {
      if (compactor.lastFlush >= ts) {
        break;
      }
      Thread.sleep(1000);
    }
    assertTrue("Flush didn't complete", compactor.lastFlush >= ts);
    LOG.debug("Flush complete");

    ts = compactor.lastFlush;
    admin.majorCompact(compactTable);
    // wait for the compaction to complete
    for (int i = 0; i < 30; i++) {
      if (compactor.lastCompaction >= ts) {
        break;
      }
      Thread.sleep(1000);
    }
    LOG.debug("Last compaction was at " + compactor.lastCompaction);
    assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts);

    // Only rows with even-numbered keys should remain.
    ResultScanner scanner = table.getScanner(new Scan());
    try {
      for (long i = 2; i <= 10; i += 2) {
        Result r = scanner.next();
        assertNotNull(r);
        assertFalse(r.isEmpty());
        byte[] iBytes = Bytes.toBytes(i);
        assertArrayEquals("Row should be " + i, iBytes, r.getRow());
        assertArrayEquals("Value should be " + i, iBytes, r.getValue(A, A));
      }
    } finally {
      scanner.close();
    }
    table.close();
  }

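  /**
   * Verifies that the preBulkLoadHFile/postBulkLoadHFile hooks fire around a
   * LoadIncrementalHFiles bulk load.
   */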
  @Test
  public void bulkLoadHFileTest() throws Exception {
    String testName = TestRegionObserverInterface.class.getName() + ".bulkLoadHFileTest";
    TableName tableName = TEST_TABLE;
    Configuration conf = util.getConfiguration();
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
        tableName,
        new Boolean[] {false, false});

    FileSystem fs = util.getTestFileSystem();
    final Path dir = util.getDataTestDirOnTestFS(testName).makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(A));

    createHFile(util.getConfiguration(), fs, new Path(familyDir, Bytes.toString(A)), A, A);

    // Bulk load
    new LoadIncrementalHFiles(conf).doBulkLoad(dir, new HTable(conf, tableName));

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
        tableName,
        new Boolean[] {true, true});
    util.deleteTable(tableName);
    table.close();
  }

  // Check on each region of the table whether the given coprocessor upcalls were
  // invoked, by reflectively calling the named accessor methods on the loaded
  // coprocessor instance and comparing against the expected values.
  private void verifyMethodResult(Class<?> c, String[] methodName, TableName tableName,
                                  Object[] value) throws IOException {
    try {
      for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
        for (HRegionInfo r : ProtobufUtil.getOnlineRegions(t.getRegionServer())) {
          if (!r.getTableName().equals(tableName)) {
            continue;
          }
          RegionCoprocessorHost cph = t.getRegionServer().getOnlineRegion(r.getRegionName()).
              getCoprocessorHost();

          Coprocessor cp = cph.findCoprocessor(c.getName());
          assertNotNull(cp);
          for (int i = 0; i < methodName.length; ++i) {
            Method m = c.getMethod(methodName[i]);
            Object o = m.invoke(cp);
            assertTrue("Result of " + c.getName() + "." + methodName[i]
                + " is expected to be " + value[i].toString()
                + ", while we get " + o.toString(), o.equals(value[i]));
          }
        }
      }
    } catch (Exception e) {
      throw new IOException(e);
    }
  }

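  // Writes a small HFile with nine cells (row keys "1".."9", value equal to the
  // row key) under the given family:qualifier, for use by the bulk load test.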
  private static void createHFile(
      Configuration conf,
      FileSystem fs, Path path,
      byte[] family, byte[] qualifier) throws IOException {
    HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withComparator(KeyValue.KEY_COMPARATOR)
        .create();
    long now = System.currentTimeMillis();
    try {
      for (int i = 1; i <= 9; i++) {
        KeyValue kv = new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now,
            Bytes.toBytes(i + ""));
        writer.append(kv);
      }
    } finally {
      writer.close();
    }
  }

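  // Returns n keys derived from base by appending a two-digit index; for
  // example, with base "a" this yields "a00", "a01", "a02", ...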
  private static byte[][] makeN(byte[] base, int n) {
    byte[][] ret = new byte[n][];
    for (int i = 0; i < n; i++) {
      ret[i] = Bytes.add(base, Bytes.toBytes(String.format("%02d", i)));
    }
    return ret;
  }

}