1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20 package org.apache.hadoop.hbase.coprocessor;
21
22 import org.apache.commons.logging.Log;
23 import org.apache.commons.logging.LogFactory;
24
25 import org.apache.hadoop.conf.Configuration;
26 import org.apache.hadoop.hbase.*;
27 import org.apache.hadoop.hbase.client.HBaseAdmin;
28 import org.apache.hadoop.hbase.regionserver.HRegion;
29 import org.apache.hadoop.hdfs.MiniDFSCluster;
30 import org.apache.hadoop.fs.FileSystem;
31 import org.apache.hadoop.fs.Path;
32
33 import javax.tools.*;
34 import java.io.*;
35 import java.util.*;
36 import java.util.jar.*;
37
38 import org.junit.*;
39 import org.junit.experimental.categories.Category;
40
41 import static org.junit.Assert.assertEquals;
42 import static org.junit.Assert.assertTrue;
43 import static org.junit.Assert.assertFalse;
44
45
46
47
/**
 * Tests dynamic loading of coprocessor classes: from jars on HDFS, from jars
 * on the local filesystem, from jars nested inside another jar, and the
 * reporting of statically configured coprocessors by region servers and the
 * master.
 */
@Category(MediumTests.class)
public class TestClassLoading {
  private static final Log LOG = LogFactory.getLog(TestClassLoading.class);
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  // Mini DFS cluster backing the HDFS-based tests; set in setUpBeforeClass().
  private static MiniDFSCluster cluster;

  // Copy-buffer size used when writing class files into jar archives.
  static final int BUFFER_SIZE = 4096;
  static final String tableName = "TestClassLoading";
  // Names of the coprocessor classes generated on the fly by
  // buildCoprocessorJar(); cpNameInvalid intentionally names a class that is
  // never compiled, to verify that loading it fails.
  static final String cpName1 = "TestCP1";
  static final String cpName2 = "TestCP2";
  static final String cpName3 = "TestCP3";
  static final String cpName4 = "TestCP4";
  static final String cpName5 = "TestCP5";
  static final String cpName6 = "TestCP6";
  static final String cpNameInvalid = "TestCPInvalid";

  // Coprocessors loaded statically through the cluster configuration in
  // setUpBeforeClass().
  private static Class<?> regionCoprocessor1 = ColumnAggregationEndpoint.class;
  private static Class<?> regionCoprocessor2 = GenericEndpoint.class;
  private static Class<?> regionServerCoprocessor = SampleRegionWALObserver.class;
  private static Class<?> masterCoprocessor = BaseMasterObserver.class;

  // The coprocessor set every region server is expected to report
  // (see testRegionServerCoprocessorsReported).
  private static final String[] regionServerSystemCoprocessors =
      new String[]{
        regionServerCoprocessor.getSimpleName()
      };
74
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();

    // Statically configure a system region coprocessor.
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        regionCoprocessor1.getName());

    // Statically configure a user region coprocessor.
    conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
        regionCoprocessor2.getName());

    // WAL and master observers; their presence is asserted by the
    // *CoprocessorsReported tests below.
    conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
        regionServerCoprocessor.getName());
    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
        masterCoprocessor.getName());
    // Configuration must be complete before the mini cluster starts, since
    // coprocessors are loaded at startup from these keys.
    TEST_UTIL.startMiniCluster(1);
    cluster = TEST_UTIL.getDFSCluster();
  }
97
  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Shuts down both the HBase and the backing DFS mini clusters.
    TEST_UTIL.shutdownMiniCluster();
  }
102
103
104 private boolean createJarArchive(File archiveFile, File[] tobeJared) {
105 try {
106 byte buffer[] = new byte[BUFFER_SIZE];
107
108 FileOutputStream stream = new FileOutputStream(archiveFile);
109 JarOutputStream out = new JarOutputStream(stream, new Manifest());
110
111 for (int i = 0; i < tobeJared.length; i++) {
112 if (tobeJared[i] == null || !tobeJared[i].exists()
113 || tobeJared[i].isDirectory()) {
114 continue;
115 }
116
117
118 JarEntry jarAdd = new JarEntry(tobeJared[i].getName());
119 jarAdd.setTime(tobeJared[i].lastModified());
120 out.putNextEntry(jarAdd);
121
122
123 FileInputStream in = new FileInputStream(tobeJared[i]);
124 while (true) {
125 int nRead = in.read(buffer, 0, buffer.length);
126 if (nRead <= 0)
127 break;
128 out.write(buffer, 0, nRead);
129 }
130 in.close();
131 }
132 out.close();
133 stream.close();
134 LOG.info("Adding classes to jar file completed");
135 return true;
136 } catch (Exception ex) {
137 LOG.error("Error: " + ex.getMessage());
138 return false;
139 }
140 }
141
142 private File buildCoprocessorJar(String className) throws Exception {
143
144 String javaCode = "import org.apache.hadoop.hbase.coprocessor.*;" +
145 "public class " + className + " extends BaseRegionObserver {}";
146 Path baseDir = TEST_UTIL.getDataTestDir();
147 Path srcDir = new Path(TEST_UTIL.getDataTestDir(), "src");
148 File srcDirPath = new File(srcDir.toString());
149 srcDirPath.mkdirs();
150 File sourceCodeFile = new File(srcDir.toString(), className + ".java");
151 BufferedWriter bw = new BufferedWriter(new FileWriter(sourceCodeFile));
152 bw.write(javaCode);
153 bw.close();
154
155
156 JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
157 ArrayList<String> srcFileNames = new ArrayList<String>();
158 srcFileNames.add(sourceCodeFile.toString());
159 StandardJavaFileManager fm = compiler.getStandardFileManager(null, null,
160 null);
161 Iterable<? extends JavaFileObject> cu =
162 fm.getJavaFileObjects(sourceCodeFile);
163 List<String> options = new ArrayList<String>();
164 options.add("-classpath");
165
166
167 String currentDir = new File(".").getAbsolutePath();
168 String classpath =
169 currentDir + File.separator + "target"+ File.separator + "classes" +
170 System.getProperty("path.separator") +
171 System.getProperty("surefire.test.class.path");
172 options.add(classpath);
173 LOG.debug("Setting classpath to: "+classpath);
174
175 JavaCompiler.CompilationTask task = compiler.getTask(null, fm, null,
176 options, null, cu);
177 assertTrue("Compile file " + sourceCodeFile + " failed.", task.call());
178
179
180 String jarFileName = className + ".jar";
181 File jarFile = new File(baseDir.toString(), jarFileName);
182 if (!createJarArchive(jarFile,
183 new File[]{new File(srcDir.toString(), className + ".class")})){
184 assertTrue("Build jar file failed.", false);
185 }
186
187 return jarFile;
188 }
189
  @Test
  // Verifies that table-attribute coprocessors are loaded from jars on HDFS,
  // that their per-table configuration keys are visible, that a bogus class
  // name is rejected, and that each jar yields exactly one cached classloader.
  public void testClassLoadingFromHDFS() throws Exception {
    FileSystem fs = cluster.getFileSystem();

    File jarFile1 = buildCoprocessorJar(cpName1);
    File jarFile2 = buildCoprocessorJar(cpName2);

    // Copy the jars into the root of the mini DFS.
    fs.copyFromLocalFile(new Path(jarFile1.getPath()),
        new Path(fs.getUri().toString() + Path.SEPARATOR));
    String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR +
        jarFile1.getName();
    Path pathOnHDFS1 = new Path(jarFileOnHDFS1);
    assertTrue("Copy jar file to HDFS failed.",
        fs.exists(pathOnHDFS1));
    LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS1);

    fs.copyFromLocalFile(new Path(jarFile2.getPath()),
        new Path(fs.getUri().toString() + Path.SEPARATOR));
    String jarFileOnHDFS2 = fs.getUri().toString() + Path.SEPARATOR +
        jarFile2.getName();
    Path pathOnHDFS2 = new Path(jarFileOnHDFS2);
    assertTrue("Copy jar file to HDFS failed.",
        fs.exists(pathOnHDFS2));
    LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS2);

    // Create a table whose descriptor references the jars on HDFS.
    // Attribute value format: <jar path>|<class name>|<priority>[|k=v,...]
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("test"));
    htd.setValue("COPROCESSOR$1", jarFileOnHDFS1.toString() + "|" + cpName1 +
        "|" + Coprocessor.PRIORITY_USER);
    // The second coprocessor also carries per-table configuration keys.
    htd.setValue("COPROCESSOR$2", jarFileOnHDFS2.toString() + "|" + cpName2 +
        "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
    // A class name that does not exist in the jar; it must fail to load.
    htd.setValue("COPROCESSOR$3", jarFileOnHDFS2.toString() + "|" + cpNameInvalid +
        "|" + Coprocessor.PRIORITY_USER);
    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    if (admin.tableExists(tableName)) {
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }
    // Start from an empty classloader cache so the cache-size assertions
    // below are not affected by other tests in this class.
    CoprocessorHost.classLoadersCache.clear();
    byte[] startKey = {10, 63};
    byte[] endKey = {12, 43};
    admin.createTable(htd, startKey, endKey, 4);
    waitForTable(htd.getName());

    // Verify loading on every region of the table. The found* flags are
    // AND-accumulated: they remain true only if the coprocessor (or config
    // key) is present on every table region seen.
    boolean foundTableRegion=false;
    boolean found_invalid = true, found1 = true, found2 = true, found2_k1 = true,
        found2_k2 = true, found2_k3 = true;
    Map<HRegion, Set<ClassLoader>> regionsActiveClassLoaders =
        new HashMap<HRegion, Set<ClassLoader>>();
    MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
    for (HRegion region:
        hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
      if (region.getRegionNameAsString().startsWith(tableName)) {
        foundTableRegion = true;
        CoprocessorEnvironment env;
        env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
        found1 = found1 && (env != null);
        env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
        found2 = found2 && (env != null);
        if (env != null) {
          Configuration conf = env.getConfiguration();
          found2_k1 = found2_k1 && (conf.get("k1") != null);
          found2_k2 = found2_k2 && (conf.get("k2") != null);
          found2_k3 = found2_k3 && (conf.get("k3") != null);
        } else {
          found2_k1 = found2_k2 = found2_k3 = false;
        }
        env = region.getCoprocessorHost().findCoprocessorEnvironment(cpNameInvalid);
        found_invalid = found_invalid && (env != null);

        // Remember which external classloaders each region actually used,
        // checked against the cache contents below.
        regionsActiveClassLoaders
            .put(region, ((CoprocessorHost) region.getCoprocessorHost()).getExternalClassLoaders());
      }
    }

    assertTrue("No region was found for table " + tableName, foundTableRegion);
    assertTrue("Class " + cpName1 + " was missing on a region", found1);
    assertTrue("Class " + cpName2 + " was missing on a region", found2);
    // The invalid class must not have been loaded on any region.
    assertFalse("Class " + cpNameInvalid + " was found on a region", found_invalid);
    assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
    assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
    assertTrue("Configuration key 'k3' was missing on a region", found2_k3);

    // Each external jar must have produced exactly one classloader, cached
    // under its HDFS path.
    assertTrue(jarFileOnHDFS1 + " was not cached",
        CoprocessorHost.classLoadersCache.containsKey(pathOnHDFS1));
    assertTrue(jarFileOnHDFS2 + " was not cached",
        CoprocessorHost.classLoadersCache.containsKey(pathOnHDFS2));
    assertEquals("The number of cached classloaders should be equal to the number" +
        " of external jar files",
        2, CoprocessorHost.classLoadersCache.size());

    // Every classloader a region used must come from the shared cache.
    Set<ClassLoader> externalClassLoaders = new HashSet<ClassLoader>(
        CoprocessorHost.classLoadersCache.values());
    for (Map.Entry<HRegion, Set<ClassLoader>> regionCP : regionsActiveClassLoaders.entrySet()) {
      assertTrue("Some CP classloaders for region " + regionCP.getKey() + " are not cached."
          + " ClassLoader Cache:" + externalClassLoaders
          + " Region ClassLoaders:" + regionCP.getValue(),
          externalClassLoaders.containsAll(regionCP.getValue()));
    }
  }
299
300 private String getLocalPath(File file) {
301 return new Path(file.toURI()).toString();
302 }
303
304 @Test
305
306 public void testClassLoadingFromLocalFS() throws Exception {
307 File jarFile = buildCoprocessorJar(cpName3);
308
309
310 HTableDescriptor htd = new HTableDescriptor(cpName3);
311 htd.addFamily(new HColumnDescriptor("test"));
312 htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName3 + "|" +
313 Coprocessor.PRIORITY_USER);
314 HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
315 admin.createTable(htd);
316 waitForTable(htd.getName());
317
318
319 boolean found = false;
320 MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
321 for (HRegion region:
322 hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
323 if (region.getRegionNameAsString().startsWith(cpName3)) {
324 found = (region.getCoprocessorHost().findCoprocessor(cpName3) != null);
325 }
326 }
327 assertTrue("Class " + cpName3 + " was missing on a region", found);
328 }
329
  @Test
  // Verifies that an externally loaded coprocessor class is loaded through a
  // dedicated CoprocessorClassLoader rather than the system classloader.
  public void testPrivateClassLoader() throws Exception {
    File jarFile = buildCoprocessorJar(cpName4);

    // Create the table with the coprocessor referenced from a local jar.
    HTableDescriptor htd = new HTableDescriptor(cpName4);
    htd.addFamily(new HColumnDescriptor("test"));
    htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName4 + "|" +
        Coprocessor.PRIORITY_USER);
    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    admin.createTable(htd);
    waitForTable(htd.getName());

    // Find the loaded coprocessor and check which classloader produced it.
    boolean found = false;
    MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
    for (HRegion region:
        hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
      if (region.getRegionNameAsString().startsWith(cpName4)) {
        Coprocessor cp = region.getCoprocessorHost().findCoprocessor(cpName4);
        if (cp != null) {
          found = true;
          assertEquals("Class " + cpName4 + " was not loaded by CoprocessorClassLoader",
              cp.getClass().getClassLoader().getClass(), CoprocessorClassLoader.class);
        }
      }
    }
    assertTrue("Class " + cpName4 + " was missing on a region", found);
  }
360
  @Test
  // HBASE-3810: coprocessor table attributes set via raw setValue() may carry
  // extra whitespace and mixed-case keys; this test mixes such raw attributes
  // with the addCoprocessor() API and verifies all of them load.
  public void testHBase3810() throws Exception {
    File jarFile1 = buildCoprocessorJar(cpName1);
    File jarFile2 = buildCoprocessorJar(cpName2);
    File jarFile5 = buildCoprocessorJar(cpName5);
    File jarFile6 = buildCoprocessorJar(cpName6);

    // Deliberately irregular keys: padded whitespace, mixed case, a
    // zero-padded ordinal.
    String cpKey1 = "COPROCESSOR$1";
    String cpKey2 = " Coprocessor$2 ";
    String cpKey3 = " coprocessor$03 ";

    String cpValue1 = getLocalPath(jarFile1) + "|" + cpName1 + "|" +
        Coprocessor.PRIORITY_USER;
    // Value with padded fields and a trailing empty priority field.
    String cpValue2 = getLocalPath(jarFile2) + " | " + cpName2 + " | ";
    // Empty jar path: the class must resolve from the default classpath;
    // empty priority plus a config key/value.
    String cpValue3 =
        " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v ";

    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("test"));

    // Add two coprocessors by setting the table attributes directly.
    htd.setValue(cpKey1, cpValue1);
    htd.setValue(cpKey2, cpValue2);
    htd.setValue(cpKey3, cpValue3);

    // Add two more through the typed addCoprocessor() API, one with a
    // per-coprocessor configuration map.
    htd.addCoprocessor(cpName5, new Path(getLocalPath(jarFile5)),
        Coprocessor.PRIORITY_USER, null);
    Map<String, String> kvs = new HashMap<String, String>();
    kvs.put("k1", "v1");
    kvs.put("k2", "v2");
    kvs.put("k3", "v3");
    htd.addCoprocessor(cpName6, new Path(getLocalPath(jarFile6)),
        Coprocessor.PRIORITY_USER, kvs);

    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    if (admin.tableExists(tableName)) {
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }
    admin.createTable(htd);
    waitForTable(htd.getName());

    // OR-accumulated flags: each coprocessor must be found on at least one
    // region of the table. found6_k4 is never set — 'k4' was never
    // configured, so the assertFalse below documents its absence.
    boolean found_2 = false, found_1 = false, found_3 = false,
        found_5 = false, found_6 = false;
    boolean found6_k1 = false, found6_k2 = false, found6_k3 = false,
        found6_k4 = false;

    MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
    for (HRegion region:
        hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
      if (region.getRegionNameAsString().startsWith(tableName)) {
        found_1 = found_1 ||
            (region.getCoprocessorHost().findCoprocessor(cpName1) != null);
        found_2 = found_2 ||
            (region.getCoprocessorHost().findCoprocessor(cpName2) != null);
        found_3 = found_3 ||
            (region.getCoprocessorHost().findCoprocessor("SimpleRegionObserver")
                != null);
        found_5 = found_5 ||
            (region.getCoprocessorHost().findCoprocessor(cpName5) != null);

        CoprocessorEnvironment env =
            region.getCoprocessorHost().findCoprocessorEnvironment(cpName6);
        if (env != null) {
          found_6 = true;
          Configuration conf = env.getConfiguration();
          found6_k1 = conf.get("k1") != null;
          found6_k2 = conf.get("k2") != null;
          found6_k3 = conf.get("k3") != null;
        }
      }
    }

    assertTrue("Class " + cpName1 + " was missing on a region", found_1);
    assertTrue("Class " + cpName2 + " was missing on a region", found_2);
    assertTrue("Class SimpleRegionObserver was missing on a region", found_3);
    assertTrue("Class " + cpName5 + " was missing on a region", found_5);
    assertTrue("Class " + cpName6 + " was missing on a region", found_6);

    assertTrue("Configuration key 'k1' was missing on a region", found6_k1);
    assertTrue("Configuration key 'k2' was missing on a region", found6_k2);
    assertTrue("Configuration key 'k3' was missing on a region", found6_k3);
    assertFalse("Configuration key 'k4' wasn't configured", found6_k4);
  }
453
  @Test
  // Verifies that coprocessor classes can be loaded from jars nested under a
  // /lib/ directory inside an outer jar stored on HDFS.
  public void testClassLoadingFromLibDirInJar() throws Exception {
    FileSystem fs = cluster.getFileSystem();

    File innerJarFile1 = buildCoprocessorJar(cpName1);
    File innerJarFile2 = buildCoprocessorJar(cpName2);
    File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar");

    byte buffer[] = new byte[BUFFER_SIZE];

    // Build the outer jar, embedding each coprocessor jar under /lib/.
    FileOutputStream stream = new FileOutputStream(outerJarFile);
    JarOutputStream out = new JarOutputStream(stream, new Manifest());

    for (File jarFile: new File[] { innerJarFile1, innerJarFile2 }) {
      JarEntry jarAdd = new JarEntry("/lib/" + jarFile.getName());
      jarAdd.setTime(jarFile.lastModified());
      out.putNextEntry(jarAdd);

      // Copy the inner jar's bytes into the outer jar entry.
      FileInputStream in = new FileInputStream(jarFile);
      while (true) {
        int nRead = in.read(buffer, 0, buffer.length);
        if (nRead <= 0)
          break;
        out.write(buffer, 0, nRead);
      }
      in.close();
    }
    out.close();
    stream.close();
    LOG.info("Adding jar file to outer jar file completed");

    // Copy the outer jar into the root of the mini DFS.
    fs.copyFromLocalFile(new Path(outerJarFile.getPath()),
        new Path(fs.getUri().toString() + Path.SEPARATOR));
    String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR +
        outerJarFile.getName();
    assertTrue("Copy jar file to HDFS failed.",
        fs.exists(new Path(jarFileOnHDFS)));
    LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);

    // Both table attributes reference the OUTER jar; the classes live in
    // the nested jars.
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("test"));
    htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 +
        "|" + Coprocessor.PRIORITY_USER);
    // The second coprocessor also carries per-table configuration keys.
    htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 +
        "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    if (admin.tableExists(tableName)) {
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }
    admin.createTable(htd);
    waitForTable(htd.getName());

    // Verify both coprocessors (and the config keys of the second) loaded
    // on at least one region of the table.
    boolean found1 = false, found2 = false, found2_k1 = false,
        found2_k2 = false, found2_k3 = false;
    MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
    for (HRegion region:
        hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
      if (region.getRegionNameAsString().startsWith(tableName)) {
        CoprocessorEnvironment env;
        env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
        if (env != null) {
          found1 = true;
        }
        env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
        if (env != null) {
          found2 = true;
          Configuration conf = env.getConfiguration();
          found2_k1 = conf.get("k1") != null;
          found2_k2 = conf.get("k2") != null;
          found2_k3 = conf.get("k3") != null;
        }
      }
    }
    assertTrue("Class " + cpName1 + " was missing on a region", found1);
    assertTrue("Class " + cpName2 + " was missing on a region", found2);
    assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
    assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
    assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
  }
543
544 @Test
545 public void testRegionServerCoprocessorsReported() throws Exception {
546
547
548
549
550 HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
551 assertAllRegionServers(regionServerSystemCoprocessors,null);
552 }
553
554
555
556
557
558
559
560
561
562
563 Map<ServerName, HServerLoad> serversForTable(String tableName) {
564 Map<ServerName, HServerLoad> serverLoadHashMap =
565 new HashMap<ServerName, HServerLoad>();
566 for(Map.Entry<ServerName,HServerLoad> server:
567 TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager().
568 getOnlineServers().entrySet()) {
569 for(Map.Entry<byte[], HServerLoad.RegionLoad> region:
570 server.getValue().getRegionsLoad().entrySet()) {
571 if (region.getValue().getNameAsString().equals(tableName)) {
572
573 serverLoadHashMap.put(server.getKey(),server.getValue());
574
575 break;
576 }
577 }
578 }
579 return serverLoadHashMap;
580 }
581
582 void assertAllRegionServers(String[] expectedCoprocessors, String tableName)
583 throws InterruptedException {
584 Map<ServerName, HServerLoad> servers;
585 String[] actualCoprocessors = null;
586 boolean success = false;
587 for(int i = 0; i < 5; i++) {
588 if (tableName == null) {
589
590 servers =
591 TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager().
592 getOnlineServers();
593 } else {
594 servers = serversForTable(tableName);
595 }
596 boolean any_failed = false;
597 for(Map.Entry<ServerName,HServerLoad> server: servers.entrySet()) {
598 actualCoprocessors = server.getValue().getRsCoprocessors();
599 if (!Arrays.equals(actualCoprocessors, expectedCoprocessors)) {
600 LOG.debug("failed comparison: actual: " +
601 Arrays.toString(actualCoprocessors) +
602 " ; expected: " + Arrays.toString(expectedCoprocessors));
603 any_failed = true;
604 break;
605 }
606 }
607 if (any_failed == false) {
608 success = true;
609 break;
610 }
611 LOG.debug("retrying after failed comparison: " + i);
612 Thread.sleep(1000);
613 }
614 assertTrue(success);
615 }
616
617 @Test
618 public void testMasterCoprocessorsReported() {
619
620
621
622 final String loadedMasterCoprocessorsVerify =
623 "[" + masterCoprocessor.getSimpleName() + "]";
624 String loadedMasterCoprocessors =
625 java.util.Arrays.toString(
626 TEST_UTIL.getHBaseCluster().getMaster().getCoprocessors());
627 assertEquals(loadedMasterCoprocessorsVerify, loadedMasterCoprocessors);
628 }
629
  // Blocks until the named table is enabled (5s timeout), then sleeps one
  // more second — presumably because region deployment can lag the enabled
  // state; NOTE(review): confirm whether the extra sleep is still needed.
  private void waitForTable(byte[] name) throws InterruptedException, IOException {
    TEST_UTIL.waitTableEnabled(name, 5000);
    Thread.sleep(1000);
  }
636
  // JUnit rule that checks per-test resource usage (e.g. leaked threads or
  // file handles) around each test method.
  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
      new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}
641