/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.tool.Canary;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;

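/**
 * Facility for testing HBase. Manages the lifecycle of the mini clusters
 * used in tests (HBase, DFS, ZooKeeper, MapReduce) and supplies helpers for
 * creating tables, loading and counting rows, and manipulating cluster
 * state. Manages one cluster at a time. All test data is written beneath a
 * per-instance test directory so that concurrent test runs do not collide.
 */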
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HBaseTestingUtility extends HBaseCommonTestingUtility {
  private MiniZooKeeperCluster zkCluster = null;

  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";

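  /** Default number of regions per region server when creating pre-split tables. */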
  public static final int DEFAULT_REGIONS_PER_SERVER = 5;

  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  private boolean miniClusterRunning;

  private String hadoopLogDir;

  private File clusterTestDir = null;

  private Path dataTestDirOnTestFS = null;

  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  private static String FS_URI;

  private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();

  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Boolean.FALSE },
      { Boolean.TRUE }
    });

  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();

  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
    Compression.Algorithm.NONE, Compression.Algorithm.GZ
  };

  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
        HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
    bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    super(conf);

    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }

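  /**
   * Creates an instance suitable for standalone, local testing: a fresh
   * configuration with the HBase root dir pointed at the local test data
   * directory.
   */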
  public static HBaseTestingUtility createLocalHTU() {
    Configuration c = HBaseConfiguration.create();
    return createLocalHTU(c);
  }

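  /**
   * Same as {@link #createLocalHTU()} but using the supplied configuration,
   * whose HBase root dir is repointed at the local test data directory.
   */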
  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    HBaseTestingUtility htu = new HBaseTestingUtility(c);
    String dataTestDir = htu.getDataTestDir().toString();
    htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
    return htu;
  }

  @Deprecated
  public void setHDFSClientRetry(final int retries) {
    this.conf.setInt("hdfs.client.retries.number", retries);
    if (0 == retries) {
      makeDFSClientNonRetrying();
    }
  }

  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

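  /**
   * Home our test data under a subdirectory of the base test dir, and point
   * hadoop.log.dir, hadoop.tmp.dir and mapred.local.dir beneath it so that
   * tests do not litter shared locations such as /tmp.
   * @return the created data test directory, or null if none was set up
   */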
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    createSubDir(
      "mapred.local.dir",
      testPath, "mapred-local-dir");

    return testPath;
  }

  private void createSubDirAndSystemProperty(
      String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          " Erasing configuration value by system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

  Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      return;
    }

    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();

    boolean b = deleteOnExit();
    if (b) clusterTestDir.deleteOnExit();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
  }

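  /**
   * Returns a Path in the test filesystem, obtained from
   * {@link #getTestFileSystem()}, under which callers can write temporary
   * test data. The directory is created lazily on first access.
   */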
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test on test fs dir already setup in "
        + dataTestDirOnTestFS.toString());
      return;
    }

    FileSystem fs = getTestFileSystem();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      File dataTestDir = new File(getDataTestDir().toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
      dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
    } else {
      Path base = getBaseTestDirOnTestFS();
      String randomStr = UUID.randomUUID().toString();
      dataTestDirOnTestFS = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
    }
  }

  /**
   * Cleans the test data directory on the test filesystem.
   * @return true if the directory existed and was removed
   */
  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    // Guard against a delete(null, ...) call when the directory was never created.
    if (dataTestDirOnTestFS == null) {
      return false;
    }
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret) {
      dataTestDirOnTestFS = null;
    }
    return ret;
  }

  /**
   * Cleans a subdirectory of the test data directory on the test filesystem.
   * @return true if the subdirectory was removed
   */
  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }

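  /**
   * Start a minidfscluster.
   * @param servers how many datanodes to start
   * @see #shutdownMiniDFSCluster()
   */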
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
      throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    try {
      Method m = Class.forName("org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream")
        .getMethod("setShouldSkipFsyncForTesting", new Class<?>[] { boolean.class });
      m.invoke(null, new Object[] { true });
    } catch (ClassNotFoundException e) {
      LOG.info("EditLogFileOutputStream not found");
    }

    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
      setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
      setLevel(org.apache.log4j.Level.ERROR);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    this.dfsCluster.waitClusterUp();

    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, racks, hosts, null);

    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    this.dfsCluster.waitClusterUp();

    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
      null, null, null);
    return dfsCluster;
  }

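  /**
   * Creates the directories consumed by the Hadoop daemons (data cache, tmp,
   * logs, mapred dirs) under the cluster test dir and exports them through
   * system properties and this utility's configuration. Called before
   * starting the DFS and MapReduce mini-clusters.
   */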
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapred.local.dir");
    createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
  }

  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }

  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);

      conf.set("dfs.block.local-path-access.user", curUser);

      conf.setBoolean("dfs.client.read.shortcircuit", true);

      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }

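  /**
   * Call this if you only want a ZooKeeper mini-cluster (a single server).
   * @see #shutdownMiniZKCluster()
   */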
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
      throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
      throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0) {
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }

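  /**
   * Start up a minicluster of HBase, DFS, and ZooKeeper with one master and
   * one regionserver.
   * @see #shutdownMiniCluster()
   */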
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  public MiniHBaseCluster startMiniCluster(final int numSlaves)
      throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    return startMiniCluster(
      numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
      numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException {
    return startMiniHBaseCluster(numMasters, numSlaves, null, null);
  }

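  /**
   * Starts up the mini HBase cluster on top of an already-running DFS and
   * ZooKeeper cluster. Creates the HBase root dir, starts the requested
   * masters and regionservers, then blocks until hbase:meta is scannable.
   * @return reference to the started mini HBase cluster
   */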
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
      final int numSlaves, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws IOException, InterruptedException {
    createRootDir();

    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster =
      new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);

    HTable t = new HTable(c, TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin();
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster) this.hbaseCluster;
  }

  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);

    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster) this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
      MiniHBaseCluster.class.getName());
  }

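  /**
   * Stops the mini HBase, ZooKeeper, and DFS clusters (ZooKeeper is left
   * alone if a cluster was passed in from outside), then cleans up the test
   * directories.
   * @see #startMiniCluster()
   */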
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }

  @Override
  public boolean cleanupTestDir() throws IOException {
    boolean ret = super.cleanupTestDir();
    if (deleteDir(this.clusterTestDir)) {
      this.clusterTestDir = null;
      return ret;
    }
    return false;
  }

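  /**
   * Shuts down the mini HBase cluster only; leaves ZooKeeper and DFS running
   * if they were started.
   */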
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();

      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }

  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }

  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

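  /**
   * Create a table with a single column family.
   * @return an HTable instance for the created table
   */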
  public HTable createTable(String tableName, String family)
      throws IOException {
    return createTable(TableName.valueOf(tableName), new String[] { family });
  }

  public HTable createTable(byte[] tableName, byte[] family)
      throws IOException {
    return createTable(TableName.valueOf(tableName), new byte[][] { family });
  }

  public HTable createTable(TableName tableName, String[] families)
      throws IOException {
    List<byte[]> fams = new ArrayList<byte[]>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  public HTable createTable(TableName tableName, byte[] family)
      throws IOException {
    return createTable(tableName, new byte[][] { family });
  }

  public HTable createTable(byte[] tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
      new Configuration(getConfiguration()));
  }

  public HTable createTable(TableName tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
      new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
      startKey, endKey, numRegions);
  }

  public HTable createTable(String tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
      startKey, endKey, numRegions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
      throws IOException {
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setBloomFilterType(BloomType.NONE);
      htd.addFamily(hcd);
    }
    getHBaseAdmin().createTable(htd);

    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(c, htd.getTableName());
  }

  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    return createTable(new HTableDescriptor(tableName), families, c);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setBloomFilterType(BloomType.NONE);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  public HTable createTable(TableName tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
      throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    return createTable(TableName.valueOf(tableName),
      families, numVersions, blockSize);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions)
        .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
      throws IOException {
    return createTable(TableName.valueOf(tableName), family, splitRows);
  }

  public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    desc.addFamily(hcd);
    getHBaseAdmin().createTable(desc, splitRows);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, splitRows);

    waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
    return new HTable(getConfiguration(), tableName);
  }

  public void deleteTable(String tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  public void deleteTable(byte[] tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  public void deleteTable(TableName tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }

  public final static byte[] fam1 = Bytes.toBytes("colfamily11");
  public final static byte[] fam2 = Bytes.toBytes("colfamily21");
  public final static byte[] fam3 = Bytes.toBytes("colfamily31");
  public static final byte[][] COLUMNS = { fam1, fam2, fam3 };
  private static final int MAXVERSIONS = 3;

  public static final char FIRST_CHAR = 'a';
  public static final char LAST_CHAR = 'z';
  public static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR };
  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);

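  /**
   * Creates a table descriptor with the three default families (fam1, fam2,
   * fam3), block cache disabled, and the supplied version, TTL, and
   * keep-deleted-cells settings.
   */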
  public HTableDescriptor createTableDescriptor(final String name,
      final int minVersions, final int versions, final int ttl, boolean keepDeleted) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
    for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
      htd.addFamily(new HColumnDescriptor(cfName)
        .setMinVersions(minVersions)
        .setMaxVersions(versions)
        .setKeepDeletedCells(keepDeleted)
        .setBlockCacheEnabled(false)
        .setTimeToLive(ttl)
      );
    }
    return htd;
  }

  public HTableDescriptor createTableDescriptor(final String name) {
    return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
      MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
  }

  public HRegion createLocalHRegion(HTableDescriptor desc, byte[] startKey,
      byte[] endKey)
      throws IOException {
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
    return createLocalHRegion(hri, desc);
  }

  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
  }

  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog)
      throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
  }

  public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
      String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
      HLog hlog, byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.setReadOnly(isReadOnly);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setMaxVersions(Integer.MAX_VALUE);
      htd.addFamily(hcd);
    }
    htd.setDurability(durability);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    return createLocalHRegion(info, htd, hlog);
  }

  public HTable truncateTable(byte[] tableName) throws IOException {
    return truncateTable(TableName.valueOf(tableName));
  }

  public HTable truncateTable(TableName tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan = table.getScanner(scan);
    resScan.close();
    return table;
  }

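  /**
   * Load a table with rows keyed 'aaa' through 'zzz', one value per row.
   * @return count of rows loaded
   */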
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    return loadTable(t, new byte[][] { f });
  }

  public int loadTable(final HTable t, final byte[] f, boolean writeToWAL) throws IOException {
    return loadTable(t, new byte[][] { f }, null, writeToWAL);
  }

  public int loadTable(final HTable t, final byte[][] f) throws IOException {
    return loadTable(t, f, null);
  }

  public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
    return loadTable(t, f, value, true);
  }

  public int loadTable(final HTable t, final byte[][] f, byte[] value, boolean writeToWAL)
      throws IOException {
    t.setAutoFlush(false);
    int rowCount = 0;
    for (byte[] row : HBaseTestingUtility.ROWS) {
      Put put = new Put(row);
      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
      for (int i = 0; i < f.length; i++) {
        put.add(f[i], null, value != null ? value : row);
      }
      t.put(put);
      rowCount++;
    }
    t.flushCommits();
    return rowCount;
  }

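  /**
   * Tracks and validates which three-character rows ('aaa' through 'zzz')
   * have been seen, for example while scanning a table loaded with
   * {@link #loadTable(HTable, byte[])}.
   */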
  public static class SeenRowTracker {
    int dim = 'z' - 'a' + 1;
    int[][][] seenRows = new int[dim][dim][dim];
    byte[] startRow;
    byte[] stopRow;

    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
      this.startRow = startRow;
      this.stopRow = stopRow;
    }

    void reset() {
      for (byte[] row : ROWS) {
        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
      }
    }

    int i(byte b) {
      return b - 'a';
    }

    public void addRow(byte[] row) {
      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
    }

    public void validate() {
      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
            int count = seenRows[i(b1)][i(b2)][i(b3)];
            int expectedCount = 0;
            if (Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0
                && Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0) {
              expectedCount = 1;
            }
            if (count != expectedCount) {
              String row = new String(new byte[] { b1, b2, b3 });
              throw new RuntimeException("Row:" + row + " has a seen count of " + count +
                " instead of " + expectedCount);
            }
          }
        }
      }
    }
  }

  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
    return loadRegion(r, f, false);
  }

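  /**
   * Load a region with rows keyed 'aaa' through 'zzz', retrying with
   * exponential backoff when the region is too busy.
   * @param flush whether to flush the cache after each batch of rows
   * @return count of rows loaded
   */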
  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
      throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.setDurability(Durability.SKIP_WAL);
          put.add(f, null, k);
          if (r.getLog() == null) put.setDurability(Durability.SKIP_WAL);

          int preRowCount = rowCount;
          int pause = 10;
          int maxPause = 1000;
          while (rowCount == preRowCount) {
            try {
              r.put(put);
              rowCount++;
            } catch (RegionTooBusyException e) {
              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
              Threads.sleep(pause);
            }
          }
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }

  public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow)
      throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Put put = new Put(data);
      put.add(f, null, data);
      t.put(put);
    }
  }

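  /**
   * Return the number of rows in the given table.
   */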
  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  public int countRows(final HTable table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

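  /**
   * Return an MD5 digest, as a hex string, over all row keys in the table.
   */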
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // Render the digest bytes as hex. MessageDigest.toString() would only
    // print the algorithm name and state, not the checksum itself.
    StringBuilder sb = new StringBuilder();
    for (byte b : digest.digest()) {
      sb.append(String.format("%02x", b));
    }
    return sb.toString();
  }

  public int createMultiRegions(HTable table, byte[] columnFamily)
      throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily);
  }

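  /** All the row keys ('aaa' through 'zzz') written by {@link #loadTable(HTable, byte[])}. */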
  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3];
  static {
    int i = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          ROWS[i][0] = b1;
          ROWS[i][1] = b2;
          ROWS[i][2] = b3;
          i++;
        }
      }
    }
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily)
      throws IOException {
    return createMultiRegions(c, table, columnFamily, KEYS);
  }

  void makeDFSClientNonRetrying() {
    if (null == this.dfsCluster) {
      LOG.debug("dfsCluster has not started, can't make client non-retrying.");
      return;
    }
    try {
      final FileSystem filesystem = this.dfsCluster.getFileSystem();
      if (!(filesystem instanceof DistributedFileSystem)) {
        LOG.debug("dfsCluster is not backed by a DistributedFileSystem, " +
          "can't make client non-retrying.");
        return;
      }

      final DistributedFileSystem fs = (DistributedFileSystem) filesystem;

      final Field dfsField = fs.getClass().getDeclaredField("dfs");
      dfsField.setAccessible(true);
      final Class<?> dfsClazz = dfsField.getType();
      final DFSClient dfs = DFSClient.class.cast(dfsField.get(fs));

      final Method createRPCNamenode = dfsClazz.getDeclaredMethod("createRPCNamenode",
        InetSocketAddress.class, Configuration.class, UserGroupInformation.class);
      createRPCNamenode.setAccessible(true);

      final Field nnField = dfsClazz.getDeclaredField("nnAddress");
      nnField.setAccessible(true);
      final InetSocketAddress nnAddress = InetSocketAddress.class.cast(nnField.get(dfs));
      final Field confField = dfsClazz.getDeclaredField("conf");
      confField.setAccessible(true);
      final Configuration conf = Configuration.class.cast(confField.get(dfs));
      final Field ugiField = dfsClazz.getDeclaredField("ugi");
      ugiField.setAccessible(true);
      final UserGroupInformation ugi = UserGroupInformation.class.cast(ugiField.get(dfs));

      final Field namenodeField = dfsClazz.getDeclaredField("namenode");
      namenodeField.setAccessible(true);
      namenodeField.set(dfs, createRPCNamenode.invoke(null, nnAddress, conf, ugi));
      LOG.debug("Set DFSClient namenode to bare RPC");
    } catch (Exception exception) {
      LOG.info("Could not alter DFSClient to be non-retrying.", exception);
    }
  }

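  /**
   * Creates the given number of regions (at least 3) for the table by
   * writing split points between "aaaaa" and "zzzzz" directly to hbase:meta.
   */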
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] family, int numRegions)
      throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] regionStartKeys = new byte[splitKeys.length + 1][];
    System.arraycopy(splitKeys, 0, regionStartKeys, 1, splitKeys.length);
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }

  @SuppressWarnings("deprecation")
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte[][] startKeys)
      throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, TableName.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }

    List<byte[]> rows = getMetaTableRows(htd.getTableName());
    String regionToDeleteInFS = table
      .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
      .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getName(),
        startKeys[i], startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
      count++;
    }

    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }

    Path tableDir = new Path(getDefaultRootDirPath().toString()
      + System.getProperty("file.separator") + htd.getTableName()
      + System.getProperty("file.separator") + regionToDeleteInFS);
    FileSystem.get(c).delete(tableDir, true);

    HConnection conn = table.getConnection();
    conn.clearRegionCache();

    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for (HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }

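  /**
   * Creates hbase:meta rows for each of the given start keys; the i-th
   * region spans startKeys[i] to startKeys[(i + 1) % n].
   * @return the HRegionInfos created
   */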
  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final HTableDescriptor htd, byte[][] startKeys)
      throws IOException {
    HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
        startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

  public List<byte[]> getMetaTableRows() throws IOException {
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      HRegionInfo info = HRegionInfo.getHRegionInfo(result);
      if (info == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        continue;
      }

      if (info.getTable().equals(tableName)) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

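  /**
   * Find the region server carrying the first region of the given table.
   * @return the region server, or null if the table has no regions or no
   *   server picked them up within the retry budget
   */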
  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
      throws IOException, InterruptedException {
    return getRSForFirstRegionInTable(TableName.valueOf(tableName));
  }

  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
      throws IOException, InterruptedException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + metaRows.size() + " rows for table " +
      tableName);
    byte[] firstrow = metaRows.get(0);
    LOG.debug("FirstRow=" + Bytes.toString(firstrow));
    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // HBASE_CLIENT_PAUSE is expressed in milliseconds, so the retry interval
    // must use MILLISECONDS, not MICROSECONDS.
    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MILLISECONDS);
    while (retrier.shouldRetry()) {
      int index = getMiniHBaseCluster().getServerWith(firstrow);
      if (index != -1) {
        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
      }
      retrier.sleepUntilNextRetry();
    }
    return null;
  }

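  /**
   * Starts a MiniMRCluster with a default number of TaskTracker instances (2).
   * @throws IOException if starting the cluster fails
   */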
  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
    return mrCluster;
  }

  private void forceChangeTaskLogDir() {
    Field logDirField;
    try {
      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
      logDirField.setAccessible(true);

      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);

      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    } catch (SecurityException e) {
      throw new RuntimeException(e);
    } catch (NoSuchFieldException e) {
      throw new RuntimeException(e);
    } catch (IllegalArgumentException e) {
      throw new RuntimeException(e);
    } catch (IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }

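  /**
   * Starts a MiniMRCluster pointed at the currently configured filesystem
   * (the mini DFS cluster, if one is running), then copies the resulting
   * job tracker and YARN addresses back into this utility's configuration.
   * @param servers number of TaskTracker instances to start
   */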
2240 private void startMiniMapReduceCluster(final int servers) throws IOException {
2241 if (mrCluster != null) {
2242 throw new IllegalStateException("MiniMRCluster is already running");
2243 }
2244 LOG.info("Starting mini mapreduce cluster...");
2245 setupClusterTestDir();
2246 createDirsAndSetProperties();
2247
2248 forceChangeTaskLogDir();
2249
2250
2251
2252
2253 conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2254
2255
2256
2257 conf.setBoolean("mapreduce.map.speculative", false);
2258 conf.setBoolean("mapreduce.reduce.speculative", false);
2259
2260
2261
2262 mrCluster = new MiniMRCluster(servers,
2263 FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
2264 null, null, new JobConf(this.conf));
2265 JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2266 if (jobConf == null) {
2267 jobConf = mrCluster.createJobConf();
2268 }
2269
2270 jobConf.set("mapred.local.dir",
2271 conf.get("mapred.local.dir"));
2272 LOG.info("Mini mapreduce cluster started");
2273
2274
2275
2276
2277 conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
2278
2279 conf.set("mapreduce.framework.name", "yarn");
2280 conf.setBoolean("yarn.is.minicluster", true);
2281 String rmAddress = jobConf.get("yarn.resourcemanager.address");
2282 if (rmAddress != null) {
2283 conf.set("yarn.resourcemanager.address", rmAddress);
2284 }
2285 String historyAddress = jobConf.get("mapreduce.jobhistory.address");
2286 if (historyAddress != null) {
2287 conf.set("mapreduce.jobhistory.address", historyAddress);
2288 }
2289 String schedulerAddress =
2290 jobConf.get("yarn.resourcemanager.scheduler.address");
2291 if (schedulerAddress != null) {
2292 conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2293 }
2294 }

  /**
   * Stops the previously started <code>MiniMRCluster</code>, if any.
   */
  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }
    // Restore the configuration to point at the local job tracker.
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }

  /**
   * Create a stubbed out RegionServerService, mainly for getting FS.
   */
  public RegionServerServices createMockRegionServerService() throws IOException {
    return createMockRegionServerService((ServerName) null);
  }

  /**
   * Create a stubbed out RegionServerService, mainly for getting FS, with the
   * given RPC server wired in.
   */
  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc)
      throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
    rss.setFileSystem(getTestFileSystem());
    rss.setRpcServer(rpc);
    return rss;
  }

  /**
   * Create a stubbed out RegionServerService, mainly for getting FS, registered
   * under the given server name.
   */
  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
    rss.setFileSystem(getTestFileSystem());
    return rss;
  }

  /**
   * Switches the logger for the given class to DEBUG level.
   * @param clazz  The class for which to switch to debug logging.
   */
  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }
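
  /**
   * Illustrative sketch, not part of the original utility: crank a noisy class
   * up to DEBUG for the duration of a test.
   */
  private void exampleEnableDebug() {
    enableDebug(HRegionServer.class);
  }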

  /**
   * Expire the Master's ZooKeeper session.
   * @throws Exception
   */
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }

  /**
   * Expire a region server's ZooKeeper session.
   * @param index which region server to expire.
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    // Decrement the count in this.conf, which newly spawned masters read.
    decrementMinRegionServerCount(getConfiguration());

    // Each master thread keeps its own copy of the configuration.
    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
      ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
        Math.max(currentCount - 1, 1));
    }
  }

  public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
  }

  /**
   * @deprecated Use {@link #expireSession(ZooKeeperWatcher, boolean)} instead;
   *   the <code>server</code> argument is ignored.
   */
  @Deprecated
  public void expireSession(ZooKeeperWatcher nodeZK, Server server)
      throws Exception {
    expireSession(nodeZK, false);
  }

  /**
   * Expire a ZooKeeper session as recommended in ZooKeeper documentation
   * http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A4: connect a second client
   * with the same session id and password, then close it.
   * @param nodeZK the ZK watcher whose session should be expired
   * @param checkStatus true to check afterwards that we can create an HTable
   *   with the current configuration.
   */
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
      throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Expiry is delivered asynchronously, so first open a monitor connection on
    // the same session: once it has seen the expiration event we know the
    // server has sent it to every watcher sharing this session.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher() {
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event=" + watchedEvent);
      }
    }, sessionID, password);

    // Open a second connection that borrows the session id and password;
    // closing it makes the server expire the session for all its clients.
    ZooKeeper newZK = new ZooKeeper(quorumServers,
      1000, EmptyWatcher.instance, sessionID, password);

    // Wait (at most one second) for the connection to be established before
    // closing; closing an unconnected handle would not touch the session.
    long start = System.currentTimeMillis();
    while (newZK.getState() != States.CONNECTED
        && System.currentTimeMillis() - start < 1000) {
      Thread.sleep(1);
    }
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // The monitor connection has served its purpose.
    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
    }
  }
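
  /**
   * Illustrative sketch, not part of the original utility: expire a region
   * server's session and wait for the master to finish reassigning its
   * regions. Assumes a running mini cluster with at least two region servers.
   */
  private void exampleExpireAndRecover() throws Exception {
    expireRegionServerSession(0);
    waitFor(60000, predicateNoRegionsInTransition());
  }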

  /**
   * Get the mini HBase cluster.
   * @return hbase cluster
   * @see #getHBaseClusterInterface()
   */
  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

  /**
   * Returns the HBaseCluster instance.
   * <p>The returned object can be any of the subclasses of HBaseCluster, so
   * tests referring to it should not assume a mini (or a distributed) cluster.
   * If a test only works against a mini cluster, use
   * {@link #getMiniHBaseCluster()} instead, which avoids the type-cast.
   */
  public HBaseCluster getHBaseClusterInterface() {
    // Depending on the mode the utility was started in, this is either a mini
    // cluster or an adapter over a real distributed cluster.
    return hbaseCluster;
  }

  /**
   * Returns an HBaseAdmin instance shared between all users of this testing
   * utility. Closing it has no effect; it is closed automatically when the
   * cluster shuts down.
   * @return The HBaseAdmin instance.
   * @throws IOException
   */
  public synchronized HBaseAdmin getHBaseAdmin()
      throws IOException {
    if (hbaseAdmin == null) {
      hbaseAdmin = new HBaseAdminForTests(getConfiguration());
    }
    return hbaseAdmin;
  }

  private HBaseAdminForTests hbaseAdmin = null;

  private static class HBaseAdminForTests extends HBaseAdmin {
    public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
        ZooKeeperConnectionException, IOException {
      super(c);
    }

    @Override
    public synchronized void close() throws IOException {
      // Intentionally a no-op: the shared admin is closed by the testing
      // utility itself (via close0()) when the cluster shuts down.
      LOG.warn("close() called on HBaseAdmin instance returned from "
        + "HBaseTestingUtility.getHBaseAdmin()");
    }

    private synchronized void close0() throws IOException {
      super.close();
    }
  }

  /**
   * Returns a ZooKeeperWatcher instance shared between all users of this
   * testing utility. Don't close it; it is closed automatically when the
   * cluster shuts down.
   * @return The ZooKeeperWatcher instance.
   * @throws IOException
   */
  public synchronized ZooKeeperWatcher getZooKeeperWatcher()
      throws IOException {
    if (zooKeeperWatcher == null) {
      zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
        new Abortable() {
          @Override public void abort(String why, Throwable e) {
            throw new RuntimeException("Unexpected abort in HBaseTestingUtility:" + why, e);
          }
          @Override public boolean isAborted() { return false; }
        });
    }
    return zooKeeperWatcher;
  }
  private ZooKeeperWatcher zooKeeperWatcher;

  /**
   * Closes the named region.
   * @param regionName  The region to close.
   * @throws IOException
   */
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  /**
   * Closes the named region.
   * @param regionName  The region to close.
   * @throws IOException
   */
  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }

  /**
   * Closes the region containing the given row.
   * @param row  The row to find the containing region.
   * @param table  The table to find the region on.
   * @throws IOException
   */
  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  /**
   * Closes the region containing the given row.
   * @param row  The row to find the containing region.
   * @param table  The table to find the region on.
   * @throws IOException
   */
  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

  /**
   * Retrieves a splittable region randomly from the given table.
   * @param tableName name of the table
   * @param maxAttempts maximum number of attempts; unlimited for -1
   * @return the chosen HRegion, or null if none was found within maxAttempts
   */
  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
    int regCount = regions.size();
    Set<Integer> attempted = new HashSet<Integer>();
    int idx;
    int attempts = 0;
    do {
      regions = getHBaseCluster().getRegions(tableName);
      if (regCount != regions.size()) {
        // If there was region movement, clear the attempted set.
        attempted.clear();
      }
      regCount = regions.size();
      if (regCount > 0) {
        idx = random.nextInt(regCount);
        // If we have already tried this region, pick another. Count the
        // attempt first: the original 'continue' skipped the increment and
        // could spin forever once every region had been attempted.
        if (attempted.contains(idx)) {
          attempts++;
          continue;
        }
        try {
          regions.get(idx).checkSplit();
          return regions.get(idx);
        } catch (Exception ex) {
          LOG.warn("Caught exception", ex);
          attempted.add(idx);
        }
      }
      attempts++;
    } while (maxAttempts == -1 || attempts < maxAttempts);
    return null;
  }
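
  /**
   * Illustrative sketch, not part of the original utility: pick a splittable
   * region of a (hypothetical) table and ask the admin to split it.
   */
  private void exampleSplitOneRegion() throws Exception {
    HRegion region = getSplittableRegion(TableName.valueOf("exampleTable"), 10);
    if (region != null) {
      getHBaseAdmin().split(region.getRegionName());
    }
  }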

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
    if (dfsCluster != null && dfsCluster.isClusterUp()) {
      throw new IOException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }

  /**
   * Wait until the named table is available, i.e. all its regions are assigned
   * (30 second default timeout).
   * @param table Table to wait on.
   * @throws InterruptedException
   * @throws IOException
   */
  public void waitTableAvailable(byte[] table)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, 30000);
  }

  public void waitTableAvailable(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableAvailable(admin, table, 30000);
  }

  /**
   * Same as above with an explicit timeout, in milliseconds.
   */
  public void waitTableAvailable(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!admin.isTableAvailable(table)) {
      assertTrue("Timed out waiting for table to become available " +
        Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

  /**
   * Waits for a table to be 'enabled': set as enabled in the master's view and
   * with all of its regions assigned (30 second default timeout).
   * @param table Table to wait on.
   * @throws InterruptedException
   * @throws IOException
   */
  public void waitTableEnabled(byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, 30000);
  }

  public void waitTableEnabled(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(admin, table, 30000);
  }

  /**
   * Same as above with an explicit timeout, in milliseconds.
   */
  public void waitTableEnabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    waitTableAvailable(admin, table, timeoutMillis);
    while (!admin.isTableEnabled(table)) {
      assertTrue("Timed out waiting for table to become available and enabled " +
        Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
    // The table is enabled from the master's point of view, but the regions may
    // not be fully online on the region servers yet. Sniff each region with the
    // Canary so callers get back a table that actually serves reads.
    try {
      Canary.sniff(admin, TableName.valueOf(table));
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
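
  /**
   * Illustrative sketch, not part of the original utility: block until a
   * (hypothetical) freshly created table actually serves reads.
   */
  private void exampleWaitForNewTable() throws Exception {
    waitTableEnabled(Bytes.toBytes("exampleTable"));
  }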

  /**
   * Make sure that at least the specified number of region servers are running.
   * @param num minimum number of region servers that should be running
   * @return true if we started any servers
   * @throws IOException
   */
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }

    return startedServer;
  }

  /**
   * Make sure that at least the specified number of region servers are running,
   * not counting servers that are currently stopping or already stopped.
   * @param num minimum number of region servers that should be running
   * @return true if we started any servers
   * @throws IOException
   */
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
        getMiniHBaseCluster().getRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:" + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }

  /**
   * Clones the passed configuration <code>c</code> and sets a new user into the
   * clone. Use it for getting new instances of FileSystem; only matters when
   * the filesystem is a DistributedFileSystem, whose cache is keyed by user.
   * @param c Initial configuration
   * @param differentiatingSuffix Suffix to differentiate this user from others.
   * @return A User whose name is the current user's name plus the suffix.
   * @throws IOException
   */
  public static User getDifferentUser(final Configuration c,
    final String differentiatingSuffix)
      throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    // Else distributed filesystem: create a new user so FileSystem.get hands
    // back a fresh instance rather than the cached one.
    String username = User.getCurrent().getName() + differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
      new String[]{"supergroup"});
    return user;
  }

  /**
   * Sets <code>maxRecoveryErrorCount</code> on the passed DFS output stream via
   * reflection. In older (pre-append) HDFS versions the count is hard-coded to
   * 5, which makes recovery-related tests linger; lowering it keeps them fast.
   * @param stream A DFSClient.DFSOutputStream.
   * @param max The new maximum recovery error count.
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?>[] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }

  /**
   * Wait until all regions of a table are assigned (60 second timeout).
   * @param tableName the table name
   * @throws IOException
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }

  /**
   * Wait until all regions of a table have a non-null server entry in
   * hbase:meta, meaning every region has been deployed and the master has
   * recorded its location.
   * @param tableName the table name
   * @param timeout timeout, in milliseconds
   * @throws IOException
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
      throws IOException {
    final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
    try {
      waitFor(timeout, 200, true, new Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          boolean allRegionsAssigned = true;
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          ResultScanner s = meta.getScanner(scan);
          try {
            Result r;
            while ((r = s.next()) != null) {
              byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
              HRegionInfo info = HRegionInfo.parseFromOrNull(b);
              if (info != null && info.getTable().equals(tableName)) {
                b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                allRegionsAssigned &= (b != null);
              }
            }
          } finally {
            s.close();
          }
          return allRegionsAssigned;
        }
      });
    } finally {
      meta.close();
    }
  }

  /**
   * Do a small get/scan against one store. This is required because store has
   * no actual methods of querying itself, and relies on StoreScanner.
   */
  public static List<Cell> getFromStoreFile(HStore store,
                                            Get get) throws IOException {
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
      scan.getFamilyMap().get(store.getFamily().getName()),
      // readpoint 0
      0);

    List<Cell> result = new ArrayList<Cell>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // Verify that we are on the row we want:
      Cell kv = result.get(0);
      if (!CellUtil.matchingRow(kv, get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }
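
  /**
   * Illustrative sketch, not part of the original utility: query a single
   * store for one (hypothetical) row, bypassing the region-level API.
   */
  private static void exampleStoreQuery(HStore store) throws IOException {
    List<Cell> cells = getFromStoreFile(store, new Get(Bytes.toBytes("row1")));
    LOG.info("Store returned " + cells.size() + " cells");
  }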

  /**
   * Create region split keys between startKey and endKey.
   * @param startKey first key of the row range
   * @param endKey last key of the row range
   * @param numRegions the number of regions to be created; must be greater than 3
   * @return the split keys, with the empty key prepended for the first region
   */
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue("numRegions must be greater than 3", numRegions > 3);
    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] result = new byte[tmpSplitKeys.length + 1][];
    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
    // The first region starts at the empty key.
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }

  /**
   * Do a small get/scan against one store. This is required because store has
   * no actual methods of querying itself, and relies on StoreScanner.
   */
  public static List<Cell> getFromStoreFile(HStore store,
                                            byte[] row,
                                            NavigableSet<byte[]> columns
      ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }

  /**
   * Gets a ZooKeeperWatcher whose Abortable rethrows aborts as runtime
   * exceptions, so ZK trouble fails the test immediately.
   * @param TEST_UTIL the utility to take the configuration from
   */
  public static ZooKeeperWatcher getZooKeeperWatcher(
      HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
      IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
      "unittest", new Abortable() {
        boolean aborted = false;

        @Override
        public void abort(String why, Throwable e) {
          aborted = true;
          throw new RuntimeException("Fatal ZK error, why=" + why, e);
        }

        @Override
        public boolean isAborted() {
          return aborted;
        }
      });
    return zkw;
  }

  /**
   * Creates an unassigned znode for the region and transitions it through
   * OFFLINE and OPENING to the OPENED state, mimicking what a region server
   * does during a normal region open.
   * @return the ZooKeeperWatcher used to drive the transitions
   */
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw, region.getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName, version);
    return zkw;
  }

  public static void assertKVListsEqual(String additionalMsg,
      final List<? extends Cell> expected,
      final List<? extends Cell> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i;
    for (i = 0; i < minLen
        && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
        ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
        "Expected and actual KV arrays differ at position " + i + ": " +
        safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
        safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }

  private static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }

  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
      + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
      + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
        HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }

  /** Creates a random table with the given parameters. */
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
      " regions, " + numFlushes + " storefiles per region, " +
      numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
      "\n");

    // Seed off the table name so repeated runs generate the same data.
    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableName, cfBytes,
      maxVersions,
      Bytes.toBytes(String.format(keyFormat, splitStartKey)),
      Bytes.toBytes(String.format(keyFormat, splitEndKey)),
      numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
          actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
              "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
              ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }

    return table;
  }
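
  /**
   * Illustrative sketch, not part of the original utility: a small two-family
   * table with deterministic "random" data, handy for compaction and scanner
   * tests. All parameter values are hypothetical.
   */
  private void exampleCreateRandomTable() throws IOException, InterruptedException {
    HTable t = createRandomTable("exampleRandomTable",
      Arrays.asList("cf0", "cf1"),
      3,   // maxVersions
      10,  // numColsPerRow
      2,   // numFlushes
      4,   // numRegions
      50); // numRowsPerFlush
    t.close();
  }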

  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;
  private static Random random = new Random();

  /**
   * Returns a random port in the [0xc000, 0xfffe) range. These ports cannot be
   * registered with IANA and are intended for dynamic allocation.
   */
  public static int randomPort() {
    return MIN_RANDOM_PORT
      + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
  }

  /**
   * Returns a random free port and marks that port as taken. Not thread-safe;
   * expected to be called from single-threaded test setup code.
   */
  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      if (takenRandomPorts.contains(port)) {
        continue;
      }
      takenRandomPorts.add(port);

      // Probe the port by binding to it; if the bind fails, try another one.
      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        port = 0;
      }
    } while (port == 0);
    return port;
  }
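
  /**
   * Illustrative sketch, not part of the original utility: reserve a free port
   * for a test daemon, here the master info server.
   */
  private void exampleUseRandomFreePort() {
    conf.setInt(HConstants.MASTER_INFO_PORT, randomFreePort());
  }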

  /** Returns a random multicast address in the 226.1.1.0/24 range. */
  public static String randomMultiCastAddress() {
    return "226.1.1." + random.nextInt(254);
  }

  /**
   * Waits until a TCP server is up at host:port, retrying for up to ten
   * seconds before rethrowing the last connect failure.
   */
  public static void waitForHostPort(String host, int port)
      throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }

  /**
   * Creates a pre-split table for load testing, with one column family using
   * the given compression and encoding. If the table already exists, logs a
   * warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd);
  }

  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Create a table with regions pre-split along hex-string boundaries. The
      // number of splits is: live region servers * regions per server.
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      int regionsPerServer = conf.getInt(REGIONS_PER_SERVER_KEY, DEFAULT_REGIONS_PER_SERVER);
      totalNumberOfRegions = numberOfServers * regionsPerServer;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
        "pre-splitting table into " + totalNumberOfRegions + " regions " +
        "(default regions per server: " + regionsPerServer + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
        totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + desc.getTableName() +
        " already exists, continuing");
    } finally {
      admin.close();
    }
    return totalNumberOfRegions;
  }
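
  /**
   * Illustrative sketch, not part of the original utility: create a pre-split
   * load-test table with no compression or encoding. Names are hypothetical.
   */
  private static void examplePreSplitLoadTestTable(Configuration conf) throws IOException {
    int regions = createPreSplitLoadTestTable(conf,
      TableName.valueOf("exampleLoadTable"), Bytes.toBytes("f"),
      Compression.Algorithm.NONE, DataBlockEncoding.NONE);
    LOG.info("Pre-split into " + regions + " regions");
  }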

  public static int getMetaRSPort(Configuration conf) throws IOException {
    HTable table = new HTable(conf, TableName.META_TABLE_NAME);
    HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
    table.close();
    return hloc.getPort();
  }

  /**
   * Due to async racing issues, a region may not be in the online region list
   * of a region server yet, right after the assignment znode is deleted and
   * the new assignment is recorded in master; keep checking until the timeout.
   */
  public void assertRegionOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) return;
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
      + " on server " + server);
  }

  /**
   * Check to make sure the region is open on the specified region server, but
   * not on any other one.
   */
  public void assertRegionOnlyOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) {
        List<JVMClusterUtil.RegionServerThread> rsThreads =
          getHBaseCluster().getLiveRegionServerThreads();
        for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) {
          HRegionServer rs = rsThread.getRegionServer();
          if (server.equals(rs.getServerName())) {
            continue;
          }
          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
          for (HRegion r : hrs) {
            assertTrue("Region should not be double assigned",
              r.getRegionId() != hri.getRegionId());
          }
        }
        return;
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
      + " on server " + server);
  }

  /**
   * Creates a standalone HRegion for testing, backed by the data test
   * directory, with a single column family.
   */
  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.addFamily(hcd);
    HRegionInfo info =
      new HRegionInfo(TableName.valueOf(tableName), null, null, false);
    HRegion region =
      HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }

  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, predicate);
  }

  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, interval, predicate);
  }

  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, long interval,
      boolean failIfTimeout, Predicate<E> predicate) throws E {
    return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
  }

  /**
   * Returns a {@link Predicate} that is satisfied once the master reports no
   * regions in transition.
   */
  public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
          .getAssignmentManager().getRegionStates();
        return !regionStates.isRegionsInTransition();
      }
    };
  }

  /**
   * Returns a {@link Predicate} that is satisfied once the given table is
   * enabled.
   */
  public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return getHBaseAdmin().isTableEnabled(tableName);
      }
    };
  }
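
  /**
   * Illustrative sketch, not part of the original utility: combine waitFor
   * with the predicates above to block a test until a (hypothetical) table is
   * enabled, failing if the timeout elapses.
   */
  private void exampleWaitUsingPredicate() throws Exception {
    waitFor(30000, 100, true,
      predicateTableEnabled(TableName.valueOf("exampleTable")));
  }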

  /**
   * Creates column descriptors for every combination of the supported
   * compression, data block encoding, and bloom filter types.
   * @return the list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

  /**
   * Creates column descriptors for every combination of the supported
   * compression, data block encoding, and bloom filter types.
   * @param prefix family names prefix
   * @return the list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
    List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
    long familyId = 0;
    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
        for (BloomType bloomType : BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          HColumnDescriptor htd = new HColumnDescriptor(name);
          htd.setCompressionType(compressionType);
          htd.setDataBlockEncoding(encodingType);
          htd.setBloomFilterType(bloomType);
          htds.add(htd);
          familyId++;
        }
      }
    }
    return htds;
  }

  /**
   * Get supported compression algorithms.
   * @return the compression algorithms whose codecs load on this machine.
   */
  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // This algorithm's codec is not available on this machine; skip it.
      }
    }
    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
  }
}