/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.tool.Canary;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;
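/**
 * Facility for testing HBase. Manages one mini DFS, ZooKeeper and HBase cluster
 * at a time, along with the temporary directories they write to, and offers
 * helpers for creating, loading and inspecting test tables.
 *
 * A typical usage sketch (all methods are defined below):
 * <pre>
 * HBaseTestingUtility util = new HBaseTestingUtility();
 * util.startMiniCluster();                 // DFS + ZK + HBase
 * HTable table = util.createTable(Bytes.toBytes("t"), Bytes.toBytes("f"));
 * util.loadTable(table, Bytes.toBytes("f"));
 * util.shutdownMiniCluster();
 * </pre>
 */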
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HBaseTestingUtility extends HBaseCommonTestingUtility {
  private MiniZooKeeperCluster zkCluster = null;

  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";

  public static final int DEFAULT_REGIONS_PER_SERVER = 5;

  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  private boolean miniClusterRunning;

  private String hadoopLogDir;

  private File clusterTestDir = null;

  private Path dataTestDirOnTestFS = null;

  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  private static String FS_URI;

  private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();

  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Boolean.FALSE },
      { Boolean.TRUE }
    });

  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();

  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
    Compression.Algorithm.NONE, Compression.Algorithm.GZ
  };

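  /**
   * Create all combinations of Bloom filters and compression algorithms for testing.
   */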
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

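  /**
   * Create all combinations of memstore timestamp and tag usage for parameterized tests.
   */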
  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    super(conf);

    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }

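  /**
   * Create an HBaseTestingUtility whose HBase root directory points at the local
   * test data directory. Use this when the tests do not need a mini DFS cluster.
   * @return HBaseTestingUtility that uses the local fs for temp files.
   */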
  public static HBaseTestingUtility createLocalHTU() {
    Configuration c = HBaseConfiguration.create();
    return createLocalHTU(c);
  }

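  /**
   * Like {@link #createLocalHTU()} but wraps the passed configuration.
   * @param c configuration to use for the new utility
   */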
  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    HBaseTestingUtility htu = new HBaseTestingUtility(c);
    String dataTestDir = htu.getDataTestDir().toString();
    htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
    return htu;
  }

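  /**
   * Set the number of retries the HDFS client will make; with 0 retries the
   * DFSClient is also patched to talk to the namenode without retry wrappers.
   */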
  @Deprecated
  public void setHDFSClientRetry(final int retries) {
    this.conf.setInt("hdfs.client.retries.number", retries);
    if (0 == retries) {
      makeDFSClientNonRetrying();
    }
  }

  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

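  /**
   * Home our test data on the test filesystem and point the relevant Hadoop
   * properties ("hadoop.log.dir", "hadoop.tmp.dir", "mapred.local.dir") at
   * subdirectories of the test path so concurrent test runs do not collide.
   */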
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    createSubDir(
      "mapred.local.dir",
      testPath, "mapred-local-dir");

    return testPath;
  }

  private void createSubDirAndSystemProperty(
      String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          " Erasing configuration value by system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

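  /**
   * @return the base directory for per-test data on the test filesystem
   *   (the filesystem working directory plus "test-data")
   */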
  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

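  /**
   * @return the descriptor of the hbase:meta table, read from the filesystem
   */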
  public HTableDescriptor getMetaTableDescriptor() {
    try {
      return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
    } catch (IOException e) {
      throw new RuntimeException("Unable to create META table descriptor", e);
    }
  }

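  /**
   * @return the directory where the mini cluster writes its data, creating it lazily
   */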
  Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      return;
    }

    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();

    boolean b = deleteOnExit();
    if (b) clusterTestDir.deleteOnExit();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
  }

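  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()},
   * to write temporary test data.  Call this method after setting up the mini dfs
   * cluster if the test relies on it.
   * @return a unique path in the test filesystem
   */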
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test on test fs dir already setup in "
        + dataTestDirOnTestFS.toString());
      return;
    }

    FileSystem fs = getTestFileSystem();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      File dataTestDir = new File(getDataTestDir().toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
      dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
    } else {
      Path base = getBaseTestDirOnTestFS();
      String randomStr = UUID.randomUUID().toString();
      dataTestDirOnTestFS = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
    }
  }

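  /**
   * Cleans the test data directory on the test filesystem.
   * @return True if we removed the test dirs
   */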
  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret)
      dataTestDirOnTestFS = null;
    return ret;
  }

  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }

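  /**
   * Start a minidfscluster.
   * @param servers How many DNs to start.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */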
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
      throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

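  /**
   * Start a minidfscluster.
   * @param servers How many DNs to start.
   * @param hosts hostnames DNs to run on.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */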
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    try {
      Method m = Class.forName("org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream")
        .getMethod("setShouldSkipFsyncForTesting", new Class<?>[] { boolean.class });
      m.invoke(null, new Object[] { true });
    } catch (ClassNotFoundException e) {
      LOG.info("EditLogFileOutputStream not found");
    }

    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
      setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
      setLevel(org.apache.log4j.Level.ERROR);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    this.dfsCluster.waitClusterUp();

    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, racks, hosts, null);

    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    this.dfsCluster.waitClusterUp();

    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
      null, null, null);
    return dfsCluster;
  }

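  /** Creates the needed directories and sets the corresponding properties before starting
   *  the HDFS and map-reduce mini-clusters. */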
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapred.local.dir");
    createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
  }

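  /**
   * Get the HBase setting for short-circuit reads from the system property
   * "hbase.tests.use.shortcircuit.reads", falling back to the configuration.
   * This allows the setting to be overridden on the command line; if neither
   * is set, short-circuit reads default to off.
   */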
  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }

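  /** Enable short-circuit reads, unless configured differently.
   * Sets both HBase and HDFS settings, including skipping the hdfs checksum checks.
   */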
  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      // read short circuit, for hdfs
      conf.set("dfs.block.local-path-access.user", curUser);
      // read short circuit, for hbase
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      // Skip checking checksum, for the hdfs client and the datanode
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

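  /**
   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
   * or does nothing.
   */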
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }

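  /**
   * Call this if you only want a zk cluster.
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */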
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
      throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
      throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0) {
      // If there is a port in the config file, we use it.
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }

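  /**
   * Shuts down zk cluster created by call to {@link #startMiniZKCluster()}
   * or does nothing.
   */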
  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }

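  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @return Mini hbase cluster instance created.
   * @see #shutdownMiniCluster()
   */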
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  public MiniHBaseCluster startMiniCluster(final int numSlaves)
      throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }

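  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under System property test.build.data, cleaned
   * up on exit.
   * @param numMasters Number of masters to start up.
   * @param numSlaves Number of slaves (region servers) to start up.
   * @param dataNodeHosts hostnames DNs to run on; if set, overrides the DN count.
   * @return Mini hbase cluster instance created.
   */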
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    return startMiniCluster(
      numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }

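  /**
   * Same as {@link #startMiniCluster(int, int, String[], Class, Class)}, but with a custom
   * number of datanodes.
   * @param numDataNodes Number of data nodes to start up.
   */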
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
      numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // If we already put up a cluster, fail.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up the mini dfs cluster.
    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    // Start up a zk cluster if one is not already running.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Start the MiniHBaseCluster
    return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException {
    return startMiniHBaseCluster(numMasters, numSlaves, null, null);
  }

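  /**
   * Starts up the mini hbase cluster.  Usually used after call to
   * {@link #startMiniDFSCluster(int)} and {@link #startMiniZKCluster()} when doing
   * stepped startup of clusters.
   * @return Reference to the mini hbase cluster.
   */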
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
      final int numSlaves, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws IOException, InterruptedException {
    // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
    createRootDir();

    // These settings will make the server wait until this exact number of region servers is
    // connected.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster =
      new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);

    // Don't leave here till we've done a successful scan of the hbase:meta
    HTable t = new HTable(c, TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin(); // create immediately the hbaseAdmin
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster) this.hbaseCluster;
  }

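  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
   * @param servers number of region servers
   */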
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the hbase:meta
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster) this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
      MiniHBaseCluster.class.getName());
  }

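  /**
   * Stops mini hbase, zk, and hdfs clusters.
   * @see #startMiniCluster(int)
   */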
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }

  @Override
  public boolean cleanupTestDir() throws IOException {
    boolean ret = super.cleanupTestDir();
    if (deleteDir(this.clusterTestDir)) {
      this.clusterTestDir = null;
      return ret;
    }
    return false;
  }

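  /**
   * Shutdown HBase mini cluster.  Does not shutdown zk or dfs if running.
   */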
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    // unset the configuration for MIN and MAX RS to start
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shutdown zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }

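  /**
   * Returns the path to the default root dir the minicluster uses.
   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   */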
  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }

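  /**
   * Creates an hbase rootdir in the user's home directory and writes the hbase
   * version file.  Normally you won't make use of this method: the root dir is
   * created for you as part of mini cluster startup.
   * @return Fully qualified path to the hbase root dir
   */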
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

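  /**
   * Create a table with a single column family.
   * @param tableName name of the table
   * @param family name of the column family
   * @return An HTable instance for the created table.
   */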
  public HTable createTable(String tableName, String family)
      throws IOException {
    return createTable(TableName.valueOf(tableName), new String[] { family });
  }

  public HTable createTable(byte[] tableName, byte[] family)
      throws IOException {
    return createTable(TableName.valueOf(tableName), new byte[][] { family });
  }

  public HTable createTable(TableName tableName, String[] families)
      throws IOException {
    List<byte[]> fams = new ArrayList<byte[]>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  public HTable createTable(TableName tableName, byte[] family)
      throws IOException {
    return createTable(tableName, new byte[][] { family });
  }

  public HTable createTable(byte[] tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
      new Configuration(getConfiguration()));
  }

  public HTable createTable(TableName tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
      new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
      startKey, endKey, numRegions);
  }

  public HTable createTable(String tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
      startKey, endKey, numRegions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
      throws IOException {
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable bloom filters: tests have hard-coded counts of what to expect in
      // block cache, etc., and blooms being on can interfere.
      hcd.setBloomFilterType(BloomType.NONE);
      htd.addFamily(hcd);
    }
    getHBaseAdmin().createTable(htd);
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(c, htd.getTableName());
  }

  public HTable createTable(HTableDescriptor htd, byte[][] splitRows)
      throws IOException {
    getHBaseAdmin().createTable(htd, splitRows);
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(getConfiguration(), htd.getTableName());
  }

  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    return createTable(new HTableDescriptor(tableName), families, c);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setBloomFilterType(BloomType.NONE);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  public HTable createTable(TableName tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
      throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    return createTable(TableName.valueOf(tableName),
      families, numVersions, blockSize);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions)
        .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
      throws IOException {
    return createTable(TableName.valueOf(tableName), family, splitRows);
  }

  public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    desc.addFamily(hcd);
    getHBaseAdmin().createTable(desc, splitRows);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, splitRows);
    waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
    return new HTable(getConfiguration(), tableName);
  }

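  /**
   * Drop an existing table, disabling it first if needed.
   * @param tableName existing table
   */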
  public void deleteTable(String tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  public void deleteTable(byte[] tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  public void deleteTable(TableName tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }

  public final static byte[] fam1 = Bytes.toBytes("colfamily11");
  public final static byte[] fam2 = Bytes.toBytes("colfamily21");
  public final static byte[] fam3 = Bytes.toBytes("colfamily31");
  public static final byte[][] COLUMNS = { fam1, fam2, fam3 };
  private static final int MAXVERSIONS = 3;

  public static final char FIRST_CHAR = 'a';
  public static final char LAST_CHAR = 'z';
  public static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR };
  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);

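  /**
   * Create a table descriptor of name <code>name</code> with fam1, fam2 and fam3 as families.
   * @param name Name to give table.
   * @param minVersions Minimum versions per family.
   * @param versions Maximum versions per family.
   * @param ttl Time to live, in seconds.
   * @param keepDeleted Whether deleted cells are kept.
   * @return The new table descriptor.
   */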
  public HTableDescriptor createTableDescriptor(final String name,
      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
    for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
      htd.addFamily(new HColumnDescriptor(cfName)
        .setMinVersions(minVersions)
        .setMaxVersions(versions)
        .setKeepDeletedCells(keepDeleted)
        .setBlockCacheEnabled(false)
        .setTimeToLive(ttl)
      );
    }
    return htd;
  }

  public HTableDescriptor createTableDescriptor(final String name) {
    return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
      MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
  }

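  /**
   * Create an HRegion that writes to the local tmp dirs.
   * @param desc table descriptor
   * @param startKey first key of the region
   * @param endKey last key of the region
   * @return the new region
   */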
  public HRegion createLocalHRegion(HTableDescriptor desc, byte[] startKey,
      byte[] endKey)
      throws IOException {
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
    return createLocalHRegion(hri, desc);
  }

  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
  }

  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog)
      throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
  }

  public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
      String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
      HLog hlog, byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.setReadOnly(isReadOnly);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setMaxVersions(Integer.MAX_VALUE);
      htd.addFamily(hcd);
    }
    htd.setDurability(durability);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    return createLocalHRegion(info, htd, hlog);
  }

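  /**
   * Truncate a table by deleting all of its rows; the table and its regions
   * are preserved.
   * @param tableName table to truncate
   * @return An HTable for the passed tableName
   */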
  public HTable truncateTable(byte[] tableName) throws IOException {
    return truncateTable(TableName.valueOf(tableName));
  }

  public HTable truncateTable(TableName tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan = table.getScanner(scan);
    resScan.close();
    return table;
  }

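  /**
   * Load table with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Family
   * @return Count of rows loaded.
   */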
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    return loadTable(t, new byte[][] { f });
  }

  public int loadTable(final HTable t, final byte[] f, boolean writeToWAL) throws IOException {
    return loadTable(t, new byte[][] { f }, null, writeToWAL);
  }

  public int loadTable(final HTable t, final byte[][] f) throws IOException {
    return loadTable(t, f, null);
  }

  public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
    return loadTable(t, f, value, true);
  }

  public int loadTable(final HTable t, final byte[][] f, byte[] value, boolean writeToWAL)
      throws IOException {
    t.setAutoFlush(false);
    int rowCount = 0;
    for (byte[] row : HBaseTestingUtility.ROWS) {
      Put put = new Put(row);
      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
      for (int i = 0; i < f.length; i++) {
        put.add(f[i], null, value != null ? value : row);
      }
      t.put(put);
      rowCount++;
    }
    t.flushCommits();
    return rowCount;
  }

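  /**
   * A tracker for tracking and validating table rows
   * generated with {@link HBaseTestingUtility#loadTable(HTable, byte[])}.
   */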
  public static class SeenRowTracker {
    int dim = 'z' - 'a' + 1;
    int[][][] seenRows = new int[dim][dim][dim]; // count of how many times the row is seen
    byte[] startRow;
    byte[] stopRow;

    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
      this.startRow = startRow;
      this.stopRow = stopRow;
    }

    void reset() {
      for (byte[] row : ROWS) {
        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
      }
    }

    int i(byte b) {
      return b - 'a';
    }

    public void addRow(byte[] row) {
      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
    }

    // Validate that all the rows between startRow and stopRow are seen exactly once, and
    // all other rows none.
    public void validate() {
      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
            int count = seenRows[i(b1)][i(b2)][i(b3)];
            int expectedCount = 0;
            if (Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0
                && Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0) {
              expectedCount = 1;
            }
            if (count != expectedCount) {
              String row = new String(new byte[] { b1, b2, b3 });
              throw new RuntimeException("Row:" + row + " has a seen count of " + count +
                " instead of " + expectedCount);
            }
          }
        }
      }
    }
  }

  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
    return loadRegion(r, f, false);
  }

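  /**
   * Load region with rows from 'aaa' to 'zzz', retrying while the region is busy.
   * @param r Region
   * @param f Family
   * @param flush flush the cache after each first-letter batch if true
   * @return Count of rows loaded.
   */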
  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
      throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.setDurability(Durability.SKIP_WAL);
          put.add(f, null, k);

          // Retry with exponential backoff while the region reports itself too busy.
          int preRowCount = rowCount;
          int pause = 10;
          int maxPause = 1000;
          while (rowCount == preRowCount) {
            try {
              r.put(put);
              rowCount++;
            } catch (RegionTooBusyException e) {
              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
              Threads.sleep(pause);
            }
          }
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }

  public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow)
      throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Put put = new Put(data);
      put.add(f, null, data);
      t.put(put);
    }
  }

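  /**
   * Return the number of rows in the given table.
   */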
  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  public int countRows(final HTable table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

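  /**
   * Return an MD5 digest of all the row keys in the given table, for verifying
   * that two tables hold the same rows.
   */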
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // Return the finished digest; MessageDigest.toString() would not reflect the data.
    return Bytes.toStringBinary(digest.digest());
  }

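  /**
   * Creates multiple regions in the given table, one per key in {@link #KEYS}.
   * @param table The table to split into multiple regions.
   * @param columnFamily The family to ensure exists in the table.
   * @return count of regions created.
   */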
  public int createMultiRegions(HTable table, byte[] columnFamily)
      throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily);
  }

  /** All the row values for the data loaded by {@link #loadTable(HTable, byte[])} */
  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3];
  static {
    int i = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          ROWS[i][0] = b1;
          ROWS[i][1] = b2;
          ROWS[i][2] = b3;
          i++;
        }
      }
    }
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily)
      throws IOException {
    return createMultiRegions(c, table, columnFamily, KEYS);
  }

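  /**
   * Replace the DFSClient's namenode proxy with a bare RPC proxy so that test
   * failures surface immediately instead of being retried.
   */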
  void makeDFSClientNonRetrying() {
    if (null == this.dfsCluster) {
      LOG.debug("dfsCluster has not started, can't make client non-retrying.");
      return;
    }
    try {
      final FileSystem filesystem = this.dfsCluster.getFileSystem();
      if (!(filesystem instanceof DistributedFileSystem)) {
        LOG.debug("dfsCluster is not backed by a DistributedFileSystem, " +
          "can't make client non-retrying.");
        return;
      }

      final DistributedFileSystem fs = (DistributedFileSystem) filesystem;

      final Field dfsField = fs.getClass().getDeclaredField("dfs");
      dfsField.setAccessible(true);
      final Class<?> dfsClazz = dfsField.getType();
      final DFSClient dfs = DFSClient.class.cast(dfsField.get(fs));

      // The private DFSClient.createRPCNamenode() builds a namenode proxy without
      // the retry wrappers.
      final Method createRPCNamenode = dfsClazz.getDeclaredMethod("createRPCNamenode",
        InetSocketAddress.class, Configuration.class, UserGroupInformation.class);
      createRPCNamenode.setAccessible(true);

      // Grab the arguments the above needs.
      final Field nnField = dfsClazz.getDeclaredField("nnAddress");
      nnField.setAccessible(true);
      final InetSocketAddress nnAddress = InetSocketAddress.class.cast(nnField.get(dfs));
      final Field confField = dfsClazz.getDeclaredField("conf");
      confField.setAccessible(true);
      final Configuration conf = Configuration.class.cast(confField.get(dfs));
      final Field ugiField = dfsClazz.getDeclaredField("ugi");
      ugiField.setAccessible(true);
      final UserGroupInformation ugi = UserGroupInformation.class.cast(ugiField.get(dfs));

      // Replace the namenode proxy with a bare RPC one.
      final Field namenodeField = dfsClazz.getDeclaredField("namenode");
      namenodeField.setAccessible(true);
      namenodeField.set(dfs, createRPCNamenode.invoke(null, nnAddress, conf, ugi));
      LOG.debug("Set DFSClient namenode to bare RPC");
    } catch (Exception exception) {
      LOG.info("Could not alter DFSClient to be non-retrying.", exception);
    }
  }

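  /**
   * Creates the specified number of regions in the specified table.
   * @param c configuration
   * @param table the table
   * @param family the family to ensure exists
   * @param numRegions number of regions to create, at least 3
   * @return count of regions created
   */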
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] family, int numRegions)
      throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] regionStartKeys = new byte[splitKeys.length + 1][];
    System.arraycopy(splitKeys, 0, regionStartKeys, 1, splitKeys.length);
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }

  @SuppressWarnings("deprecation")
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte[][] startKeys)
      throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, TableName.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }

    // The mini cluster setup already created a single region with empty start
    // and end keys; remember it so it can be removed after the custom regions
    // below are added.
    List<byte[]> rows = getMetaTableRows(htd.getTableName());
    String regionToDeleteInFS = table
      .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
      .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // Add the custom regions to hbase:meta.
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getName(),
        startKeys[i], startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
      count++;
    }
    // Remove the "old" (or previous) single region from meta.
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    // Remove the "old" region from the filesystem.
    Path tableDir = new Path(getDefaultRootDirPath().toString()
      + System.getProperty("file.separator") + htd.getTableName()
      + System.getProperty("file.separator") + regionToDeleteInFS);
    FileSystem.get(c).delete(tableDir);
    // Flush the cache of regions.
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    // Assign all the new regions IF the table is enabled.
    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for (HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }

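  /**
   * Create rows in hbase:meta for regions of the specified table with the specified
   * start keys.  The first startKey should be a 0 length byte array if you
   * want to form a proper range of regions.
   * @return list of region info for regions added to meta
   */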
  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final HTableDescriptor htd, byte[][] startKeys)
      throws IOException {
    HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
        startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

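  /**
   * Returns all rows from the hbase:meta table.
   */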
  public List<byte[]> getMetaTableRows() throws IOException {
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

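  /**
   * Returns all rows from the hbase:meta table for a given user table.
   */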
  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      HRegionInfo info = HRegionInfo.getHRegionInfo(result);
      if (info == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        continue;
      }

      if (info.getTable().equals(tableName)) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

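  /**
   * Tool to get the reference to the region server object that holds the
   * region of the specified user table.
   * @see #getRSForFirstRegionInTable(TableName)
   */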
  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
      throws IOException, InterruptedException {
    return getRSForFirstRegionInTable(TableName.valueOf(tableName));
  }

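  /**
   * Tool to get the reference to the region server object that holds the
   * region of the specified user table: it searches hbase:meta for the region's
   * row, asks the mini cluster which server holds it, and returns that server,
   * retrying with the configured client pause between attempts.
   * @param tableName user table to lookup in hbase:meta
   * @return region server that holds it, null if the row doesn't exist
   */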
  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
      throws IOException, InterruptedException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + metaRows.size() + " rows for table " +
      tableName);
    byte[] firstrow = metaRows.get(0);
    LOG.debug("FirstRow=" + Bytes.toString(firstrow));
    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // The client pause is in milliseconds.
    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MILLISECONDS);
    while (retrier.shouldRetry()) {
      int index = getMiniHBaseCluster().getServerWith(firstrow);
      if (index != -1) {
        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
      }
      // Came back -1.  Region may not be online yet.  Sleep a while.
      retrier.sleepUntilNextRetry();
    }
    return null;
  }

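  /**
   * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>'s.
   * @throws IOException When starting the cluster fails.
   */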
  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
    return mrCluster;
  }

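  /**
   * TaskLog caches the value of the hadoop.log.dir system property in a static
   * final LOG_DIR field, so changing the property alone has no effect; rewrite
   * the field via reflection to point task logs at our test log dir.
   */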
  private void forceChangeTaskLogDir() {
    Field logDirField;
    try {
      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
      logDirField.setAccessible(true);

      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);

      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    } catch (SecurityException e) {
      throw new RuntimeException(e);
    } catch (NoSuchFieldException e) {
      throw new RuntimeException(e);
    } catch (IllegalArgumentException e) {
      throw new RuntimeException(e);
    } catch (IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }

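  /**
   * Starts a <code>MiniMRCluster</code> on the filesystem named by FS_URI if set,
   * otherwise on the default filesystem from the configuration.
   * @param servers  The number of <code>TaskTracker</code>'s to start.
   * @throws IOException When starting the cluster fails.
   */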
  private void startMiniMapReduceCluster(final int servers) throws IOException {
    if (mrCluster != null) {
      throw new IllegalStateException("MiniMRCluster is already running");
    }
    LOG.info("Starting mini mapreduce cluster...");
    setupClusterTestDir();
    createDirsAndSetProperties();

    forceChangeTaskLogDir();

    // Tests can otherwise fail when the node manager kills heavily over-committed
    // task JVMs; raise the allowed virtual/physical memory ratio so they survive.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    // Speculative execution only adds noise (and flakiness) in tests; turn it off.
    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);

    // Allow the user to override the FS URI for this map-reduce cluster to use.
    mrCluster = new MiniMRCluster(servers,
        FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
        null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
    }

    // MiniMRCluster overwrites this setting; restore the one we configured.
    jobConf.set("mapred.local.dir", conf.get("mapred.local.dir"));
    LOG.info("Mini mapreduce cluster started");

    // The mini MR cluster updates its own copy of the configuration. HBase MR jobs
    // need several of those settings, so copy the relevant properties back into
    // our conf.
    conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
    // Needed for mrv2 support; mr1 ignores this.
    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = jobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
    if (historyAddress != null) {
      conf.set("mapreduce.jobhistory.address", historyAddress);
    }
    String schedulerAddress =
        jobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }

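  /**
   * Stops the previously started <code>MiniMRCluster</code>.
   */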
  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }
    // Restore configuration to point to the local jobtracker.
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }

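  /**
   * Creates a stubbed-out RegionServerServices, mainly for getting at the test
   * filesystem and ZooKeeper watcher without a running region server.
   */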
  public RegionServerServices createMockRegionServerService() throws IOException {
    return createMockRegionServerService((ServerName) null);
  }

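  /**
   * Creates a stubbed-out RegionServerServices backed by the given RPC server,
   * for tests that exercise server-side RPC paths.
   */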
  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc)
      throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
    rss.setFileSystem(getTestFileSystem());
    rss.setRpcServer(rpc);
    return rss;
  }

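  /**
   * Creates a stubbed-out RegionServerServices that reports the given server name.
   */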
  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
    rss.setFileSystem(getTestFileSystem());
    return rss;
  }

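  /**
   * Switches the logger for the given class to DEBUG level.
   * @param clazz The class for which to switch to debug logging.
   */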
  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }

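  /**
   * Expire the Master's session.
   * @throws Exception
   */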
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }

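  /**
   * Expire a region server's session.
   * @param index which region server
   * @throws Exception
   */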
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    // Decrement the count for this.conf, for a newly spawned master;
    // this.hbaseCluster shares this configuration too.
    decrementMinRegionServerCount(getConfiguration());

    // Each master thread keeps its own copy of the configuration.
    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
          Math.max(currentCount - 1, 1));
    }
  }

  public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
  }

  /**
   * @deprecated the <code>server</code> parameter is ignored; use
   *   {@link #expireSession(ZooKeeperWatcher, boolean)} instead.
   */
  @Deprecated
  public void expireSession(ZooKeeperWatcher nodeZK, Server server)
      throws Exception {
    expireSession(nodeZK, false);
  }

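  /**
   * Expire a ZooKeeper session as recommended in the ZooKeeper documentation:
   * connect a second client with the same session id and password, then close it.
   * @param nodeZK the ZK watcher whose session should be expired
   * @param checkStatus true to check afterwards that an HTable against hbase:meta
   *   can still be created with the current configuration
   */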
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
      throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Session expiry is asynchronous on the server side, so open a second
    // client on the same session to observe the expiration event.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
        1000, new org.apache.zookeeper.Watcher() {
          @Override
          public void process(WatchedEvent watchedEvent) {
            LOG.info("Monitor ZKW received event=" + watchedEvent);
          }
        }, sessionID, password);

    // Make the session expire: connect with the same id and password, then close.
    ZooKeeper newZK = new ZooKeeper(quorumServers,
        1000, EmptyWatcher.instance, sessionID, password);

    // Ensure the connection is established before closing; closing an
    // unconnected client would not expire the session.
    long start = System.currentTimeMillis();
    while (newZK.getState() != States.CONNECTED
        && System.currentTimeMillis() - start < 1000) {
      Thread.sleep(1);
    }
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // Close the monitor now that the session has been expired.
    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
    }
  }

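  /**
   * Get the Mini HBase cluster.
   * @return hbase cluster
   * @see #getHBaseClusterInterface()
   */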
  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

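  /**
   * Returns the HBaseCluster instance.
   * <p>The returned object can be any of the subclasses of HBaseCluster, and
   * tests referring to it should not assume that the cluster is a mini cluster
   * or a distributed one. If a test only works with a mini cluster, use
   * {@link #getMiniHBaseCluster()} instead.
   */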
  public HBaseCluster getHBaseClusterInterface() {
    // Implementation note: ideally this would be named getHBaseCluster(), but
    // that name is already taken by the mini-cluster accessor above.
    return hbaseCluster;
  }

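  /**
   * Returns an HBaseAdmin instance shared between users of this testing utility.
   * Closing it has no effect; it will be closed automatically when the cluster
   * shuts down.
   * @return The HBaseAdmin instance.
   * @throws IOException
   */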
  public synchronized HBaseAdmin getHBaseAdmin()
      throws IOException {
    if (hbaseAdmin == null) {
      hbaseAdmin = new HBaseAdminForTests(getConfiguration());
    }
    return hbaseAdmin;
  }

  private HBaseAdminForTests hbaseAdmin = null;

  private static class HBaseAdminForTests extends HBaseAdmin {
    public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
        ZooKeeperConnectionException, IOException {
      super(c);
    }

    @Override
    public synchronized void close() throws IOException {
      // Intentionally a no-op: the shared admin is closed by the testing
      // utility itself, via close0(), when the cluster shuts down.
      LOG.warn("close() called on HBaseAdmin instance returned from "
          + "HBaseTestingUtility.getHBaseAdmin()");
    }

    private synchronized void close0() throws IOException {
      super.close();
    }
  }

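  /**
   * Returns a ZooKeeperWatcher instance shared between users of this testing
   * utility. Don't close it; it will be closed automatically when the cluster
   * shuts down.
   * @return The ZooKeeperWatcher instance.
   * @throws IOException
   */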
  public synchronized ZooKeeperWatcher getZooKeeperWatcher()
      throws IOException {
    if (zooKeeperWatcher == null) {
      zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
          new Abortable() {
            @Override
            public void abort(String why, Throwable e) {
              throw new RuntimeException("Unexpected abort in HBaseTestingUtility:" + why, e);
            }

            @Override
            public boolean isAborted() {
              return false;
            }
          });
    }
    return zooKeeperWatcher;
  }

  private ZooKeeperWatcher zooKeeperWatcher;

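  /**
   * Closes the named region.
   * @param regionName The region to close.
   * @throws IOException
   */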
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

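  /**
   * Closes the named region.
   * @param regionName The region to close.
   * @throws IOException
   */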
  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }

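  /**
   * Closes the region containing the given row.
   * @param row The row to find the containing region.
   * @param table The table to find the region in.
   * @throws IOException
   */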
  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

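  /**
   * Closes the region containing the given row.
   * @param row The row to find the containing region.
   * @param table The table to find the region in.
   * @throws IOException
   */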
  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

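  /**
   * Picks a random region of the given table that passes {@link HRegion#checkSplit()}.
   * @param tableName name of table
   * @param maxAttempts maximum number of attempts, unlimited for value of -1
   * @return the HRegion chosen, null if none was found within the limit of maxAttempts
   */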
  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
    int regCount = regions.size();
    Set<Integer> attempted = new HashSet<Integer>();
    int idx;
    int attempts = 0;
    do {
      regions = getHBaseCluster().getRegions(tableName);
      if (regCount != regions.size()) {
        // If there was region movement, clear the set of attempted indexes.
        attempted.clear();
      }
      regCount = regions.size();
      // A region we fetched may be about to close, and regCount can be zero
      // while regions are moving; only probe when there is something to probe.
      if (regCount > 0) {
        idx = random.nextInt(regCount);
        // If we have just tried this region, there is no need to try it again.
        if (attempted.contains(idx)) {
          continue;
        }
        try {
          regions.get(idx).checkSplit();
          return regions.get(idx);
        } catch (Exception ex) {
          LOG.warn("Caught exception", ex);
          attempted.add(idx);
        }
      }
      attempts++;
    } while (maxAttempts == -1 || attempts < maxAttempts);
    return null;
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
    if (dfsCluster != null && dfsCluster.isClusterUp()) {
      throw new IOException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }

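  /**
   * Wait until all regions in a table have been assigned, up to a default
   * timeout of 30 seconds.
   * @param table Table to wait on.
   * @throws InterruptedException
   * @throws IOException
   */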
  public void waitTableAvailable(byte[] table)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, 30000);
  }

  public void waitTableAvailable(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableAvailable(admin, table, 30000);
  }

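  /**
   * Wait until all regions in a table have been assigned.
   * @param table Table to wait on.
   * @param timeoutMillis how long to wait before giving up
   * @throws InterruptedException
   * @throws IOException
   */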
  public void waitTableAvailable(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!admin.isTableAvailable(table)) {
      assertTrue("Timed out waiting for table to become available " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

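  /**
   * Waits for a table to be 'enabled': the table is marked enabled and all of
   * its regions have been assigned. Times out after a default period of 30 seconds.
   * @param table Table to wait on.
   * @throws InterruptedException
   * @throws IOException
   */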
  public void waitTableEnabled(byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, 30000);
  }

  public void waitTableEnabled(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(admin, table, 30000);
  }

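  /**
   * Waits for a table to be 'enabled': the table is marked enabled, all of its
   * regions have been assigned, and every region answers a read.
   * @param table Table to wait on.
   * @param timeoutMillis Time to wait on it being marked enabled.
   * @throws InterruptedException
   * @throws IOException
   */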
  public void waitTableEnabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    waitTableAvailable(admin, table, timeoutMillis);
    while (!admin.isTableEnabled(table)) {
      assertTrue("Timed out waiting for table to become available and enabled " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
    // Finally make sure all regions are fully open and online out on the cluster.
    // Regions may show as assigned in hbase:meta while setting the region online
    // in the region server is still pending; sniffing with the Canary reads every
    // region and retries until all of them are truly online.
    try {
      Canary.sniff(admin, TableName.valueOf(table));
    } catch (Exception e) {
      throw new IOException(e);
    }
  }

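  /**
   * Make sure that at least the specified number of region servers are running.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   * @throws IOException
   */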
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }

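  /**
   * Make sure that at least the specified number of region servers are running,
   * not counting servers that are currently stopping or stopped.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   * @throws IOException
   */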
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
        getMiniHBaseCluster().getRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:" + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }

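  /**
   * Returns a User derived from the current user, for tests that need distinct
   * filesystem instances. Only meaningful against a DistributedFileSystem; on
   * any other filesystem the current user is returned unchanged.
   * @param c configuration whose filesystem is checked
   * @param differentiatingSuffix suffix appended to the current user name
   * @return a test user in the supergroup, or the current user
   * @throws IOException
   */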
  public static User getDifferentUser(final Configuration c,
      final String differentiatingSuffix)
      throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    // Else we are on a distributed filesystem: make a distinct test user so each
    // caller gets its own FileSystem instance rather than the cached one.
    String username = User.getCurrent().getName() + differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
        new String[]{"supergroup"});
    return user;
  }

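  /**
   * Reflectively sets <code>maxRecoveryErrorCount</code> on the given
   * DFSOutputStream so tests do not linger through the default number of
   * recovery attempts. A no-op (with a log line) if the field cannot be set.
   * @param stream A DFSClient.DFSOutputStream.
   * @param max the new maximum recovery error count
   */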
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?>[] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
                stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }

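  /**
   * Wait until all regions for a table in hbase:meta have a non-empty info:server,
   * up to 60 seconds. This means all regions have been deployed, the master has
   * been informed, and hbase:meta updated with the regions' deployed servers.
   * @param tableName the table name
   * @throws IOException
   */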
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }

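  /**
   * Wait until all regions for a table in hbase:meta have a non-empty info:server,
   * or until timeout. This means all regions have been deployed, the master has
   * been informed, and hbase:meta updated with the regions' deployed servers.
   * @param tableName the table name
   * @param timeout timeout, in milliseconds
   * @throws IOException
   */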
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
      throws IOException {
    final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
    try {
      waitFor(timeout, 200, true, new Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          boolean allRegionsAssigned = true;
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          ResultScanner s = meta.getScanner(scan);
          try {
            Result r;
            while ((r = s.next()) != null) {
              byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
              HRegionInfo info = HRegionInfo.parseFromOrNull(b);
              if (info != null && info.getTable().equals(tableName)) {
                b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                allRegionsAssigned &= (b != null);
              }
            }
          } finally {
            s.close();
          }
          return allRegionsAssigned;
        }
      });
    } finally {
      meta.close();
    }

    // Also make sure the master's in-memory state agrees that the table has regions.
    HMaster master = getHBaseCluster().getMaster();
    final RegionStates states = master.getAssignmentManager().getRegionStates();
    waitFor(timeout, 200, new Predicate<IOException>() {
      @Override
      public boolean evaluate() throws IOException {
        List<HRegionInfo> hris = states.getRegionsOfTable(tableName);
        return hris != null && !hris.isEmpty();
      }
    });
  }

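  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
   */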
  public static List<Cell> getFromStoreFile(HStore store,
      Get get) throws IOException {
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()),
        // Use a read point of 0, the equivalent of resetting the thread read point.
        0);

    List<Cell> result = new ArrayList<Cell>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // Verify that we are on the row we want.
      Cell kv = result.get(0);
      if (!CellUtil.matchingRow(kv, get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }

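  /**
   * Create region split keys between startKey and endKey.
   * @param startKey the start key of the key range
   * @param endKey the end key of the key range
   * @param numRegions the number of regions to be created; must be greater than 3
   * @return the split keys; the first entry is the empty start key
   */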
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue("numRegions must be greater than 3", numRegions > 3);
    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] result = new byte[tmpSplitKeys.length + 1][];
    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
    // The first region starts at the empty key.
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }

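  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
   */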
  public static List<Cell> getFromStoreFile(HStore store,
      byte[] row,
      NavigableSet<byte[]> columns) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }

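  /**
   * Gets a ZooKeeperWatcher backed by an Abortable that rethrows aborts as
   * RuntimeExceptions, which is usually what a unit test wants.
   * @param TEST_UTIL the testing utility whose configuration is used
   */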
  public static ZooKeeperWatcher getZooKeeperWatcher(
      HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
      IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
        "unittest", new Abortable() {
          boolean aborted = false;

          @Override
          public void abort(String why, Throwable e) {
            aborted = true;
            throw new RuntimeException("Fatal ZK error, why=" + why, e);
          }

          @Override
          public boolean isAborted() {
            return aborted;
          }
        });
    return zkw;
  }

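  /**
   * Creates the unassigned znode for the given region and walks it through the
   * OFFLINE, OPENING and OPENED transitions, as a region server would when
   * opening the region.
   */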
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw,
        region.getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
        version);
    return zkw;
  }

  public static void assertKVListsEqual(String additionalMsg,
      final List<? extends Cell> expected,
      final List<? extends Cell> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    // Advance i to the first position where the two lists differ.
    int i;
    for (i = 0; i < minLen
        && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
        ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
          "Expected and actual KV arrays differ at position " + i + ": " +
          safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
          safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }

  private static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }

  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
        + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }

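  /**
   * Creates a random table with the given parameters.
   */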
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
        " regions, " + numFlushes + " storefiles per region, " +
        numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
        "\n");

    // Seed the RNG from the table name so a failing test is reproducible.
    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableName, cfBytes,
        maxVersions,
        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
        numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }

    return table;
  }

  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;
  private static Random random = new Random();

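  /**
   * Returns a random port. These ports cannot be registered with IANA and are
   * intended for dynamic allocation.
   */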
  public static int randomPort() {
    return MIN_RANDOM_PORT
        + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
  }

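  /**
   * Returns a random free port and marks that port as taken. Not thread-safe;
   * expected to be called from single-threaded test setup code.
   */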
  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      if (takenRandomPorts.contains(port)) {
        continue;
      }
      takenRandomPorts.add(port);

      // Probe the port by binding to it; if the bind fails, pick another port.
      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        port = 0;
      }
    } while (port == 0);
    return port;
  }

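  /**
   * Returns a random multicast address in the 226.1.1.x range.
   */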
  public static String randomMultiCastAddress() {
    return "226.1.1." + random.nextInt(254);
  }

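  /**
   * Waits up to ten seconds for a server to come up at the given host and port.
   * @throws IOException if the host cannot be resolved or the wait times out
   */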
  public static void waitForHostPort(String host, int port)
      throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }

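  /**
   * Creates a pre-split table for load testing, with one column family and
   * DEFAULT_REGIONS_PER_SERVER regions per live region server. If the table
   * already exists, logs a warning and continues. A typical (illustrative)
   * call, with a made-up table name and family:
   * <pre>
   *   int regions = HBaseTestingUtility.createPreSplitLoadTestTable(conf,
   *       TableName.valueOf("loadtest"), Bytes.toBytes("cf"),
   *       Algorithm.NONE, DataBlockEncoding.NONE);
   * </pre>
   * @return the number of regions the table was split into
   */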
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    return createPreSplitLoadTestTable(conf, tableName,
        columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER,
        Durability.USE_DEFAULT);
  }

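  /**
   * Creates a pre-split table for load testing with a configurable number of
   * regions per server and durability setting. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */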
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer,
      Durability durability)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.setDurability(durability);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
  }

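  /**
   * Creates a pre-split table for load testing from the given table and column
   * descriptors, with DEFAULT_REGIONS_PER_SERVER regions per live region server.
   * @return the number of regions the table was split into
   */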
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
  }

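  /**
   * Creates a pre-split table for load testing from the given table and column
   * descriptors. If the table already exists, logs a warning and continues.
   * @return the number of regions the table was split into
   */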
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd, int numRegionsPerServer) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Create a table with pre-split regions; the number of splits is
      // (number of live region servers) * (regions per server).
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * numRegionsPerServer;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
          "pre-splitting table into " + totalNumberOfRegions + " regions " +
          "(regions per server: " + numRegionsPerServer + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
          totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + desc.getTableName() +
          " already exists, continuing");
    } finally {
      admin.close();
    }
    return totalNumberOfRegions;
  }

  public static int getMetaRSPort(Configuration conf) throws IOException {
    HTable table = new HTable(conf, TableName.META_TABLE_NAME);
    HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
    table.close();
    return hloc.getPort();
  }

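  /**
   * Due to async racing issues, a region may not yet be in a region server's
   * online region list right after the assignment znode is deleted and the new
   * assignment is recorded in master; keep checking until the timeout.
   */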
  public void assertRegionOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) return;
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
        + " on server " + server);
  }

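  /**
   * Check to make sure the region is open on the specified region server, but
   * not on any other one.
   */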
  public void assertRegionOnlyOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) {
        List<JVMClusterUtil.RegionServerThread> rsThreads =
            getHBaseCluster().getLiveRegionServerThreads();
        for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) {
          HRegionServer rs = rsThread.getRegionServer();
          if (server.equals(rs.getServerName())) {
            continue;
          }
          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
          for (HRegion r : hrs) {
            assertTrue("Region should not be double assigned",
                r.getRegionId() != hri.getRegionId());
          }
        }
        return;
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
        + " on server " + server);
  }

  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.addFamily(hcd);
    HRegionInfo info =
        new HRegionInfo(TableName.valueOf(tableName), null, null, false);
    HRegion region =
        HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }

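  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
   * A typical (illustrative) use, with hypothetical names:
   * <pre>
   *   TEST_UTIL.waitFor(60000, TEST_UTIL.predicateTableEnabled(tableName));
   * </pre>
   */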
  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, predicate);
  }

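  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
   */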
  public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, interval, predicate);
  }

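  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
   */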
  public <E extends Exception> long waitFor(long timeout, long interval,
      boolean failIfTimeout, Predicate<E> predicate) throws E {
    return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
  }

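  /**
   * Wait until no regions are in transition.
   * @param timeout How long to wait.
   * @throws Exception
   */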
  public void waitUntilNoRegionsInTransition(
      final long timeout) throws Exception {
    waitFor(timeout, predicateNoRegionsInTransition());
  }

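  /**
   * Returns a {@link Predicate} for checking that there are no regions in
   * transition in master.
   */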
  public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
            .getAssignmentManager().getRegionStates();
        return !regionStates.isRegionsInTransition();
      }
    };
  }

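  /**
   * Returns a {@link Predicate} for checking that the given table is enabled.
   */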
  public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return getHBaseAdmin().isTableEnabled(tableName);
      }
    };
  }

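  /**
   * Create a set of column descriptors with the combination of compression,
   * encoding and bloom codecs available.
   * @return the list of column descriptors
   */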
  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

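  /**
   * Create a set of column descriptors with the combination of compression,
   * encoding and bloom codecs available.
   * @param prefix family names prefix
   * @return the list of column descriptors
   */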
  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
    List<HColumnDescriptor> hcds = new ArrayList<HColumnDescriptor>();
    long familyId = 0;
    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
        for (BloomType bloomType : BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          HColumnDescriptor hcd = new HColumnDescriptor(name);
          hcd.setCompressionType(compressionType);
          hcd.setDataBlockEncoding(encodingType);
          hcd.setBloomFilterType(bloomType);
          hcds.add(hcd);
          familyId++;
        }
      }
    }
    return hcds;
  }

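  /**
   * Get supported compression algorithms.
   * @return supported compression algorithms
   */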
  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // This algorithm is not available on this platform; skip it.
      }
    }
    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
  }
}