/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
import org.apache.hadoop.hbase.tool.Canary;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;

/**
 * Facility for testing HBase. Create an instance and keep it around while
 * testing; this class is meant to be your one-stop shop for anything you
 * might need for testing. It manages one cluster at a time only: the managed
 * cluster can be an in-process {@link MiniHBaseCluster} (mini ZooKeeper, DFS,
 * map-reduce, and HBase clusters) or a deployed {@link HBaseCluster}. Not all
 * methods work with a real (deployed) cluster.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HBaseTestingUtility extends HBaseCommonTestingUtility {
  private MiniZooKeeperCluster zkCluster = null;

  /** Configuration key for the number of regions per regionserver in pre-split tables. */
  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";

  /** The default number of regions per regionserver in a pre-split table. */
  public static final int DEFAULT_REGIONS_PER_SERVER = 5;

  /** True if the ZooKeeper cluster was handed to us rather than started by us. */
  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** True if a mini cluster is currently running for this testing utility instance. */
  private boolean miniClusterRunning;

  private String hadoopLogDir;

  /** Directory (a subdirectory of the data test dir) used by the dfs cluster, if any. */
  private File clusterTestDir = null;

  /** Directory on the test filesystem where the data for this instance is kept. */
  private Path dataTestDirOnTestFS = null;

  /**
   * System property key to get the test directory value. The name is what it
   * is because the mini dfs cluster has hard-codings to put test data here.
   * It should NOT be used directly in HBase.
   * @deprecated only meaningful to the mini dfs cluster
   */
  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  /** Filesystem URI used for the map-reduce mini-cluster setup, if overridden. */
  private static String FS_URI;

  /** Ports already handed out by this utility, so we never hand out duplicates. */
  private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();

  /** Compression algorithms to use in parameterized JUnit 4 tests. */
  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  /** This is for unit tests parameterized with a single boolean. */
  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Boolean.FALSE },
      { Boolean.TRUE }
    });

  /** This is for unit tests parameterized with the memstoreTS and tag-usage flags. */
  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();

  /** Compression algorithms to use in testing. */
  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
    Compression.Algorithm.NONE, Compression.Algorithm.GZ
  };

  /**
   * Create all combinations of Bloom filter types and compression algorithms
   * for testing.
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  /**
   * Create all combinations of the memstoreTS flag and the tag-usage flag for testing.
   */
  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    super(conf);

    // An hbase checksum verification failure will cause unit tests to fail.
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }

  /**
   * Create an HBaseTestingUtility where all tmp files are written to the
   * local test data dir. Use this when you are NOT using a mini DFS cluster.
   * @return HBaseTestingUtility that uses the local fs for temp files.
   */
  public static HBaseTestingUtility createLocalHTU() {
    Configuration c = HBaseConfiguration.create();
    return createLocalHTU(c);
  }

  /**
   * Create an HBaseTestingUtility using the given configuration, where all
   * tmp files are written to the local test data dir. Use this when you are
   * NOT using a mini DFS cluster.
   * @param c Configuration (will be modified)
   * @return HBaseTestingUtility that uses the local fs for temp files.
   */
  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    HBaseTestingUtility htu = new HBaseTestingUtility(c);
    String dataTestDir = htu.getDataTestDir().toString();
    htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
    return htu;
  }
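
  // Illustrative usage sketch (not part of the original file): a purely local
  // test that exercises HBase code paths with no mini cluster. The table name
  // "testtable" is hypothetical.
  //
  //   HBaseTestingUtility htu = HBaseTestingUtility.createLocalHTU();
  //   HTableDescriptor desc = htu.createTableDescriptor("testtable");
  //   HRegion region = htu.createLocalHRegion(desc, null, null);
  //   try {
  //     // exercise the region directly ...
  //   } finally {
  //     region.close();
  //   }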

  /**
   * Set the number of retries the embedded HDFS client will make. If passed
   * zero, also patch the live DFSClient, via reflection, to use a
   * non-retrying namenode proxy.
   * @param retries number of HDFS client retries
   * @deprecated only for tests that exercise DFS failure handling
   */
  @Deprecated
  public void setHDFSClientRetry(final int retries) {
    this.conf.setInt("hdfs.client.retries.number", retries);
    if (0 == retries) {
      makeDFSClientNonRetrying();
    }
  }

  /**
   * Returns this class's instance of {@link Configuration}. Be careful how
   * you use the returned Configuration, since {@link HConnection} instances
   * can be shared; the map of HConnections is keyed by the Configuration. If,
   * say, a Connection was being used against a cluster that had been shut
   * down (see {@link #shutdownMiniCluster()}), then the Connection will no
   * longer be wholesome. Rather than use the return directly, it is usually
   * best to make a copy and use that:
   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
   * @return Instance of Configuration.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  /**
   * Sets up the data test directory for this instance. Also creates the
   * underlying directories for hadoop.log.dir, hadoop.tmp.dir and
   * mapred.local.dir, setting the values in the configuration and, where the
   * mini clusters require it, as system properties.
   * @return the calculated data test build directory, if newly created.
   */
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
    // we want our own value to ensure uniqueness on the same machine.
    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
    createSubDir(
      "mapred.local.dir",
      testPath, "mapred-local-dir");

    return testPath;
  }
  private void createSubDirAndSystemProperty(
      String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // There is already a value set, for example by a test that is being run
      // inside a map-reduce job: honor the system property.
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " + sysValue
        + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          " Overriding the configuration value with the system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      // Not set, so we create it as a subdirectory of the parent.
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  /**
   * @return Where to write test data on the test filesystem; returns the
   * working directory for the test filesystem by default.
   * @see #setupDataTestDirOnTestFS()
   * @see #getTestFileSystem()
   */
  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

  /**
   * @return META table descriptor
   */
  public HTableDescriptor getMetaTableDescriptor() {
    try {
      return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
    } catch (IOException e) {
      throw new RuntimeException("Unable to create META table descriptor", e);
    }
  }

  /**
   * @return Where the DFS cluster will write data on the local filesystem.
   * Creates it if it does not exist already; a subdir of the data test dir.
   */
  Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  /**
   * Creates a directory for the DFS cluster, under the test data directory.
   */
  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      return;
    }

    // Using randomUUID ensures that multiple clusters can be launched by the
    // same test, if it stops and starts them.
    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
    // Have it cleaned up on exit.
    boolean b = deleteOnExit();
    if (b) clusterTestDir.deleteOnExit();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
  }

  /**
   * Returns a Path in the test filesystem, obtained from
   * {@link #getTestFileSystem()}, to write temporary test data to. Call this
   * method after setting up the mini dfs cluster if the test relies on it.
   * @return a unique path in the test filesystem
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  /**
   * Returns a Path in the test filesystem, obtained from
   * {@link #getTestFileSystem()}, to write temporary test data to. Call this
   * method after setting up the mini dfs cluster if the test relies on it.
   * @param subdirName name of the subdir to create under the base test dir
   * @return a unique path in the test filesystem
   */
  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

  /**
   * Sets up a path in the test filesystem to be used by tests.
   */
  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test on test fs dir already setup in "
        + dataTestDirOnTestFS.toString());
      return;
    }

    // The file system can be local, mini dfs, or, if the configuration is
    // supplied externally, an external cluster FS. If it is a local file
    // system, tests should use getDataTestDir; otherwise we can use the
    // working directory and create a unique subdir there.
    FileSystem fs = getTestFileSystem();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      File dataTestDir = new File(getDataTestDir().toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
      dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
    } else {
      Path base = getBaseTestDirOnTestFS();
      String randomStr = UUID.randomUUID().toString();
      dataTestDirOnTestFS = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
    }
  }

  /**
   * Cleans the test data directory on the test filesystem.
   * @return True if we removed the test dir
   */
  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret)
      dataTestDirOnTestFS = null;
    return ret;
  }

  /**
   * Cleans a subdirectory under the test data directory on the test filesystem.
   * @return True if we removed the child
   */
  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }

  /**
   * Start a mini dfs cluster.
   * @param servers How many DNs to start.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  /**
   * Start a mini dfs cluster. This is useful if you want to run datanodes on
   * distinct hosts for things like HDFS block location verification. If you
   * start MiniDFSCluster without host names, all instances of the datanodes
   * will have the same host name.
   * @param hosts hostnames the DNs are to run on.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
      throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  /**
   * Start a mini dfs cluster. Can only create one.
   * @param servers How many DNs to start.
   * @param hosts hostnames the DNs are to run on.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    try {
      // Skip fsync on the namenode edit log to speed up cluster start and stop
      // in tests; the class may be absent on some Hadoop versions.
      Method m = Class.forName("org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream")
        .getMethod("setShouldSkipFsyncForTesting", new Class<?>[] { boolean.class });
      m.invoke(null, new Object[] { true });
    } catch (ClassNotFoundException e) {
      LOG.info("EditLogFileOutputStream not found");
    }

    // Raise to ERROR level to skip some warnings specific to the mini cluster.
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class)
      .setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class)
      .setLevel(org.apache.log4j.Level.ERROR);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    // Set this just-started cluster as our filesystem.
    setFs();

    // Wait for the cluster to be totally up.
    this.dfsCluster.waitClusterUp();

    // Reset the cached test directory for the test filesystem.
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  private void setFs() throws IOException {
    if (this.dfsCluster == null) {
      LOG.info("Skipping setting fs because dfsCluster is null");
      return;
    }
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, racks, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up.
    this.dfsCluster.waitClusterUp();

    // Reset the cached test directory for the test filesystem.
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
      null, null, null);
    return dfsCluster;
  }
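
  // Illustrative sketch (not part of the original file): standing up just the
  // DFS piece for a filesystem-level test, without ZooKeeper or HBase.
  //
  //   HBaseTestingUtility util = new HBaseTestingUtility();
  //   MiniDFSCluster dfs = util.startMiniDFSCluster(3);
  //   FileSystem fs = dfs.getFileSystem();
  //   // ... exercise HDFS-backed code paths against fs ...
  //   util.shutdownMiniDFSCluster();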

  /** This is used before starting HDFS and map-reduce mini clusters. */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapred.local.dir");
    createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
  }

  /**
   * Get the HBase setting for dfs.client.read.shortcircuit from the conf or a
   * system property. This allows the parameter to be specified on the command
   * line. If not set, the default is false.
   */
  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }

  /**
   * Enable short-circuit reads, unless configured differently. Sets both the
   * HBase and HDFS settings, including disabling HDFS-side checksums (HBase
   * does its own checksumming in that case).
   */
  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      // Short circuit, for hdfs
      conf.set("dfs.block.local-path-access.user", curUser);
      // Short circuit, for hbase
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      // Skip checksums: checksumming is done by HBase, not HDFS, in this case.
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

  /**
   * Shuts down the instance created by a call to
   * {@link #startMiniDFSCluster(int)}, or does nothing.
   */
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }

  /**
   * Call this if you only want a zk cluster.
   * @see #startMiniZKCluster(int)
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  /**
   * Call this if you only want a zk cluster.
   * @param zooKeeperServerNum number of ZK servers to start
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
      throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  /**
   * Start a mini ZK cluster. If the property
   * "test.hbase.zookeeper.property.clientPort" is set, the port mentioned is
   * used as the default port for ZooKeeper.
   */
  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
      throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0) {
      // If there is a port in the config file, we use it.
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  /**
   * Shuts down the zk cluster created by a call to
   * {@link #startMiniZKCluster(File)}, or does nothing.
   * @see #startMiniZKCluster()
   */
  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }
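
  // Illustrative sketch (not part of the original file): a ZooKeeper-only
  // test that needs neither DFS nor HBase.
  //
  //   MiniZooKeeperCluster zk = TEST_UTIL.startMiniZKCluster();
  //   int port = zk.getClientPort();
  //   // ... exercise ZK-dependent code against localhost:port ...
  //   TEST_UTIL.shutdownMiniZKCluster();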

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper. Modifies the
   * Configuration. Homes the cluster data directory under a random
   * subdirectory in a directory under the system property test.build.data;
   * the directory is cleaned up on exit.
   * @param numSlaves Number of slaves to start up; we'll start this many
   * datanodes and regionservers. If numSlaves is > 1, make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver),
   * otherwise you get bind errors.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
      throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  /**
   * Start up a minicluster with the given number of masters and slaves.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper. Modifies the
   * Configuration; see {@link #startMiniCluster(int)} for the data directory
   * handling.
   * @param numMasters Number of masters to start up. If numMasters > 1, you
   * can find the active/primary master with {@link MiniHBaseCluster#getMaster()}.
   * @param numSlaves Number of slaves (regionservers) to start up. If
   * dataNodeHosts == null, this also indicates the number of datanodes to
   * start; otherwise the number of datanodes is dataNodeHosts.length.
   * @param dataNodeHosts hostnames the DNs are to run on. Useful if you want
   * to run datanodes on distinct hosts for things like HDFS block location
   * verification. If you start MiniDFSCluster without host names, all
   * instances of the datanodes will have the same host name.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
  }

  /**
   * Same as {@link #startMiniCluster(int, int)}, but with an explicit number
   * of datanodes.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper, with custom master
   * and regionserver implementations.
   * @param masterClass the class to use as HMaster, or null for the default
   * @param regionserverClass the class to use as HRegionServer, or null for
   * the default
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    return startMiniCluster(
      numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }

  /**
   * Same as {@link #startMiniCluster(int, int, String[], Class, Class)}, but
   * with an explicit number of datanodes. A non-empty dataNodeHosts overrides
   * numDataNodes.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
      numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // If we already put up a cluster, fail.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up the mini dfs cluster, unless one was supplied externally.
    if (this.dfsCluster == null) {
      dfsCluster = startMiniDFSCluster(numDataNodes, dataNodeHosts);
    }

    // Start up a zk cluster, unless one was supplied externally.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Start the MiniHBaseCluster.
    return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException {
    return startMiniHBaseCluster(numMasters, numSlaves, null, null);
  }

  /**
   * Starts up the mini hbase cluster. Usually used after a call to
   * {@link #startMiniCluster(int, int)} when doing stepped startup of
   * clusters. Usually you won't want this; you'll usually want
   * {@link #startMiniCluster()}.
   * @return Reference to the mini hbase cluster.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
      final int numSlaves, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws IOException, InterruptedException {
    // Now do the mini hbase cluster. Set the hbase.rootdir in config.
    createRootDir();

    // These settings will make the server wait until this exact number of
    // region servers are connected.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster =
        new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
    // Don't leave here till we've done a successful scan of the hbase:meta.
    HTable t = new HTable(c, TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin(); // create the hbaseAdmin immediately
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster) this.hbaseCluster;
  }
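
  // Illustrative sketch (not part of the original file): the common JUnit
  // pattern for using this utility. The test class name is hypothetical.
  //
  //   public class TestMyFeature {
  //     private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  //
  //     @BeforeClass
  //     public static void setUpBeforeClass() throws Exception {
  //       TEST_UTIL.startMiniCluster();
  //     }
  //
  //     @AfterClass
  //     public static void tearDownAfterClass() throws Exception {
  //       TEST_UTIL.shutdownMiniCluster();
  //     }
  //   }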

  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test. Use this if you want to keep dfs/zk up and just stop/start hbase.
   * @param servers number of region servers
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the hbase:meta.
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  /**
   * @return Current mini hbase cluster. Only has something in it after a call
   * to {@link #startMiniCluster()}.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster) this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
      MiniHBaseCluster.class.getName());
  }

  /**
   * Stops the mini hbase, zk, and hdfs clusters.
   * @see #startMiniCluster(int)
   */
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }

  /**
   * @return True if we removed the test dirs
   */
  @Override
  public boolean cleanupTestDir() throws IOException {
    boolean ret = super.cleanupTestDir();
    if (deleteDir(this.clusterTestDir)) {
      this.clusterTestDir = null;
      return ret;
    }
    return false;
  }

  /**
   * Shutdown the HBase mini cluster. Does not shutdown zk or dfs if running.
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    // Unset the configuration for MIN and MAX RS to start.
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shutdown zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }

  /**
   * Returns the path to the default root dir the minicluster uses. Note:
   * this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   */
  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }

  /**
   * Creates an hbase rootdir in the user's home directory, and the hbase
   * version file. Normally you won't make use of this method; the root dir
   * is created for you as part of mini cluster startup.
   * @return Fully qualified path to the hbase root dir
   */
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  /**
   * Flushes all caches in the mini hbase cluster.
   */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /**
   * Flushes all caches of the given table in the mini hbase cluster.
   */
  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /**
   * Compacts all regions in the mini hbase cluster.
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /**
   * Compacts all of a table's regions in the mini hbase cluster.
   */
  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

  /**
   * Create a table.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(String tableName, String family)
      throws IOException {
    return createTable(TableName.valueOf(tableName), new String[] { family });
  }

  /**
   * Create a table.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[] family)
      throws IOException {
    return createTable(TableName.valueOf(tableName), new byte[][] { family });
  }

  /**
   * Create a table.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, String[] families)
      throws IOException {
    List<byte[]> fams = new ArrayList<byte[]>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  /**
   * Create a table.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[] family)
      throws IOException {
    return createTable(tableName, new byte[][] { family });
  }

  /**
   * Create a table.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
      new Configuration(getConfiguration()));
  }

  /**
   * Create a table.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
      new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
      startKey, endKey, numRegions);
  }

  public HTable createTable(String tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
      startKey, endKey, numRegions);
  }

  /**
   * Create a pre-split table with the given number of regions between
   * startKey and endKey.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }
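
  // Illustrative sketch (not part of the original file): creating a pre-split
  // table and writing to it. Table and family names are hypothetical.
  //
  //   HTable table = TEST_UTIL.createTable(TableName.valueOf("t1"),
  //     new byte[][] { Bytes.toBytes("f1") }, 3,
  //     Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 10);
  //   Put p = new Put(Bytes.toBytes("row1"));
  //   p.add(Bytes.toBytes("f1"), null, Bytes.toBytes("value"));
  //   table.put(p);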

  /**
   * Create a table from the given table descriptor, adding the given
   * families with Bloom filters disabled.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
      throws IOException {
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms (they are on by default as of 0.95) because tests have
      // hard-coded counts of what to expect in the block cache, etc., and
      // having blooms on interferes.
      hcd.setBloomFilterType(BloomType.NONE);
      htd.addFamily(hcd);
    }
    getHBaseAdmin().createTable(htd);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(c, htd.getTableName());
  }

  /**
   * Create a table from the given descriptor, pre-split on the given row keys.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(HTableDescriptor htd, byte[][] splitRows)
      throws IOException {
    getHBaseAdmin().createTable(htd, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(getConfiguration(), htd.getTableName());
  }

  /**
   * Create a table.
   * @param c Configuration to use
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    return createTable(new HTableDescriptor(tableName), families, c);
  }

  /**
   * Create a table.
   * @param c Configuration to use
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms (they are on by default as of 0.95) because tests have
      // hard-coded counts of what to expect in the block cache, etc., and
      // having blooms on interferes.
      hcd.setBloomFilterType(BloomType.NONE);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  /**
   * Create a table.
   * @param c Configuration to use
   * @param numVersions max versions to keep per family
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  /**
   * Create a table.
   * @param c Configuration to use
   * @param numVersions max versions to keep per family
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  /**
   * Create a table.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  /**
   * Create a table.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  /**
   * Create a table.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
      throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  /**
   * Create a table.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a table.
   * @param blockSize block size to use for the families
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    return createTable(TableName.valueOf(tableName),
      families, numVersions, blockSize);
  }

  /**
   * Create a table.
   * @param blockSize block size to use for the families
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a table with per-family max versions.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  /**
   * Create a table with per-family max versions.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a table pre-split on the given row keys.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
      throws IOException {
    return createTable(TableName.valueOf(tableName), family, splitRows);
  }

  /**
   * Create a table pre-split on the given row keys.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    desc.addFamily(hcd);
    getHBaseAdmin().createTable(desc, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  /**
   * Create a table with multiple families, pre-split on the given row keys.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
    return new HTable(getConfiguration(), tableName);
  }

  /**
   * Drop an existing table.
   * @param tableName existing table
   */
  public void deleteTable(String tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  /**
   * Drop an existing table.
   * @param tableName existing table
   */
  public void deleteTable(byte[] tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  /**
   * Drop an existing table, disabling it first if needed.
   * @param tableName existing table
   */
  public void deleteTable(TableName tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }

  // ==========================================================================
  // Canned table and table descriptor creation.

  public final static byte[] fam1 = Bytes.toBytes("colfamily11");
  public final static byte[] fam2 = Bytes.toBytes("colfamily21");
  public final static byte[] fam3 = Bytes.toBytes("colfamily31");
  public static final byte[][] COLUMNS = { fam1, fam2, fam3 };
  private static final int MAXVERSIONS = 3;

  public static final char FIRST_CHAR = 'a';
  public static final char LAST_CHAR = 'z';
  public static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR };
  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);

  /**
   * Create a table descriptor of the given name with the standard test
   * families ({@link #COLUMNS}).
   * @param name Name to give the table.
   * @param versions How many versions to allow per column.
   * @return Table descriptor.
   */
  public HTableDescriptor createTableDescriptor(final String name,
      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
    for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
      htd.addFamily(new HColumnDescriptor(cfName)
          .setMinVersions(minVersions)
          .setMaxVersions(versions)
          .setKeepDeletedCells(keepDeleted)
          .setBlockCacheEnabled(false)
          .setTimeToLive(ttl)
      );
    }
    return htd;
  }

  /**
   * Create a table descriptor of the given name with the standard test
   * families ({@link #COLUMNS}) and default version/TTL settings.
   * @param name Name to give the table.
   * @return Table descriptor.
   */
  public HTableDescriptor createTableDescriptor(final String name) {
    return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
      MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
  }

  /**
   * Create an HRegion that writes to the local tmp dirs.
   * @param desc table descriptor
   * @param startKey first row key
   * @param endKey last row key (exclusive)
   * @return the created region
   */
  public HRegion createLocalHRegion(HTableDescriptor desc, byte[] startKey,
      byte[] endKey)
      throws IOException {
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
    return createLocalHRegion(hri, desc);
  }

  /**
   * Create an HRegion that writes to the local tmp dirs. Creates the WAL for
   * you; be sure to close the region when you are finished with it.
   */
  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
  }

  /**
   * Create an HRegion that writes to the local tmp dirs with the specified hlog.
   * @param info region info
   * @param desc table descriptor
   * @param hlog hlog for this region.
   * @return the created region
   */
  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog)
      throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
  }

  /**
   * Create a local HRegion from scratch for the given table and families.
   * @param isReadOnly whether the table is read only
   * @param durability the durability setting for the table
   * @param hlog the WAL to use, or null
   * @return a region that writes to a local dir for testing
   */
  public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
      String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
      HLog hlog, byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.setReadOnly(isReadOnly);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Keep all versions.
      hcd.setMaxVersions(Integer.MAX_VALUE);
      htd.addFamily(hcd);
    }
    htd.setDurability(durability);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    return createLocalHRegion(info, htd, hlog);
  }

  /**
   * Delete all rows from the given table, leaving the table enabled.
   * @param tableName table which must exist.
   * @return HTable for the table
   */
  public HTable truncateTable(byte[] tableName) throws IOException {
    return truncateTable(TableName.valueOf(tableName));
  }

  /**
   * Delete all rows from the given table, leaving the table enabled.
   * @param tableName table which must exist.
   * @return HTable for the table
   */
  public HTable truncateTable(TableName tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan.close();
    return table;
  }

  /**
   * Load a table with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Family
   * @return Count of rows loaded.
   */
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    return loadTable(t, new byte[][] { f });
  }

  /**
   * Load a table with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Family
   * @param writeToWAL whether the puts should be written to the WAL
   * @return Count of rows loaded.
   */
  public int loadTable(final HTable t, final byte[] f, boolean writeToWAL) throws IOException {
    return loadTable(t, new byte[][] { f }, null, writeToWAL);
  }

  /**
   * Load a table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Array of families to load
   * @return Count of rows loaded.
   */
  public int loadTable(final HTable t, final byte[][] f) throws IOException {
    return loadTable(t, f, null);
  }

  /**
   * Load a table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Array of families to load
   * @param value the value of the cells. If null is passed, the row key is used as value.
   * @return Count of rows loaded.
   */
  public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
    return loadTable(t, f, value, true);
  }

  /**
   * Load a table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Array of families to load
   * @param value the value of the cells. If null is passed, the row key is used as value.
   * @param writeToWAL whether the puts should be written to the WAL
   * @return Count of rows loaded.
   */
  public int loadTable(final HTable t, final byte[][] f, byte[] value, boolean writeToWAL)
      throws IOException {
    t.setAutoFlush(false);
    int rowCount = 0;
    for (byte[] row : HBaseTestingUtility.ROWS) {
      Put put = new Put(row);
      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
      for (int i = 0; i < f.length; i++) {
        put.add(f[i], null, value != null ? value : row);
      }
      t.put(put);
      rowCount++;
    }
    t.flushCommits();
    return rowCount;
  }
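
  // Illustrative sketch (not part of the original file): loading a table and
  // verifying the row count; "t" and family "f1" come from an earlier
  // createTable call.
  //
  //   int loaded = TEST_UTIL.loadTable(t, Bytes.toBytes("f1"));
  //   assertEquals(loaded, TEST_UTIL.countRows(t));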

  /**
   * A tracker for tracking and validating table rows generated with
   * {@link HBaseTestingUtility#loadTable(HTable, byte[])}.
   */
  public static class SeenRowTracker {
    int dim = 'z' - 'a' + 1;
    int[][][] seenRows = new int[dim][dim][dim]; // count of how many times each row is seen
    byte[] startRow;
    byte[] stopRow;

    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
      this.startRow = startRow;
      this.stopRow = stopRow;
    }

    void reset() {
      for (byte[] row : ROWS) {
        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
      }
    }

    int i(byte b) {
      return b - 'a';
    }

    public void addRow(byte[] row) {
      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
    }

    /**
     * Validate that all the rows between startRow and stopRow are seen
     * exactly once, and all other rows not at all.
     */
    public void validate() {
      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
            int count = seenRows[i(b1)][i(b2)][i(b3)];
            int expectedCount = 0;
            if (Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0
                && Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0) {
              expectedCount = 1;
            }
            if (count != expectedCount) {
              String row = new String(new byte[] { b1, b2, b3 });
              throw new RuntimeException("Row:" + row + " has a seen count of " + count +
                " instead of " + expectedCount);
            }
          }
        }
      }
    }
  }

  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
    return loadRegion(r, f, false);
  }

  /**
   * Load a region with rows from 'aaa' to 'zzz'.
   * @param r Region
   * @param f Family
   * @param flush flush the cache if true
   * @return Count of rows loaded.
   */
  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
      throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.setDurability(Durability.SKIP_WAL);
          put.add(f, null, k);
          // Retry while the region is too busy, backing off exponentially up
          // to a second between attempts.
          int preRowCount = rowCount;
          int pause = 10;
          int maxPause = 1000;
          while (rowCount == preRowCount) {
            try {
              r.put(put);
              rowCount++;
            } catch (RegionTooBusyException e) {
              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
              Threads.sleep(pause);
            }
          }
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }

  public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow)
      throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Put put = new Put(data);
      put.add(f, null, data);
      t.put(put);
    }
  }

  /**
   * Return the number of rows in the given table.
   */
  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  /**
   * Return the number of rows in the given table, counting only the given families.
   */
  public int countRows(final HTable table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  /**
   * Return an md5 digest over the row keys of the entire contents of a table.
   */
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // Render the accumulated digest bytes; MessageDigest.toString() would
    // only describe the digest object itself, not the checksum.
    return Bytes.toStringBinary(digest.digest());
  }
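
  // Illustrative sketch (not part of the original file): comparing table
  // contents before and after an operation that should preserve them.
  //
  //   String before = TEST_UTIL.checksumRows(table);
  //   // ... flush, compact, restart, etc. ...
  //   assertEquals(before, TEST_UTIL.checksumRows(table));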

  /**
   * Creates many regions, named "aaa" to "zzz".
   * @param table The table to use for the data.
   * @param columnFamily The family to insert the data into.
   * @return count of regions created.
   */
  public int createMultiRegions(HTable table, byte[] columnFamily)
      throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily);
  }

  /** All the row values for the data loaded by {@link #loadTable(HTable, byte[])}. */
  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3];
  static {
    int i = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          ROWS[i][0] = b1;
          ROWS[i][1] = b2;
          ROWS[i][2] = b3;
          i++;
        }
      }
    }
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };

  /**
   * Creates many regions, named "aaa" to "zzz".
   * @param c Configuration to use.
   * @param table The table to use for the data.
   * @param columnFamily The family to insert the data into.
   * @return count of regions created.
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily)
      throws IOException {
    return createMultiRegions(c, table, columnFamily, KEYS);
  }

  /**
   * Use reflection to swap the DFSClient's namenode proxy for a bare RPC
   * proxy that does not retry. Best effort; logs and continues on failure.
   */
  void makeDFSClientNonRetrying() {
    if (null == this.dfsCluster) {
      LOG.debug("dfsCluster has not started, can't make client non-retrying.");
      return;
    }
    try {
      final FileSystem filesystem = this.dfsCluster.getFileSystem();
      if (!(filesystem instanceof DistributedFileSystem)) {
        LOG.debug("dfsCluster is not backed by a DistributedFileSystem, " +
          "can't make client non-retrying.");
        return;
      }

      final DistributedFileSystem fs = (DistributedFileSystem) filesystem;
      // Grab the embedded DFSClient instance.
      final Field dfsField = fs.getClass().getDeclaredField("dfs");
      dfsField.setAccessible(true);
      final Class<?> dfsClazz = dfsField.getType();
      final DFSClient dfs = DFSClient.class.cast(dfsField.get(fs));

      // The factory method for a bare (non-retrying) namenode RPC proxy.
      final Method createRPCNamenode = dfsClazz.getDeclaredMethod("createRPCNamenode",
        InetSocketAddress.class, Configuration.class, UserGroupInformation.class);
      createRPCNamenode.setAccessible(true);

      // Pull the arguments the factory method needs out of the client itself.
      final Field nnField = dfsClazz.getDeclaredField("nnAddress");
      nnField.setAccessible(true);
      final InetSocketAddress nnAddress = InetSocketAddress.class.cast(nnField.get(dfs));
      final Field confField = dfsClazz.getDeclaredField("conf");
      confField.setAccessible(true);
      final Configuration conf = Configuration.class.cast(confField.get(dfs));
      final Field ugiField = dfsClazz.getDeclaredField("ugi");
      ugiField.setAccessible(true);
      final UserGroupInformation ugi = UserGroupInformation.class.cast(ugiField.get(dfs));

      // Replace the retrying namenode proxy with a bare RPC one.
      final Field namenodeField = dfsClazz.getDeclaredField("namenode");
      namenodeField.setAccessible(true);
      namenodeField.set(dfs, createRPCNamenode.invoke(null, nnAddress, conf, ugi));
      LOG.debug("Set DFSClient namenode to bare RPC");
    } catch (Exception exception) {
      LOG.info("Could not alter DFSClient to be non-retrying.", exception);
    }
  }

  /**
   * Creates the specified number of regions in the specified table.
   * @param c Configuration to use.
   * @param table The table to use for the data.
   * @param family The family to insert the data into.
   * @param numRegions the number of regions to create (at least 3).
   * @return count of regions created.
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] family, int numRegions)
      throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] regionStartKeys = new byte[splitKeys.length + 1][];
    System.arraycopy(splitKeys, 0, regionStartKeys, 1, splitKeys.length);
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }

  @SuppressWarnings("deprecation")
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte[][] startKeys)
      throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, TableName.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }
    // Replace the existing regions of the table with new ones covering the
    // requested key ranges: remember the existing meta rows and the region
    // directory to delete, then write the new region entries.
    List<byte[]> rows = getMetaTableRows(htd.getTableName());
    String regionToDeleteInFS = table
        .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
        .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // Add the custom regions.
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getName(),
        startKeys[i], startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
      count++;
    }
    // Remove the "old" regions from meta.
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    // Remove the "old" region from the filesystem.
    Path tableDir = new Path(getDefaultRootDirPath().toString()
      + System.getProperty("file.separator") + htd.getTableName()
      + System.getProperty("file.separator") + regionToDeleteInFS);
    FileSystem.get(c).delete(tableDir);
    // Flush the cache of regions.
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    // Assign all the new regions IF the table is enabled.
    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for (HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }

  /**
   * Create rows in hbase:meta for regions of the specified table with the
   * specified start keys. The first startKey should be a 0-length byte array
   * if you want to form a proper range of regions.
   * @return list of region info for the regions added to meta.
   */
  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final HTableDescriptor htd, byte[][] startKeys)
      throws IOException {
    HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // Add the custom regions.
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
        startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

  /**
   * Returns all rows from the hbase:meta table.
   */
  public List<byte[]> getMetaTableRows() throws IOException {
    // TODO: Redo using MetaReader class.
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

  /**
   * Returns all rows from the hbase:meta table for a given user table.
   */
  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
    // TODO: Redo using MetaReader class.
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      HRegionInfo info = HRegionInfo.getHRegionInfo(result);
      if (info == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        // TODO figure out what to do for this hosed case.
        continue;
      }

      if (info.getTable().equals(tableName)) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

  /**
   * Tool to get a reference to the region server object that holds the first
   * region of the specified user table.
   * @param tableName user table to look up in hbase:meta
   * @return region server that holds it, null if the row doesn't exist
   */
  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
      throws IOException, InterruptedException {
    return getRSForFirstRegionInTable(TableName.valueOf(tableName));
  }

  /**
   * Tool to get a reference to the region server object that holds the first
   * region of the specified user table. It first searches for the meta rows
   * that contain the regions of the specified table, then gets the index of
   * that RS, and finally retrieves the RS's reference.
   * @param tableName user table to look up in hbase:meta
   * @return region server that holds it, null if the row doesn't exist
   */
  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
      throws IOException, InterruptedException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + metaRows.size() + " rows for table " +
      tableName);
    byte[] firstrow = metaRows.get(0);
    LOG.debug("FirstRow=" + Bytes.toString(firstrow));
    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // The pause is configured in milliseconds, so the retry counter must use
    // MILLISECONDS; MICROSECONDS would make the retries barely sleep at all.
    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MILLISECONDS);
    while (retrier.shouldRetry()) {
      int index = getMiniHBaseCluster().getServerWith(firstrow);
      if (index != -1) {
        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
      }
      // Came back -1. The region may not be online yet; sleep a while.
      retrier.sleepUntilNextRetry();
    }
    return null;
  }
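
  // Illustrative sketch (not part of the original file): bouncing the region
  // server that hosts a table's first region; the table name is hypothetical.
  //
  //   HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TableName.valueOf("t1"));
  //   TEST_UTIL.getMiniHBaseCluster().killRegionServer(rs.getServerName());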

  /**
   * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>'s.
   * @throws IOException When starting the cluster fails.
   */
  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
    return mrCluster;
  }

  /**
   * The tasktracker has a bug where changing the hadoop.log.dir system
   * property will not change its internal static LOG_DIR variable, so we
   * change the field via reflection.
   */
  private void forceChangeTaskLogDir() {
    Field logDirField;
    try {
      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
      logDirField.setAccessible(true);

      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);

      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    } catch (SecurityException e) {
      throw new RuntimeException(e);
    } catch (NoSuchFieldException e) {
      throw new RuntimeException(e);
    } catch (IllegalArgumentException e) {
      throw new RuntimeException(e);
    } catch (IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Starts a <code>MiniMRCluster</code>.
   * @param servers The number of <code>TaskTracker</code>'s to start.
   * @throws IOException When starting the cluster fails.
   */
  private void startMiniMapReduceCluster(final int servers) throws IOException {
    if (mrCluster != null) {
      throw new IllegalStateException("MiniMRCluster is already running");
    }
    LOG.info("Starting mini mapreduce cluster...");
    setupClusterTestDir();
    createDirsAndSetProperties();

    forceChangeTaskLogDir();

    //// hadoop2 specific settings
    // Tests were failing because this process used 6GB of virtual memory and
    // was getting killed, so we up the usable VM ratio.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against
    // hadoop 2.0.2-alpha; disabling speculative task execution avoids this.
    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);
    ////

    // Allow the user to override the FS URI for this map-reduce cluster.
    mrCluster = new MiniMRCluster(servers,
      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
      null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
    }
    // Hadoop MiniMR overwrites this while it should not.
    jobConf.set("mapred.local.dir",
      conf.get("mapred.local.dir"));
    LOG.info("Mini mapreduce cluster started");

    // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance
    // and updates the settings there. Our conf must be updated with their
    // copies so that jobs submitted with our conf find the cluster.
    conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
    // This is for mrv2 support; mr1 ignores it.
    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = jobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
    if (historyAddress != null) {
      conf.set("mapreduce.jobhistory.address", historyAddress);
    }
    String schedulerAddress =
      jobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }
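
  /**
   * Stops the previously started <code>MiniMRCluster</code> and resets the
   * job tracker configuration back to "local".
   */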
  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }
    // Restore configuration to point to local jobtracker
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }

  public RegionServerServices createMockRegionServerService() throws IOException {
    return createMockRegionServerService((ServerName) null);
  }
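
  /**
   * Create a stubbed-out RegionServerServices, mainly for getting a test
   * filesystem and an RPC server into tests.
   */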
  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc)
      throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
    rss.setFileSystem(getTestFileSystem());
    rss.setRpcServer(rpc);
    return rss;
  }

  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
    rss.setFileSystem(getTestFileSystem());
    return rss;
  }
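
  /**
   * Switches the logger for the given class to DEBUG level (or ALL for JDK
   * logging). Other logging implementations are left untouched.
   * @param clazz The class for which to switch to debug logging.
   */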
  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }
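
  /**
   * Expire the Master's ZooKeeper session, to test recovery from it.
   * @throws Exception
   */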
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }
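
  /**
   * Expire a region server's ZooKeeper session, and lower the minimum number
   * of region servers the masters wait on so startup checks still pass.
   * @param index which region server
   * @throws Exception
   */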
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    // decrement the count for this.conf, for a newly spawned master;
    // this.hbaseCluster shares this configuration too
    decrementMinRegionServerCount(getConfiguration());

    // decrement the count for all the masters that are already up
    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
          Math.max(currentCount - 1, 1));
    }
  }

  public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
  }

  @Deprecated
  public void expireSession(ZooKeeperWatcher nodeZK, Server server)
      throws Exception {
    expireSession(nodeZK, false);
  }
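
  /**
   * Expire a ZooKeeper session, using the standard trick of opening a second
   * client on the same session id and password and then closing it. A
   * separate "monitor" connection on the same session is opened first so the
   * resulting session events get logged.
   * @param nodeZK - the ZK watcher to expire
   * @param checkStatus - true to check if we can create an HTable with the
   *                    current configuration.
   */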
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
      throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Open a second connection on the same session id and password so that
    // we can watch what happens to the session while we kill it.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
        1000, new org.apache.zookeeper.Watcher() {
          @Override
          public void process(WatchedEvent watchedEvent) {
            LOG.info("Monitor ZKW received event=" + watchedEvent);
          }
        }, sessionID, password);

    // Making it expire
    ZooKeeper newZK = new ZooKeeper(quorumServers,
        1000, EmptyWatcher.instance, sessionID, password);

    // Ensure we have a connection to the server before closing down,
    // otherwise the close-session event may be eaten before the watchers
    // are notified.
    long start = System.currentTimeMillis();
    while (newZK.getState() != States.CONNECTED
        && System.currentTimeMillis() - start < 1000) {
      Thread.sleep(1);
    }
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // Now close the monitor connection as well.
    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
    }
  }
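
  /**
   * Get the Mini HBase cluster.
   * @return hbase cluster
   * @see #getHBaseClusterInterface()
   */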
  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }
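
  /**
   * Returns the HBaseCluster instance.
   * <p>The returned object can be any of the subclasses of HBaseCluster, and
   * tests referring to this should not assume the cluster is a mini cluster
   * or a distributed one. If the test only works on a mini cluster, use
   * {@link #getMiniHBaseCluster()} instead to avoid type-casting.
   */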
  public HBaseCluster getHBaseClusterInterface() {
    return hbaseCluster;
  }
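
  /**
   * Returns an HBaseAdmin instance. The instance is shared between users of
   * this HBaseTestingUtility; closing it has no effect, it is closed
   * automatically when the cluster shuts down.
   * @return The HBaseAdmin instance.
   * @throws IOException
   */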
  public synchronized HBaseAdmin getHBaseAdmin()
      throws IOException {
    if (hbaseAdmin == null) {
      hbaseAdmin = new HBaseAdminForTests(getConfiguration());
    }
    return hbaseAdmin;
  }

  private HBaseAdminForTests hbaseAdmin = null;

  private static class HBaseAdminForTests extends HBaseAdmin {
    public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
        ZooKeeperConnectionException, IOException {
      super(c);
    }

    @Override
    public synchronized void close() throws IOException {
      LOG.warn("close() called on HBaseAdmin instance returned from " +
          "HBaseTestingUtility.getHBaseAdmin()");
    }

    private synchronized void close0() throws IOException {
      super.close();
    }
  }
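
  /**
   * Returns a ZooKeeperWatcher instance. The instance is shared between users
   * of this HBaseTestingUtility; don't close it, it is closed automatically
   * when the cluster shuts down.
   * @return The ZooKeeperWatcher instance.
   * @throws IOException
   */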
  public synchronized ZooKeeperWatcher getZooKeeperWatcher()
      throws IOException {
    if (zooKeeperWatcher == null) {
      zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
          new Abortable() {
            @Override public void abort(String why, Throwable e) {
              throw new RuntimeException("Unexpected abort in HBaseTestingUtility: " + why, e);
            }
            @Override public boolean isAborted() { return false; }
          });
    }
    return zooKeeperWatcher;
  }

  private ZooKeeperWatcher zooKeeperWatcher;
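
  /**
   * Closes the named region.
   * @param regionName The region to close.
   * @throws IOException
   */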
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }

  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }
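
  /**
   * Closes the region containing the given row.
   * @param row The row to find the containing region.
   * @param table The table to find the region in.
   * @throws IOException
   */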
  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }
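
  /**
   * Retrieves a splittable region randomly from tableName.
   * @param tableName name of table
   * @param maxAttempts maximum number of attempts, unlimited for value of -1
   * @return the HRegion chosen, null if none was found within limit of maxAttempts
   */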
  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
    int regCount = regions.size();
    Set<Integer> attempted = new HashSet<Integer>();
    int idx;
    int attempts = 0;
    do {
      regions = getHBaseCluster().getRegions(tableName);
      if (regCount != regions.size()) {
        // if there was region movement, clear the attempted Set
        attempted.clear();
      }
      regCount = regions.size();
      if (regCount > 0) {
        idx = random.nextInt(regCount);
        // already tried this region; count the attempt so a bounded
        // maxAttempts still terminates, and pick again
        if (attempted.contains(idx)) {
          attempts++;
          continue;
        }
        try {
          regions.get(idx).checkSplit();
          return regions.get(idx);
        } catch (Exception ex) {
          LOG.warn("Caught exception", ex);
          attempted.add(idx);
        }
      }
      attempts++;
    } while (maxAttempts == -1 || attempts < maxAttempts);
    return null;
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
    setDFSCluster(cluster, true);
  }
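
  /**
   * Set the MiniDFSCluster
   * @param cluster cluster to use
   * @param requireDown require that the cluster not be "up" (MiniDFSCluster#isClusterUp)
   * before it is set.
   * @throws IllegalStateException if the passed cluster is up when it is required to be down
   * @throws IOException if the FileSystem could not be set from the passed dfs cluster
   */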
  public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
      throws IllegalStateException, IOException {
    if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
      throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
    this.setFs();
  }

  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }
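
  /**
   * Wait until all regions in a table have been assigned. Waits default timeout before giving up
   * (30 seconds).
   * @param table Table to wait on.
   * @throws InterruptedException
   * @throws IOException
   */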
  public void waitTableAvailable(byte[] table)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, 30000);
  }

  public void waitTableAvailable(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableAvailable(admin, table, 30000);
  }

  public void waitTableAvailable(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!admin.isTableAvailable(table)) {
      assertTrue("Timed out waiting for table to become available " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }
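
  /**
   * Waits for a table to be 'enabled'. Enabled means the table is set as 'enabled' and the
   * regions have all been assigned. Will timeout after default period (30 seconds).
   * @see #waitTableAvailable(byte[])
   * @param table Table to wait on.
   * @throws InterruptedException
   * @throws IOException
   */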
  public void waitTableEnabled(byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, 30000);
  }

  public void waitTableEnabled(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(admin, table, 30000);
  }

  public void waitTableEnabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    waitTableAvailable(admin, table, timeoutMillis);
    while (!admin.isTableEnabled(table)) {
      assertTrue("Timed out waiting for table to become available and enabled " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
    // The table is 'enabled', but regions may still be opening on individual
    // region servers. Run the Canary over the table; it reads from every
    // region and so only succeeds once all regions are actually online.
    HConnection connection = HConnectionManager.createConnection(conf);
    try {
      Canary.sniff(connection, TableName.valueOf(table));
    } catch (Exception e) {
      throw new IOException(e);
    } finally {
      connection.close();
    }
  }
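
  /**
   * Waits for a table to be 'disabled'. Disabled means the table is set as 'disabled'.
   * Will timeout after default period (30 seconds).
   * @param table Table to wait on.
   * @throws InterruptedException
   * @throws IOException
   */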
  public void waitTableDisabled(byte[] table)
      throws InterruptedException, IOException {
    waitTableDisabled(getHBaseAdmin(), table, 30000);
  }

  public void waitTableDisabled(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableDisabled(admin, table, 30000);
  }

  public void waitTableDisabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableDisabled(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableDisabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    TableName tableName = TableName.valueOf(table);
    long startWait = System.currentTimeMillis();
    while (!admin.isTableDisabled(tableName)) {
      assertTrue("Timed out waiting for table to become disabled " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }
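
  /**
   * Make sure that at least the specified number of region servers
   * are running.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   * @throws IOException
   */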
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }

    return startedServer;
  }
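
  /**
   * Make sure that at least the specified number of region servers
   * are running. We don't count the ones that are currently stopping or are
   * stopped.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   * @throws IOException
   */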
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
        getMiniHBaseCluster().getRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:" + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }
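
  /**
   * Returns a User for running test operations as a different user. Creates
   * a new test user (the current user name plus the differentiating suffix)
   * when running against a DistributedFileSystem; otherwise simply returns
   * the current user.
   * @param c Initial configuration
   * @param differentiatingSuffix Suffix to differentiate this user from others.
   * @return A new user, or the current user if the filesystem is not distributed.
   * @throws IOException
   */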
  public static User getDifferentUser(final Configuration c,
      final String differentiatingSuffix)
      throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    // Else on a distributed filesystem: create a fresh test user in the
    // supergroup.
    String username = User.getCurrent().getName() +
        differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
        new String[]{"supergroup"});
    return user;
  }
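
  /**
   * Set the "maxRecoveryErrorCount" field on a DFSOutputStream via reflection
   * (the field is not publicly accessible). Handy for tests that want DFS
   * output stream recovery to give up quickly instead of retrying many times.
   * Failures to set the field are logged and ignored.
   * @param stream A DFSClient.DFSOutputStream.
   * @param max Maximum number of recovery error retries.
   */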
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?>[] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
                stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }
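
  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, up to 60 seconds. This means all regions have been deployed,
   * the master has been informed, and hbase:meta updated with the regions'
   * deployed servers.
   * @param tableName the table name
   * @throws IOException
   */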
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }
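
  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, or until timeout. This means all regions have been deployed,
   * the master has been informed, and hbase:meta updated with the regions'
   * deployed servers.
   * @param tableName the table name
   * @param timeout timeout, in milliseconds
   * @throws IOException
   */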
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
      throws IOException {
    final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
    try {
      waitFor(timeout, 200, true, new Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          boolean allRegionsAssigned = true;
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          ResultScanner s = meta.getScanner(scan);
          try {
            Result r;
            while ((r = s.next()) != null) {
              byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
              HRegionInfo info = HRegionInfo.parseFromOrNull(b);
              if (info != null && info.getTable().equals(tableName)) {
                b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                allRegionsAssigned &= (b != null);
              }
            }
          } finally {
            s.close();
          }
          return allRegionsAssigned;
        }
      });
    } finally {
      meta.close();
    }

    // check from the master state if we are using a mini cluster
    HMaster master = getHBaseCluster().getMaster();
    final RegionStates states = master.getAssignmentManager().getRegionStates();
    waitFor(timeout, 200, new Predicate<IOException>() {
      @Override
      public boolean evaluate() throws IOException {
        List<HRegionInfo> hris = states.getRegionsOfTable(tableName);
        return hris != null && !hris.isEmpty();
      }
    });
  }
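
  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
   */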
  public static List<Cell> getFromStoreFile(HStore store,
      Get get) throws IOException {
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()),
        // a read point of 0 is sufficient here since this reads flushed
        // store files
        0);

    List<Cell> result = new ArrayList<Cell>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // verify that we are on the row we want
      Cell kv = result.get(0);
      if (!CellUtil.matchingRow(kv, get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }
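
  /**
   * Create region split keys between startKey and endKey.
   * @param startKey first key of the intended key range
   * @param endKey last key of the intended key range
   * @param numRegions the number of regions to be created. It has to be greater than 3.
   * @return the resulting split keys
   */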
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue(numRegions > 3);
    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] result = new byte[tmpSplitKeys.length + 1][];
    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }

  public static List<Cell> getFromStoreFile(HStore store,
      byte[] row,
      NavigableSet<byte[]> columns) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }

  public static ZooKeeperWatcher getZooKeeperWatcher(
      HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
      IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
        "unittest", new Abortable() {
          boolean aborted = false;

          @Override
          public void abort(String why, Throwable e) {
            aborted = true;
            throw new RuntimeException("Fatal ZK error, why=" + why, e);
          }

          @Override
          public boolean isAborted() {
            return aborted;
          }
        });
    return zkw;
  }
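
  /**
   * Creates an assignment znode for the region and transitions it to the
   * OPENED state.
   * @param TEST_UTIL the testing utility to take the configuration from
   * @param region the region whose znode to create
   * @param serverName the server the region is nominally opened on
   * @return the ZooKeeperWatcher used to create the znode
   * @throws IOException
   * @throws ZooKeeperConnectionException
   * @throws KeeperException
   * @throws NodeExistsException
   */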
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw, region
        .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
        version);
    return zkw;
  }

  public static void assertKVListsEqual(String additionalMsg,
      final List<? extends Cell> expected,
      final List<? extends Cell> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i;
    for (i = 0; i < minLen
        && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
        ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
          "Expected and actual KV arrays differ at position " + i + ": " +
          safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
          safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }

  private static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }

  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
        + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }
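
  /** Creates a random table with the given parameters */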
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
        " regions, " + numFlushes + " storefiles per region, " +
        numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
        "\n");

    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableName, cfBytes,
        maxVersions,
        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
        numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }

    return table;
  }

  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;
  private static Random random = new Random();
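
  /**
   * Returns a random port. These ports cannot be registered with IANA and are
   * intended for dynamic allocation.
   */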
  public static int randomPort() {
    return MIN_RANDOM_PORT
        + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
  }
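
  /**
   * Returns a random free port and marks that port as taken. Not thread-safe;
   * expected to be called from single-threaded test setup code.
   */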
  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      if (takenRandomPorts.contains(port)) {
        // already handed out; reset so the loop picks another port
        port = 0;
        continue;
      }
      takenRandomPorts.add(port);

      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        port = 0;
      }
    } while (port == 0);
    return port;
  }

  public static String randomMultiCastAddress() {
    return "226.1.1." + random.nextInt(254);
  }
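
  /**
   * Waits until a TCP connection can be made to the given server, retrying
   * for up to ten seconds before rethrowing the last connection failure.
   */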
  public static void waitForHostPort(String host, int port)
      throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }
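
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table should have
   */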
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    return createPreSplitLoadTestTable(conf, tableName,
        columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER,
        Durability.USE_DEFAULT);
  }

  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer,
      Durability durability)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.setDurability(durability);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
  }

  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
  }

  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd, int numRegionsPerServer) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Create the table pre-split into
      // (live region servers * regions per server) regions.
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * numRegionsPerServer;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
          "pre-splitting table into " + totalNumberOfRegions + " regions " +
          "(regions per server: " + numRegionsPerServer + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
          totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + desc.getTableName() +
          " already exists, continuing");
    } finally {
      admin.close();
    }
    return totalNumberOfRegions;
  }

  public static int getMetaRSPort(Configuration conf) throws IOException {
    HTable table = new HTable(conf, TableName.META_TABLE_NAME);
    HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
    table.close();
    return hloc.getPort();
  }
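
  /**
   * Due to an async racing issue, a region may not be in the online region
   * list of a region server yet, even after the assignment znode is deleted
   * and the new assignment is recorded in the master; hence the retry loop.
   */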
  public void assertRegionOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) return;
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
        + " on server " + server);
  }
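
  /**
   * Check to make sure the region is open on the specified
   * region server, but not on any other one.
   */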
  public void assertRegionOnlyOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) {
        List<JVMClusterUtil.RegionServerThread> rsThreads =
            getHBaseCluster().getLiveRegionServerThreads();
        for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) {
          HRegionServer rs = rsThread.getRegionServer();
          if (server.equals(rs.getServerName())) {
            continue;
          }
          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
          for (HRegion r : hrs) {
            assertTrue("Region should not be double assigned",
                r.getRegionId() != hri.getRegionId());
          }
        }
        return;
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
        + " on server " + server);
  }

  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.addFamily(hcd);
    HRegionInfo info =
        new HRegionInfo(TableName.valueOf(tableName), null, null, false);
    HRegion region =
        HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }

  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, predicate);
  }

  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, interval, predicate);
  }

  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, long interval,
      boolean failIfTimeout, Predicate<E> predicate) throws E {
    return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
  }

  /**
   * Waits until no regions are in transition.
   * @param timeout How long to wait.
   * @throws Exception
   */
  public void waitUntilNoRegionsInTransition(
      final long timeout) throws Exception {
    waitFor(timeout, predicateNoRegionsInTransition());
  }

  /**
   * Returns a {@link Predicate} for checking that there are no regions in transition in master.
   */
  public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
            .getAssignmentManager().getRegionStates();
        return !regionStates.isRegionsInTransition();
      }
    };
  }

  /**
   * Returns a {@link Predicate} for checking that the given table is enabled.
   */
  public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return getHBaseAdmin().isTableEnabled(tableName);
      }
    };
  }
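
  /**
   * Wait until the given labels are ready in the VisibilityLabelsCache.
   * @param timeoutMillis how long to wait
   * @param labels the labels to wait for
   */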
  public void waitLabelAvailable(long timeoutMillis, final String... labels) {
    final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
    waitFor(timeoutMillis, new Waiter.Predicate<RuntimeException>() {
      @Override
      public boolean evaluate() {
        for (String label : labels) {
          if (labelsCache.getLabelOrdinal(label) == 0) {
            return false;
          }
        }
        return true;
      }
    });
  }

  /**
   * Create a set of column descriptors with the combination of compression,
   * encoding, bloom codecs available.
   * @return the list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

  /**
   * Create a set of column descriptors with the combination of compression,
   * encoding, bloom codecs available.
   * @param prefix family names prefix
   * @return the list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
    List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
    long familyId = 0;
    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
        for (BloomType bloomType : BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          HColumnDescriptor htd = new HColumnDescriptor(name);
          htd.setCompressionType(compressionType);
          htd.setDataBlockEncoding(encodingType);
          htd.setBloomFilterType(bloomType);
          htds.add(htd);
          familyId++;
        }
      }
    }
    return htds;
  }
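
  /**
   * Get supported compression algorithms.
   * @return supported compression algorithms.
   */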
  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // this algo is not available
      }
    }
    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
  }
}