/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
import org.apache.hadoop.hbase.tool.Canary;
import org.apache.hadoop.hbase.tool.Canary.RegionTask.TaskType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;
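
/**
 * Facility for testing HBase. Manages the lifecycle of the mini ZooKeeper,
 * HDFS, MapReduce and HBase clusters that tests run against, and provides
 * helpers for creating tables, loading data and cleaning up the shared test
 * directories. Manages at most one cluster at a time. Typical use:
 *
 * <pre>
 * HBaseTestingUtility util = new HBaseTestingUtility();
 * util.startMiniCluster();
 * // ... run assertions against the running cluster ...
 * util.shutdownMiniCluster();
 * </pre>
 */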
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HBaseTestingUtility extends HBaseCommonTestingUtility {
  private MiniZooKeeperCluster zkCluster = null;

  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
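
  /**
   * Default number of regions per regionserver used by tests that pre-split
   * tables; the {@code hbase.test.regions-per-server} key
   * ({@link #REGIONS_PER_SERVER_KEY}) overrides it.
   */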
  public static final int DEFAULT_REGIONS_PER_SERVER = 5;

  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  private boolean miniClusterRunning;

  private String hadoopLogDir;

  private File clusterTestDir = null;

  private Path dataTestDirOnTestFS = null;

  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  private static String FS_URI;

  private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();

  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
      Arrays.asList(new Object[][] {
        { Compression.Algorithm.NONE },
        { Compression.Algorithm.GZ }
      });

  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
      Arrays.asList(new Object[][] {
        { Boolean.FALSE },
        { Boolean.TRUE }
      });

  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();

  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
      Compression.Algorithm.NONE, Compression.Algorithm.GZ
  };
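
  /**
   * Create all combinations of Bloom filter types and the supported
   * compression algorithms for parameterized tests.
   */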
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
        HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }
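
  /**
   * Create all four combinations of the memstore-timestamp and tag-usage
   * flags for parameterized tests.
   */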
  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    super(conf);
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }
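
  /**
   * Create an HBaseTestingUtility whose temporary files live under the local
   * test data dir; use this when the test does not need a mini DFS cluster.
   */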
  public static HBaseTestingUtility createLocalHTU() {
    Configuration c = HBaseConfiguration.create();
    return createLocalHTU(c);
  }
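
  /**
   * Create an HBaseTestingUtility from the given configuration, pointing
   * {@code hbase.rootdir} at the local test data dir.
   */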
  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    HBaseTestingUtility htu = new HBaseTestingUtility(c);
    String dataTestDir = htu.getDataTestDir().toString();
    htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
    return htu;
  }
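
  /**
   * Set the number of retries for the HDFS client; with zero retries the
   * underlying DFSClient is additionally rewired to fail fast.
   */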
  @Deprecated
  public void setHDFSClientRetry(final int retries) {
    this.conf.setInt("hdfs.client.retries.number", retries);
    if (0 == retries) {
      makeDFSClientNonRetrying();
    }
  }

  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }
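
  /**
   * Home the test data under the base test dir and point the
   * {@code hadoop.log.dir}, {@code hadoop.tmp.dir} and
   * {@code mapred.local.dir} properties beneath it, so everything a test
   * writes lands in one place.
   */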
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
        "hadoop.log.dir",
        testPath, "hadoop-log-dir");

    createSubDirAndSystemProperty(
        "hadoop.tmp.dir",
        testPath, "hadoop-tmp-dir");

    createSubDir(
        "mapred.local.dir",
        testPath, "mapred-local-dir");

    return testPath;
  }

  private void createSubDirAndSystemProperty(
      String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
          sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
            propertyName + " property value differs in configuration and system: " +
            "Configuration=" + confValue + " while System=" + sysValue +
            " Overriding the configuration value with the system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

  public HTableDescriptor getMetaTableDescriptor() {
    try {
      return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
    } catch (IOException e) {
      throw new RuntimeException("Unable to create META table descriptor", e);
    }
  }

  Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      return;
    }

    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();

    boolean b = deleteOnExit();
    if (b) clusterTestDir.deleteOnExit();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
  }
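
  /**
   * Returns a Path in the test filesystem, obtained from
   * {@link #getTestFileSystem()}, under which temporary test data is
   * written; it is on the mini DFS cluster if one is running, otherwise on
   * the local filesystem.
   */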
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test on test fs dir already setup in "
          + dataTestDirOnTestFS.toString());
      return;
    }

    FileSystem fs = getTestFileSystem();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      File dataTestDir = new File(getDataTestDir().toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
      dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
    } else {
      Path base = getBaseTestDirOnTestFS();
      String randomStr = UUID.randomUUID().toString();
      dataTestDirOnTestFS = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
    }
  }

  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret) {
      dataTestDirOnTestFS = null;
    }
    return ret;
  }

  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }
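
  /**
   * Start a mini dfs cluster.
   * @param servers how many datanodes to start
   * @return the mini dfs cluster created
   * @see #shutdownMiniDFSCluster()
   */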
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
      throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    try {
      Method m = Class.forName("org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream")
          .getMethod("setShouldSkipFsyncForTesting", new Class<?>[] { boolean.class });
      m.invoke(null, new Object[] { true });
    } catch (ClassNotFoundException e) {
      LOG.info("EditLogFileOutputStream not found");
    }

    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class)
        .setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class)
        .setLevel(org.apache.log4j.Level.ERROR);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, null, hosts, null);

    setFs();

    this.dfsCluster.waitClusterUp();

    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  private void setFs() throws IOException {
    if (this.dfsCluster == null) {
      LOG.info("Skipping setting fs because dfsCluster is null");
      return;
    }
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, racks, hosts, null);

    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    this.dfsCluster.waitClusterUp();

    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }

  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapred.local.dir");
    createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
        new Path(root, "mapred-output-dir").toString());
    conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
        new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
  }
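
  /**
   * Whether short-circuit reads are enabled for tests: checks the
   * {@code hbase.tests.use.shortcircuit.reads} system property first, then
   * the configuration, defaulting to false.
   */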
  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }

  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      conf.set("dfs.block.local-path-access.user", curUser);
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }
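
  /**
   * Start a mini ZooKeeper cluster with a single server.
   * @return the zk cluster started
   * @see #shutdownMiniZKCluster()
   */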
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
      throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
      throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0) {
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
        Integer.toString(clientPort));
    return this.zkCluster;
  }

  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }
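
  /**
   * Start up a minicluster of hbase, dfs and zookeeper with one master and
   * one regionserver.
   * @return the mini hbase cluster created
   * @see #shutdownMiniCluster()
   */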
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  public MiniHBaseCluster startMiniCluster(final int numSlaves)
      throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    return startMiniCluster(
        numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    if (this.dfsCluster == null) {
      dfsCluster = startMiniDFSCluster(numDataNodes, dataNodeHosts);
    }

    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException {
    return startMiniHBaseCluster(numMasters, numSlaves, null, null);
  }
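
  /**
   * Starts up the mini hbase cluster only; dfs and zookeeper must already be
   * running. Useful for stepped startup of clusters.
   * @return reference to the mini hbase cluster
   */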
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
      final int numSlaves, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws IOException, InterruptedException {
    createRootDir();

    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster =
        new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);

    HTable t = new HTable(c, TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin();
    LOG.info("Minicluster is up");

    setHBaseFsTmpDir();

    return (MiniHBaseCluster) this.hbaseCluster;
  }

  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster) this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
        MiniHBaseCluster.class.getName());
  }
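
  /**
   * Stops the mini hbase, zookeeper and dfs clusters and cleans up the test
   * directories.
   * @see #startMiniCluster(int)
   */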
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }

  @Override
  public boolean cleanupTestDir() throws IOException {
    boolean ret = super.cleanupTestDir();
    if (deleteDir(this.clusterTestDir)) {
      this.clusterTestDir = null;
      return ret;
    }
    return false;
  }

  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }

  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }
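
  /**
   * Creates the hbase rootdir under the user's home directory on the test
   * filesystem and writes the hbase version file into it; done for you as
   * part of mini cluster startup.
   * @return fully qualified path to the hbase root dir
   */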
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  private void setHBaseFsTmpDir() throws IOException {
    String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
    if (hbaseFsTmpDirInString == null) {
      this.conf.set("hbase.fs.tmp.dir", getDataTestDirOnTestFS("hbase-staging").toString());
      LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
    } else {
      LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
    }
  }

  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }
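
  /**
   * Create a table with a single column family and wait until all its
   * regions are assigned.
   * @return an HTable for the new table
   */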
  public HTable createTable(String tableName, String family)
      throws IOException {
    return createTable(TableName.valueOf(tableName), new String[] { family });
  }

  public HTable createTable(byte[] tableName, byte[] family)
      throws IOException {
    return createTable(TableName.valueOf(tableName), new byte[][] { family });
  }

  public HTable createTable(TableName tableName, String[] families)
      throws IOException {
    List<byte[]> fams = new ArrayList<byte[]>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  public HTable createTable(TableName tableName, byte[] family)
      throws IOException {
    return createTable(tableName, new byte[][] { family });
  }

  public HTable createTable(byte[] tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  public HTable createTable(TableName tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  public HTable createTable(String tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
      throws IOException {
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setBloomFilterType(BloomType.NONE);
      htd.addFamily(hcd);
    }
    getHBaseAdmin().createTable(htd);
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(c, htd.getTableName());
  }

  public HTable createTable(HTableDescriptor htd, byte[][] splitRows)
      throws IOException {
    getHBaseAdmin().createTable(htd, splitRows);
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(getConfiguration(), htd.getTableName());
  }

  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    return createTable(new HTableDescriptor(tableName), families, c);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setBloomFilterType(BloomType.NONE);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  public HTable createTable(TableName tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
      throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    return createTable(TableName.valueOf(tableName),
        families, numVersions, blockSize);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
      throws IOException {
    return createTable(TableName.valueOf(tableName), family, splitRows);
  }

  public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    desc.addFamily(hcd);
    getHBaseAdmin().createTable(desc, splitRows);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, splitRows);
    waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
    return new HTable(getConfiguration(), tableName);
  }
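
  /**
   * Create a table with the given family, pre-split into the requested
   * number of regions over the key range {@code aaaaa..zzzzz}.
   * @return an HTable for the new table
   */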
  public HTable createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
      throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);

    return createTable(tableName, family, splitKeys);
  }

  public void deleteTable(String tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  public void deleteTable(byte[] tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  public void deleteTable(TableName tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }

  public final static byte[] fam1 = Bytes.toBytes("colfamily11");
  public final static byte[] fam2 = Bytes.toBytes("colfamily21");
  public final static byte[] fam3 = Bytes.toBytes("colfamily31");
  public static final byte[][] COLUMNS = { fam1, fam2, fam3 };
  private static final int MAXVERSIONS = 3;

  public static final char FIRST_CHAR = 'a';
  public static final char LAST_CHAR = 'z';
  public static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR };
  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
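
  /**
   * Create a table descriptor with the three default column families
   * ({@code colfamily11}, {@code colfamily21}, {@code colfamily31}),
   * applying the given min/max versions, TTL and keep-deleted-cells setting
   * to each of them.
   */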
  public HTableDescriptor createTableDescriptor(final String name,
      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
    for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
      htd.addFamily(new HColumnDescriptor(cfName)
          .setMinVersions(minVersions)
          .setMaxVersions(versions)
          .setKeepDeletedCells(keepDeleted)
          .setBlockCacheEnabled(false)
          .setTimeToLive(ttl)
      );
    }
    return htd;
  }

  public HTableDescriptor createTableDescriptor(final String name) {
    return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
  }

  public HRegion createLocalHRegion(HTableDescriptor desc, byte[] startKey,
      byte[] endKey)
      throws IOException {
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
    return createLocalHRegion(hri, desc);
  }

  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
  }

  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog)
      throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
  }

  public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
      String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
      HLog hlog, byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.setReadOnly(isReadOnly);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setMaxVersions(Integer.MAX_VALUE);
      htd.addFamily(hcd);
    }
    htd.setDurability(durability);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    return createLocalHRegion(info, htd, hlog);
  }

  public HTable truncateTable(byte[] tableName) throws IOException {
    return truncateTable(TableName.valueOf(tableName));
  }

  public HTable truncateTable(TableName tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan = table.getScanner(scan);
    resScan.close();
    return table;
  }
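
  /**
   * Load the given table with rows keyed by every 3-character string from
   * 'aaa' to 'zzz' in the given family.
   * @return the number of rows written
   */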
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    return loadTable(t, new byte[][] { f });
  }

  public int loadTable(final HTable t, final byte[] f, boolean writeToWAL) throws IOException {
    return loadTable(t, new byte[][] { f }, null, writeToWAL);
  }

  public int loadTable(final HTable t, final byte[][] f) throws IOException {
    return loadTable(t, f, null);
  }

  public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
    return loadTable(t, f, value, true);
  }

  public int loadTable(final HTable t, final byte[][] f, byte[] value, boolean writeToWAL)
      throws IOException {
    t.setAutoFlush(false);
    int rowCount = 0;
    for (byte[] row : HBaseTestingUtility.ROWS) {
      Put put = new Put(row);
      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
      for (int i = 0; i < f.length; i++) {
        put.add(f[i], null, value != null ? value : row);
      }
      t.put(put);
      rowCount++;
    }
    t.flushCommits();
    return rowCount;
  }

  public static class SeenRowTracker {
    int dim = 'z' - 'a' + 1;
    int[][][] seenRows = new int[dim][dim][dim];
    byte[] startRow;
    byte[] stopRow;

    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
      this.startRow = startRow;
      this.stopRow = stopRow;
    }

    void reset() {
      for (byte[] row : ROWS) {
        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
      }
    }

    int i(byte b) {
      return b - 'a';
    }

    public void addRow(byte[] row) {
      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
    }

    public void validate() {
      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
            int count = seenRows[i(b1)][i(b2)][i(b3)];
            int expectedCount = 0;
            if (Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0
                && Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0) {
              expectedCount = 1;
            }
            if (count != expectedCount) {
              String row = new String(new byte[] { b1, b2, b3 });
              throw new RuntimeException("Row:" + row + " has a seen count of " + count +
                  " instead of " + expectedCount);
            }
          }
        }
      }
    }
  }

  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
    return loadRegion(r, f, false);
  }
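
  /**
   * Load the given region with rows keyed by every 3-character string from
   * 'aaa' to 'zzz' in the given family, optionally flushing after each first
   * letter, and retrying with backoff if the region is too busy.
   * @return the number of rows written
   */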
  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
      throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.setDurability(Durability.SKIP_WAL);
          put.add(f, null, k);
          if (r.getLog() == null) put.setDurability(Durability.SKIP_WAL);

          int preRowCount = rowCount;
          int pause = 10;
          int maxPause = 1000;
          while (rowCount == preRowCount) {
            try {
              r.put(put);
              rowCount++;
            } catch (RegionTooBusyException e) {
              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
              Threads.sleep(pause);
            }
          }
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }

  public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow)
      throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Put put = new Put(data);
      put.add(f, null, data);
      t.put(put);
    }
  }
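
  /**
   * Count the number of rows returned by a full scan of the given table.
   */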
  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  public int countRows(final HTable table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // Return the computed MD5 value; MessageDigest#toString() does not
    // include the hash, so comparing its output would be meaningless.
    return Bytes.toStringBinary(digest.digest());
  }

  public int createMultiRegions(HTable table, byte[] columnFamily)
      throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily);
  }

  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3];
  static {
    int i = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          ROWS[i][0] = b1;
          ROWS[i][1] = b2;
          ROWS[i][2] = b3;
          i++;
        }
      }
    }
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily)
      throws IOException {
    return createMultiRegions(c, table, columnFamily, KEYS);
  }

  void makeDFSClientNonRetrying() {
    if (null == this.dfsCluster) {
      LOG.debug("dfsCluster has not started, can't make client non-retrying.");
      return;
    }
    try {
      final FileSystem filesystem = this.dfsCluster.getFileSystem();
      if (!(filesystem instanceof DistributedFileSystem)) {
        LOG.debug("dfsCluster is not backed by a DistributedFileSystem, " +
            "can't make client non-retrying.");
        return;
      }

      final DistributedFileSystem fs = (DistributedFileSystem) filesystem;

      final Field dfsField = fs.getClass().getDeclaredField("dfs");
      dfsField.setAccessible(true);
      final Class<?> dfsClazz = dfsField.getType();
      final DFSClient dfs = DFSClient.class.cast(dfsField.get(fs));

      final Method createRPCNamenode = dfsClazz.getDeclaredMethod("createRPCNamenode",
          InetSocketAddress.class, Configuration.class, UserGroupInformation.class);
      createRPCNamenode.setAccessible(true);

      final Field nnField = dfsClazz.getDeclaredField("nnAddress");
      nnField.setAccessible(true);
      final InetSocketAddress nnAddress = InetSocketAddress.class.cast(nnField.get(dfs));
      final Field confField = dfsClazz.getDeclaredField("conf");
      confField.setAccessible(true);
      final Configuration conf = Configuration.class.cast(confField.get(dfs));
      final Field ugiField = dfsClazz.getDeclaredField("ugi");
      ugiField.setAccessible(true);
      final UserGroupInformation ugi = UserGroupInformation.class.cast(ugiField.get(dfs));

      final Field namenodeField = dfsClazz.getDeclaredField("namenode");
      namenodeField.setAccessible(true);
      namenodeField.set(dfs, createRPCNamenode.invoke(null, nnAddress, conf, ugi));
      LOG.debug("Set DFSClient namenode to bare RPC");
    } catch (Exception exception) {
      LOG.info("Could not alter DFSClient to be non-retrying.", exception);
    }
  }

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] family, int numRegions)
      throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] regionStartKeys = new byte[splitKeys.length + 1][];
    System.arraycopy(splitKeys, 0, regionStartKeys, 1, splitKeys.length);
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }

  @SuppressWarnings("deprecation")
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte[][] startKeys)
      throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, TableName.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }

    List<byte[]> rows = getMetaTableRows(htd.getTableName());
    String regionToDeleteInFS = table
        .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
        .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getName(),
          startKeys[i], startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
      count++;
    }

    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
          Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }

    Path tableDir = new Path(getDefaultRootDirPath().toString()
        + System.getProperty("file.separator") + htd.getTableName()
        + System.getProperty("file.separator") + regionToDeleteInFS);
    FileSystem.get(c).delete(tableDir);

    HConnection conn = table.getConnection();
    conn.clearRegionCache();

    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for (HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }

  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final HTableDescriptor htd, byte[][] startKeys)
      throws IOException {
    HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
          startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

  public List<byte[]> getMetaTableRows() throws IOException {
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      HRegionInfo info = HRegionInfo.getHRegionInfo(result);
      if (info == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        continue;
      }

      if (info.getTable().equals(tableName)) {
        LOG.info("getMetaTableRows: row -> " +
            Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
      throws IOException, InterruptedException {
    return getRSForFirstRegionInTable(TableName.valueOf(tableName));
  }

  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
      throws IOException, InterruptedException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + metaRows.size() + " rows for table " +
        tableName);
    byte[] firstrow = metaRows.get(0);
    LOG.debug("FirstRow=" + Bytes.toString(firstrow));
    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
        HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
        HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // The client pause is expressed in milliseconds, so the retry counter
    // must tick in milliseconds too.
    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MILLISECONDS);
    while (retrier.shouldRetry()) {
      int index = getMiniHBaseCluster().getServerWith(firstrow);
      if (index != -1) {
        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
      }
      retrier.sleepUntilNextRetry();
    }
    return null;
  }
2267
2268
2269
2270
2271
2272
2273
  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
    return mrCluster;
  }

  /**
   * Hadoop's TaskLog reads its log directory into a static final LOG_DIR field once,
   * so changing the hadoop.log.dir system property later has no effect. Force the
   * field to point at our per-test log directory via reflection instead.
   */
  private void forceChangeTaskLogDir() {
    Field logDirField;
    try {
      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
      logDirField.setAccessible(true);

      // Clear the final modifier before overwriting the static field.
      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);

      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    } catch (SecurityException e) {
      throw new RuntimeException(e);
    } catch (NoSuchFieldException e) {
      throw new RuntimeException(e);
    } catch (IllegalArgumentException e) {
      throw new RuntimeException(e);
    } catch (IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }

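  /**
   * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a
   * different filesystem.
   *
   * @param servers The number of <code>TaskTracker</code>'s to start.
   * @throws IOException When starting the cluster fails.
   */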
  private void startMiniMapReduceCluster(final int servers) throws IOException {
    if (mrCluster != null) {
      throw new IllegalStateException("MiniMRCluster is already running");
    }
    LOG.info("Starting mini mapreduce cluster...");
    setupClusterTestDir();
    createDirsAndSetProperties();

    forceChangeTaskLogDir();

    // Allow the node manager generous virtual-memory headroom over physical memory;
    // otherwise tasks in the mini cluster can be killed for exceeding vmem limits.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    // Disable speculative execution: duplicate task attempts make test output
    // nondeterministic.
    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);

    // Allow the user to override the FS URI for this map-reduce cluster to use.
    mrCluster = new MiniMRCluster(servers,
        FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
        null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
    }

    // Hadoop MiniMR overwrites this, while it should not.
    jobConf.set("mapred.local.dir",
        conf.get("mapred.local.dir"));
    LOG.info("Mini mapreduce cluster started");

    // The mini cluster picks its own addresses in its private JobConf. Copy the job
    // tracker, resource manager, scheduler and job history addresses back into our
    // conf so that clients and submitted jobs can find the mini cluster.
    conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));

    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = jobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
    if (historyAddress != null) {
      conf.set("mapreduce.jobhistory.address", historyAddress);
    }
    String schedulerAddress =
        jobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }

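  /**
   * Stops the previously started <code>MiniMRCluster</code>, if any.
   */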
  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }
    // Restore configuration to point to local jobtracker.
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }

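  /**
   * Create a stubbed-out RegionServerServices, mainly for getting at the test filesystem.
   */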
  public RegionServerServices createMockRegionServerService() throws IOException {
    return createMockRegionServerService((ServerName) null);
  }

  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc)
      throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
    rss.setFileSystem(getTestFileSystem());
    rss.setRpcServer(rpc);
    return rss;
  }

  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
    rss.setFileSystem(getTestFileSystem());
    return rss;
  }

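  /**
   * Switches the logger for the given class to DEBUG level.
   *
   * @param clazz The class for which to switch to debug logging.
   */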
  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }

  /**
   * Expire the Master's session.
   */
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }

  /**
   * Expire a region server's session, and lower the minimum number of region servers
   * the master waits on so the cluster does not block on the expired server rejoining.
   * @param index which region server
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    // Decrement the count in this.conf, for newly spawned masters.
    decrementMinRegionServerCount(getConfiguration());

    // Each master thread keeps its own copy of the configuration, so adjust those too.
    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
          Math.max(currentCount - 1, 1));
    }
  }

  public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
  }

  /**
   * @deprecated Use {@link #expireSession(ZooKeeperWatcher, boolean)} instead; the
   *   {@code server} argument is unused.
   */
  @Deprecated
  public void expireSession(ZooKeeperWatcher nodeZK, Server server)
      throws Exception {
    expireSession(nodeZK, false);
  }

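  /**
   * Expire a ZooKeeper session as recommended in the ZooKeeper documentation: connect a
   * second client using the same session id and password, then close it.
   *
   * @param nodeZK watcher whose session is to be expired
   * @param checkStatus set to true to verify afterwards that hbase:meta can still be opened
   */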
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
      throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Expiry is asynchronous, so open a second "monitor" connection on the same session.
    // ZooKeeper does not close a connection before delivering its pending events, so once
    // we can close the monitor we know the expiration event has been sent to watchers on
    // this machine.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
        1000, new org.apache.zookeeper.Watcher() {
          @Override
          public void process(WatchedEvent watchedEvent) {
            LOG.info("Monitor ZKW received event=" + watchedEvent);
          }
        }, sessionID, password);

    // Connecting with the same session id and password and then closing kills the session.
    ZooKeeper newZK = new ZooKeeper(quorumServers,
        1000, EmptyWatcher.instance, sessionID, password);

    // Ensure we are connected before closing; otherwise the close-session event could be
    // swallowed before we ever leave the CONNECTING state.
    long start = System.currentTimeMillis();
    while (newZK.getState() != States.CONNECTED
        && System.currentTimeMillis() - start < 1000) {
      Thread.sleep(1);
    }
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // Close the monitor connection, waiting for the expiration event to be delivered.
    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
    }
  }

  /**
   * Get the Mini HBase cluster.
   *
   * @return hbase cluster
   * @see #getHBaseClusterInterface()
   */
  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

  /**
   * Returns the HBaseCluster instance.
   * <p>
   * Returned object can be any of the subclasses of HBaseCluster, and the tests referring
   * to this should not assume that the cluster is a mini cluster or a distributed one. If
   * the test only works with a mini cluster, then use {@link #getHBaseCluster()} instead.
   */
  public HBaseCluster getHBaseClusterInterface() {
    // implementation note: we should rename this method as #getHBaseCluster(),
    // but that would require refactoring many existing callers.
    return hbaseCluster;
  }

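  /**
   * Returns an HBaseAdmin instance shared by all users of this testing utility, lazily
   * created. Closing it has no effect; the underlying admin is closed when the cluster
   * shuts down.
   *
   * @return An HBaseAdmin instance backed by this utility's configuration.
   */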
  public synchronized HBaseAdmin getHBaseAdmin()
      throws IOException {
    if (hbaseAdmin == null) {
      hbaseAdmin = new HBaseAdminForTests(getConfiguration());
    }
    return hbaseAdmin;
  }

  private HBaseAdminForTests hbaseAdmin = null;

  private static class HBaseAdminForTests extends HBaseAdmin {
    public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
        ZooKeeperConnectionException, IOException {
      super(c);
    }

    @Override
    public synchronized void close() throws IOException {
      // Intentionally a no-op: the shared admin is closed via close0() on cluster shutdown.
      LOG.warn("close() called on HBaseAdmin instance returned from " +
          "HBaseTestingUtility.getHBaseAdmin()");
    }

    private synchronized void close0() throws IOException {
      super.close();
    }
  }

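  /**
   * Returns a ZooKeeperWatcher instance shared by all users of this testing utility,
   * lazily created. Its Abortable throws a RuntimeException on abort rather than
   * terminating the JVM.
   */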
  public synchronized ZooKeeperWatcher getZooKeeperWatcher()
      throws IOException {
    if (zooKeeperWatcher == null) {
      zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
          new Abortable() {
            @Override public void abort(String why, Throwable e) {
              throw new RuntimeException("Unexpected abort in HBaseTestingUtility:" + why, e);
            }
            @Override public boolean isAborted() { return false; }
          });
    }
    return zooKeeperWatcher;
  }

  private ZooKeeperWatcher zooKeeperWatcher;

  /**
   * Closes the named region.
   *
   * @param regionName The region to close.
   */
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  /**
   * Closes the named region.
   *
   * @param regionName The region to close.
   */
  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }

  /**
   * Closes the region containing the given row.
   *
   * @param row The row to find the containing region.
   * @param table The table to find the region.
   */
  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  /**
   * Closes the region containing the given row.
   *
   * @param row The row to find the containing region.
   * @param table The table to find the region.
   */
  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

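  /**
   * Retrieves a splittable region randomly from the given table.
   *
   * @param tableName name of table
   * @param maxAttempts maximum number of attempts, unlimited for value of -1
   * @return the HRegion chosen, null if none was found within the limit of maxAttempts
   */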
  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
    int regCount = regions.size();
    Set<Integer> attempted = new HashSet<Integer>();
    int idx;
    int attempts = 0;
    do {
      regions = getHBaseCluster().getRegions(tableName);
      if (regCount != regions.size()) {
        // if there was region movement, clear the attempted set
        attempted.clear();
      }
      regCount = regions.size();
      if (regCount > 0) {
        idx = random.nextInt(regCount);
        // if we have just tried this region, there is no need to try again
        if (attempted.contains(idx)) {
          // count skipped picks too, so a finite maxAttempts always terminates
          attempts++;
          continue;
        }
        try {
          regions.get(idx).checkSplit();
          return regions.get(idx);
        } catch (Exception ex) {
          LOG.warn("Caught exception", ex);
          attempted.add(idx);
        }
      }
      attempts++;
    } while (maxAttempts == -1 || attempts < maxAttempts);
    return null;
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
    setDFSCluster(cluster, true);
  }

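  /**
   * Set the MiniDFSCluster.
   *
   * @param cluster cluster to use
   * @param requireDown require that any existing cluster not be "up" before it is replaced
   * @throws IllegalStateException if the existing cluster is up when it is required to be down
   * @throws IOException if the FileSystem could not be set from the passed dfs cluster
   */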
  public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
      throws IllegalStateException, IOException {
    if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
      throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
    this.setFs();
  }

  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }

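  /**
   * Wait until all regions in a table have been assigned, using a default 30s timeout.
   *
   * @param table Table to wait on.
   */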
  public void waitTableAvailable(byte[] table)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, 30000);
  }

  public void waitTableAvailable(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableAvailable(admin, table, 30000);
  }

  /**
   * Wait until all regions in a table have been assigned.
   *
   * @param table Table to wait on.
   * @param timeoutMillis Timeout.
   */
  public void waitTableAvailable(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!admin.isTableAvailable(table)) {
      assertTrue("Timed out waiting for table to become available " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

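  /**
   * Waits for a table to be 'enabled', that is, available and all of its regions readable,
   * using a default 30s timeout.
   *
   * @param table Table to wait on.
   */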
  public void waitTableEnabled(byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, 30000);
  }

  public void waitTableEnabled(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(admin, table, 30000);
  }

  /**
   * Waits for a table to be 'enabled', that is, available and all of its regions readable.
   *
   * @param table Table to wait on.
   * @param timeoutMillis Time to wait on it being marked enabled.
   */
  public void waitTableEnabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    waitTableAvailable(admin, table, timeoutMillis);
    while (!admin.isTableEnabled(table)) {
      assertTrue("Timed out waiting for table to become available and enabled " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }

    // Finally, make sure every region of the table is actually readable by running a
    // Canary read task against it.
    HConnection connection = HConnectionManager.createConnection(conf);
    try {
      Canary.sniff(connection, TableName.valueOf(table), TaskType.READ);
    } catch (Exception e) {
      throw new IOException(e);
    } finally {
      connection.close();
    }
  }

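  /**
   * Waits for a table to be 'disabled', using a default 30s timeout.
   *
   * @param table Table to wait on.
   */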
  public void waitTableDisabled(byte[] table)
      throws InterruptedException, IOException {
    waitTableDisabled(getHBaseAdmin(), table, 30000);
  }

  public void waitTableDisabled(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableDisabled(admin, table, 30000);
  }

  public void waitTableDisabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableDisabled(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableDisabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    TableName tableName = TableName.valueOf(table);
    long startWait = System.currentTimeMillis();
    while (!admin.isTableDisabled(tableName)) {
      assertTrue("Timed out waiting for table to become disabled " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

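  /**
   * Make sure that at least the specified number of region servers are running.
   *
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   */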
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }

    return startedServer;
  }

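  /**
   * Make sure that at least the specified number of region servers are running. We don't
   * count the ones that are currently stopping or are stopped.
   *
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   */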
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    // Count only the servers that are neither stopped nor stopping.
    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
        getMiniHBaseCluster().getRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:" + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }

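  /**
   * Returns a User suitable for acting as a different user against a DistributedFileSystem:
   * the current user's name with the given suffix appended, placed in group "supergroup".
   * On a non-distributed filesystem, simply returns the current user.
   *
   * @param c configuration used to create the test user
   * @param differentiatingSuffix suffix to differentiate this user from others
   */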
  public static User getDifferentUser(final Configuration c,
      final String differentiatingSuffix)
      throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    // Else we are on a distributed filesystem: create a new user with a differentiated
    // name so each daemon gets its own filesystem instance.
    String username = User.getCurrent().getName() +
        differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
        new String[]{"supergroup"});
    return user;
  }

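  /**
   * Set maxRecoveryErrorCount on a DFSClient DFSOutputStream via reflection, to keep lease
   * recovery in tests from lingering. A no-op (logged) if the stream is not a
   * DFSOutputStream or the field is absent.
   */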
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?>[] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
                stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }

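  /**
   * Wait until all regions for a table are assigned, using a default 60s timeout.
   *
   * @param tableName the table to wait on.
   */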
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }

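  /**
   * Waits until all regions of a table are assigned: first by scanning hbase:meta for a
   * server entry on every region of the table, then by checking the master's in-memory
   * assignment state.
   *
   * @param tableName the table to wait on.
   * @param timeout timeout, in milliseconds
   */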
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
      throws IOException {
    final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
    try {
      waitFor(timeout, 200, true, new Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          boolean allRegionsAssigned = true;
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          ResultScanner s = meta.getScanner(scan);
          try {
            Result r;
            while ((r = s.next()) != null) {
              byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
              HRegionInfo info = HRegionInfo.parseFromOrNull(b);
              if (info != null && info.getTable().equals(tableName)) {
                b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                allRegionsAssigned &= (b != null);
              }
            }
          } finally {
            s.close();
          }
          return allRegionsAssigned;
        }
      });
    } finally {
      meta.close();
    }

    // All regions are in hbase:meta; also make sure the master's in-memory state knows of
    // the assignments before returning, since it can lag behind meta.
    HMaster master = getHBaseCluster().getMaster();
    final RegionStates states = master.getAssignmentManager().getRegionStates();
    waitFor(timeout, 200, new Predicate<IOException>() {
      @Override
      public boolean evaluate() throws IOException {
        List<HRegionInfo> hris = states.getRegionsOfTable(tableName);
        return hris != null && !hris.isEmpty();
      }
    });
  }

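  /**
   * Do a small get/scan against one store. This is required because store has no actual
   * methods of querying itself, and relies on StoreScanner.
   */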
  public static List<Cell> getFromStoreFile(HStore store,
      Get get) throws IOException {
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()),
        // readpoint 0
        0);

    List<Cell> result = new ArrayList<Cell>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // Verify that we are on the row we want.
      Cell kv = result.get(0);
      if (!CellUtil.matchingRow(kv, get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }

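  /**
   * Create region split keys between startKey and endKey.
   *
   * @param numRegions the number of regions to be created; it has to be greater than 3
   * @return the resulting split keys
   */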
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue(numRegions > 3);
    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] result = new byte[tmpSplitKeys.length + 1][];
    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }

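  /**
   * Do a small get/scan against one store. This is required because store has no actual
   * methods of querying itself, and relies on StoreScanner.
   */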
  public static List<Cell> getFromStoreFile(HStore store,
      byte[] row,
      NavigableSet<byte[]> columns
      ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }

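  /**
   * Gets a ZooKeeperWatcher backed by the passed utility's configuration. Its Abortable
   * records that it aborted and then throws.
   */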
  public static ZooKeeperWatcher getZooKeeperWatcher(
      HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
      IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
        "unittest", new Abortable() {
          boolean aborted = false;

          @Override
          public void abort(String why, Throwable e) {
            aborted = true;
            throw new RuntimeException("Fatal ZK error, why=" + why, e);
          }

          @Override
          public boolean isAborted() {
            return aborted;
          }
        });
    return zkw;
  }

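  /**
   * Creates a znode with OPENED state for the given region, walking it through the usual
   * unassigned-node transitions: OFFLINE, then OPENING, then OPENED.
   */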
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw, region
        .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
        version);
    return zkw;
  }

  public static void assertKVListsEqual(String additionalMsg,
      final List<? extends Cell> expected,
      final List<? extends Cell> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i;
    for (i = 0; i < minLen
        && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
        ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
          "Expected and actual KV arrays differ at position " + i + ": " +
          safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
          safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }

  private static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }

  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
        + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }

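  /**
   * Creates a pre-split table, then loads it with randomly generated puts and deletes,
   * flushing between rounds so each region accumulates roughly the requested number of
   * storefiles.
   */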
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
        " regions, " + numFlushes + " storefiles per region, " +
        numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
        "\n");

    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableName, cfBytes,
        maxVersions,
        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
        numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }

    return table;
  }

  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;
  private static final Random random = new Random();

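  /**
   * Returns a random port in the 0xc000-0xfffe range, which IANA leaves unregistered
   * for dynamic/private use.
   */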
  public static int randomPort() {
    return MIN_RANDOM_PORT
        + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
  }

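  /**
   * Returns a random free port that has not been handed out before, verified by briefly
   * binding a ServerSocket to it.
   */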
  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      if (takenRandomPorts.contains(port)) {
        // Reset so the loop keeps going; otherwise a previously handed-out port
        // would be returned again.
        port = 0;
        continue;
      }
      takenRandomPorts.add(port);

      // Probe the port by binding to it; if the bind fails, pick another one.
      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        port = 0;
      }
    } while (port == 0);
    return port;
  }

  /** Returns a random multicast address in the 226.1.1.0/24 range. */
  public static String randomMultiCastAddress() {
    return "226.1.1." + random.nextInt(254);
  }

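  /**
   * Waits up to 10 seconds for a server to come up at the given host and port, probing
   * with short-lived socket connections.
   */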
  public static void waitForHostPort(String host, int port)
      throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }

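  /**
   * Creates a pre-split table for load testing. If the table already exists, logs a
   * warning and continues.
   *
   * @return the number of regions the table was split into
   */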
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    return createPreSplitLoadTestTable(conf, tableName,
        columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER,
        Durability.USE_DEFAULT);
  }

  /**
   * Creates a pre-split table for load testing with the given durability and number of
   * regions per server. If the table already exists, logs a warning and continues.
   *
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer,
      Durability durability)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.setDurability(durability);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
  }

  /**
   * Creates a pre-split table for load testing with a default number of regions per
   * server. If the table already exists, logs a warning and continues.
   *
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
  }

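  /**
   * Creates a pre-split table for load testing. If the table already exists, logs a
   * warning and continues.
   *
   * @return the number of regions the table was split into
   */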
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd, int numRegionsPerServer) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // The total number of splits is the number of live region servers times the
      // number of regions wanted per server.
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * numRegionsPerServer;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
          "pre-splitting table into " + totalNumberOfRegions + " regions " +
          "(regions per server: " + numRegionsPerServer + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
          totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + desc.getTableName() +
          " already exists, continuing");
    } finally {
      admin.close();
    }
    return totalNumberOfRegions;
  }

  public static int getMetaRSPort(Configuration conf) throws IOException {
    HTable table = new HTable(conf, TableName.META_TABLE_NAME);
    HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
    table.close();
    return hloc.getPort();
  }

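  /**
   * Due to async racing issues, a region may not be in the online region list of a region
   * server yet right after the assignment znode is deleted and the new assignment is
   * recorded in master, so keep checking until the timeout elapses.
   */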
  public void assertRegionOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) return;
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
        + " on server " + server);
  }

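  /**
   * Check to make sure the region is open on the specified region server, but not on any
   * other one.
   */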
  public void assertRegionOnlyOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) {
        List<JVMClusterUtil.RegionServerThread> rsThreads =
            getHBaseCluster().getLiveRegionServerThreads();
        for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) {
          HRegionServer rs = rsThread.getRegionServer();
          if (server.equals(rs.getServerName())) {
            continue;
          }
          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
          for (HRegion r : hrs) {
            assertTrue("Region should not be double assigned",
                r.getRegionId() != hri.getRegionId());
          }
        }
        return; // Region is on this server, and only this server.
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
        + " on server " + server);
  }

  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.addFamily(hcd);
    HRegionInfo info =
        new HRegionInfo(TableName.valueOf(tableName), null, null, false);
    HRegion region =
        HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }

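  /**
   * Wrapper method for Waiter#waitFor, using this utility's configuration.
   */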
  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, predicate);
  }

  /**
   * Wrapper method for Waiter#waitFor with an explicit polling interval.
   */
  public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, interval, predicate);
  }

  /**
   * Wrapper method for Waiter#waitFor with an explicit polling interval and control over
   * whether a timeout fails the test.
   */
  public <E extends Exception> long waitFor(long timeout, long interval,
      boolean failIfTimeout, Predicate<E> predicate) throws E {
    return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
  }

  /**
   * Wait until no regions are in transition, or the timeout elapses.
   *
   * @param timeout How long to wait, in milliseconds.
   */
  public void waitUntilNoRegionsInTransition(
      final long timeout) throws Exception {
    waitFor(timeout, predicateNoRegionsInTransition());
  }

  /**
   * Returns a predicate checking that there are no regions in transition in master.
   */
  public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
            .getAssignmentManager().getRegionStates();
        return !regionStates.isRegionsInTransition();
      }
    };
  }

  /**
   * Returns a predicate checking that the given table is enabled.
   */
  public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return getHBaseAdmin().isTableEnabled(tableName);
      }
    };
  }

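  /**
   * Wait until each of the given visibility labels is available in this server's label
   * cache, that is, until each has a non-zero ordinal.
   */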
  public void waitLabelAvailable(long timeoutMillis, final String... labels) {
    final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
    waitFor(timeoutMillis, new Waiter.Predicate<RuntimeException>() {

      @Override
      public boolean evaluate() {
        for (String label : labels) {
          // An ordinal of 0 means the label has not yet reached this server's cache.
          if (labelsCache.getLabelOrdinal(label) == 0) {
            return false;
          }
        }
        return true;
      }
    });
  }

  /**
   * Create a set of column descriptors with every combination of the available
   * compression, encoding and bloom codecs.
   *
   * @return the list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

  /**
   * Create a set of column descriptors with every combination of the available
   * compression, encoding and bloom codecs.
   *
   * @param prefix family names prefix
   * @return the list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
    List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
    long familyId = 0;
    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
        for (BloomType bloomType : BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          HColumnDescriptor htd = new HColumnDescriptor(name);
          htd.setCompressionType(compressionType);
          htd.setDataBlockEncoding(encodingType);
          htd.setBloomFilterType(bloomType);
          htds.add(htd);
          familyId++;
        }
      }
    }
    return htds;
  }

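  /**
   * Get supported compression algorithms.
   *
   * @return the compression algorithms whose compressor can actually be obtained
   *   in this JVM
   */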
  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // this algorithm is not available in this JVM
      }
    }
    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
  }
}