/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.tool.Canary;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;

/**
 * Facility for testing HBase. Manages one mini cluster at a time (ZooKeeper,
 * HDFS and HBase, plus an optional MapReduce cluster) and provides helpers
 * for creating, loading and inspecting test tables. Create an instance and
 * keep it around for the life of your test.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
@SuppressWarnings("deprecation")
public class HBaseTestingUtility extends HBaseCommonTestingUtility {
  private MiniZooKeeperCluster zkCluster = null;

  /** Configuration key for the number of regions per region server for pre-split tables. */
  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";

  /** Default number of regions per region server for pre-split tables. */
  public static final int DEFAULT_REGIONS_PER_SERVER = 5;

  /** Set if we were passed a zkCluster. If so, we won't shut it down on teardown. */
  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private volatile HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** If there is a mini cluster running for this testing utility instance. */
  private volatile boolean miniClusterRunning;

  private String hadoopLogDir;

  /** Directory (a subdirectory of dataTestDir) used by the dfs cluster, if any. */
  private File clusterTestDir = null;

  /** Directory on the test filesystem (distinct from the local test dir) for test data. */
  private Path dataTestDirOnTestFS = null;

  /** Shared cluster connection, created on demand. */
  private volatile Connection connection;

  /**
   * System property key to get the test directory value. The name is what it is
   * because the mini dfs cluster hard-codes where it puts its test data. Do not
   * use it directly in HBase code.
   * @deprecated can be used only with mini dfs
   */
  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  /** Filesystem URI used for map-reduce mini-cluster setup. */
  private static String FS_URI;

  /** Ports this utility has already handed out, so they are not reused. */
  private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();

  /** Compression algorithms to use in parameterized JUnit 4 tests. */
  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
      Arrays.asList(new Object[][] {
        { Compression.Algorithm.NONE },
        { Compression.Algorithm.GZ }
      });

  /** For unit tests parameterized with a single boolean. */
  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
      Arrays.asList(new Object[][] {
        { Boolean.FALSE },
        { Boolean.TRUE }
      });

  /** For unit tests parameterized with the memstoreTS and tags booleans. */
  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();

  /** Compression algorithms to use in testing. */
  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
      Compression.Algorithm.NONE, Compression.Algorithm.GZ
  };

  /**
   * Create all combinations of Bloom filter types and compression algorithms
   * for testing.
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
        HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  /**
   * Create all combinations of the memstoreTS and tags flags.
   */
  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    super(conf);

    // A checksum verification failure should cause unit tests to fail.
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }

  /**
   * Create an HBaseTestingUtility whose temp files land in the local test data
   * dir. Use this when you are not starting a mini HDFS cluster.
   * @return an HBaseTestingUtility that uses the local filesystem for temp files
   */
  public static HBaseTestingUtility createLocalHTU() {
    Configuration c = HBaseConfiguration.create();
    return createLocalHTU(c);
  }

  /**
   * Same as {@link #createLocalHTU()} but with a caller-supplied configuration.
   * @param c Configuration (will be modified)
   * @return an HBaseTestingUtility that uses the local filesystem for temp files
   */
  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    HBaseTestingUtility htu = new HBaseTestingUtility(c);
    String dataTestDir = htu.getDataTestDir().toString();
    htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
    return htu;
  }
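
  // A minimal usage sketch (illustrative, not part of this class): a test that
  // only needs local-filesystem semantics might do the following. The table
  // name is made up; createTableDescriptor and createLocalHRegion are defined
  // further down in this class.
  //
  //   HBaseTestingUtility htu = HBaseTestingUtility.createLocalHTU();
  //   HTableDescriptor htd = htu.createTableDescriptor("testtable");
  //   HRegion region = htu.createLocalHRegion(htd, null, null);
  //   // ... exercise the region, then close it ...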

  /**
   * Returns this class's instance of {@link Configuration}. Be careful how
   * you use the returned Configuration since {@link HConnection} instances
   * can be shared and are keyed by Configuration. Rather than using the
   * returned instance directly, it is usually best to make a copy:
   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
   * @return Instance of Configuration.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  /**
   * Home our test data in a directory under the default base test directory,
   * with a random name so many concurrent tests can run. Also amends the
   * {@link #TEST_DIRECTORY_KEY} system property (the mini dfs cluster bases
   * its data dir on it) and creates underlying directories for
   * hadoop.log.dir, hadoop.tmp.dir and mapreduce.cluster.local.dir.
   * @return The calculated data test build directory, if newly-created.
   */
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
    // we want our own value to ensure uniqueness on the same machine.
    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
    createSubDir(
      "mapreduce.cluster.local.dir",
      testPath, "mapred-local-dir");

    return testPath;
  }

  private void createSubDirAndSystemProperty(
      String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // There is already a value set, so we do nothing but hope
      // that there will be no conflicts.
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          " Overriding the configuration value with the system value.");
      }
      conf.set(propertyName, sysValue);
    } else {
      // Not set, so create it as a subdirectory.
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  /**
   * @return the base directory on the test filesystem (the mini DFS cluster's
   * filesystem when one is running, else the local filesystem)
   */
  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

  /**
   * @return the descriptor of the hbase:meta table
   */
  public HTableDescriptor getMetaTableDescriptor() {
    try {
      return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
    } catch (IOException e) {
      throw new RuntimeException("Unable to create META table descriptor", e);
    }
  }

  /**
   * @return Where the cluster (dfs, zookeeper) writes its data; creates the
   * directory if it does not exist yet.
   */
  Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  /**
   * Creates a directory for the cluster, under the test data directory.
   */
  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      return;
    }

    // Using randomUUID ensures that multiple clusters started by the same
    // test, on the same machine, do not collide.
    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
    // Have it cleaned up on exit
    boolean b = deleteOnExit();
    if (b) {
      clusterTestDir.deleteOnExit();
    }
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
  }

  /**
   * Returns a Path in the test filesystem, obtained from
   * {@link #getTestFileSystem()}, to write temporary test data to. Callers
   * should not assume the path is on the local filesystem: if a mini DFS
   * cluster is running, the returned path is on HDFS.
   * @return a unique path in the test filesystem
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  /**
   * Returns a Path in the test filesystem for the named subdirectory.
   * @param subdirName name of the subdir to create under the base test dir
   * @return a unique path in the test filesystem
   */
  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

  /**
   * Sets up a path in the test filesystem to be used by tests.
   */
  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test on test fs dir already setup in "
        + dataTestDirOnTestFS.toString());
      return;
    }
    dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
  }

  /**
   * @return a new path in the test filesystem, under the base test directory
   */
  private Path getNewDataTestDirOnTestFS() throws IOException {
    // The file system can be local, mini dfs or, if the configuration is
    // supplied externally, an external cluster FS. If it is the local
    // filesystem, reuse getDataTestDir; otherwise create a unique random
    // subdirectory under the base test dir on the test filesystem.
    FileSystem fs = getTestFileSystem();
    Path newDataTestDir = null;
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      File dataTestDir = new File(getDataTestDir().toString());
      if (deleteOnExit()) {
        dataTestDir.deleteOnExit();
      }
      newDataTestDir = new Path(dataTestDir.getAbsolutePath());
    } else {
      Path base = getBaseTestDirOnTestFS();
      String randomStr = UUID.randomUUID().toString();
      newDataTestDir = new Path(base, randomStr);
      if (deleteOnExit()) {
        fs.deleteOnExit(newDataTestDir);
      }
    }
    return newDataTestDir;
  }

  /**
   * Cleans the test data directory on the test filesystem.
   * @return True if we removed the directory
   */
  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret) {
      dataTestDirOnTestFS = null;
    }
    return ret;
  }

  /**
   * Cleans the named subdirectory under the test data dir on the test filesystem.
   * @return True if we removed the directory
   */
  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }

  /**
   * Start a mini dfs cluster.
   * @param servers How many DNs to start.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }
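
  // Sketch of typical use (illustrative): start HDFS alone when a test only
  // needs a filesystem, then tear it down in the matching cleanup hook.
  //
  //   HBaseTestingUtility htu = new HBaseTestingUtility();
  //   MiniDFSCluster dfs = htu.startMiniDFSCluster(3); // 3 datanodes
  //   FileSystem fs = dfs.getFileSystem();
  //   // ... write and read test files ...
  //   htu.shutdownMiniDFSCluster();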

  /**
   * Start a mini dfs cluster. Useful when you want to run datanodes on
   * defined hostnames: one datanode is started per host given, or a single
   * datanode if hosts is null or empty.
   * @param hosts hostnames DNs to run on.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
      throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  /**
   * Start a mini dfs cluster. Can only create one.
   * @param servers How many DNs to start.
   * @param hosts hostnames DNs to run on.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
        setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
        setLevel(org.apache.log4j.Level.ERROR);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, null, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // Reset the test directory for the test filesystem since it depends on the default FS.
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, racks, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // Reset the test directory for the test filesystem since it depends on the default FS.
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }

  /**
   * Used before starting HDFS and map-reduce mini-clusters: creates the
   * needed directories under the test dir and points the relevant Hadoop
   * properties at them, so nothing lands in the likes of '/tmp'.
   */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapreduce.cluster.local.dir");
    createDirAndSetProperty("mapred_temp", "mapreduce.cluster.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
        new Path(root, "mapred-output-dir").toString());
    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
        new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
  }

  /**
   * Returns whether short-circuit reads should be enabled for the tests, from
   * the "hbase.tests.use.shortcircuit.reads" system property or configuration
   * key. Defaults to false. The system property allows specifying this
   * parameter on the command line.
   */
  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }

  /**
   * Enable short-circuit reads, unless configured differently.
   */
  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      // Enable short circuit for the user running the tests
      conf.set("dfs.block.local-path-access.user", curUser);
      // Do short circuit reads
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      // Skip DFS checksums when short-circuiting; HBase checksums data itself
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

  /**
   * Shuts down the instance created by a call to
   * {@link #startMiniDFSCluster(int)}, or does nothing.
   */
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }

  /**
   * Call this if you only want a zk cluster.
   * @see #startMiniZKCluster(int)
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }
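
  // Sketch (illustrative): tests that exercise ZooKeeper-only code paths can
  // start just the ZK ensemble; HConstants.ZOOKEEPER_CLIENT_PORT is set on the
  // configuration by the startup path below.
  //
  //   HBaseTestingUtility htu = new HBaseTestingUtility();
  //   MiniZooKeeperCluster zk = htu.startMiniZKCluster();
  //   // ... connect using htu.getConfiguration() ...
  //   htu.shutdownMiniZKCluster();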

  /**
   * Call this if you only want a zk cluster.
   * @param zooKeeperServerNum number of ZK servers to run
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
      throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  /**
   * Start a mini ZK cluster. If the property
   * "test.hbase.zookeeper.property.clientPort" is set, the given port is used
   * as the default ZooKeeper client port.
   */
  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
      throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0) {
      // If there is a port in the config file, use it.
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  /**
   * Shuts down the zk cluster created by a call to
   * {@link #startMiniZKCluster()}, or does nothing.
   */
  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper with one master and
   * one regionserver.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }
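
  // The canonical lifecycle (illustrative): start everything in a JUnit
  // @BeforeClass, stop it in @AfterClass. TEST_UTIL is a hypothetical static
  // field name commonly used in HBase tests.
  //
  //   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  //
  //   @BeforeClass
  //   public static void setUp() throws Exception {
  //     TEST_UTIL.startMiniCluster();
  //   }
  //
  //   @AfterClass
  //   public static void tearDown() throws Exception {
  //     TEST_UTIL.shutdownMiniCluster();
  //   }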

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper, optionally wiping
   * the hbase root dir before starting.
   * @param numSlaves number of slave (regionserver and datanode) processes
   * @param create wipe the hbase root dir if it already exists
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves, boolean create)
      throws Exception {
    return startMiniCluster(1, numSlaves, create);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper. Each of the
   * {@code numSlaves} hosts a regionserver and a datanode, so rows in hbase
   * can find replicas on the same host.
   * @param numSlaves number of slaves to start
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
      throws Exception {
    return startMiniCluster(1, numSlaves, false);
  }

  /**
   * Start a minicluster with the specified number of masters and slaves,
   * optionally wiping the hbase root dir before starting.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, boolean create)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, null, create);
  }

  /**
   * Start a minicluster with the specified number of masters and slaves.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, null, false);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, boolean create)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts,
        null, null, create);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper. Modifies the passed
   * Configuration. Homes the cluster data directory under a random
   * subdirectory of the test data directory, cleaned up on exit.
   * @param numMasters Number of masters to start up. If numMasters &gt; 1,
   * find the active/primary master with {@link MiniHBaseCluster#getMaster()}.
   * @param numSlaves Number of slaves to start up; each slave hosts a
   * regionserver and a datanode so rows in hbase can find replicas local to
   * their regionserver.
   * @param dataNodeHosts hostnames to run the datanodes on; if non-empty, its
   * length overrides the datanode count.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts,
        null, null);
  }

  /**
   * Same as {@link #startMiniCluster(int, int)}, but with an explicit number
   * of datanodes.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper, with custom master
   * and regionserver implementation classes.
   * @param masterClass alternate HMaster implementation to run, or null
   * @param regionserverClass alternate regionserver implementation to run, or null
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    return startMiniCluster(
        numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, dataNodeHosts,
        masterClass, regionserverClass, false);
  }

  /**
   * Does the work of starting the mini cluster: starts dfs and zookeeper if
   * not already running, then the hbase cluster itself. If dataNodeHosts is
   * non-empty, its length overrides numDataNodes.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
      boolean create)
      throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // If we already put up a cluster, fail.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up the mini dfs cluster.
    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    // Start up a zk cluster.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Start the MiniHBaseCluster
    return startMiniHBaseCluster(numMasters, numSlaves, masterClass,
        regionserverClass, create);
  }

  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException {
    return startMiniHBaseCluster(numMasters, numSlaves, null, null, false);
  }

  /**
   * Starts up the mini hbase cluster. Usually used after a call to
   * {@link #startMiniCluster(int, int)} when doing stepped startup of
   * clusters. You will usually want {@link #startMiniCluster()} instead.
   * @return Reference to the hbase mini hbase cluster.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
      final int numSlaves, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
      boolean create)
      throws IOException, InterruptedException {
    // Now do the mini hbase cluster. Set the hbase.rootdir in config.
    createRootDir(create);

    // These settings make the master wait until this exact number of region
    // servers is connected before proceeding.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster =
        new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
    // Don't leave here till we've done a successful scan of the hbase:meta
    Table t = new HTable(c, TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin(); // create the hbaseAdmin immediately
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster) this.hbaseCluster;
  }

  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test. Use this if you want to keep dfs/zk up and just stop/start hbase.
   * @param servers number of region servers
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the hbase:meta
    Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  /**
   * @return Current mini hbase cluster. Only has something in it after a call
   * to {@link #startMiniCluster()}.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster) this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
        MiniHBaseCluster.class.getName());
  }

  /**
   * Stops the mini hbase, zk, and hdfs clusters.
   * @see #startMiniCluster(int)
   */
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    if (this.connection != null && !this.connection.isClosed()) {
      this.connection.close();
      this.connection = null;
    }
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }

  /**
   * @return True if we removed the test dirs
   */
  @Override
  public boolean cleanupTestDir() throws IOException {
    boolean ret = super.cleanupTestDir();
    if (deleteDir(this.clusterTestDir)) {
      this.clusterTestDir = null;
      return ret;
    }
    return false;
  }

  /**
   * Shuts down the HBase mini cluster. Does not shut down zk or dfs if running.
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    // Unset the configuration for MIN and MAX RS to start
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shutdown zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }

  /**
   * Returns the path to the default root dir the minicluster uses. If
   * {@code create} is true, a new root dir path is fetched irrespective of
   * whether it has been fetched before; otherwise the previous path is used.
   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   */
  public Path getDefaultRootDirPath(boolean create) throws IOException {
    if (!create) {
      return getDataTestDirOnTestFS();
    } else {
      return getNewDataTestDirOnTestFS();
    }
  }

  /**
   * Same as {@link #getDefaultRootDirPath(boolean)} with {@code create} set
   * to false. Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   */
  public Path getDefaultRootDirPath() throws IOException {
    return getDefaultRootDirPath(false);
  }

  /**
   * Creates an hbase rootdir on the test filesystem, and the hbase version
   * file. Normally you won't need this: the root dir is created for you as
   * part of mini cluster startup. Use it only for manual setups.
   * @param create whether to fetch a fresh root dir path even if one was
   * fetched before; the directory is made (and overwritten if it exists)
   * either way
   * @return Fully qualified path to the hbase root dir
   */
  public Path createRootDir(boolean create) throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath(create);
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  /**
   * Same as {@link #createRootDir(boolean)} with {@code create} set to false.
   * @return Fully qualified path to the hbase root dir
   */
  public Path createRootDir() throws IOException {
    return createRootDir(false);
  }

  /**
   * Flushes all caches in the mini hbase cluster.
   */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /**
   * Flushes all caches in the mini hbase cluster for the given table.
   */
  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /**
   * Compact all regions in the mini hbase cluster.
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /**
   * Compact all of a table's regions in the mini hbase cluster.
   */
  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

  /**
   * Create a table with a single column family.
   * @return A Table instance for the created table.
   */
  public Table createTable(TableName tableName, String family)
      throws IOException {
    return createTable(tableName, new String[] { family });
  }
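
  // Sketch (illustrative; table and family names are made up): create a table,
  // write a row, and read it back through the same utility.
  //
  //   Table t = TEST_UTIL.createTable(TableName.valueOf("tbl"), "cf");
  //   Put p = new Put(Bytes.toBytes("row1"));
  //   p.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  //   t.put(p);
  //   Result r = t.get(new Get(Bytes.toBytes("row1")));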

  /**
   * Create a table with a single column family.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[] family)
      throws IOException {
    return createTable(TableName.valueOf(tableName), new byte[][] { family });
  }

  /**
   * Create a table with multiple column families.
   * @return A Table instance for the created table.
   */
  public Table createTable(TableName tableName, String[] families)
      throws IOException {
    List<byte[]> fams = new ArrayList<byte[]>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  /**
   * Create a table with a single column family.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[] family)
      throws IOException {
    return createTable(tableName, new byte[][] { family });
  }

  /**
   * Create a table with multiple column families.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  /**
   * Create a table with multiple column families.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families, new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  public HTable createTable(String tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }
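
  // Sketch (illustrative): pre-split a table into 10 regions across the key
  // space ["aaa", "zzz"), keeping three versions per cell.
  //
  //   HTable t = TEST_UTIL.createTable(TableName.valueOf("presplit"),
  //       new byte[][] { Bytes.toBytes("cf") }, 3,
  //       Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 10);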

  /**
   * Create a table using the passed table descriptor, adding the given
   * families with Bloom filters disabled.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
      throws IOException {
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms (they are on by default as of 0.95); tests have
      // hard-coded counts of what to expect in the block cache and the like,
      // and blooms being on interferes with those counts.
      hcd.setBloomFilterType(BloomType.NONE);
      htd.addFamily(hcd);
    }
    getHBaseAdmin().createTable(htd);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(htd.getTableName());
    return (HTable) getConnection().getTable(htd.getTableName());
  }

  /**
   * Create a table using the passed table descriptor, pre-split on the given
   * split keys.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(HTableDescriptor htd, byte[][] splitRows)
      throws IOException {
    getHBaseAdmin().createTable(htd, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(getConfiguration(), htd.getTableName());
  }

  /**
   * Create a table with the given families, using the passed configuration.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    return createTable(new HTableDescriptor(tableName), families, c);
  }

  /**
   * Create a table with the given families, using the passed configuration.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms (they are on by default as of 0.95); tests have
      // hard-coded counts of what to expect in the block cache and the like,
      // and blooms being on interferes with those counts.
      hcd.setBloomFilterType(BloomType.NONE);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, desc.getTableName());
  }

  /**
   * Create a table with the given maximum number of versions.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  /**
   * Create a table with the given maximum number of versions.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, desc.getTableName());
  }

  /**
   * Create a table with a single family and the given maximum number of versions.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  /**
   * Create a table with a single family and the given maximum number of versions.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  /**
   * Create a table with the given maximum number of versions.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
      throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  /**
   * Create a table with the given maximum number of versions.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a table with the given maximum number of versions and block size.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    return createTable(TableName.valueOf(tableName),
        families, numVersions, blockSize);
  }

  /**
   * Create a table with the given maximum number of versions and block size.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a table with per-family maximum numbers of versions.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  /**
   * Create a table with per-family maximum numbers of versions.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a pre-split table with a single family.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
      throws IOException {
    return createTable(TableName.valueOf(tableName), family, splitRows);
  }

  /**
   * Create a pre-split table with a single family.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    desc.addFamily(hcd);
    getHBaseAdmin().createTable(desc, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  /**
   * Create a pre-split table with multiple families.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(desc.getTableName());
    return new HTable(getConfiguration(), desc.getTableName());
  }

  /**
   * Modify a table, synchronously. Waits until all regions are updated.
   */
  @SuppressWarnings("serial")
  public static void modifyTableSync(Admin admin, HTableDescriptor desc)
      throws IOException, InterruptedException {
    admin.modifyTable(desc.getTableName(), desc);
    Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
      setFirst(0);
      setSecond(0);
    }};
    // Poll the alter status: getFirst() is the number of regions still to be
    // updated, getSecond() the total. Note the loop must not be guarded on
    // getFirst() != 0 before the first poll, since the pair starts at (0, 0)
    // and the body would then never run.
    for (int i = 0; i < 500; i++) {
      status = admin.getAlterStatus(desc.getTableName());
      if (status.getSecond() != 0) {
        LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
          + " regions updated.");
        Thread.sleep(1 * 1000l);
      } else {
        LOG.debug("All regions updated.");
        break;
      }
    }
    if (status.getSecond() != 0) {
      throw new IOException("Failed to update all regions even after 500 seconds.");
    }
  }

  /**
   * Set the number of region replicas for a table.
   */
  public static void setReplicas(Admin admin, TableName table, int replicaCount)
      throws IOException, InterruptedException {
    admin.disableTable(table);
    HTableDescriptor desc = admin.getTableDescriptor(table);
    desc.setRegionReplication(replicaCount);
    modifyTableSync(admin, desc);
    admin.enableTable(table);
  }
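
  // Sketch (illustrative): bump a table to two region replicas. The method
  // disables, alters and re-enables the table, so it must not be in use.
  //
  //   setReplicas(TEST_UTIL.getHBaseAdmin(), TableName.valueOf("tbl"), 2);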

  /**
   * Drop an existing table.
   * @param tableName existing table
   */
  public void deleteTable(String tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  /**
   * Drop an existing table.
   * @param tableName existing table
   */
  public void deleteTable(byte[] tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  /**
   * Drop an existing table, disabling it first if needed.
   * @param tableName existing table
   */
  public void deleteTable(TableName tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }

  // Canned column family names and keys used by legacy HBaseTestCase-style tests.

  public final static byte[] fam1 = Bytes.toBytes("colfamily11");
  public final static byte[] fam2 = Bytes.toBytes("colfamily21");
  public final static byte[] fam3 = Bytes.toBytes("colfamily31");
  public static final byte[][] COLUMNS = { fam1, fam2, fam3 };
  private static final int MAXVERSIONS = 3;

  public static final char FIRST_CHAR = 'a';
  public static final char LAST_CHAR = 'z';
  public static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR };
  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);

  /**
   * Create a table descriptor with the standard three test families, using
   * the supplied version, TTL and KeepDeletedCells settings.
   * @param name name of the table descriptor
   * @return the new table descriptor
   */
  public HTableDescriptor createTableDescriptor(final String name,
      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
    for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
      htd.addFamily(new HColumnDescriptor(cfName)
          .setMinVersions(minVersions)
          .setMaxVersions(versions)
          .setKeepDeletedCells(keepDeleted)
          .setBlockCacheEnabled(false)
          .setTimeToLive(ttl)
      );
    }
    return htd;
  }

  /**
   * Create a table descriptor with the standard three test families and
   * default versioning and TTL settings.
   * @param name name of the table descriptor
   * @return the new table descriptor
   */
  public HTableDescriptor createTableDescriptor(final String name) {
    return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
  }

  /**
   * Create an HRegion that writes to the local tmp dirs.
   * @param desc table descriptor
   * @param startKey first row key, null for the default
   * @param endKey last row key, null for the default
   * @return the new region
   */
  public HRegion createLocalHRegion(HTableDescriptor desc, byte[] startKey,
      byte[] endKey)
      throws IOException {
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
    return createLocalHRegion(hri, desc);
  }

  /**
   * Create an HRegion that writes to the local tmp dirs. The caller is
   * responsible for closing the region (and its WAL) when done.
   * @return the new region
   */
  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
  }

  /**
   * Create an HRegion that writes to the local tmp dirs with the specified WAL.
   * @param info regioninfo
   * @param desc table descriptor
   * @param wal WAL for this region; the caller is responsible for closing it
   * @return the created region
   */
  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, WAL wal)
      throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, wal);
  }

  /**
   * Create an HRegion writing to the local tmp dirs, with the given families,
   * read-only flag and durability, using the supplied WAL.
   * @return the created region; close it when done
   */
  public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
      String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
      WAL wal, byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.setReadOnly(isReadOnly);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Keep all versions.
      hcd.setMaxVersions(Integer.MAX_VALUE);
      htd.addFamily(hcd);
    }
    htd.setDurability(durability);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    return createLocalHRegion(info, htd, wal);
  }

  /**
   * Truncate an existing table by scanning it and issuing a delete for each
   * row read.
   * @param tableName existing table
   * @return an HTable for the truncated table
   */
  public HTable truncateTable(byte[] tableName) throws IOException {
    return truncateTable(TableName.valueOf(tableName));
  }

  /**
   * Truncate an existing table by scanning it and issuing a delete for each
   * row read.
   * @param tableName existing table
   * @return an HTable for the truncated table
   */
  public HTable truncateTable(TableName tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan.close();
    // Open and close a fresh scanner to confirm the table is still usable.
    resScan = table.getScanner(scan);
    resScan.close();
    return table;
  }

  /**
   * Load table with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Family
   * @return Count of rows loaded.
   */
  public int loadTable(final Table t, final byte[] f) throws IOException {
    return loadTable(t, new byte[][] { f });
  }
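
  // Sketch (illustrative): fill a table with the canned 'aaa'..'zzz' rows and
  // verify the row count; ROWS.length is 26^3 = 17576.
  //
  //   Table t = TEST_UTIL.createTable(TableName.valueOf("tbl"), "cf");
  //   int loaded = TEST_UTIL.loadTable(t, Bytes.toBytes("cf"));
  //   assertTrue(loaded == TEST_UTIL.countRows(t));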

  /**
   * Load table with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Family
   * @param writeToWAL whether writes should go to the WAL
   * @return Count of rows loaded.
   */
  public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
    return loadTable(t, new byte[][] { f }, null, writeToWAL);
  }

  /**
   * Load a table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Array of families to load
   * @return Count of rows loaded.
   */
  public int loadTable(final Table t, final byte[][] f) throws IOException {
    return loadTable(t, f, null);
  }

  /**
   * Load a table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Array of families to load
   * @param value the value of the cells; if null, the row key is used
   * @return Count of rows loaded.
   */
  public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
    return loadTable(t, f, value, true);
  }

  /**
   * Load a table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Array of families to load
   * @param value the value of the cells; if null, the row key is used
   * @param writeToWAL whether writes should go to the WAL
   * @return Count of rows loaded.
   */
  public int loadTable(final Table t, final byte[][] f, byte[] value,
      boolean writeToWAL) throws IOException {
    List<Put> puts = new ArrayList<>();
    for (byte[] row : HBaseTestingUtility.ROWS) {
      Put put = new Put(row);
      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
      for (int i = 0; i < f.length; i++) {
        put.add(f[i], null, value != null ? value : row);
      }
      puts.add(put);
    }
    t.put(puts);
    return puts.size();
  }

  /**
   * A tracker for tracking and validating table rows generated with
   * {@link HBaseTestingUtility#loadTable(Table, byte[])}.
   */
  public static class SeenRowTracker {
    int dim = 'z' - 'a' + 1;
    int[][][] seenRows = new int[dim][dim][dim]; // count of how many times each row is seen
    byte[] startRow;
    byte[] stopRow;

    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
      this.startRow = startRow;
      this.stopRow = stopRow;
    }

    void reset() {
      for (byte[] row : ROWS) {
        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
      }
    }

    int i(byte b) {
      return b - 'a';
    }

    public void addRow(byte[] row) {
      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
    }

    /**
     * Validate that every row in [startRow, stopRow) was seen exactly once,
     * and that all other rows were not seen at all.
     */
    public void validate() {
      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
            int count = seenRows[i(b1)][i(b2)][i(b3)];
            int expectedCount = 0;
            if (Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0
                && Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0) {
              expectedCount = 1;
            }
            if (count != expectedCount) {
              String row = new String(new byte[] { b1, b2, b3 });
              throw new RuntimeException("Row:" + row + " has a seen count of " + count +
                  " instead of " + expectedCount);
            }
          }
        }
      }
    }
  }
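
  // Sketch (illustrative): verify a scan over ["bbb", "yyy") sees each
  // expected row exactly once.
  //
  //   SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("bbb"),
  //       Bytes.toBytes("yyy"));
  //   Scan scan = new Scan(Bytes.toBytes("bbb"), Bytes.toBytes("yyy"));
  //   for (Result r : table.getScanner(scan)) {
  //     tracker.addRow(r.getRow());
  //   }
  //   tracker.validate(); // throws RuntimeException on any mismatch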

  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
    return loadRegion(r, f, false);
  }

  /**
   * Load region with rows from 'aaa' to 'zzz'.
   * @param r Region
   * @param f Family
   * @param flush flush the cache if true
   * @return Count of rows loaded.
   */
  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
      throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.setDurability(Durability.SKIP_WAL);
          put.add(f, null, k);
          // Retry the put while the region reports itself too busy, backing
          // off exponentially up to one second between attempts.
          int preRowCount = rowCount;
          int pause = 10;
          int maxPause = 1000;
          while (rowCount == preRowCount) {
            try {
              r.put(put);
              rowCount++;
            } catch (RegionTooBusyException e) {
              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
              Threads.sleep(pause);
            }
          }
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }

  public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow)
      throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Put put = new Put(data);
      put.add(f, null, data);
      t.put(put);
    }
  }

  public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow)
      throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Delete delete = new Delete(data);
      delete.deleteFamily(f);
      t.delete(delete);
    }
  }

  /**
   * Return the number of rows in the given table.
   */
  public int countRows(final Table table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  /**
   * Return the number of rows in the given table, restricted to the given families.
   */
  public int countRows(final Table table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  /**
   * Return an MD5 digest of all the row keys in the given table, in scan order.
   */
  public String checksumRows(final Table table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // Return the computed digest value; MessageDigest#toString() describes
    // the digest object rather than its value.
    return Bytes.toStringBinary(digest.digest());
  }

  /**
   * Creates many regions, named "aaa" to "zzz".
   * @param table The table to use for the data.
   * @param columnFamily The family to insert the data into.
   * @return count of regions created.
   */
  public int createMultiRegions(HTable table, byte[] columnFamily)
      throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily);
  }

  /** All the row values for the data loaded by {@link #loadTable(Table, byte[])}. */
  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3];
  static {
    int i = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          ROWS[i][0] = b1;
          ROWS[i][1] = b2;
          ROWS[i][2] = b3;
          i++;
        }
      }
    }
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };

  /**
   * Creates many regions, named "aaa" to "zzz".
   * @param c Configuration to use.
   * @param table The table to use for the data.
   * @param columnFamily The family to insert the data into.
   * @return count of regions created.
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily)
      throws IOException {
    return createMultiRegions(c, table, columnFamily, KEYS);
  }

  /**
   * Creates the specified number of regions in the specified table.
   * @param c Configuration to use.
   * @param table The table to use for the data.
   * @param family The family to insert the data into.
   * @param numRegions the number of regions to create (at least 3).
   * @return count of regions created.
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] family, int numRegions)
      throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] regionStartKeys = new byte[splitKeys.length + 1][];
    System.arraycopy(splitKeys, 0, regionStartKeys, 1, splitKeys.length);
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte[][] startKeys)
      throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    Table meta = new HTable(c, TableName.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }
    // Collect the existing meta rows and the original region: the cluster
    // setup already created the region with empty start and end keys, and
    // the custom regions added below replace it, so it and its directory in
    // the filesystem have to be removed afterwards.
    List<byte[]> rows = getMetaTableRows(htd.getTableName());
    String regionToDeleteInFS = table
        .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
        .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // Add the custom regions to hbase:meta.
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getName(),
          startKeys[i], startKeys[j]);
      MetaTableAccessor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
      count++;
    }
    // Delete the pre-existing meta rows.
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    // Remove the original region's directory from the filesystem.
    Path tableDir = new Path(getDefaultRootDirPath().toString()
        + System.getProperty("file.separator") + htd.getTableName()
        + System.getProperty("file.separator") + regionToDeleteInFS);
    FileSystem.get(c).delete(tableDir, true);
    // Flush the cache of region locations.
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    // Assign the new regions.
    Admin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getName())) {
      for (HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }

  /**
   * Create rows in hbase:meta for regions of the specified table with the
   * specified start keys. The first startKey should be a 0-length byte array
   * if you want to form a proper range of regions.
   * @return list of region info for the regions added to meta
   */
  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final HTableDescriptor htd, byte[][] startKeys)
      throws IOException {
    Table meta = new HTable(conf, TableName.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // Add the custom regions.
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
          startKeys[j]);
      MetaTableAccessor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

  /**
   * Returns all rows from the hbase:meta table.
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows() throws IOException {
    // TODO: Redo using MetaTableAccessor class
    Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

  /**
   * Returns all rows from the hbase:meta table for a given user table.
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
    // TODO: Redo using MetaTableAccessor class
    Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      HRegionInfo info = HRegionInfo.getHRegionInfo(result);
      if (info == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        // TODO figure out what to do for this new hosed case.
        continue;
      }

      if (info.getTable().equals(tableName)) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

  /**
   * Get a reference to the region server object that holds the first region
   * of the specified user table: searches hbase:meta for the table's rows,
   * then locates the region server hosting the first one.
   * @param tableName user table to look up in hbase:meta
   * @return region server that holds it, null if the row doesn't exist
   */
  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
      throws IOException, InterruptedException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + metaRows.size() + " rows for table " +
      tableName);
    byte[] firstrow = metaRows.get(0);
    LOG.debug("FirstRow=" + Bytes.toString(firstrow));
    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // The client pause is in milliseconds; have the retry counter sleep in
    // the same unit.
    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MILLISECONDS);
    while (retrier.shouldRetry()) {
      int index = getMiniHBaseCluster().getServerWith(firstrow);
      if (index != -1) {
        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
      }
      // Came back -1. The region may not be online yet; sleep a while.
      retrier.sleepUntilNextRetry();
    }
    return null;
  }

  /**
   * Starts a MiniMRCluster with a default number of TaskTrackers (2).
   * @throws IOException When starting the cluster fails.
   */
  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
    return mrCluster;
  }
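
  // Sketch (illustrative): MapReduce-based tests start the MR cluster on top
  // of an already-running mini cluster and stop it afterwards.
  //
  //   TEST_UTIL.startMiniCluster();
  //   TEST_UTIL.startMiniMapReduceCluster();
  //   // ... run a job with new JobConf(TEST_UTIL.getConfiguration()) ...
  //   TEST_UTIL.shutdownMiniMapReduceCluster();
  //   TEST_UTIL.shutdownMiniCluster();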
  private void forceChangeTaskLogDir() {
    Field logDirField;
    try {
      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
      logDirField.setAccessible(true);

      // Strip the final modifier so the static field can be reassigned.
      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);

      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    } catch (SecurityException | NoSuchFieldException
        | IllegalArgumentException | IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }
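
  /**
   * Starts a MiniMRCluster.
   * @param servers number of TaskTrackers/NodeManagers to start
   */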
  private void startMiniMapReduceCluster(final int servers) throws IOException {
    if (mrCluster != null) {
      throw new IllegalStateException("MiniMRCluster is already running");
    }
    LOG.info("Starting mini mapreduce cluster...");
    setupClusterTestDir();
    createDirsAndSetProperties();

    forceChangeTaskLogDir();

    // YARN kills containers whose virtual memory use exceeds a multiple of
    // their physical allocation (default ratio 2.1). Test JVMs can have large
    // virtual footprints, so allow plenty of headroom.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    // Disable speculative execution so tests behave deterministically.
    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);

    // Allow the user to override the FS URI used by the MiniMRCluster.
    mrCluster = new MiniMRCluster(servers,
        FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
        null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
    }
    // MiniMRCluster overwrites this value; restore the one we configured.
    jobConf.set("mapreduce.cluster.local.dir",
        conf.get("mapreduce.cluster.local.dir"));
    LOG.info("Mini mapreduce cluster started");

    // The mini cluster picks its own addresses and ports; copy them back into
    // our conf so that clients of this utility can reach it.
    conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = jobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
    if (historyAddress != null) {
      conf.set("mapreduce.jobhistory.address", historyAddress);
    }
    String schedulerAddress =
        jobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }
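
  /**
   * Stops the previously started MiniMRCluster, if any.
   */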
  public void shutdownMiniMapReduceCluster() {
    if (mrCluster != null) {
      LOG.info("Stopping mini mapreduce cluster...");
      mrCluster.shutdown();
      mrCluster = null;
      LOG.info("Mini mapreduce cluster stopped");
    }
    // Restore the configuration to point at the local job runner.
    conf.set("mapreduce.jobtracker.address", "local");
  }
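
  /**
   * Create a stubbed-out RegionServerServices; useful mainly for getting at
   * the test filesystem.
   */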
  public RegionServerServices createMockRegionServerService() throws IOException {
    return createMockRegionServerService((ServerName)null);
  }
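
  /**
   * Create a stubbed-out RegionServerServices with the given RpcServer set.
   */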
  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc)
      throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
    rss.setFileSystem(getTestFileSystem());
    rss.setRpcServer(rpc);
    return rss;
  }
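
  /**
   * Create a stubbed-out RegionServerServices with the given server name.
   */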
  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
    rss.setFileSystem(getTestFileSystem());
    return rss;
  }
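
  /**
   * Switches the logger for the given class to DEBUG level.
   * @param clazz the class for which to switch to debug logging
   */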
  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }
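
  /**
   * Expire the master's ZooKeeper session.
   */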
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }
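
  /**
   * Expire a region server's ZooKeeper session.
   * @param index index of the region server in the mini cluster
   */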
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    // The expired region server will go down, so lower the minimum region
    // server count in our conf (used by servers spun up later)...
    decrementMinRegionServerCount(getConfiguration());

    // ...and in the conf of every currently running master.
    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
          Math.max(currentCount - 1, 1));
    }
  }

  public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
  }

  /**
   * @deprecated the {@code server} argument is unused; use
   *     {@link #expireSession(ZooKeeperWatcher, boolean)} instead.
   */
  @Deprecated
  public void expireSession(ZooKeeperWatcher nodeZK, Server server)
      throws Exception {
    expireSession(nodeZK, false);
  }
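
  /**
   * Expire a ZooKeeper session, using the approach recommended by the
   * ZooKeeper FAQ: open a second client on the same session id and password,
   * then close it, which causes the server to expire the original session.
   * @param nodeZK watcher whose underlying session should be expired
   * @param checkStatus if true, verify afterwards that hbase:meta can still
   *     be opened, i.e. that the cluster recovered
   */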
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
      throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Expiry is asynchronous, so first open an independent 'monitor' handle
    // on the same session; it lets us observe the events the server sends.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
        1000, new org.apache.zookeeper.Watcher() {
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event=" + watchedEvent);
      }
    }, sessionID, password);

    // Making it expire: connect a second handle on the same session...
    ZooKeeper newZK = new ZooKeeper(quorumServers,
        1000, EmptyWatcher.instance, sessionID, password);

    // ...wait until it is actually connected, so that closing it really
    // terminates the session on the server side...
    long start = System.currentTimeMillis();
    while (newZK.getState() != States.CONNECTED
        && System.currentTimeMillis() - start < 1000) {
      Thread.sleep(1);
    }
    // ...and close it, expiring the shared session.
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    monitor.close();

    if (checkStatus) {
      // Ensure the cluster is still usable by touching hbase:meta.
      new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
    }
  }
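
  /**
   * Get the mini HBase cluster.
   * @see #getHBaseClusterInterface()
   */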
  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

  /**
   * Returns the HBaseCluster instance. The returned object can be any of the
   * subclasses of HBaseCluster, so tests using it should not assume they are
   * running against a mini (in-process) cluster.
   * @see #getHBaseCluster()
   */
  public HBaseCluster getHBaseClusterInterface() {
    return hbaseCluster;
  }
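
  /**
   * Get a shared Connection to the cluster, creating it on first use.
   * Callers should not close the returned connection.
   */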
  public Connection getConnection() throws IOException {
    if (this.connection == null) {
      this.connection = ConnectionFactory.createConnection(this.conf);
    }
    return this.connection;
  }
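
  /**
   * Returns an Admin instance shared by all users of this utility. Calling
   * close() on it only logs a warning (see HBaseAdminForTests below); the
   * instance is torn down with the cluster.
   */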
  public synchronized HBaseAdmin getHBaseAdmin()
      throws IOException {
    if (hbaseAdmin == null) {
      this.hbaseAdmin = new HBaseAdminForTests(getConnection());
    }
    return hbaseAdmin;
  }

  private HBaseAdminForTests hbaseAdmin = null;

  private static class HBaseAdminForTests extends HBaseAdmin {
    public HBaseAdminForTests(Connection connection) throws MasterNotRunningException,
        ZooKeeperConnectionException, IOException {
      super(connection);
    }

    @Override
    public synchronized void close() throws IOException {
      // The shared admin must survive individual tests calling close() on it.
      LOG.warn("close() called on HBaseAdmin instance returned from " +
          "HBaseTestingUtility.getHBaseAdmin()");
    }

    private synchronized void close0() throws IOException {
      super.close();
    }
  }
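
  /**
   * Returns a ZooKeeperWatcher instance shared by all users of this utility.
   * Don't close it; it is shared.
   */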
  public synchronized ZooKeeperWatcher getZooKeeperWatcher()
      throws IOException {
    if (zooKeeperWatcher == null) {
      zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
          new Abortable() {
        @Override public void abort(String why, Throwable e) {
          throw new RuntimeException("Unexpected abort in HBaseTestingUtility:" + why, e);
        }
        @Override public boolean isAborted() { return false; }
      });
    }
    return zooKeeperWatcher;
  }
  private ZooKeeperWatcher zooKeeperWatcher;
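
  /**
   * Closes the named region.
   * @param regionName the region to close as a String
   */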
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  /**
   * Closes the named region.
   * @param regionName the region to close
   */
  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }

  /**
   * Closes the region containing the given row.
   * @param row the row whose containing region should be closed
   * @param table the RegionLocator of the table to find the region in
   */
  public void closeRegionByRow(String row, RegionLocator table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  /**
   * Closes the region containing the given row.
   */
  public void closeRegionByRow(byte[] row, RegionLocator table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }
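
  /**
   * Retrieves a splittable region randomly from the given table.
   * @param tableName name of the table
   * @param maxAttempts maximum number of attempts; unlimited when -1
   * @return a region that passes checkSplit(), or null if none was found
   */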
  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
    int regCount = regions.size();
    Set<Integer> attempted = new HashSet<Integer>();
    int idx;
    int attempts = 0;
    do {
      regions = getHBaseCluster().getRegions(tableName);
      if (regCount != regions.size()) {
        // if there was region movement, clear the attempted set
        attempted.clear();
      }
      regCount = regions.size();
      if (regCount > 0) {
        idx = random.nextInt(regCount);
        // if we have just tried this region, there is no need to try again
        if (attempted.contains(idx)) {
          continue;
        }
        try {
          regions.get(idx).checkSplit();
          return regions.get(idx);
        } catch (Exception ex) {
          LOG.warn("Caught exception", ex);
          attempted.add(idx);
        }
      }
      attempts++;
    } while (maxAttempts == -1 || attempts < maxAttempts);
    return null;
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
    if (dfsCluster != null && dfsCluster.isClusterUp()) {
      throw new IOException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }
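
  /**
   * Wait until all regions in a table have been assigned and the table is
   * available. Times out after 30 seconds by default.
   * @param table table to wait on
   */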
  public void waitTableAvailable(TableName table)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table.getName(), 30000);
  }

  public void waitTableAvailable(Admin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableAvailable(admin, table, 30000);
  }

  /**
   * Wait until all regions in a table have been assigned.
   * @param table table to wait on
   * @param timeoutMillis how long to wait before asserting failure
   */
  public void waitTableAvailable(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableAvailable(Admin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!admin.isTableAvailable(TableName.valueOf(table))) {
      assertTrue("Timed out waiting for table to become available " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }
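
  /**
   * Waits for a table to be 'enabled': marked as enabled with all of its
   * regions assigned and online. Times out after 30 seconds by default.
   * @see #waitTableAvailable(TableName)
   */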
  public void waitTableEnabled(TableName table)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table.getName(), 30000);
  }

  public void waitTableEnabled(Admin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(admin, table, 30000);
  }

  /**
   * Waits for a table to be 'enabled'.
   * @param table table to wait on
   * @param timeoutMillis how long to wait before asserting failure
   */
  public void waitTableEnabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableEnabled(Admin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    TableName tableName = TableName.valueOf(table);
    long startWait = System.currentTimeMillis();
    waitTableAvailable(admin, table, timeoutMillis);
    while (!admin.isTableEnabled(tableName)) {
      assertTrue("Timed out waiting for table to become available and enabled " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
    // Finally, make sure every region of the table is actually reachable out
    // on the cluster: regions can show up in hbase:meta before the region
    // servers are ready to serve them, so sniff each one with the Canary.
    try {
      Canary.sniff(admin, tableName);
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
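
  /**
   * Waits for a table to be marked as disabled. Times out after 30 seconds
   * by default.
   */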
  public void waitTableDisabled(byte[] table)
      throws InterruptedException, IOException {
    waitTableDisabled(getHBaseAdmin(), table, 30000);
  }

  public void waitTableDisabled(Admin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableDisabled(admin, table, 30000);
  }

  /**
   * Waits for a table to be marked as disabled.
   * @param table table to wait on
   * @param timeoutMillis how long to wait before asserting failure
   */
  public void waitTableDisabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableDisabled(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableDisabled(Admin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    TableName tableName = TableName.valueOf(table);
    long startWait = System.currentTimeMillis();
    while (!admin.isTableDisabled(tableName)) {
      assertTrue("Timed out waiting for table to become disabled " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }
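
  /**
   * Make sure that at least the specified number of region servers are
   * running.
   * @param num minimum number of region servers that should be running
   * @return true if we started any servers
   */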
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }
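
  /**
   * Make sure that at least the specified number of region servers are
   * running; servers that are currently stopping or stopped are not counted.
   * @param num minimum number of region servers that should be running
   * @return true if we started any servers
   */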
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
        getMiniHBaseCluster().getRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:" + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }
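
  /**
   * Returns a User that differs from the current one by the given suffix;
   * useful when a test needs to act as a second HDFS user. If the filesystem
   * in use is not a DistributedFileSystem, the current user is returned.
   * @param c configuration to consult
   * @param differentiatingSuffix suffix appended to the current user name
   */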
  public static User getDifferentUser(final Configuration c,
      final String differentiatingSuffix)
      throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    String username = User.getCurrent().getName() +
        differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
        new String[]{"supergroup"});
    return user;
  }

  public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster)
      throws IOException {
    NavigableSet<String> online = new TreeSet<String>();
    for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
      try {
        for (HRegionInfo region :
            ProtobufUtil.getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
          online.add(region.getRegionNameAsString());
        }
      } catch (RegionServerStoppedException e) {
        // That region server went down while we were asking; skip it.
      }
    }
    for (MasterThread mt : cluster.getLiveMasterThreads()) {
      try {
        for (HRegionInfo region :
            ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) {
          online.add(region.getRegionNameAsString());
        }
      } catch (RegionServerStoppedException e) {
        // That master went down while we were asking; skip it.
      } catch (ServerNotRunningYetException e) {
        // The master is not fully up yet; it is carrying no regions.
      }
    }
    return online;
  }
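
  /**
   * Set maxRecoveryErrorCount on a DFSClient DFSOutputStream via reflection;
   * the field is private and has no public setter. Handy for tests that want
   * block recovery to give up quickly instead of retrying.
   */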
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
                stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }
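
  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server cell, or until the default timeout (60 seconds) elapses.
   * @param tableName the table to wait on
   */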
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }
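
  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server cell, or until the given timeout elapses.
   * @param tableName the table to wait on
   * @param timeout timeout in milliseconds
   */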
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
      throws IOException {
    final Table meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
    try {
      waitFor(timeout, 200, true, new Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          boolean allRegionsAssigned = true;
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          ResultScanner s = meta.getScanner(scan);
          try {
            Result r;
            while ((r = s.next()) != null) {
              byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
              HRegionInfo info = HRegionInfo.parseFromOrNull(b);
              if (info != null && info.getTable().equals(tableName)) {
                b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                allRegionsAssigned &= (b != null);
              }
            }
          } finally {
            s.close();
          }
          return allRegionsAssigned;
        }
      });
    } finally {
      meta.close();
    }

    if (!getHBaseClusterInterface().isDistributedCluster()) {
      // All regions are in hbase:meta, but the master's in-memory state can
      // lag; for mini clusters, also wait until the master knows of the
      // assignments before returning.
      HMaster master = getHBaseCluster().getMaster();
      final RegionStates states = master.getAssignmentManager().getRegionStates();
      waitFor(timeout, 200, new Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          List<HRegionInfo> hris = states.getRegionsOfTable(tableName);
          return hris != null && !hris.isEmpty();
        }
      });
    }
  }
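
  /**
   * Do a small get/scan against one store. This is required because store has
   * no actual methods of querying itself, and relies on StoreScanner.
   */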
  public static List<Cell> getFromStoreFile(HStore store,
      Get get) throws IOException {
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()),
        // pass a read point of 0; these test reads only need to see
        // committed, flushed data.
        0);

    List<Cell> result = new ArrayList<Cell>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // verify that we are on the row we want
      Cell kv = result.get(0);
      if (!CellUtil.matchingRow(kv, get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }
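
  /**
   * Create region split keys between startKey and endKey.
   * @param numRegions the number of regions to be created; must be greater
   *     than 3
   * @return the resulting split keys
   */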
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue("numRegions must be greater than 3", numRegions > 3);
    byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] result = new byte[tmpSplitKeys.length + 1][];
    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }
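
  /**
   * Do a small get/scan against one store, selecting the given row and
   * columns.
   * @see #getFromStoreFile(HStore, Get)
   */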
  public static List<Cell> getFromStoreFile(HStore store,
      byte [] row,
      NavigableSet<byte[]> columns
      ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }
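
  /**
   * Gets a ZooKeeperWatcher backed by an Abortable that turns aborts into
   * RuntimeExceptions.
   */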
  public static ZooKeeperWatcher getZooKeeperWatcher(
      HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
      IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
        "unittest", new Abortable() {
          boolean aborted = false;

          @Override
          public void abort(String why, Throwable e) {
            aborted = true;
            throw new RuntimeException("Fatal ZK error, why=" + why, e);
          }

          @Override
          public boolean isAborted() {
            return aborted;
          }
        });
    return zkw;
  }
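
  /**
   * Creates a znode for the given region and transitions it through OFFLINE
   * and OPENING to the OPENED state.
   * @return the ZooKeeperWatcher used to drive the transitions
   */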
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw, region
        .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
        version);
    return zkw;
  }

  public static void assertKVListsEqual(String additionalMsg,
      final List<? extends Cell> expected,
      final List<? extends Cell> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i;
    // Advance i past the longest common prefix of the two lists.
    for (i = 0; i < minLen
        && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
        ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
          "Expected and actual KV arrays differ at position " + i + ": " +
          safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
          safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }

  public static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }

  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
        + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }
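
  /**
   * Creates a pre-split table, loads it with randomly generated puts and
   * deletes over several flushes, and returns the resulting HTable.
   */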
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
        " regions, " + numFlushes + " storefiles per region, " +
        numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
        "\n");

    // Seed off the table name so re-runs of the same test are repeatable.
    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableName, cfBytes,
        maxVersions,
        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
        numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }

    return table;
  }

  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;
  private static Random random = new Random();

  /**
   * Returns a random port in the dynamic/private range; such ports cannot be
   * registered with IANA and are intended for dynamic allocation.
   */
  public static int randomPort() {
    return MIN_RANDOM_PORT
        + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
  }
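
  /**
   * Returns a random free port and marks that port as taken so it is not
   * handed out again. Not thread-safe; expected to be called from
   * single-threaded test setup code.
   */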
  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      if (takenRandomPorts.contains(port)) {
        // Already handed out; reset port so the loop tries another one
        // (otherwise the do/while exit condition would pass and a taken
        // port would be returned).
        port = 0;
        continue;
      }
      takenRandomPorts.add(port);

      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        port = 0;
      }
    } while (port == 0);
    return port;
  }

  public static String randomMultiCastAddress() {
    return "226.1.1." + random.nextInt(254);
  }
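
  /**
   * Waits up to ten seconds for a server socket to accept connections at the
   * given host and port, rethrowing the last connect error on timeout.
   */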
  public static void waitForHostPort(String host, int port)
      throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }
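
  /**
   * Creates a pre-split table for load testing, with one column family and a
   * default number of regions per server. If the table already exists, logs a
   * warning and continues.
   * @return the number of regions the table was split into
   */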
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    return createPreSplitLoadTestTable(conf, tableName,
        columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1,
        Durability.USE_DEFAULT);
  }

  /**
   * Creates a pre-split table for load testing.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
      Durability durability)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.setDurability(durability);
    desc.setRegionReplication(regionReplication);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
  }

  /**
   * Creates a pre-split table for load testing.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
  }

  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd, int numRegionsPerServer) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
    Admin admin = unmanagedConnection.getAdmin();

    try {
      // The total number of splits is the number of live region servers
      // scaled by the requested regions per server.
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * numRegionsPerServer;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
          "pre-splitting table into " + totalNumberOfRegions + " regions " +
          "(regions per server: " + numRegionsPerServer + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
          totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + desc.getTableName() +
          " already exists, continuing");
    } finally {
      admin.close();
      unmanagedConnection.close();
    }
    return totalNumberOfRegions;
  }

  public static int getMetaRSPort(Configuration conf) throws IOException {
    RegionLocator table = new HTable(conf, TableName.META_TABLE_NAME);
    HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
    table.close();
    return hloc.getPort();
  }
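
  /**
   * Asserts that the region shows up in the server's online-region list
   * within the given timeout. Assignment is asynchronous, so a region may not
   * be listed on a server immediately after the master records the
   * assignment; this polls until it appears.
   */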
  public void assertRegionOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) return;
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
        + " on server " + server);
  }
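
  /**
   * Checks that the region is open on the specified region server, and not on
   * any other one.
   */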
  public void assertRegionOnlyOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) {
        List<JVMClusterUtil.RegionServerThread> rsThreads =
            getHBaseCluster().getLiveRegionServerThreads();
        for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) {
          HRegionServer rs = rsThread.getRegionServer();
          if (server.equals(rs.getServerName())) {
            continue;
          }
          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
          for (HRegion r : hrs) {
            assertTrue("Region should not be double assigned",
                r.getRegionId() != hri.getRegionId());
          }
        }
        return;
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
        + " on server " + server);
  }

  /**
   * Creates a single-family HRegion for test purposes, rooted at the test
   * data directory.
   */
  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.addFamily(hcd);
    HRegionInfo info =
        new HRegionInfo(TableName.valueOf(tableName), null, null, false);
    HRegion region =
        HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }
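
  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
   */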
  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, predicate);
  }

  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, interval, predicate);
  }

  /**
   * Wrapper method for
   * {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, long interval,
      boolean failIfTimeout, Predicate<E> predicate) throws E {
    return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
  }

  /**
   * Returns a {@link Predicate} that checks that there are no regions in
   * transition on the master.
   */
  public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
            .getAssignmentManager().getRegionStates();
        return !regionStates.isRegionsInTransition();
      }
    };
  }

  /**
   * Returns a {@link Predicate} that checks whether the given table is
   * enabled.
   */
  public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return getHBaseAdmin().isTableEnabled(tableName);
      }
    };
  }
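
  /**
   * Create a set of column descriptors covering the cross product of all
   * supported compression algorithms, data block encodings, and bloom filter
   * types.
   */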
  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

  /**
   * Create a set of column descriptors covering all supported compression
   * algorithms, data block encodings, and bloom filter types.
   * @param prefix family names prefix
   */
  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
    List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
    long familyId = 0;
    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
        for (BloomType bloomType : BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          HColumnDescriptor htd = new HColumnDescriptor(name);
          htd.setCompressionType(compressionType);
          htd.setDataBlockEncoding(encodingType);
          htd.setBloomFilterType(bloomType);
          htds.add(htd);
          familyId++;
        }
      }
    }
    return htds;
  }
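
  /**
   * Get the compression algorithms that are actually usable in the current
   * environment, i.e. those whose codecs load successfully.
   */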
  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // this algorithm is not available in this environment; skip it
      }
    }
    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
  }

  /**
   * Wait until there are no regions in transition.
   * @param timeout how long to wait, in milliseconds
   */
  public void waitUntilNoRegionsInTransition(final long timeout) throws Exception {
    waitFor(timeout, predicateNoRegionsInTransition());
  }
}