/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.tool.Canary;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;

/**
 * Facility for testing HBase. Spins up, and tears down, some or all of a mini
 * DFS, ZooKeeper and HBase cluster, and provides helpers for creating tables,
 * loading data and inspecting cluster state. Create an instance and keep it
 * around for the duration of the test; it manages at most one cluster at a
 * time.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
@SuppressWarnings("deprecation")
public class HBaseTestingUtility extends HBaseCommonTestingUtility {
  private MiniZooKeeperCluster zkCluster = null;

  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";

  public static final int DEFAULT_REGIONS_PER_SERVER = 5;

  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private volatile HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  private volatile boolean miniClusterRunning;

  private String hadoopLogDir;

  private File clusterTestDir = null;

  private Path dataTestDirOnTestFS = null;

  private volatile Connection connection;

  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  private static String FS_URI;

  private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();

  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Boolean.FALSE },
      { Boolean.TRUE }
    });

  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();

  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
    Compression.Algorithm.NONE, Compression.Algorithm.GZ
  };

  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    super(conf);

    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }
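
  // Typical lifecycle, as a sketch (assumes JUnit 4; the table and family
  // names below are illustrative, not part of this class):
  //
  //   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  //
  //   @BeforeClass public static void setUp() throws Exception {
  //     TEST_UTIL.startMiniCluster();                  // DFS + ZooKeeper + HBase
  //   }
  //   @Test public void testSomething() throws Exception {
  //     Table t = TEST_UTIL.createTable(TableName.valueOf("demo"), "cf");
  //     // ... exercise the table ...
  //   }
  //   @AfterClass public static void tearDown() throws Exception {
  //     TEST_UTIL.shutdownMiniCluster();
  //   }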

  public static HBaseTestingUtility createLocalHTU() {
    Configuration c = HBaseConfiguration.create();
    return createLocalHTU(c);
  }

  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    HBaseTestingUtility htu = new HBaseTestingUtility(c);
    String dataTestDir = htu.getDataTestDir().toString();
    htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
    return htu;
  }

  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    createSubDir(
      "mapreduce.cluster.local.dir",
      testPath, "mapred-local-dir");

    return testPath;
  }

  private void createSubDirAndSystemProperty(
    String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // The property is already set; respect it rather than creating a subdir.
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + ", so not creating it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          ". Overriding the configuration value with the system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      // Not set, so create it as a subdirectory and publish it both ways.
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

  public HTableDescriptor getMetaTableDescriptor() {
    try {
      return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
    } catch (IOException e) {
      throw new RuntimeException("Unable to create META table descriptor", e);
    }
  }

  Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      return;
    }

    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();

    boolean b = deleteOnExit();
    if (b) clusterTestDir.deleteOnExit();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
  }

  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test dir on test filesystem already set up: "
        + dataTestDirOnTestFS.toString());
      return;
    }
    dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
  }

  private Path getNewDataTestDirOnTestFS() throws IOException {
    // If the test filesystem is local, reuse the local data test dir;
    // otherwise create a unique, random subdirectory under the working
    // directory of the test filesystem.
    FileSystem fs = getTestFileSystem();
    Path newDataTestDir = null;
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      File dataTestDir = new File(getDataTestDir().toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
      newDataTestDir = new Path(dataTestDir.getAbsolutePath());
    } else {
      Path base = getBaseTestDirOnTestFS();
      String randomStr = UUID.randomUUID().toString();
      newDataTestDir = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(newDataTestDir);
    }
    return newDataTestDir;
  }

  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret)
      dataTestDirOnTestFS = null;
    return ret;
  }

  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }

  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
      throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

    // Quiet noisy metrics warnings emitted by the minicluster.
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
        setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
        setLevel(org.apache.log4j.Level.ERROR);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up.
    this.dfsCluster.waitClusterUp();

    // Reset the cached test directory for the test filesystem.
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }
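
  // DFS-only sketch (illustrative): some tests need HDFS but no HBase.
  //
  //   HBaseTestingUtility util = new HBaseTestingUtility();
  //   MiniDFSCluster dfs = util.startMiniDFSCluster(3);   // 3 datanodes
  //   FileSystem fs = dfs.getFileSystem();
  //   // ... exercise the FileSystem ...
  //   util.shutdownMiniDFSCluster();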

  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, racks, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up.
    this.dfsCluster.waitClusterUp();

    // Reset the cached test directory for the test filesystem.
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }

  /** Sets up directories and properties used before starting DFS and MR mini-clusters. */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapreduce.cluster.local.dir");
    createDirAndSetProperty("mapred_temp", "mapreduce.cluster.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
  }

  /**
   * Returns whether short-circuit reads should be used in tests, reading the
   * setting from the system property or, failing that, the configuration;
   * the system property wins if both are set.
   */
  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }

  /**
   * Enable short-circuit reads, unless configured differently.
   * Sets both HBase and HDFS settings, including skipping the hdfs checksum checks.
   */
  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      // Short-circuit read, HDFS side.
      conf.set("dfs.block.local-path-access.user", curUser);
      // Short-circuit read, HBase/client side.
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      // Skip checksum checks for the HDFS client and the datanode.
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }
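
  // Sketch: turning short-circuit reads on for a test run (illustrative;
  // must happen before the mini DFS cluster is started):
  //
  //   // either via a JVM flag: -Dhbase.tests.use.shortcircuit.reads=true
  //   // or programmatically on this utility's configuration:
  //   util.getConfiguration().setBoolean("hbase.tests.use.shortcircuit.reads", true);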

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }

  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
      throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
      throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0) {
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }
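
  // ZooKeeper-only sketch (illustrative): useful for testing ZK-backed
  // components without DFS or HBase:
  //
  //   MiniZooKeeperCluster zk = util.startMiniZKCluster();
  //   int port = util.getConfiguration().getInt(HConstants.ZOOKEEPER_CLIENT_PORT, -1);
  //   // ... connect a ZooKeeper client to localhost:port ...
  //   util.shutdownMiniZKCluster();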

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  public MiniHBaseCluster startMiniCluster(final int numSlaves, boolean create)
      throws Exception {
    return startMiniCluster(1, numSlaves, create);
  }

  public MiniHBaseCluster startMiniCluster(final int numSlaves)
      throws Exception {
    return startMiniCluster(1, numSlaves, false);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, boolean create)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, null, create);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, null, false);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, boolean create)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts,
        null, null, create);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts,
        null, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    return startMiniCluster(
        numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, dataNodeHosts,
        masterClass, regionserverClass, false);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
      boolean create)
      throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // If we already put up a cluster, fail.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up the mini dfs cluster first.
    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    // Start up a zk cluster, if not already started.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Start the MiniHBaseCluster.
    return startMiniHBaseCluster(numMasters, numSlaves, masterClass,
        regionserverClass, create);
  }

  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException {
    return startMiniHBaseCluster(numMasters, numSlaves, null, null, false);
  }

  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
      final int numSlaves, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
      boolean create)
      throws IOException, InterruptedException {
    // Now do the mini hbase cluster. Set the hbase.rootdir in config.
    createRootDir(create);

    // These settings make the master wait until this exact number of
    // region servers is connected.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster =
        new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
    // Don't leave here till we've done a successful scan of the hbase:meta.
    Table t = new HTable(c, TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin(); // create the hbaseAdmin immediately
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster)this.hbaseCluster;
  }
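
  // Sketch: start only the HBase layer once DFS and ZK are already up
  // (illustrative counts):
  //
  //   util.startMiniDFSCluster(1);
  //   util.startMiniZKCluster();
  //   util.startMiniHBaseCluster(1, 3);   // 1 master, 3 regionservers
  //   // ...
  //   util.shutdownMiniCluster();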

  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test. Use this if you keep dfs/zk up and just want to restart hbase.
   * @param servers number of region servers
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the hbase:meta.
    Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // just drain the scanner
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  /**
   * @return Current mini hbase cluster; only has something in it after a call
   * to {@link #startMiniCluster()}.
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster)this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
        MiniHBaseCluster.class.getName());
  }

  /**
   * Stops mini hbase, zk, and hdfs clusters.
   * @see #startMiniCluster()
   */
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    if (this.connection != null && !this.connection.isClosed()) {
      this.connection.close();
      this.connection = null;
    }
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }

  /**
   * @return True if we removed the test dirs
   */
  @Override
  public boolean cleanupTestDir() throws IOException {
    boolean ret = super.cleanupTestDir();
    if (deleteDir(this.clusterTestDir)) {
      this.clusterTestDir = null;
      return ret;
    }
    return false;
  }

  /**
   * Shuts down the HBase mini cluster. Does not shut down zk or dfs if running.
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    // Unset the configuration for MIN and MAX RS to start.
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shut down zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }

  /**
   * Returns the path to the default root dir the minicluster uses. If
   * <code>create</code> is true, a new root directory path is fetched
   * irrespective of whether it was fetched before. Note: this does not
   * cause the root dir to be created.
   */
  public Path getDefaultRootDirPath(boolean create) throws IOException {
    if (!create) {
      return getDataTestDirOnTestFS();
    } else {
      return getNewDataTestDirOnTestFS();
    }
  }

  public Path getDefaultRootDirPath() throws IOException {
    return getDefaultRootDirPath(false);
  }

  /**
   * Creates an hbase rootdir and the version file in it. Normally you won't
   * need this; the root dir is created as part of mini cluster startup.
   */
  public Path createRootDir(boolean create) throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath(create);
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  public Path createRootDir() throws IOException {
    return createRootDir(false);
  }

  /**
   * Flushes all caches in the mini hbase cluster.
   */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /**
   * Compact all regions in the mini hbase cluster.
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }
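
  // Sketch: forcing data through the full write path in a test
  // (illustrative table and family names):
  //
  //   util.loadTable(table, Bytes.toBytes("cf"));
  //   util.flush(TableName.valueOf("demo"));          // memstore -> HFiles
  //   util.compact(TableName.valueOf("demo"), true);  // major compaction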

  /**
   * Create a table.
   * @return a Table instance for the created table.
   */
  public Table createTable(TableName tableName, String family)
      throws IOException {
    return createTable(tableName, new String[] { family });
  }

  public HTable createTable(byte[] tableName, byte[] family)
      throws IOException {
    return createTable(TableName.valueOf(tableName), new byte[][] { family });
  }

  public Table createTable(TableName tableName, String[] families)
      throws IOException {
    List<byte[]> fams = new ArrayList<byte[]>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  public HTable createTable(TableName tableName, byte[] family)
      throws IOException {
    return createTable(tableName, new byte[][] { family });
  }

  public HTable createTable(byte[] tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  public HTable createTable(TableName tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families, new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  public HTable createTable(String tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should
    // wait until they are assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
      throws IOException {
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms (they are on by default as of 0.95): tests have
      // hard-coded counts of what to expect in the block cache, etc., and
      // blooms being on interferes with those counts.
      hcd.setBloomFilterType(BloomType.NONE);
      htd.addFamily(hcd);
    }
    getHBaseAdmin().createTable(htd);
    waitUntilAllRegionsAssigned(htd.getTableName());
    return (HTable)getConnection().getTable(htd.getTableName());
  }

  public HTable createTable(HTableDescriptor htd, byte[][] splitRows)
      throws IOException {
    getHBaseAdmin().createTable(htd, splitRows);
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(getConfiguration(), htd.getTableName());
  }

  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    return createTable(new HTableDescriptor(tableName), families, c);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms; see createTable(HTableDescriptor, byte[][], Configuration).
      hcd.setBloomFilterType(BloomType.NONE);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, desc.getTableName());
  }

  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, desc.getTableName());
  }

  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  public HTable createTable(TableName tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
      throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    return createTable(TableName.valueOf(tableName),
        families, numVersions, blockSize);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
      throws IOException {
    return createTable(TableName.valueOf(tableName), family, splitRows);
  }

  public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    desc.addFamily(hcd);
    getHBaseAdmin().createTable(desc, splitRows);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, splitRows);
    waitUntilAllRegionsAssigned(desc.getTableName());
    return new HTable(getConfiguration(), desc.getTableName());
  }

  /**
   * Modify a table synchronously: blocks until all regions have picked up the
   * change or the retry budget (roughly 500 seconds) is exhausted.
   */
  @SuppressWarnings("serial")
  public static void modifyTableSync(Admin admin, HTableDescriptor desc)
      throws IOException, InterruptedException {
    admin.modifyTable(desc.getTableName(), desc);
    Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
      setFirst(0);
      setSecond(0);
    }};
    int i = 0;
    do {
      status = admin.getAlterStatus(desc.getTableName());
      if (status.getSecond() != 0) {
        LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
            + " regions updated.");
        Thread.sleep(1000L);
      } else {
        LOG.debug("All regions updated.");
        break;
      }
    } while (status.getFirst() != 0 && i++ < 500);
    if (status.getFirst() != 0) {
      throw new IOException("Failed to update all regions even after 500 seconds.");
    }
  }
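
  // Sketch: altering a column family and waiting for the change to land on
  // every region (illustrative table and family names):
  //
  //   HTableDescriptor htd = admin.getTableDescriptor(TableName.valueOf("demo"));
  //   htd.getFamily(Bytes.toBytes("cf")).setMaxVersions(5);
  //   HBaseTestingUtility.modifyTableSync(admin, htd);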

  /**
   * Set the number of region replicas for a table.
   */
  public static void setReplicas(Admin admin, TableName table, int replicaCount)
      throws IOException, InterruptedException {
    admin.disableTable(table);
    HTableDescriptor desc = admin.getTableDescriptor(table);
    desc.setRegionReplication(replicaCount);
    admin.modifyTable(desc.getTableName(), desc);
    admin.enableTable(table);
  }

  /**
   * Drop an existing table.
   */
  public void deleteTable(String tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  public void deleteTable(byte[] tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  public void deleteTable(TableName tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }

  // Column family and key constants shared by many tests.
  public final static byte[] fam1 = Bytes.toBytes("colfamily11");
  public final static byte[] fam2 = Bytes.toBytes("colfamily21");
  public final static byte[] fam3 = Bytes.toBytes("colfamily31");
  public static final byte[][] COLUMNS = { fam1, fam2, fam3 };
  private static final int MAXVERSIONS = 3;

  public static final char FIRST_CHAR = 'a';
  public static final char LAST_CHAR = 'z';
  public static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR };
  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);

  /**
   * Create a table descriptor with the three default families, applying the
   * given versioning, TTL and keep-deleted-cells settings to each family.
   */
  public HTableDescriptor createTableDescriptor(final String name,
      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
    for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
      htd.addFamily(new HColumnDescriptor(cfName)
          .setMinVersions(minVersions)
          .setMaxVersions(versions)
          .setKeepDeletedCells(keepDeleted)
          .setBlockCacheEnabled(false)
          .setTimeToLive(ttl)
      );
    }
    return htd;
  }
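
  // Sketch: building a descriptor with custom retention settings, then
  // creating the table from it (illustrative values):
  //
  //   HTableDescriptor htd = util.createTableDescriptor("demo",
  //       0 /* minVersions */, 5 /* versions */, 86400 /* ttl secs */,
  //       KeepDeletedCells.FALSE);
  //   util.getHBaseAdmin().createTable(htd);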

  /**
   * Create a table descriptor with the default versioning and TTL settings.
   */
  public HTableDescriptor createTableDescriptor(final String name) {
    return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
  }

  /**
   * Create an HRegion that writes to the local tmp dirs.
   */
  public HRegion createLocalHRegion(HTableDescriptor desc, byte[] startKey,
      byte[] endKey)
      throws IOException {
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
    return createLocalHRegion(hri, desc);
  }

  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
  }

  /**
   * Create an HRegion that writes to the local tmp dirs with the specified wal.
   */
  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, WAL wal)
      throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, wal);
  }

  public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
      String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
      WAL wal, byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.setReadOnly(isReadOnly);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Retain all versions so tests can inspect the full history.
      hcd.setMaxVersions(Integer.MAX_VALUE);
      htd.addFamily(hcd);
    }
    htd.setDurability(durability);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    return createLocalHRegion(info, htd, wal);
  }

  /**
   * Truncate the given table by scanning it and deleting each row read.
   * @return HTable for the truncated table
   */
  public HTable truncateTable(byte[] tableName) throws IOException {
    return truncateTable(TableName.valueOf(tableName));
  }

  public HTable truncateTable(TableName tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      table.delete(new Delete(res.getRow()));
    }
    resScan.close();
    return table;
  }

  /**
   * Load table with rows from 'aaa' to 'zzz'.
   * @return Count of rows loaded.
   */
  public int loadTable(final Table t, final byte[] f) throws IOException {
    return loadTable(t, new byte[][] { f });
  }

  public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
    return loadTable(t, new byte[][] { f }, null, writeToWAL);
  }

  public int loadTable(final Table t, final byte[][] f) throws IOException {
    return loadTable(t, f, null);
  }

  public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
    return loadTable(t, f, value, true);
  }

  public int loadTable(final Table t, final byte[][] f, byte[] value,
      boolean writeToWAL) throws IOException {
    List<Put> puts = new ArrayList<>();
    for (byte[] row : HBaseTestingUtility.ROWS) {
      Put put = new Put(row);
      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
      for (int i = 0; i < f.length; i++) {
        put.add(f[i], null, value != null ? value : row);
      }
      puts.add(put);
    }
    t.put(puts);
    return puts.size();
  }
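
  // Sketch: load the standard 'aaa'..'zzz' row set and verify the count
  // (illustrative; 26^3 = 17576 rows):
  //
  //   int loaded = util.loadTable(table, Bytes.toBytes("cf"));
  //   assertTrue(loaded == util.countRows(table));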

  /**
   * A tracker for tracking and validating table rows generated with
   * {@link HBaseTestingUtility#loadTable(Table, byte[])}.
   */
  public static class SeenRowTracker {
    int dim = 'z' - 'a' + 1;
    int[][][] seenRows = new int[dim][dim][dim]; // count of how many times each row is seen
    byte[] startRow;
    byte[] stopRow;

    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
      this.startRow = startRow;
      this.stopRow = stopRow;
    }

    void reset() {
      for (byte[] row : ROWS) {
        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
      }
    }

    int i(byte b) {
      return b - 'a';
    }

    public void addRow(byte[] row) {
      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
    }

    /**
     * Validate that every row between startRow (inclusive) and stopRow
     * (exclusive) was seen exactly once, and all other rows not at all.
     */
    public void validate() {
      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
            int count = seenRows[i(b1)][i(b2)][i(b3)];
            int expectedCount = 0;
            if (Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0
                && Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0) {
              expectedCount = 1;
            }
            if (count != expectedCount) {
              String row = new String(new byte[] { b1, b2, b3 });
              throw new RuntimeException("Row:" + row + " has a seen count of " + count +
                  " instead of " + expectedCount);
            }
          }
        }
      }
    }
  }
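
  // Sketch: using SeenRowTracker to check that a scan covers exactly one
  // key range (illustrative bounds):
  //
  //   SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("bbb"),
  //       Bytes.toBytes("yyy"));
  //   for (Result r : table.getScanner(new Scan())) {
  //     tracker.addRow(r.getRow());
  //   }
  //   tracker.validate();   // throws if a row is missing or duplicated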

  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
    return loadRegion(r, f, false);
  }

  /**
   * Load region with rows from 'aaa' to 'zzz'.
   * @param r Region
   * @param f Family
   * @param flush flush the cache after each first-letter batch if true
   * @return Count of rows loaded.
   */
  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
      throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.setDurability(Durability.SKIP_WAL);
          put.add(f, null, k);

          // Retry with exponential backoff (capped) if the region is
          // temporarily too busy to take the write.
          int preRowCount = rowCount;
          int pause = 10;
          int maxPause = 1000;
          while (rowCount == preRowCount) {
            try {
              r.put(put);
              rowCount++;
            } catch (RegionTooBusyException e) {
              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
              Threads.sleep(pause);
            }
          }
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }

  public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow)
      throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Put put = new Put(data);
      put.add(f, null, data);
      t.put(put);
    }
  }

  public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow)
      throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Delete delete = new Delete(data);
      delete.deleteFamily(f);
      t.delete(delete);
    }
  }

  /**
   * Return the number of rows in the given table.
   */
  public int countRows(final Table table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  public int countRows(final Table table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }
  /**
   * Return an MD5 digest of all the row keys in the given table, as a hex string.
   */
  public String checksumRows(final Table table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // Return the computed digest bytes as hex; MessageDigest.toString() would
    // not include the digest value itself.
    return Bytes.toHex(digest.digest());
  }

  public int createMultiRegions(HTable table, byte[] columnFamily)
      throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily);
  }

  /** All row keys from 'aaa' to 'zzz'; used by loadTable and loadRegion. */
  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3];
  static {
    int i = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          ROWS[i][0] = b1;
          ROWS[i][1] = b2;
          ROWS[i][2] = b3;
          i++;
        }
      }
    }
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };
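
  // Sketch: creating a pre-split table directly through the admin, using the
  // split points above (illustrative table and family names):
  //
  //   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
  //   htd.addFamily(new HColumnDescriptor("cf"));
  //   util.getHBaseAdmin().createTable(htd, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);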

  /**
   * Creates many regions, named "aaa" through "zzz", for the given table.
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily)
      throws IOException {
    return createMultiRegions(c, table, columnFamily, KEYS);
  }

  /**
   * Creates the specified number of regions in the specified table.
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] family, int numRegions)
      throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] regionStartKeys = new byte[splitKeys.length + 1][];
    System.arraycopy(splitKeys, 0, regionStartKeys, 1, splitKeys.length);
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte[][] startKeys)
      throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    Table meta = new HTable(c, TableName.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }

    // Remember the existing meta rows and the single existing region's
    // directory so we can remove them once the new regions are in place.
    List<byte[]> rows = getMetaTableRows(htd.getTableName());
    String regionToDeleteInFS = table
        .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
        .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // Add the custom regions to hbase:meta.
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getName(),
          startKeys[i], startKeys[j]);
      MetaTableAccessor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
      count++;
    }
    // Remove the original regions from meta.
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    // Remove the original region's directory from the filesystem.
    Path tableDir = new Path(getDefaultRootDirPath().toString()
        + System.getProperty("file.separator") + htd.getTableName()
        + System.getProperty("file.separator") + regionToDeleteInFS);
    FileSystem.get(c).delete(tableDir, true);
    // Flush the client's region location cache.
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    // Assign the new regions if the table is enabled.
    Admin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getName())) {
      for (HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }

  /**
   * Create rows in hbase:meta for regions of the specified table with the
   * specified start keys. The first startKey should be a zero-length byte
   * array if you want to form a proper range of regions.
   * @return list of region info for the regions added to meta
   */
  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final HTableDescriptor htd, byte[][] startKeys)
      throws IOException {
    Table meta = new HTable(conf, TableName.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
          startKeys[j]);
      MetaTableAccessor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

  /**
   * Returns all row keys from the hbase:meta table.
   */
  public List<byte[]> getMetaTableRows() throws IOException {
    Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

  /**
   * Returns all row keys from the hbase:meta table for a given user table.
   */
  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
    Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      HRegionInfo info = HRegionInfo.getHRegionInfo(result);
      if (info == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        continue;
      }

      if (info.getTable().equals(tableName)) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

  /**
   * Get a reference to the region server holding the first region of the
   * specified user table. It looks up the meta rows for the table, finds the
   * server index hosting the first row, and retries until the region is
   * online or the retry budget is exhausted.
   * @param tableName user table to look up in hbase:meta
   * @return region server that holds it, or null if the row does not exist
   */
  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
      throws IOException, InterruptedException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + metaRows.size() + " rows for table " +
      tableName);
    byte[] firstrow = metaRows.get(0);
    LOG.debug("FirstRow=" + Bytes.toString(firstrow));
    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // The client pause is configured in milliseconds, so use a millisecond
    // retry interval.
    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MILLISECONDS);
    while (retrier.shouldRetry()) {
      int index = getMiniHBaseCluster().getServerWith(firstrow);
      if (index != -1) {
        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
      }
      // Came back -1. The region may not be online yet; sleep a while.
      retrier.sleepUntilNextRetry();
    }
    return null;
  }

  /**
   * Starts a <code>MiniMRCluster</code> with a default number of task trackers.
   */
  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
    return mrCluster;
  }

  /**
   * TaskLog caches the hadoop.log.dir system property in a static final
   * LOG_DIR field, so changing the property alone has no effect; force the
   * field to the new value via reflection.
   */
  private void forceChangeTaskLogDir() {
    Field logDirField;
    try {
      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
      logDirField.setAccessible(true);

      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);

      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    } catch (SecurityException | NoSuchFieldException
        | IllegalArgumentException | IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Starts a <code>MiniMRCluster</code>.
   * @param servers number of task trackers to start.
   */
  private void startMiniMapReduceCluster(final int servers) throws IOException {
    if (mrCluster != null) {
      throw new IllegalStateException("MiniMRCluster is already running");
    }
    LOG.info("Starting mini mapreduce cluster...");
    setupClusterTestDir();
    createDirsAndSetProperties();

    forceChangeTaskLogDir();

    // Allow the MR tasks generous virtual-memory headroom so the node manager
    // does not kill them.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    // Disable speculative execution; duplicate task attempts make test output
    // nondeterministic.
    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);

    // Point the MiniMRCluster at the test filesystem.
    mrCluster = new MiniMRCluster(servers,
      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
      null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
    }

    jobConf.set("mapreduce.cluster.local.dir",
      conf.get("mapreduce.cluster.local.dir")); // MiniMRCluster overwrites this
    LOG.info("Mini mapreduce cluster started");

    // The mini MR cluster updates its own conf instance; copy the settings we
    // need back into ours so jobs submitted through it find the cluster.
    conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
    // For MRv2 support; MRv1 ignores this.
    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = jobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
    if (historyAddress != null) {
      conf.set("mapreduce.jobhistory.address", historyAddress);
    }
    String schedulerAddress =
      jobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }
2398
2399
2400
2401
2402 public void shutdownMiniMapReduceCluster() {
2403 if (mrCluster != null) {
2404 LOG.info("Stopping mini mapreduce cluster...");
2405 mrCluster.shutdown();
2406 mrCluster = null;
2407 LOG.info("Mini mapreduce cluster stopped");
2408 }
2409
2410 conf.set("mapreduce.jobtracker.address", "local");
2411 }
2412
2413
2414
2415
2416 public RegionServerServices createMockRegionServerService() throws IOException {
2417 return createMockRegionServerService((ServerName)null);
2418 }
2419
2420
2421
2422
2423
2424 public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
2425 final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2426 rss.setFileSystem(getTestFileSystem());
2427 rss.setRpcServer(rpc);
2428 return rss;
2429 }
2430
2431
2432
2433
2434
2435 public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2436 final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2437 rss.setFileSystem(getTestFileSystem());
2438 return rss;
2439 }
2440
2441
2442
2443
2444
2445
2446 public void enableDebug(Class<?> clazz) {
2447 Log l = LogFactory.getLog(clazz);
2448 if (l instanceof Log4JLogger) {
2449 ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2450 } else if (l instanceof Jdk14Logger) {
2451 ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2452 }
2453 }
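
  // A minimal usage sketch (illustrative): turn on DEBUG logging for a single
  // class while diagnosing a flaky test.
  //
  //   TEST_UTIL.enableDebug(HRegionServer.class);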

  /**
   * Expire the Master's session.
   */
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }

  /**
   * Expire a region server's session.
   * @param index which region server
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    // decrement the count for this.conf, for newly spun-up master
    decrementMinRegionServerCount(getConfiguration());

    // decrement the count for all master threads
    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
      ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
        Math.max(currentCount - 1, 1));
    }
  }

  public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
  }

  @Deprecated
  public void expireSession(ZooKeeperWatcher nodeZK, Server server)
      throws Exception {
    expireSession(nodeZK, false);
  }

  /**
   * Expire a ZooKeeper session as recommended in ZooKeeper documentation
   * http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A4
   * There are issues when doing this:
   * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
   * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
   *
   * @param nodeZK the ZK watcher to expire
   * @param checkStatus true to check if we can create a Table with the
   *                    current configuration
   */
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
      throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
    // so we create a first watcher to be sure that the event was sent.
    // We expect that if our watcher receives the event, other watchers on
    // the same machine will get it as well. When we ask to close the
    // connection, ZK does not close it before we receive all the events,
    // so we don't have to capture the event; just closing the connection
    // should be enough.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher(){
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event="+watchedEvent);
      }
    } , sessionID, password);

    // Making it expire
    ZooKeeper newZK = new ZooKeeper(quorumServers,
        1000, EmptyWatcher.instance, sessionID, password);

    // Ensure that we have a connection to the server before closing down,
    // otherwise the close-session event will be eaten before we reach the
    // CONNECTING state.
    long start = System.currentTimeMillis();
    while (newZK.getState() != States.CONNECTED
        && System.currentTimeMillis() - start < 1000) {
      Thread.sleep(1);
    }
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // Now closing & waiting to be sure that the clients get it.
    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
    }
  }
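
  // A minimal usage sketch (illustrative; the table name is a placeholder):
  // expire a region server's ZK session, then block until the table is
  // served again.
  //
  //   TEST_UTIL.expireRegionServerSession(0);
  //   TEST_UTIL.waitTableAvailable(TableName.valueOf("myTable"));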

  /**
   * Get the Mini HBase cluster.
   * @return hbase cluster
   * @see #getHBaseClusterInterface()
   */
  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

  /**
   * Returns the HBaseCluster instance.
   * <p>The returned object can be any of the subclasses of HBaseCluster, and
   * tests referring to this should not assume that the cluster is a mini
   * cluster or a distributed one. If the test only works on a mini cluster,
   * the specific method {@link #getMiniHBaseCluster()} can be used instead
   * without the need to type-cast.
   */
  public HBaseCluster getHBaseClusterInterface() {
    // implementation note: we should rename this method as #getHBaseCluster(),
    // but this would require refactoring 90+ calls.
    return hbaseCluster;
  }

  /**
   * Get a Connection to the cluster. Not thread-safe (this class needs a lot
   * of work to make it thread-safe).
   * @return A Connection that can be shared. Don't close it; it will be
   *   closed on shutdown of the cluster.
   */
  public Connection getConnection() throws IOException {
    if (this.connection == null) {
      this.connection = ConnectionFactory.createConnection(this.conf);
    }
    return this.connection;
  }

  /**
   * Returns an Admin instance. This instance is shared between
   * HBaseTestingUtility instance users. Closing it has no effect; it will be
   * closed automatically when the cluster shuts down.
   * @return An Admin instance which is shared.
   */
  public synchronized HBaseAdmin getHBaseAdmin()
      throws IOException {
    if (hbaseAdmin == null){
      this.hbaseAdmin = new HBaseAdminForTests(getConnection());
    }
    return hbaseAdmin;
  }
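
  // A minimal usage sketch (illustrative): the shared admin is not closed by
  // callers; close() on it is deliberately a no-op (see HBaseAdminForTests).
  //
  //   Admin admin = TEST_UTIL.getHBaseAdmin();
  //   if (admin.tableExists(tableName)) {
  //     admin.disableTable(tableName);
  //     admin.deleteTable(tableName);
  //   }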

  private HBaseAdminForTests hbaseAdmin = null;

  private static class HBaseAdminForTests extends HBaseAdmin {
    public HBaseAdminForTests(Connection connection) throws MasterNotRunningException,
        ZooKeeperConnectionException, IOException {
      super(connection);
    }

    @Override
    public synchronized void close() throws IOException {
      LOG.warn("close() called on HBaseAdmin instance returned from " +
        "HBaseTestingUtility.getHBaseAdmin()");
    }

    private synchronized void close0() throws IOException {
      super.close();
    }
  }

  /**
   * Returns a ZooKeeperWatcher instance. This instance is shared between
   * HBaseTestingUtility instance users. Don't close it; it will be closed
   * automatically when the cluster shuts down.
   * @return The ZooKeeperWatcher instance.
   */
  public synchronized ZooKeeperWatcher getZooKeeperWatcher()
      throws IOException {
    if (zooKeeperWatcher == null) {
      zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
        new Abortable() {
          @Override public void abort(String why, Throwable e) {
            throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
          }
          @Override public boolean isAborted() {return false;}
        });
    }
    return zooKeeperWatcher;
  }
  private ZooKeeperWatcher zooKeeperWatcher;

  /**
   * Closes the named region.
   * @param regionName The region to close.
   */
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  /**
   * Closes the named region.
   * @param regionName The region to close.
   */
  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }

  /**
   * Closes the region containing the given row.
   * @param row The row to find the containing region.
   * @param table The table to find the region in.
   */
  public void closeRegionByRow(String row, RegionLocator table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  /**
   * Closes the region containing the given row.
   * @param row The row to find the containing region.
   * @param table The table to find the region in.
   */
  public void closeRegionByRow(byte[] row, RegionLocator table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

  /**
   * Retrieves a splittable region randomly from tableName.
   * @param tableName name of table
   * @param maxAttempts maximum number of attempts, unlimited for value of -1
   * @return the HRegion chosen, null if none was found within limit of maxAttempts
   */
  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
    int regCount = regions.size();
    Set<Integer> attempted = new HashSet<Integer>();
    int idx;
    int attempts = 0;
    do {
      regions = getHBaseCluster().getRegions(tableName);
      if (regCount != regions.size()) {
        // if there was region movement, clear attempted Set
        attempted.clear();
      }
      regCount = regions.size();
      // There are chances that before we get the region for the table from an
      // RS the region may be going for CLOSE. This may be because online
      // schema change is enabled.
      if (regCount > 0) {
        idx = random.nextInt(regCount);
        // if we have just tried this region, there is no need to try it again;
        // count the pick as an attempt so a finite maxAttempts cannot loop forever
        if (attempted.contains(idx)) {
          attempts++;
          continue;
        }
        try {
          regions.get(idx).checkSplit();
          return regions.get(idx);
        } catch (Exception ex) {
          LOG.warn("Caught exception", ex);
          attempted.add(idx);
        }
      }
      attempts++;
    } while (maxAttempts == -1 || attempts < maxAttempts);
    return null;
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
    if (dfsCluster != null && dfsCluster.isClusterUp()) {
      throw new IOException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }

  /**
   * Wait until all regions in a table have been assigned. Waits the default
   * timeout (30 seconds) before giving up.
   * @param table Table to wait on.
   */
  public void waitTableAvailable(TableName table)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table.getName(), 30000);
  }

  public void waitTableAvailable(Admin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableAvailable(admin, table, 30000);
  }

  /**
   * Wait until all regions in a table have been assigned.
   * @param table Table to wait on.
   * @param timeoutMillis Timeout in milliseconds.
   */
  public void waitTableAvailable(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableAvailable(Admin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!admin.isTableAvailable(TableName.valueOf(table))) {
      assertTrue("Timed out waiting for table to become available " +
        Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }
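
  // A minimal usage sketch (illustrative; desc is an assumed table
  // descriptor): create a table and block until its regions are actually
  // available before writing to it.
  //
  //   TEST_UTIL.getHBaseAdmin().createTable(desc);
  //   TEST_UTIL.waitTableAvailable(desc.getTableName().getName(), 60000);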

  /**
   * Waits for a table to be 'enabled'. Enabled means that the table is set as
   * 'enabled' and the regions have all been assigned. Will time out after the
   * default period (30 seconds).
   * @see #waitTableAvailable(byte[], long)
   * @param table Table to wait on.
   */
  public void waitTableEnabled(TableName table)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table.getName(), 30000);
  }

  public void waitTableEnabled(Admin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(admin, table, 30000);
  }

  /**
   * Waits for a table to be 'enabled'. Enabled means that the table is set as
   * 'enabled' and the regions have all been assigned.
   * @see #waitTableAvailable(byte[], long)
   * @param table Table to wait on.
   * @param timeoutMillis Time to wait on it being marked enabled.
   */
  public void waitTableEnabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableEnabled(Admin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    TableName tableName = TableName.valueOf(table);
    long startWait = System.currentTimeMillis();
    waitTableAvailable(admin, table, timeoutMillis);
    while (!admin.isTableEnabled(tableName)) {
      assertTrue("Timed out waiting for table to become available and enabled " +
        Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
    // Finally make sure all regions are fully open and online out on the
    // cluster. Regions may be in hbase:meta and almost open on all region
    // servers, but setting the region online in the regionserver is the very
    // last thing done and can take a little while to happen. Below we do a
    // get; the get will retry on a NotServingRegionException or a
    // RegionOpeningException. It is crass but when done all will be online.
    try {
      Canary.sniff(admin, tableName);
    } catch (Exception e) {
      throw new IOException(e);
    }
  }

  /**
   * Waits for a table to be 'disabled'. Disabled means that the table is set
   * as 'disabled'. Will time out after the default period (30 seconds).
   * @param table Table to wait on.
   */
  public void waitTableDisabled(byte[] table)
      throws InterruptedException, IOException {
    waitTableDisabled(getHBaseAdmin(), table, 30000);
  }

  public void waitTableDisabled(Admin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableDisabled(admin, table, 30000);
  }

  /**
   * Waits for a table to be 'disabled'. Disabled means that the table is set
   * as 'disabled'.
   * @param table Table to wait on.
   * @param timeoutMillis Time to wait on it being marked disabled.
   */
  public void waitTableDisabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableDisabled(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableDisabled(Admin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    TableName tableName = TableName.valueOf(table);
    long startWait = System.currentTimeMillis();
    while (!admin.isTableDisabled(tableName)) {
      assertTrue("Timed out waiting for table to become disabled " +
        Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

  /**
   * Make sure that at least the specified number of region servers are
   * running.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   */
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }

    return startedServer;
  }

  /**
   * Make sure that at least the specified number of region servers are
   * running. We don't count the ones that are currently stopping or are
   * stopped.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   */
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
        getMiniHBaseCluster().getRegionServerThreads()) {

      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:"+hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i=nonStoppedServers; i<num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }

  /**
   * This method clones the passed <code>c</code> configuration setting a new
   * user into the clone. Use it for getting new instances of FileSystem. Only
   * works for DistributedFileSystem w/o Kerberos.
   * @param c Initial configuration
   * @param differentiatingSuffix Suffix to differentiate this user from others.
   * @return A new user with a different name from the current user's.
   */
  public static User getDifferentUser(final Configuration c,
      final String differentiatingSuffix)
      throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    // Else distributed filesystem. Make a new instance per daemon. Below
    // code is taken from the AppendTestUtil over in hdfs.
    String username = User.getCurrent().getName() +
      differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
      new String[]{"supergroup"});
    return user;
  }

  public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster)
      throws IOException {
    NavigableSet<String> online = new TreeSet<String>();
    for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
      try {
        for (HRegionInfo region :
            ProtobufUtil.getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
          online.add(region.getRegionNameAsString());
        }
      } catch (RegionServerStoppedException e) {
        // That's fine.
      }
    }
    for (MasterThread mt : cluster.getLiveMasterThreads()) {
      try {
        for (HRegionInfo region :
            ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) {
          online.add(region.getRegionNameAsString());
        }
      } catch (RegionServerStoppedException e) {
        // That's fine.
      } catch (ServerNotRunningYetException e) {
        // That's fine.
      }
    }
    return online;
  }

  /**
   * Set maxRecoveryErrorCount in DFSClient. Older hadoop hard-codes it to 5,
   * which makes tests linger while block recovery retries against a downed
   * primary datanode. The relevant DFSOutputStream field is not publicly
   * accessible, so we set it via reflection; if the field cannot be found or
   * set (e.g. on a different hadoop version), we just log and move on.
   * @param stream A DFSClient.DFSOutputStream.
   * @param max The new maximum recovery error count.
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz: clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }

  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server entry, up to a default timeout value (60 seconds). This means
   * all regions have been deployed, the master has been informed, and
   * hbase:meta has been updated with the regions' deployed servers.
   * @param tableName the table name
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }

  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server entry, up to the given timeout value.
   * @param tableName the table name
   * @param timeout timeout, in milliseconds
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
      throws IOException {
    final Table meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
    try {
      waitFor(timeout, 200, true, new Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          boolean allRegionsAssigned = true;
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          ResultScanner s = meta.getScanner(scan);
          try {
            Result r;
            while ((r = s.next()) != null) {
              byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
              HRegionInfo info = HRegionInfo.parseFromOrNull(b);
              if (info != null && info.getTable().equals(tableName)) {
                b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                allRegionsAssigned &= (b != null);
              }
            }
          } finally {
            s.close();
          }
          return allRegionsAssigned;
        }
      });
    } finally {
      meta.close();
    }

    // check from the master state if we are using a mini cluster
    if (!getHBaseClusterInterface().isDistributedCluster()) {
      // So, all regions are in hbase:meta, but also make sure that is true
      // for the master's region states.
      HMaster master = getHBaseCluster().getMaster();
      final RegionStates states = master.getAssignmentManager().getRegionStates();
      waitFor(timeout, 200, new Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          List<HRegionInfo> hris = states.getRegionsOfTable(tableName);
          return hris != null && !hris.isEmpty();
        }
      });
    }
  }
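
  // A minimal usage sketch (illustrative): after creating a pre-split table,
  // wait until every region is deployed before loading data.
  //
  //   TEST_UTIL.waitUntilAllRegionsAssigned(TableName.valueOf("myTable"), 60000);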

  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
   */
  public static List<Cell> getFromStoreFile(HStore store,
      Get get) throws IOException {
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
      scan.getFamilyMap().get(store.getFamily().getName()),
      // originally MultiVersionConsistencyControl.resetThreadReadPoint() was
      // called to set the read point to 0.
      0);

    List<Cell> result = new ArrayList<Cell>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // verify that we are on the row we want:
      Cell kv = result.get(0);
      if (!CellUtil.matchingRow(kv, get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }

  /**
   * Create region split keys between startKey and endKey.
   * @param startKey the start key of the key range
   * @param endKey the end key of the key range
   * @param numRegions the number of regions to be created; has to be greater than 3
   * @return resulting split keys; the first key is always HConstants.EMPTY_BYTE_ARRAY
   */
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
    assertTrue(numRegions>3);
    byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] result = new byte[tmpSplitKeys.length+1][];
    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }
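
  // A minimal usage sketch (illustrative): derive start keys for a 10-region
  // table spanning "aaa" to "zzz".
  //
  //   byte[][] startKeys = TEST_UTIL.getRegionSplitStartKeys(
  //       Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 10);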

  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
   */
  public static List<Cell> getFromStoreFile(HStore store,
      byte [] row,
      NavigableSet<byte[]> columns
      ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store,get);
  }

  /**
   * Gets a ZooKeeperWatcher.
   * @param TEST_UTIL the testing utility whose configuration to use
   */
  public static ZooKeeperWatcher getZooKeeperWatcher(
      HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
      IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
      "unittest", new Abortable() {
        boolean aborted = false;

        @Override
        public void abort(String why, Throwable e) {
          aborted = true;
          throw new RuntimeException("Fatal ZK error, why=" + why, e);
        }

        @Override
        public boolean isAborted() {
          return aborted;
        }
      });
    return zkw;
  }

  /**
   * Creates a znode with OPENED state.
   * @param TEST_UTIL the testing utility to use
   * @param region the region to transition
   * @param serverName the server the region is said to open on
   * @return the ZooKeeperWatcher used to perform the transitions
   */
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw, region
      .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
      version);
    return zkw;
  }

  public static void assertKVListsEqual(String additionalMsg,
      final List<? extends Cell> expected,
      final List<? extends Cell> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i;
    for (i = 0; i < minLen
        && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
        ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
        "Expected and actual KV arrays differ at position " + i + ": " +
        safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
        safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }

  public static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }

  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
      + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
      + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
        HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }

  /** Creates a random table with the given parameters */
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
      " regions, " + numFlushes + " storefiles per region, " +
      numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
      "\n");

    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableName, cfBytes,
      maxVersions,
      Bytes.toBytes(String.format(keyFormat, splitStartKey)),
      Bytes.toBytes(String.format(keyFormat, splitEndKey)),
      numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
          actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
              "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
              ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }

    return table;
  }
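
  // A minimal usage sketch (illustrative): build a small randomized table to
  // exercise scanners. Arguments are, in order: table name, families,
  // maxVersions, numColsPerRow, numFlushes, numRegions, numRowsPerFlush.
  //
  //   HTable t = TEST_UTIL.createRandomTable("randomtb",
  //       Arrays.asList("cf1", "cf2"), 3, 10, 2, 5, 100);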

  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;
  private static Random random = new Random();

  /**
   * Returns a random port. These ports fall in the IANA dynamic/private
   * range and cannot be registered, so they are suited to dynamic allocation
   * in tests.
   */
  public static int randomPort() {
    return MIN_RANDOM_PORT
      + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
  }

  /**
   * Returns a random free port and marks that port as taken. Not thread-safe;
   * expected to be called from single-threaded test setup code.
   */
  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      if (takenRandomPorts.contains(port)) {
        continue;
      }
      takenRandomPorts.add(port);

      // Bind the port briefly to verify nothing else currently holds it.
      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        port = 0;
      }
    } while (port == 0);
    return port;
  }
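
  // A minimal usage sketch (illustrative): reserve a free port for a test
  // daemon before starting it.
  //
  //   int infoPort = HBaseTestingUtility.randomFreePort();
  //   conf.setInt("hbase.regionserver.info.port", infoPort);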

  public static String randomMultiCastAddress() {
    return "226.1.1." + random.nextInt(254);
  }

  public static void waitForHostPort(String host, int port)
      throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }

  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    return createPreSplitLoadTestTable(conf, tableName,
      columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1,
      Durability.USE_DEFAULT);
  }

  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
      Durability durability)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.setDurability(durability);
    desc.setRegionReplication(regionReplication);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
  }

  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
  }

  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd, int numRegionsPerServer) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
    Admin admin = unmanagedConnection.getAdmin();

    try {
      // Create a table with pre-split regions. The number of splits is:
      //   region servers * regions per region server
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * numRegionsPerServer;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
        "pre-splitting table into " + totalNumberOfRegions + " regions " +
        "(regions per server: " + numRegionsPerServer + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
        totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + desc.getTableName() +
        " already exists, continuing");
    } finally {
      admin.close();
      unmanagedConnection.close();
    }
    return totalNumberOfRegions;
  }
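
  // A minimal usage sketch (illustrative): pre-split a load-test table with
  // GZ compression and no block encoding.
  //
  //   int regions = HBaseTestingUtility.createPreSplitLoadTestTable(conf,
  //       TableName.valueOf("loadtest"), Bytes.toBytes("cf"),
  //       Compression.Algorithm.GZ, DataBlockEncoding.NONE);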

  public static int getMetaRSPort(Configuration conf) throws IOException {
    RegionLocator table = new HTable(conf, TableName.META_TABLE_NAME);
    HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
    table.close();
    return hloc.getPort();
  }

  /**
   * Due to an async racing issue, a region may not be in the online region
   * list of a region server yet, even after the assignment znode is deleted
   * and the new assignment is recorded in the master. So poll until found.
   */
  public void assertRegionOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) return;
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
      + " on server " + server);
  }

  /**
   * Check to make sure the region is open on the specified region server,
   * but not on any other one.
   */
  public void assertRegionOnlyOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) {
        List<JVMClusterUtil.RegionServerThread> rsThreads =
          getHBaseCluster().getLiveRegionServerThreads();
        for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
          HRegionServer rs = rsThread.getRegionServer();
          if (server.equals(rs.getServerName())) {
            continue;
          }
          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
          for (HRegion r: hrs) {
            assertTrue("Region should not be double assigned",
              r.getRegionId() != hri.getRegionId());
          }
        }
        return;
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
      + " on server " + server);
  }

  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.addFamily(hcd);
    HRegionInfo info =
      new HRegionInfo(TableName.valueOf(tableName), null, null, false);
    HRegion region =
      HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }

  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, predicate);
  }

  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, interval, predicate);
  }

  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, long interval,
      boolean failIfTimeout, Predicate<E> predicate) throws E {
    return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
  }
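
  // A minimal usage sketch (illustrative; tableName is assumed final in the
  // caller): poll a custom condition every 100ms, failing the test if it does
  // not hold within 30s.
  //
  //   TEST_UTIL.waitFor(30000, 100, new Waiter.Predicate<Exception>() {
  //     @Override
  //     public boolean evaluate() throws Exception {
  //       return TEST_UTIL.getHBaseAdmin().isTableEnabled(tableName);
  //     }
  //   });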

  /**
   * Returns a {@link Predicate} for checking that there are no regions in
   * transition in the master.
   */
  public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
          .getAssignmentManager().getRegionStates();
        return !regionStates.isRegionsInTransition();
      }
    };
  }

  /**
   * Returns a {@link Predicate} for checking that the given table is enabled.
   */
  public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return getHBaseAdmin().isTableEnabled(tableName);
      }
    };
  }

  /**
   * Create a set of column descriptors with every combination of compression,
   * encoding, and bloom codecs available.
   * @return the list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

  /**
   * Create a set of column descriptors with every combination of compression,
   * encoding, and bloom codecs available.
   * @param prefix family names prefix
   * @return the list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
    List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
    long familyId = 0;
    for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
        for (BloomType bloomType: BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          HColumnDescriptor htd = new HColumnDescriptor(name);
          htd.setCompressionType(compressionType);
          htd.setDataBlockEncoding(encodingType);
          htd.setBloomFilterType(bloomType);
          htds.add(htd);
          familyId++;
        }
      }
    }
    return htds;
  }

  /**
   * Get supported compression algorithms.
   * @return supported compression algorithms
   */
  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // this algo is not available
      }
    }
    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
  }

  /**
   * Wait until there are no regions in transition.
   * @param timeout How long to wait.
   */
  public void waitUntilNoRegionsInTransition(final long timeout) throws Exception {
    waitFor(timeout, predicateNoRegionsInTransition());
  }
}