/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.tool.Canary;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;

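/**
 * Facility for testing HBase. Manages at most one cluster at a time: an
 * in-process {@link MiniHBaseCluster} together with the mini DFS, ZooKeeper
 * and (optionally) MapReduce clusters it depends on. Also provides helpers
 * for creating, loading, counting and dropping test tables.
 */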
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HBaseTestingUtility extends HBaseCommonTestingUtility {
  private Configuration conf;
  private MiniZooKeeperCluster zkCluster = null;

  private static int DEFAULT_REGIONS_PER_SERVER = 5;

  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  private boolean miniClusterRunning;

  private String hadoopLogDir;

  private File clusterTestDir = null;

  private Path dataTestDirOnTestFS = null;

  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  private static String FS_URI;

  private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();

  // Shared source of randomness, used for example by getSplittableRegion().
  private static final Random random = new Random();

  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Boolean.FALSE },
      { Boolean.TRUE }
    });

  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
    Compression.Algorithm.NONE, Compression.Algorithm.GZ
  };

  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
        HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
    bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    this.conf = conf;

    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }

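  /**
   * Returns the {@link Configuration} this utility was constructed with.
   * Changes made to it are picked up by clusters started from this utility
   * afterwards.
   */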
  public Configuration getConfiguration() {
    return this.conf;
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

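  /**
   * Homes the test data directory and points the Hadoop and HBase directory
   * properties (log, tmp, mapred-local, hbase-local) at subdirectories of it.
   * @return the new base test path, or null if it was already set up
   */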
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    createSubDir(
      "mapred.local.dir",
      testPath, "mapred-local-dir");

    createSubDir(
      "hbase.local.dir",
      testPath, "hbase-local-dir");
    return testPath;
  }

  private void createSubDir(String propertyName, Path parent, String subDirName) {
    Path newPath = new Path(parent, subDirName);
    File newDir = new File(newPath.toString()).getAbsoluteFile();
    newDir.deleteOnExit();
    conf.set(propertyName, newDir.getAbsolutePath());
  }

  private void createSubDirAndSystemProperty(
      String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          " Erasing configuration value by system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

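  /**
   * @return a path to a directory named "test-data" under the working
   *   directory of the test {@link FileSystem}
   */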
  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

  Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      return;
    }

    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();

    clusterTestDir.deleteOnExit();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    LOG.info("Created new mini-cluster data directory: " + clusterTestDir);
  }

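  /**
   * Returns a path on the test filesystem where test data can be written.
   * The directory is created lazily and scheduled for deletion on exit.
   */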
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test on test fs dir already setup in "
        + dataTestDirOnTestFS.toString());
      return;
    }

    FileSystem fs = getTestFileSystem();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      File dataTestDir = new File(getDataTestDir().toString());
      dataTestDir.deleteOnExit();
      dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
    } else {
      Path base = getBaseTestDirOnTestFS();
      String randomStr = UUID.randomUUID().toString();
      dataTestDirOnTestFS = new Path(base, randomStr);
      fs.deleteOnExit(dataTestDirOnTestFS);
    }
  }

  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    // Nothing to clean up if the directory was never set up.
    if (dataTestDirOnTestFS == null) {
      return false;
    }
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret) {
      dataTestDirOnTestFS = null;
    }
    return ret;
  }

  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }

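  /**
   * Start a mini DFS cluster.
   * @param servers how many DataNodes to start
   * @return the started {@link MiniDFSCluster}
   * @see #shutdownMiniDFSCluster()
   */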
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
  throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
  throws Exception {
    createDirsAndSetProperties();

    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
      setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
      setLevel(org.apache.log4j.Level.ERROR);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    this.dfsCluster.waitClusterUp();

    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
  throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, racks, hosts, null);

    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    this.dfsCluster.waitClusterUp();

    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
      null, null, null);
    return dfsCluster;
  }

  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapred.local.dir");
    createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
  }

  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }

  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      conf.set("dfs.block.local-path-access.user", curUser);
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }

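  /**
   * Start a mini ZooKeeper cluster with a single server.
   * @return the started {@link MiniZooKeeperCluster}
   * @see #shutdownMiniZKCluster()
   */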
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
  throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
  throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
  throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0) {
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }

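  /**
   * Start up a minicluster of HBase, DFS and ZooKeeper with one master and
   * one region server.
   * @return the started {@link MiniHBaseCluster}
   * @see #shutdownMiniCluster()
   */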
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  public MiniHBaseCluster startMiniCluster(final int numSlaves)
  throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves)
  throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
  throws Exception {
    return startMiniCluster(
      numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
  throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
      numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
  throws IOException, InterruptedException {
    return startMiniHBaseCluster(numMasters, numSlaves, null, null);
  }

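  /**
   * Starts the HBase cluster on top of already-running mini DFS and ZooKeeper
   * clusters; blocks until hbase:meta can be scanned.
   * @return the started {@link MiniHBaseCluster}
   */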
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
      final int numSlaves, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
  throws IOException, InterruptedException {
    createRootDir();

    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster =
      new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);

    // Don't leave here till we've done a successful scan of the hbase:meta table.
    HTable t = new HTable(c, TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin();
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster) this.hbaseCluster;
  }

  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);

    // Don't leave here till we've done a successful scan of the hbase:meta table.
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster) this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
      MiniHBaseCluster.class.getName());
  }

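  /**
   * Stops the mini HBase and DFS clusters, the ZooKeeper cluster (unless it
   * was passed in via {@link #setZkCluster(MiniZooKeeperCluster)}), and cleans
   * up the test directory.
   */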
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }

  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close();
      hbaseAdmin = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }

    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }
  }

  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }

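  /**
   * Creates the HBase root directory on the test filesystem, writes the
   * version file, and points hbase.rootdir at it.
   * @return the root directory path
   */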
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

  public HTable createTable(String tableName, String family)
  throws IOException {
    return createTable(TableName.valueOf(tableName), new String[] { family });
  }

  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException {
    return createTable(TableName.valueOf(tableName), new byte[][] { family });
  }

  public HTable createTable(String tableName, String[] families)
  throws IOException {
    // Convert to TableName so this dispatches to the TableName overload
    // instead of recursing into itself.
    return createTable(TableName.valueOf(tableName), families);
  }

  public HTable createTable(TableName tableName, String[] families)
  throws IOException {
    List<byte[]> fams = new ArrayList<byte[]>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  public HTable createTable(TableName tableName, byte[] family)
  throws IOException {
    return createTable(tableName, new byte[][] { family });
  }

  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
      new Configuration(getConfiguration()));
  }

  public HTable createTable(TableName tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
      new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
      startKey, endKey, numRegions);
  }

  public HTable createTable(String tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
      startKey, endKey, numRegions);
  }

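  /**
   * Create a pre-split table.
   * @param numVersions max versions to keep per column family
   * @param startKey start of the key range split across the regions
   * @param endKey end of the key range split across the regions
   * @param numRegions number of regions to create
   * @return an HTable for the new table
   */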
  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setBloomFilterType(BloomType.NONE);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setBloomFilterType(BloomType.NONE);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c, int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c, int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  public HTable createTable(TableName tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
  throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    return createTable(TableName.valueOf(tableName),
      families, numVersions, blockSize);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions)
        .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
  throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
      int[] numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
  throws IOException {
    return createTable(TableName.valueOf(tableName), family, splitRows);
  }

  public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    desc.addFamily(hcd);
    getHBaseAdmin().createTable(desc, splitRows);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, splitRows);

    waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
    return new HTable(getConfiguration(), tableName);
  }

  public void deleteTable(String tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  public void deleteTable(byte[] tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

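  /**
   * Drop an existing table, disabling it first if needed.
   */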
  public void deleteTable(TableName tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }

  public HTable truncateTable(byte[] tableName) throws IOException {
    return truncateTable(TableName.valueOf(tableName));
  }

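  /**
   * Truncate a table by deleting every row; the table itself and its regions
   * are kept.
   * @return an HTable for the emptied table
   */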
  public HTable truncateTable(TableName tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan.close();
    return table;
  }

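  /**
   * Load a table with rows keyed 'aaa' through 'zzz', one column per row.
   * @param t table to load
   * @param f column family to write to
   * @return count of rows loaded
   */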
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

  public int loadTable(final HTable t, final byte[][] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          for (int i = 0; i < f.length; i++) {
            put.add(f[i], null, k);
          }
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
    return loadRegion(r, f, false);
  }

  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
  throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          if (r.getLog() == null) put.setDurability(Durability.SKIP_WAL);
          r.put(put);
          rowCount++;
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }

  public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow)
  throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Put put = new Put(data);
      put.add(f, null, data);
      t.put(put);
    }
  }

  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  public int countRows(final HTable table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

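  /**
   * Return an MD5 digest computed over all row keys of a table.
   */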
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // Encode the computed digest bytes so callers can compare table contents;
    // MessageDigest.toString() does not render the hash.
    return Bytes.toStringBinary(digest.digest());
  }

  public int createMultiRegions(HTable table, byte[] columnFamily)
  throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily);
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };

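  /**
   * Creates many regions ("aaa" through "yyy") for the given table and column
   * family by rewriting hbase:meta directly.
   * @return count of regions created
   */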
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily)
  throws IOException {
    return createMultiRegions(c, table, columnFamily, KEYS);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] family, int numRegions)
  throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] regionStartKeys = new byte[splitKeys.length + 1][];
    for (int i = 0; i < splitKeys.length; i++) {
      regionStartKeys[i + 1] = splitKeys[i];
    }
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte[][] startKeys)
  throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, TableName.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }

    List<byte[]> rows = getMetaTableRows(htd.getTableName());
    String regionToDeleteInFS = table
      .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
      .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getName(),
        startKeys[i], startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
      count++;
    }

    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }

    Path tableDir = new Path(getDefaultRootDirPath().toString()
      + System.getProperty("file.separator") + htd.getTableName()
      + System.getProperty("file.separator") + regionToDeleteInFS);
    FileSystem.get(c).delete(tableDir, true);

    HConnection conn = table.getConnection();
    conn.clearRegionCache();

    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for (HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }

  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final HTableDescriptor htd, byte[][] startKeys)
  throws IOException {
    HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
        startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

  public List<byte[]> getMetaTableRows() throws IOException {
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      HRegionInfo info = HRegionInfo.getHRegionInfo(result);
      if (info == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        continue;
      }

      if (info.getTableName().equals(tableName)) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
  throws IOException, InterruptedException {
    return getRSForFirstRegionInTable(TableName.valueOf(tableName));
  }

  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
  throws IOException, InterruptedException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + metaRows.size() + " rows for table " +
      tableName);
    byte[] firstrow = metaRows.get(0);
    LOG.debug("FirstRow=" + Bytes.toString(firstrow));
    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // The client pause is configured in milliseconds.
    RetryCounter retrier = new RetryCounter(numRetries, (int) pause, TimeUnit.MILLISECONDS);
    while (retrier.shouldRetry()) {
      int index = getMiniHBaseCluster().getServerWith(firstrow);
      if (index != -1) {
        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
      }
      retrier.sleepUntilNextRetry();
    }
    return null;
  }

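  /**
   * Starts a MiniMRCluster with a default of two servers.
   * @throws IOException if the cluster fails to start
   */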
  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
    return mrCluster;
  }

  private void forceChangeTaskLogDir() {
    Field logDirField;
    try {
      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
      logDirField.setAccessible(true);

      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);

      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    } catch (SecurityException e) {
      throw new RuntimeException(e);
    } catch (NoSuchFieldException e) {
      throw new RuntimeException(e);
    } catch (IllegalArgumentException e) {
      throw new RuntimeException(e);
    } catch (IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }

  private void startMiniMapReduceCluster(final int servers) throws IOException {
    if (mrCluster != null) {
      throw new IllegalStateException("MiniMRCluster is already running");
    }
    LOG.info("Starting mini mapreduce cluster...");
    setupClusterTestDir();
    createDirsAndSetProperties();

    forceChangeTaskLogDir();

    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);

    mrCluster = new MiniMRCluster(servers,
      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
      null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
    }

    jobConf.set("mapred.local.dir",
      conf.get("mapred.local.dir"));
    LOG.info("Mini mapreduce cluster started");

    conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));

    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = jobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String schedulerAddress =
      jobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }

  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }

  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }

  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }

  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    decrementMinRegionServerCount(getConfiguration());

    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
      ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
        Math.max(currentCount - 1, 1));
    }
  }

  public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
  }

  @Deprecated
  public void expireSession(ZooKeeperWatcher nodeZK, Server server)
  throws Exception {
    expireSession(nodeZK, false);
  }

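  /**
   * Expire a ZooKeeper session, following the approach recommended by the
   * ZooKeeper FAQ: connect a second client with the same session id and
   * password, then close it, which invalidates the original session.
   * @param nodeZK watcher whose session should be expired
   * @param checkStatus if true, verify afterwards that hbase:meta can still
   *   be opened
   */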
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
  throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher() {
        @Override
        public void process(WatchedEvent watchedEvent) {
          LOG.info("Monitor ZKW received event=" + watchedEvent);
        }
      }, sessionID, password);

    ZooKeeper newZK = new ZooKeeper(quorumServers,
      1000, EmptyWatcher.instance, sessionID, password);

    long start = System.currentTimeMillis();
    while (newZK.getState() != States.CONNECTED
        && System.currentTimeMillis() - start < 1000) {
      Thread.sleep(1);
    }
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
    }
  }

  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

  public HBaseCluster getHBaseClusterInterface() {
    return hbaseCluster;
  }

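  /**
   * Returns an HBaseAdmin instance, lazily created and shared by all callers
   * of this utility. It is closed by {@link #shutdownMiniHBaseCluster()};
   * do not close it yourself.
   */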
  public synchronized HBaseAdmin getHBaseAdmin()
  throws IOException {
    if (hbaseAdmin == null) {
      hbaseAdmin = new HBaseAdmin(getConfiguration());
    }
    return hbaseAdmin;
  }

  private HBaseAdmin hbaseAdmin = null;

  public synchronized ZooKeeperWatcher getZooKeeperWatcher()
  throws IOException {
    if (zooKeeperWatcher == null) {
      zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
        new Abortable() {
          @Override public void abort(String why, Throwable e) {
            throw new RuntimeException("Unexpected abort in HBaseTestingUtility:" + why, e);
          }
          @Override public boolean isAborted() { return false; }
        });
    }
    return zooKeeperWatcher;
  }

  private ZooKeeperWatcher zooKeeperWatcher;

  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }

  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
    int regCount = regions.size();
    Set<Integer> attempted = new HashSet<Integer>();
    int idx;
    int attempts = 0;
    do {
      regions = getHBaseCluster().getRegions(tableName);
      if (regCount != regions.size()) {
        // if there was region movement, clear attempted Set
        attempted.clear();
      }
      regCount = regions.size();
      if (regCount > 0) {
        idx = random.nextInt(regCount);
        // if we have just tried this region, there is no need to try again
        if (attempted.contains(idx)) {
          continue;
        }
        try {
          regions.get(idx).checkSplit();
          return regions.get(idx);
        } catch (Exception ex) {
          LOG.warn("Caught exception", ex);
          attempted.add(idx);
        }
      }
      attempts++;
    } while (maxAttempts == -1 || attempts < maxAttempts);
    return null;
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
    if (dfsCluster != null && dfsCluster.isClusterUp()) {
      throw new IOException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }

  public void waitTableAvailable(byte[] table)
  throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, 30000);
  }

  public void waitTableAvailable(HBaseAdmin admin, byte[] table)
  throws InterruptedException, IOException {
    waitTableAvailable(admin, table, 30000);
  }

  public void waitTableAvailable(byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!admin.isTableAvailable(table)) {
      assertTrue("Timed out waiting for table to become available " +
        Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }

    // Regions may be online but not yet readable by clients; sniff the table
    // with a Canary before declaring it available.
    try {
      Canary.sniff(admin, TableName.valueOf(table));
    } catch (Exception e) {
      throw new IOException(e);
    }
  }

  public void waitTableEnabled(byte[] table)
  throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, 30000);
  }

  public void waitTableEnabled(HBaseAdmin admin, byte[] table)
  throws InterruptedException, IOException {
    waitTableEnabled(admin, table, 30000);
  }

  public void waitTableEnabled(byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
  }

2365 public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
2366 throws InterruptedException, IOException {
2367 long startWait = System.currentTimeMillis();
2368 waitTableAvailable(admin, table, timeoutMillis);
2369 long remainder = System.currentTimeMillis() - startWait;
2370 while (!admin.isTableEnabled(table)) {
2371 assertTrue("Timed out waiting for table to become available and enabled " +
2372 Bytes.toStringBinary(table),
2373 System.currentTimeMillis() - remainder < timeoutMillis);
2374 Thread.sleep(200);
2375 }
2376 LOG.debug("REMOVE AFTER table=" + Bytes.toString(table) + ", isTableAvailable=" +
2377 admin.isTableAvailable(table) +
2378 ", isTableEnabled=" + admin.isTableEnabled(table));
2379 }
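
  /*
   * Example (sketch): after re-enabling a previously disabled table, block
   * until it is both assigned and enabled before issuing reads:
   *
   *   admin.enableTable(table);
   *   util.waitTableEnabled(table);  // delegates with the 30000 ms default
   */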

  /**
   * Make sure that at least the specified number of region servers are running.
   * @param num minimum number of region servers that should be running
   * @return true if we started any servers
   * @throws IOException
   */
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }

    return startedServer;
  }

  /**
   * Make sure that at least the specified number of region servers are running.
   * We don't count the ones that are currently stopping or are stopped.
   * @param num minimum number of region servers that should be running
   * @return true if we started any servers
   * @throws IOException
   */
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
        getMiniHBaseCluster().getRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:" + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }

  /**
   * Returns a User whose name is the current user's name plus the given
   * differentiating suffix, for tests that need a second client identity. If
   * the cluster is not on a DistributedFileSystem, the current user is
   * returned unchanged, since a new user would buy us nothing there.
   * @param c configuration to create the test user against
   * @param differentiatingSuffix suffix appended to the current user's name
   * @return a User distinct from the current one, in group 'supergroup'
   * @throws IOException
   */
  public static User getDifferentUser(final Configuration c,
      final String differentiatingSuffix)
      throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    // We are on a distributed filesystem: make a distinct user so each caller
    // gets its own FileSystem instance rather than the cached one.
    String username = User.getCurrent().getName() +
        differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
        new String[]{"supergroup"});
    return user;
  }
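
  /*
   * A hypothetical sketch: simulate a second concurrent client in a test (the
   * suffix is arbitrary; the runAs body is an assumption of this example):
   *
   *   User other = HBaseTestingUtility.getDifferentUser(conf, ".client2");
   *   other.runAs(new PrivilegedExceptionAction<Void>() {
   *     public Void run() throws Exception {
   *       // filesystem or RPC work performed as the second user
   *       return null;
   *     }
   *   });
   */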

  /**
   * Set the private <code>maxRecoveryErrorCount</code> field on a
   * DFSClient.DFSOutputStream via reflection, so tests do not linger through
   * the default number of block-recovery retries. If the field cannot be set
   * (for instance because the stream is not a DFSOutputStream), the failure is
   * logged and otherwise ignored.
   * @param stream a DFSClient.DFSOutputStream instance
   * @param max the new maximum recovery error count
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?>[] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
                stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }

  /**
   * Wait until all regions of the given table have a non-empty info:server
   * entry in hbase:meta, i.e. they have been assigned and the master has
   * recorded their locations. Uses a default timeout of 60 seconds.
   * @param tableName the table to wait on
   * @throws IOException
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }

  /**
   * Wait until all regions of the given table have a non-empty info:server
   * entry in hbase:meta, or until the timeout elapses.
   * @param tableName the table to wait on
   * @param timeout timeout, in milliseconds
   * @throws IOException
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
      throws IOException {
    final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
    try {
      waitFor(timeout, 200, true, new Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          boolean allRegionsAssigned = true;
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          ResultScanner s = meta.getScanner(scan);
          try {
            Result r;
            while ((r = s.next()) != null) {
              byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
              HRegionInfo info = HRegionInfo.parseFromOrNull(b);
              if (info != null && info.getTableName().equals(tableName)) {
                // A region counts as assigned once its info:server cell is non-null.
                b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                allRegionsAssigned &= (b != null);
              }
            }
          } finally {
            s.close();
          }
          return allRegionsAssigned;
        }
      });
    } finally {
      meta.close();
    }
  }
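
  /*
   * Typical usage after creating a pre-split table (sketch; the name and
   * timeout are arbitrary):
   *
   *   util.waitUntilAllRegionsAssigned(TableName.valueOf("myTable"), 120000);
   */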

  /**
   * Do a small get/scan against one store. This is required because store has
   * no actual methods of querying itself, and relies on StoreScanner.
   */
  public static List<KeyValue> getFromStoreFile(HStore store,
      Get get) throws IOException {
    MultiVersionConsistencyControl.resetThreadReadPoint();
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()));

    List<KeyValue> result = new ArrayList<KeyValue>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // Verify that we are on the row we asked for; the scanner may have moved
      // past it to the next row in the store.
      KeyValue kv = result.get(0);
      if (!Bytes.equals(kv.getRow(), get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }
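
  /*
   * A hypothetical sketch of querying a single store directly (the region,
   * FAMILY and ROW names are assumptions of this example):
   *
   *   HStore store = (HStore) region.getStore(FAMILY);
   *   List<KeyValue> kvs =
   *       HBaseTestingUtility.getFromStoreFile(store, new Get(ROW));
   */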

  /**
   * Create start keys for the given number of regions, evenly spread between
   * startKey and endKey. The first entry is the empty byte array, so the
   * result can be passed directly as region start keys.
   * @param startKey start key of the second region
   * @param endKey start key of the last region
   * @param numRegions the total number of regions; must be greater than 3
   * @return an array of numRegions start keys
   */
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue("numRegions must be greater than 3", numRegions > 3);
    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] result = new byte[tmpSplitKeys.length + 1][];
    for (int i = 0; i < tmpSplitKeys.length; i++) {
      result[i + 1] = tmpSplitKeys[i];
    }
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }
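
  /*
   * Worked example: getRegionSplitStartKeys(A, B, 5) asks Bytes.split for
   * 5 - 3 = 2 intermediate keys, which returns {A, k1, k2, B}. Prepending the
   * empty start key gives {EMPTY, A, k1, k2, B}: one start key per region,
   * five in total.
   */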

  /**
   * Do a small get/scan against one store, restricted to the given row and
   * columns. See {@link #getFromStoreFile(HStore, Get)}.
   */
  public static List<KeyValue> getFromStoreFile(HStore store,
      byte[] row,
      NavigableSet<byte[]> columns
      ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }

  /**
   * Gets a ZooKeeperWatcher wired to an Abortable that rethrows any abort as a
   * RuntimeException, so fatal ZK errors surface immediately in tests.
   * @param TEST_UTIL the testing utility whose configuration is used
   */
  public static ZooKeeperWatcher getZooKeeperWatcher(
      HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
      IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
        "unittest", new Abortable() {
          boolean aborted = false;

          @Override
          public void abort(String why, Throwable e) {
            aborted = true;
            throw new RuntimeException("Fatal ZK error, why=" + why, e);
          }

          @Override
          public boolean isAborted() {
            return aborted;
          }
        });
    return zkw;
  }

  /**
   * Creates the unassigned znode for the given region and walks it through the
   * OFFLINE -> OPENING -> OPENED transitions, mimicking what a regionserver
   * does during a real region open.
   * @param TEST_UTIL the testing utility to get a watcher from
   * @param region the region whose znode to create
   * @param serverName the server the region is "opened" on
   * @return the ZooKeeperWatcher used to drive the transitions
   */
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw,
        region.getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
        version);
    return zkw;
  }

  public static void assertKVListsEqual(String additionalMsg,
      final List<KeyValue> expected,
      final List<KeyValue> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    // Find the first position at which the two lists differ.
    int i;
    for (i = 0; i < minLen
        && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
        ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
          "Expected and actual KV arrays differ at position " + i + ": " +
          safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
          safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }

  private static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }

  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
        + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }
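
  /*
   * Example output (the values are hypothetical): "localhost:21818:/hbase" --
   * the ZK quorum, client port and parent znode joined by ':'.
   */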

  /** Creates a random table with the given parameters */
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
        " regions, " + numFlushes + " storefiles per region, " +
        numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
        "\n");

    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableName, cfBytes,
        maxVersions,
        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
        numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }

    return table;
  }

  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;
  private static Random random = new Random();

  /**
   * Returns a random port in the dynamic/private range
   * [MIN_RANDOM_PORT, MAX_RANDOM_PORT). These ports cannot be registered with
   * IANA and are intended for dynamic allocation.
   */
  public static int randomPort() {
    return MIN_RANDOM_PORT
        + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
  }

  /**
   * Returns a random free port and marks that port as taken. Not thread-safe;
   * expected to be called from single-threaded test setup code. A port is
   * considered free if a ServerSocket can be bound to it.
   */
  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      if (takenRandomPorts.contains(port)) {
        continue;
      }
      takenRandomPorts.add(port);

      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        // Bind failed: the port is in use by another process; retry.
        port = 0;
      }
    } while (port == 0);
    return port;
  }
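
  /*
   * Sketch: reserve a free ephemeral port for a test daemon (the config key
   * here is just an example):
   *
   *   conf.setInt("hbase.regionserver.info.port",
   *       HBaseTestingUtility.randomFreePort());
   */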

  /** Returns a random multicast address in the 226.1.1.0/24 range. */
  public static String randomMultiCastAddress() {
    return "226.1.1." + random.nextInt(254);
  }

  /**
   * Waits up to ten seconds for a server to come up at the given host and
   * port, retrying the connection at the standard socket-retry interval.
   * Throws the last connection failure if the server never comes up.
   */
  public static void waitForHostPort(String host, int port)
      throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }

  /**
   * Creates a pre-split table for load testing, with one column family using
   * the given compression and data block encoding. If the table already
   * exists, logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd);
  }
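
  /*
   * Sketch (the table name, family, compression and encoding choices are
   * arbitrary examples):
   *
   *   int regions = HBaseTestingUtility.createPreSplitLoadTestTable(conf,
   *       TableName.valueOf("loadtest"), Bytes.toBytes("cf"),
   *       Compression.Algorithm.NONE, DataBlockEncoding.NONE);
   */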

  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Pre-split the table into (live regionservers * DEFAULT_REGIONS_PER_SERVER)
      // regions so load is spread from the start.
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
          "pre-splitting table into " + totalNumberOfRegions + " regions " +
          "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
          totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + desc.getTableName() +
          " already exists, continuing");
    } finally {
      // The finally block owns closing the admin; no separate close in the
      // try block, which would close it twice.
      admin.close();
    }
    return totalNumberOfRegions;
  }

  public static int getMetaRSPort(Configuration conf) throws IOException {
    HTable table = new HTable(conf, TableName.META_TABLE_NAME);
    HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
    table.close();
    return hloc.getPort();
  }

  /**
   * Asserts that the given region is deployed on the given server before the
   * timeout elapses. Because assignment completes asynchronously, a region may
   * not yet show in the server's online list even after the master has
   * recorded the new assignment, so this polls instead of checking once.
   */
  public void assertRegionOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) return;
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
        + " on server " + server);
  }

  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.addFamily(hcd);
    HRegionInfo info =
        new HRegionInfo(TableName.valueOf(tableName), null, null, false);
    HRegion region =
        HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }

  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, predicate);
  }

  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, interval, predicate);
  }

  /**
   * Wrapper method for
   * {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, long interval,
      boolean failIfTimeout, Predicate<E> predicate) throws E {
    return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
  }

  /**
   * Returns a {@link Predicate} for checking that there are no regions in
   * transition in master.
   */
  public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
            .getAssignmentManager().getRegionStates();
        return !regionStates.isRegionsInTransition();
      }
    };
  }
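
  /*
   * Sketch: block a test until the cluster has no regions in transition (the
   * timeout value is arbitrary):
   *
   *   util.waitFor(60000, util.predicateNoRegionsInTransition());
   */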

}