1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18 package org.apache.hadoop.hbase;
19
20 import static org.junit.Assert.assertTrue;
21 import static org.junit.Assert.fail;
22
23 import java.io.File;
24 import java.io.IOException;
25 import java.io.OutputStream;
26 import java.lang.reflect.Field;
27 import java.lang.reflect.Modifier;
28 import java.net.InetAddress;
29 import java.net.ServerSocket;
30 import java.net.Socket;
31 import java.net.UnknownHostException;
32 import java.security.MessageDigest;
33 import java.util.ArrayList;
34 import java.util.Arrays;
35 import java.util.Collection;
36 import java.util.Collections;
37 import java.util.HashSet;
38 import java.util.List;
39 import java.util.Map;
40 import java.util.NavigableSet;
41 import java.util.Random;
42 import java.util.Set;
43 import java.util.UUID;
44 import java.util.concurrent.TimeUnit;
45
46 import org.apache.commons.logging.Log;
47 import org.apache.commons.logging.LogFactory;
48 import org.apache.commons.logging.impl.Jdk14Logger;
49 import org.apache.commons.logging.impl.Log4JLogger;
50 import org.apache.hadoop.classification.InterfaceAudience;
51 import org.apache.hadoop.classification.InterfaceStability;
52 import org.apache.hadoop.conf.Configuration;
53 import org.apache.hadoop.fs.FileSystem;
54 import org.apache.hadoop.fs.Path;
55 import org.apache.hadoop.hbase.Waiter.Predicate;
56 import org.apache.hadoop.hbase.catalog.MetaEditor;
57 import org.apache.hadoop.hbase.client.Delete;
58 import org.apache.hadoop.hbase.client.Durability;
59 import org.apache.hadoop.hbase.client.Get;
60 import org.apache.hadoop.hbase.client.HBaseAdmin;
61 import org.apache.hadoop.hbase.client.HConnection;
62 import org.apache.hadoop.hbase.client.HTable;
63 import org.apache.hadoop.hbase.client.Put;
64 import org.apache.hadoop.hbase.client.Result;
65 import org.apache.hadoop.hbase.client.ResultScanner;
66 import org.apache.hadoop.hbase.client.Scan;
67 import org.apache.hadoop.hbase.fs.HFileSystem;
68 import org.apache.hadoop.hbase.io.compress.Compression;
69 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
70 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
71 import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
72 import org.apache.hadoop.hbase.io.hfile.HFile;
73 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
74 import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
75 import org.apache.hadoop.hbase.master.HMaster;
76 import org.apache.hadoop.hbase.master.RegionStates;
77 import org.apache.hadoop.hbase.master.ServerManager;
78 import org.apache.hadoop.hbase.regionserver.BloomType;
79 import org.apache.hadoop.hbase.regionserver.HRegion;
80 import org.apache.hadoop.hbase.regionserver.HRegionServer;
81 import org.apache.hadoop.hbase.regionserver.HStore;
82 import org.apache.hadoop.hbase.regionserver.InternalScanner;
83 import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
84 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
85 import org.apache.hadoop.hbase.regionserver.wal.HLog;
86 import org.apache.hadoop.hbase.security.User;
87 import org.apache.hadoop.hbase.tool.Canary;
88 import org.apache.hadoop.hbase.util.Bytes;
89 import org.apache.hadoop.hbase.util.FSUtils;
90 import org.apache.hadoop.hbase.util.JVMClusterUtil;
91 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
92 import org.apache.hadoop.hbase.util.RegionSplitter;
93 import org.apache.hadoop.hbase.util.RetryCounter;
94 import org.apache.hadoop.hbase.util.Threads;
95 import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
96 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
97 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
98 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
99 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
100 import org.apache.hadoop.hdfs.DFSClient;
101 import org.apache.hadoop.hdfs.DistributedFileSystem;
102 import org.apache.hadoop.hdfs.MiniDFSCluster;
103 import org.apache.hadoop.mapred.JobConf;
104 import org.apache.hadoop.mapred.MiniMRCluster;
105 import org.apache.hadoop.mapred.TaskLog;
106 import org.apache.zookeeper.KeeperException;
107 import org.apache.zookeeper.KeeperException.NodeExistsException;
108 import org.apache.zookeeper.WatchedEvent;
109 import org.apache.zookeeper.ZooKeeper;
110 import org.apache.zookeeper.ZooKeeper.States;
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126 @InterfaceAudience.Public
127 @InterfaceStability.Evolving
128 public class HBaseTestingUtility extends HBaseCommonTestingUtility {
129 private MiniZooKeeperCluster zkCluster = null;
130
131
132
133
134
135 private static int DEFAULT_REGIONS_PER_SERVER = 5;
136
137
138
139
140
141 private boolean passedZkCluster = false;
142 private MiniDFSCluster dfsCluster = null;
143
144 private HBaseCluster hbaseCluster = null;
145 private MiniMRCluster mrCluster = null;
146
147
148 private boolean miniClusterRunning;
149
150 private String hadoopLogDir;
151
152
153 private File clusterTestDir = null;
154
155
156
157 private Path dataTestDirOnTestFS = null;
158
159
160
161
162
163
164
165
166 @Deprecated
167 private static final String TEST_DIRECTORY_KEY = "test.build.data";
168
169
170 private static String FS_URI;
171
172
173 private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();
174
175
176 public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
177 Arrays.asList(new Object[][] {
178 { Compression.Algorithm.NONE },
179 { Compression.Algorithm.GZ }
180 });
181
182
183 public static final List<Object[]> BOOLEAN_PARAMETERIZED =
184 Arrays.asList(new Object[][] {
185 { new Boolean(false) },
186 { new Boolean(true) }
187 });
188
189
190 public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS ={
191 Compression.Algorithm.NONE, Compression.Algorithm.GZ
192 };
193
194
195
196
197
198 private static List<Object[]> bloomAndCompressionCombinations() {
199 List<Object[]> configurations = new ArrayList<Object[]>();
200 for (Compression.Algorithm comprAlgo :
201 HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
202 for (BloomType bloomType : BloomType.values()) {
203 configurations.add(new Object[] { comprAlgo, bloomType });
204 }
205 }
206 return Collections.unmodifiableList(configurations);
207 }
208
209 public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
210 bloomAndCompressionCombinations();
211
  /** Default constructor: starts from a fresh HBaseConfiguration. */
  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  /**
   * Constructor taking the configuration this utility (and the clusters it starts) will use.
   * @param conf configuration to wrap
   */
  public HBaseTestingUtility(Configuration conf) {
    super(conf);
    // Make checksum-verification failures throw in tests so they are noticed immediately.
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }
222
223
224
225
226
227
228
229 public static HBaseTestingUtility createLocalHTU() {
230 Configuration c = HBaseConfiguration.create();
231 return createLocalHTU(c);
232 }
233
234
235
236
237
238
239
240
241 public static HBaseTestingUtility createLocalHTU(Configuration c) {
242 HBaseTestingUtility htu = new HBaseTestingUtility(c);
243 String dataTestDir = htu.getDataTestDir().toString();
244 htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
245 LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
246 return htu;
247 }
248
249
250
251
252
253
254
255
256
257
258
259
  /**
   * Returns this instance's Configuration (delegates to the superclass).
   * @return Instance of Configuration.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  /** Registers an externally-managed HBase cluster with this utility. */
  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
  /**
   * Sets up the base test directory and relocates the hadoop log/tmp dirs and the mapred
   * local dir beneath it, so mini-cluster output lands inside the test directory.
   * @return the path of the created base test directory, or null if it was already set up
   */
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    // Relocate hadoop.tmp.dir under the test dir as well (its default is a shared
    // per-user location outside the test tree — TODO confirm).
    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    // Only a config entry is needed here, no system property (see the helper used above).
    createSubDir(
      "mapred.local.dir",
      testPath, "mapred-local-dir");

    return testPath;
  }
309
  /**
   * Creates a sub-directory under {@code parent} and records it in both the configuration
   * and the named system property. If the system property is already set, no directory is
   * created and the configuration is overwritten with the system value (system wins).
   * @param propertyName property/config key to set
   * @param parent parent directory
   * @param subDirName name of the sub-directory to create
   */
  private void createSubDirAndSystemProperty(
    String propertyName, Path parent, String subDirName){

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // A value is already set (e.g. by the build tool or the user); respect it,
      // but warn when the configuration disagrees before clobbering it.
      LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
        sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)){
        LOG.warn(
          propertyName + " property value differs in configuration and system: "+
          "Configuration="+confValue+" while System="+sysValue+
          " Erasing configuration value by system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      // Not set anywhere: create the sub-directory and publish it in both places.
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }
335
336
337
338
339
340
341
342 private Path getBaseTestDirOnTestFS() throws IOException {
343 FileSystem fs = getTestFileSystem();
344 return new Path(fs.getWorkingDirectory(), "test-data");
345 }
346
347
348
349
350
351
  /**
   * @return where the cluster test dir is; lazily sets it up if not yet created
   */
  Path getClusterTestDir() {
    if (clusterTestDir == null){
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }
358
359
360
361
  /**
   * Creates a directory for the cluster, under the test data directory. Idempotent.
   */
  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      return;
    }

    // A random UUID keeps successive clusters started by the same instance from
    // colliding on the same directory.
    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
    // Have it cleaned up on exit when so configured.
    boolean b = deleteOnExit();
    if (b) clusterTestDir.deleteOnExit();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
  }
377
378
379
380
381
382
383
  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()},
   * to write temporary test data. Lazily sets the directory up on first call.
   * @return a unique path in the test filesystem
   * @throws IOException on filesystem error
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  /**
   * Same as {@link #getDataTestDirOnTestFS()} but resolved against the given subdir name.
   * @param subdirName name of the subdir under the base test dir
   * @return a unique path in the test filesystem
   * @throws IOException on filesystem error
   */
  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }
402
403
404
405
  /**
   * Sets up a path in the test filesystem to be used by tests. Idempotent (warns and
   * returns if already set up).
   */
  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test on test fs dir already setup in "
        + dataTestDirOnTestFS.toString());
      return;
    }

    // The test filesystem may be local or distributed (mini dfs / external). When
    // local, reuse the local data test dir directly; otherwise create a unique
    // random subdir under the FS working directory.
    FileSystem fs = getTestFileSystem();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      File dataTestDir = new File(getDataTestDir().toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
      dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
    } else {
      Path base = getBaseTestDirOnTestFS();
      String randomStr = UUID.randomUUID().toString();
      dataTestDirOnTestFS = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
    }
  }
429
430
431
432
433
434
435 public boolean cleanupDataTestDirOnTestFS() throws IOException {
436 boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
437 if (ret)
438 dataTestDirOnTestFS = null;
439 return ret;
440 }
441
442
443
444
445
446
447 public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
448 Path cpath = getDataTestDirOnTestFS(subdirName);
449 return getTestFileSystem().delete(cpath, true);
450 }
451
452
453
454
455
456
457
458
459 public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
460 return startMiniDFSCluster(servers, null);
461 }
462
463
464
465
466
467
468
469
470
471
472
473
474 public MiniDFSCluster startMiniDFSCluster(final String hosts[])
475 throws Exception {
476 if ( hosts != null && hosts.length != 0) {
477 return startMiniDFSCluster(hosts.length, hosts);
478 } else {
479 return startMiniDFSCluster(1, null);
480 }
481 }
482
483
484
485
486
487
488
489
490
491
  /**
   * Start a minidfscluster and adopt it as this utility's default filesystem.
   * @param servers how many DNs to start
   * @param hosts hostnames DNs should run on, may be null
   * @return the mini dfs cluster created
   * @see #shutdownMiniDFSCluster()
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
  throws Exception {
    createDirsAndSetProperties();

    // Raise to ERROR to silence noisy metrics warnings specific to the minicluster.
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
        setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
        setLevel(org.apache.log4j.Level.ERROR);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up.
    this.dfsCluster.waitClusterUp();

    // Reset the cached test directory for the (new) test file system.
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }
518
519
  /**
   * Start a minidfscluster with explicit rack assignments and hostnames, adopting it as
   * this utility's default filesystem.
   * @param servers number of datanodes
   * @param racks rack names, may be null
   * @param hosts hostnames, may be null
   * @return the mini dfs cluster created
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, racks, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up.
    this.dfsCluster.waitClusterUp();

    // Reset the cached test directory for the (new) test file system.
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  /**
   * Start a five-datanode mini dfs cluster on a fixed namenode port for HLog testing.
   * NOTE(review): the fourth constructor arg is false here (the other overloads pass
   * true) — presumably the format flag so existing data survives; confirm against the
   * MiniDFSCluster javadoc of the Hadoop version in use.
   * @param namenodePort port the namenode should bind
   * @return the mini dfs cluster created
   */
  public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }
545
546
  /**
   * Used before starting HDFS and map-reduce mini-clusters: creates the needed directories
   * and publishes them via system properties and configuration entries.
   */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapred.local.dir");
    createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
    enableShortCircuit();

    // Point the various map-reduce working dirs into the test filesystem.
    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
  }
565
566
567
568
569
570
571
572 public boolean isReadShortCircuitOn(){
573 final String propName = "hbase.tests.use.shortcircuit.reads";
574 String readOnProp = System.getProperty(propName);
575 if (readOnProp != null){
576 return Boolean.parseBoolean(readOnProp);
577 } else {
578 return conf.getBoolean(propName, false);
579 }
580 }
581
582
583
584
585 private void enableShortCircuit() {
586 if (isReadShortCircuitOn()) {
587 String curUser = System.getProperty("user.name");
588 LOG.info("read short circuit is ON for user " + curUser);
589
590 conf.set("dfs.block.local-path-access.user", curUser);
591
592 conf.setBoolean("dfs.client.read.shortcircuit", true);
593
594 conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
595 } else {
596 LOG.info("read short circuit is OFF");
597 }
598 }
599
600 private String createDirAndSetProperty(final String relPath, String property) {
601 String path = getDataTestDir(relPath).toString();
602 System.setProperty(property, path);
603 conf.set(property, path);
604 new File(path).mkdirs();
605 LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
606 return path;
607 }
608
609
610
611
612
613
  /**
   * Shuts down the instance created by a call to startMiniDFSCluster, if any.
   * @throws IOException on shutdown failure
   */
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      // Point the default filesystem back at the local one.
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }
623
624
625
626
627
628
629
630
  /**
   * Call this if you only want a zk cluster (single server).
   * @return zk cluster started
   * @see #shutdownMiniZKCluster()
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  /**
   * Call this if you only want a zk cluster.
   * @param zooKeeperServerNum number of zk servers to start
   * @return zk cluster started
   * @see #shutdownMiniZKCluster()
   */
  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }

  /** Starts a single-server zk cluster rooted in the given directory. */
  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
    throws Exception {
    return startMiniZKCluster(dir,1);
  }
653
654
655
656
657
  /**
   * Starts a zk cluster rooted in {@code dir}, recording the chosen client port in the
   * configuration. A fixed port can be forced via "test.hbase.zookeeper.property.clientPort".
   * @param dir directory the zk servers use
   * @param zooKeeperServerNum number of zk servers
   * @return the started cluster
   * @throws IOException if a zk cluster is already running
   */
  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
  throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    // We started this cluster ourselves, so we are responsible for shutting it down.
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0){
      // If there is a port in the config, use it as the default client port.
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }
676
677
678
679
680
681
682
683 public void shutdownMiniZKCluster() throws IOException {
684 if (this.zkCluster != null) {
685 this.zkCluster.shutdown();
686 this.zkCluster = null;
687 }
688 }
689
690
691
692
693
694
695
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper: one master, one regionserver.
   * @return mini hbase cluster instance created
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  /**
   * Start up a minicluster with a single master and {@code numSlaves} regionservers
   * (a datanode is started per slave as well).
   * @param numSlaves number of slaves to start up
   * @return mini hbase cluster instance created
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
  throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  /**
   * Start up a minicluster with the given number of masters and slaves.
   * @param numMasters number of masters to start up
   * @param numSlaves number of slaves (regionservers and datanodes) to start up
   * @return mini hbase cluster instance created
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves)
  throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
  /**
   * Start up a minicluster; one datanode per entry of {@code dataNodeHosts} when supplied,
   * otherwise {@code numSlaves} datanodes.
   * @return mini hbase cluster instance created
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
  }

  /**
   * Start up a minicluster with an independent number of datanodes.
   * @return mini hbase cluster instance created
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
  /**
   * Start up a minicluster using custom master and regionserver implementation classes.
   * The number of datanodes defaults to {@code numSlaves} unless {@code dataNodeHosts}
   * is supplied.
   * @return mini hbase cluster instance created
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    return startMiniCluster(
      numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }
803
804
805
806
807
808
  /**
   * The full startMiniCluster: starts dfs, a zk cluster (unless one was already handed to
   * this utility) and then the hbase cluster, with custom master/regionserver classes.
   * A non-empty {@code dataNodeHosts} overrides {@code numDataNodes}.
   * @return mini hbase cluster instance created
   * @throws IllegalStateException if a mini-cluster is already running
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
    Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
  throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // If we already put up a cluster, fail loudly rather than stacking clusters.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up mini dfs cluster first so hbase has a filesystem to root itself on.
    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    // Start up a zk cluster unless one was passed in via setZkCluster/equivalent.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Finally start the MiniHBaseCluster itself.
    return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
  }
842
  /** Starts the hbase cluster with default implementations (dfs and zk must already be up). */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException{
    return startMiniHBaseCluster(numMasters, numSlaves, null, null);
  }
847
848
849
850
851
852
853
854
855
856
857
858
  /**
   * Starts up the mini hbase cluster only. Usually used after {@link #startMiniCluster()}
   * overloads when doing stepped startup of clusters — dfs and zk must already be running.
   * @return reference to the mini hbase cluster
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
      final int numSlaves, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
  throws IOException, InterruptedException {
    // Create/write the hbase.rootdir (also sets it in the configuration).
    createRootDir();

    // Make the master wait for exactly numSlaves regionservers to connect
    // (unless the test already configured these bounds).
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster =
        new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);

    // Don't leave here till we've done a successful scan of the meta table.
    HTable t = new HTable(c, TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    // Eagerly create the cached admin handle.
    getHBaseAdmin();
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster)this.hbaseCluster;
  }
891
892
893
894
895
896
897
898 public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
899 this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
900
901 HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
902 ResultScanner s = t.getScanner(new Scan());
903 while (s.next() != null) {
904
905 }
906 LOG.info("HBase has been restarted");
907 s.close();
908 t.close();
909 }
910
911
912
913
914
915
916 public MiniHBaseCluster getMiniHBaseCluster() {
917 if (this.hbaseCluster instanceof MiniHBaseCluster) {
918 return (MiniHBaseCluster)this.hbaseCluster;
919 }
920 throw new RuntimeException(hbaseCluster + " not an instance of " +
921 MiniHBaseCluster.class.getName());
922 }
923
924
925
926
927
928
  /**
   * Stops the mini hbase, zk, and hdfs clusters, then cleans the test directory.
   * @see #startMiniCluster(int)
   */
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster){
      // Only stop zk if we started it ourselves.
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }
941
942
943
944
945
  /**
   * Shuts down the HBase mini cluster only (leaves zk and dfs running if they are).
   * Also closes the cached admin and zookeeper-watcher handles.
   * @throws IOException on shutdown failure
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }

    // Unset the regionserver MIN/MAX-to-start bounds so a later cluster start
    // picks values appropriate to its own slave count.
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is fully down before returning.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }
  }
967
968
969
970
971
972
973
974 public Path getDefaultRootDirPath() throws IOException {
975 FileSystem fs = FileSystem.get(this.conf);
976 return new Path(fs.makeQualified(fs.getHomeDirectory()),"hbase");
977 }
978
979
980
981
982
983
984
985
986
  /**
   * Creates the hbase rootdir under the user home directory, sets it in the configuration,
   * and writes the version file. Normally you won't need this: it happens as part of
   * mini-cluster startup.
   * @return fully qualified path to the hbase root dir
   * @throws IOException on filesystem error
   */
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }
995
996
997
998
999
  /**
   * Flushes all caches in the mini hbase cluster.
   * @throws IOException if flushing fails
   */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /**
   * Flushes all caches of the given table in the mini hbase cluster.
   * @throws IOException if flushing fails
   */
  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /**
   * Compacts all regions in the mini hbase cluster.
   * @param major whether to run a major compaction
   * @throws IOException if compaction fails
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /**
   * Compacts all regions of the given table in the mini hbase cluster.
   * @param major whether to run a major compaction
   * @throws IOException if compaction fails
   */
  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }
1027
1028
1029
1030
1031
1032
1033
1034
  /**
   * Create a table with a single column family.
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(String tableName, String family)
  throws IOException{
    return createTable(TableName.valueOf(tableName), new String[]{family});
  }

  /**
   * Create a table with a single column family.
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException{
    return createTable(TableName.valueOf(tableName), new byte[][]{family});
  }
1051
1052
1053
1054
1055
1056
1057
1058
1059 public HTable createTable(TableName tableName, String[] families)
1060 throws IOException {
1061 List<byte[]> fams = new ArrayList<byte[]>(families.length);
1062 for (String family : families) {
1063 fams.add(Bytes.toBytes(family));
1064 }
1065 return createTable(tableName, fams.toArray(new byte[0][]));
1066 }
1067
1068
1069
1070
1071
1072
1073
1074
  /**
   * Create a table with a single column family.
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(TableName tableName, byte[] family)
  throws IOException{
    return createTable(tableName, new byte[][]{family});
  }

  /**
   * Create a table; the handle uses a fresh copy of this utility's configuration.
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  /**
   * Create a table; the handle uses a fresh copy of this utility's configuration.
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(TableName tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }
1106
  /** Create a pre-split table; see the TableName-based overload below. */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  /** Create a pre-split table; see the TableName-based overload below. */
  public HTable createTable(String tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  /**
   * Create a table pre-split into {@code numRegions} regions covering startKey..endKey,
   * then block until every region is assigned.
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
  throws IOException{
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    // createTable only waits for regions to appear in meta; also wait for assignment.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }
1133
1134
1135
1136
1137
1138
1139
1140
1141
  /**
   * Create a table from the given descriptor, adding the given families with bloom filters
   * disabled, then block until every region is assigned.
   * @param c configuration to use for the returned HTable handle
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
  throws IOException {
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable bloom filters for the test families added through this variant.
      hcd.setBloomFilterType(BloomType.NONE);
      htd.addFamily(hcd);
    }
    getHBaseAdmin().createTable(htd);
    // createTable only waits for regions to appear in meta; also wait for assignment.
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(c, htd.getTableName());
  }
1157
1158
1159
1160
1161
1162
1163
1164
1165
  /**
   * Create a table.
   * @param c configuration to use for the returned HTable handle
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c)
  throws IOException {
    return createTable(new HTableDescriptor(tableName), families, c);
  }
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180 public HTable createTable(byte[] tableName, byte[][] families,
1181 final Configuration c)
1182 throws IOException {
1183 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1184 for(byte[] family : families) {
1185 HColumnDescriptor hcd = new HColumnDescriptor(family);
1186
1187
1188
1189 hcd.setBloomFilterType(BloomType.NONE);
1190 desc.addFamily(hcd);
1191 }
1192 getHBaseAdmin().createTable(desc);
1193 return new HTable(c, tableName);
1194 }
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
  /**
   * Create a table with the given max versions per family, then block until every region
   * is assigned.
   * @param c configuration to use for the returned HTable handle
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c, int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // createTable only waits for regions to appear in meta; also wait for assignment.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229 public HTable createTable(byte[] tableName, byte[][] families,
1230 final Configuration c, int numVersions)
1231 throws IOException {
1232 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1233 for(byte[] family : families) {
1234 HColumnDescriptor hcd = new HColumnDescriptor(family)
1235 .setMaxVersions(numVersions);
1236 desc.addFamily(hcd);
1237 }
1238 getHBaseAdmin().createTable(desc);
1239 return new HTable(c, tableName);
1240 }
1241
1242
1243
1244
1245
1246
1247
1248
1249
  /**
   * Create a table with a single family and the given max versions.
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /**
   * Create a table with a single family and the given max versions.
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(TableName tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }
1267
1268
1269
1270
1271
1272
1273
1274
1275
  /**
   * Create a table with the given max versions for every family.
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
  throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  /**
   * Create a table with the given max versions for every family, then block until every
   * region is assigned.
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // createTable only waits for regions to appear in meta; also wait for assignment.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }
1303
1304
1305
1306
1307
1308
1309
1310
1311
  /**
   * Create a table with the given max versions and block size per family.
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    return createTable(TableName.valueOf(tableName),
        families, numVersions, blockSize);
  }

  /**
   * Create a table with the given max versions and block size per family, then block until
   * every region is assigned.
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(TableName tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // createTable only waits for regions to appear in meta; also wait for assignment.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }
1340
1341
1342
1343
1344
1345
1346
1347
1348
  /**
   * Create a table with per-family max versions.
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
  throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  /**
   * Create a table with per-family max versions ({@code numVersions[i]} applies to
   * {@code families[i]}; the array must be at least as long as {@code families}), then
   * block until every region is assigned.
   * @return an HTable instance for the created table
   * @throws IOException if table creation fails
   */
  public HTable createTable(TableName tableName, byte[][] families,
      int[] numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    // createTable only waits for regions to appear in meta; also wait for assignment.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388 public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
1389 throws IOException{
1390 return createTable(TableName.valueOf(tableName), family, splitRows);
1391 }
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401 public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
1402 throws IOException {
1403 HTableDescriptor desc = new HTableDescriptor(tableName);
1404 HColumnDescriptor hcd = new HColumnDescriptor(family);
1405 desc.addFamily(hcd);
1406 getHBaseAdmin().createTable(desc, splitRows);
1407
1408 waitUntilAllRegionsAssigned(tableName);
1409 return new HTable(getConfiguration(), tableName);
1410 }
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420 public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
1421 throws IOException {
1422 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1423 for(byte[] family:families) {
1424 HColumnDescriptor hcd = new HColumnDescriptor(family);
1425 desc.addFamily(hcd);
1426 }
1427 getHBaseAdmin().createTable(desc, splitRows);
1428
1429 waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
1430 return new HTable(getConfiguration(), tableName);
1431 }
1432
1433
1434
1435
1436
1437 public void deleteTable(String tableName) throws IOException {
1438 deleteTable(TableName.valueOf(tableName));
1439 }
1440
1441
1442
1443
1444
1445 public void deleteTable(byte[] tableName) throws IOException {
1446 deleteTable(TableName.valueOf(tableName));
1447 }
1448
1449
1450
1451
1452
1453 public void deleteTable(TableName tableName) throws IOException {
1454 try {
1455 getHBaseAdmin().disableTable(tableName);
1456 } catch (TableNotEnabledException e) {
1457 LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1458 }
1459 getHBaseAdmin().deleteTable(tableName);
1460 }
1461
1462
1463
1464
1465
// Default column families for tests that want a pre-defined schema.
public final static byte [] fam1 = Bytes.toBytes("colfamily11");
public final static byte [] fam2 = Bytes.toBytes("colfamily21");
public final static byte [] fam3 = Bytes.toBytes("colfamily31");
// All three default families, in declaration order.
public static final byte[][] COLUMNS = {fam1, fam2, fam3};
// Max versions used by createTableDescriptor(String).
private static final int MAXVERSIONS = 3;

// Row-key alphabet bounds used by ROWS and the load helpers ("aaa".."zzz").
public static final char FIRST_CHAR = 'a';
public static final char LAST_CHAR = 'z';
public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1476
1477
1478
1479
1480
1481
1482
1483
1484 public HTableDescriptor createTableDescriptor(final String name,
1485 final int minVersions, final int versions, final int ttl, boolean keepDeleted) {
1486 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
1487 for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1488 htd.addFamily(new HColumnDescriptor(cfName)
1489 .setMinVersions(minVersions)
1490 .setMaxVersions(versions)
1491 .setKeepDeletedCells(keepDeleted)
1492 .setBlockCacheEnabled(false)
1493 .setTimeToLive(ttl)
1494 );
1495 }
1496 return htd;
1497 }
1498
1499
1500
1501
1502
1503
1504
1505 public HTableDescriptor createTableDescriptor(final String name) {
1506 return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1507 MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1508 }
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518 public HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey,
1519 byte [] endKey)
1520 throws IOException {
1521 HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
1522 return createLocalHRegion(hri, desc);
1523 }
1524
1525
1526
1527
1528
1529
1530
1531
1532 public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
1533 return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
1534 }
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544 public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog) throws IOException {
1545 return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
1546 }
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561 public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
1562 String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
1563 HLog hlog, byte[]... families) throws IOException {
1564 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
1565 htd.setReadOnly(isReadOnly);
1566 for (byte[] family : families) {
1567 HColumnDescriptor hcd = new HColumnDescriptor(family);
1568
1569 hcd.setMaxVersions(Integer.MAX_VALUE);
1570 htd.addFamily(hcd);
1571 }
1572 htd.setDurability(durability);
1573 HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
1574 return createLocalHRegion(info, htd, hlog);
1575 }
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585 public HTable truncateTable(byte[] tableName) throws IOException {
1586 return truncateTable(TableName.valueOf(tableName));
1587 }
1588
1589
1590
1591
1592
1593
1594
1595 public HTable truncateTable(TableName tableName) throws IOException {
1596 HTable table = new HTable(getConfiguration(), tableName);
1597 Scan scan = new Scan();
1598 ResultScanner resScan = table.getScanner(scan);
1599 for(Result res : resScan) {
1600 Delete del = new Delete(res.getRow());
1601 table.delete(del);
1602 }
1603 resScan = table.getScanner(scan);
1604 resScan.close();
1605 return table;
1606 }
1607
1608
1609
1610
1611
1612
1613
1614
1615 public int loadTable(final HTable t, final byte[] f) throws IOException {
1616 return loadTable(t, new byte[][] {f});
1617 }
1618
1619
1620
1621
1622
1623
1624
1625
1626 public int loadTable(final HTable t, final byte[][] f) throws IOException {
1627 return loadTable(t, f, null);
1628 }
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638 public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
1639 t.setAutoFlush(false);
1640 int rowCount = 0;
1641 for (byte[] row : HBaseTestingUtility.ROWS) {
1642 Put put = new Put(row);
1643 for (int i = 0; i < f.length; i++) {
1644 put.add(f[i], null, value != null ? value : row);
1645 }
1646 t.put(put);
1647 rowCount++;
1648 }
1649 t.flushCommits();
1650 return rowCount;
1651 }
1652
1653
1654
1655
/**
 * Bookkeeping helper: records every three-letter row key observed (e.g. by a
 * scan) and verifies that exactly the keys in [startRow, stopRow) were seen
 * exactly once and every other key not at all.
 */
public static class SeenRowTracker {
  int dim = 'z' - 'a' + 1;  // 26 possible letters per key position
  int[][][] seenRows = new int[dim][dim][dim];  // per-key sighting counters
  byte[] startRow;  // inclusive lower bound
  byte[] stopRow;   // exclusive upper bound

  public SeenRowTracker(byte[] startRow, byte[] stopRow) {
    this.startRow = startRow;
    this.stopRow = stopRow;
  }

  // Zeroes the counter of every key in ROWS.
  void reset() {
    for (byte[] row : ROWS) {
      seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
    }
  }

  // Maps a lowercase-letter byte to a 0-based array index.
  int i(byte b) {
    return b - 'a';
  }

  // Records one sighting of the given three-byte row key.
  public void addRow(byte[] row) {
    seenRows[i(row[0])][i(row[1])][i(row[2])]++;
  }

  /**
   * Walks the whole 'aaa'..'zzz' key space and throws a RuntimeException for
   * any key whose sighting count does not match the expectation (1 inside
   * [startRow, stopRow), 0 outside).
   */
  public void validate() {
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          int count = seenRows[i(b1)][i(b2)][i(b3)];
          int expectedCount = 0;
          if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
              && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
            expectedCount = 1;
          }
          if (count != expectedCount) {
            String row = new String(new byte[] {b1,b2,b3});
            throw new RuntimeException("Row:" + row + " has a seen count of " + count + " instead of " + expectedCount);
          }
        }
      }
    }
  }
}
1703
1704 public int loadRegion(final HRegion r, final byte[] f) throws IOException {
1705 return loadRegion(r, f, false);
1706 }
1707
1708
1709
1710
1711
1712
1713
1714
1715
/**
 * Loads every row key from "aaa" to "zzz" into the region, writing the row
 * key as the cell value in family {@code f}, and retrying each put with
 * backoff while the region reports itself too busy.
 * @param flush if true, flush the memstore after each first-letter batch
 * @return number of rows written
 */
public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
throws IOException {
  byte[] k = new byte[3];
  int rowCount = 0;
  for (byte b1 = 'a'; b1 <= 'z'; b1++) {
    for (byte b2 = 'a'; b2 <= 'z'; b2++) {
      for (byte b3 = 'a'; b3 <= 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        Put put = new Put(k);
        put.add(f, null, k);
        // No WAL attached to the region => nothing to sync; skip it.
        if (r.getLog() == null) put.setDurability(Durability.SKIP_WAL);

        // Retry with exponential backoff (doubling from 10ms, capped at
        // 1000ms) until the put sticks; rowCount only advances on success.
        int preRowCount = rowCount;
        int pause = 10;
        int maxPause = 1000;
        while (rowCount == preRowCount) {
          try {
            r.put(put);
            rowCount++;
          } catch (RegionTooBusyException e) {
            pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
            Threads.sleep(pause);
          }
        }
      }
    }
    if (flush) {
      r.flushcache();
    }
  }
  return rowCount;
}
1750
1751 public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow) throws IOException {
1752 for (int i = startRow; i < endRow; i++) {
1753 byte[] data = Bytes.toBytes(String.valueOf(i));
1754 Put put = new Put(data);
1755 put.add(f, null, data);
1756 t.put(put);
1757 }
1758 }
1759
1760
1761
1762
1763 public int countRows(final HTable table) throws IOException {
1764 Scan scan = new Scan();
1765 ResultScanner results = table.getScanner(scan);
1766 int count = 0;
1767 for (@SuppressWarnings("unused") Result res : results) {
1768 count++;
1769 }
1770 results.close();
1771 return count;
1772 }
1773
1774 public int countRows(final HTable table, final byte[]... families) throws IOException {
1775 Scan scan = new Scan();
1776 for (byte[] family: families) {
1777 scan.addFamily(family);
1778 }
1779 ResultScanner results = table.getScanner(scan);
1780 int count = 0;
1781 for (@SuppressWarnings("unused") Result res : results) {
1782 count++;
1783 }
1784 results.close();
1785 return count;
1786 }
1787
1788
1789
1790
1791 public String checksumRows(final HTable table) throws Exception {
1792 Scan scan = new Scan();
1793 ResultScanner results = table.getScanner(scan);
1794 MessageDigest digest = MessageDigest.getInstance("MD5");
1795 for (Result res : results) {
1796 digest.update(res.getRow());
1797 }
1798 results.close();
1799 return digest.toString();
1800 }
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810 public int createMultiRegions(HTable table, byte[] columnFamily)
1811 throws IOException {
1812 return createMultiRegions(getConfiguration(), table, columnFamily);
1813 }
1814
1815
/** All 26^3 three-letter row keys from "aaa" to "zzz", in sorted order. */
public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3];
static {
  // Populate every combination of three lowercase letters.
  int i = 0;
  for (byte b1 = 'a'; b1 <= 'z'; b1++) {
    for (byte b2 = 'a'; b2 <= 'z'; b2++) {
      for (byte b3 = 'a'; b3 <= 'z'; b3++) {
        ROWS[i][0] = b1;
        ROWS[i][1] = b2;
        ROWS[i][2] = b3;
        i++;
      }
    }
  }
}
1830
// Default split points ("bbb".."yyy", first region open-ended) used by the
// createMultiRegions helpers.
public static final byte[][] KEYS = {
  HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
  Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
  Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
  Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
  Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
  Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
  Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
  Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
  Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
};
1842
// Split points suitable for HBaseAdmin.createTable (which rejects an empty
// first split key): "bbb".."zzz".
public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
};
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863 public int createMultiRegions(final Configuration c, final HTable table,
1864 final byte[] columnFamily)
1865 throws IOException {
1866 return createMultiRegions(c, table, columnFamily, KEYS);
1867 }
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878 public int createMultiRegions(final Configuration c, final HTable table,
1879 final byte [] family, int numRegions)
1880 throws IOException {
1881 if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1882 byte [] startKey = Bytes.toBytes("aaaaa");
1883 byte [] endKey = Bytes.toBytes("zzzzz");
1884 byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1885 byte [][] regionStartKeys = new byte[splitKeys.length+1][];
1886 for (int i=0;i<splitKeys.length;i++) {
1887 regionStartKeys[i+1] = splitKeys[i];
1888 }
1889 regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
1890 return createMultiRegions(c, table, family, regionStartKeys);
1891 }
1892
1893 @SuppressWarnings("deprecation")
1894 public int createMultiRegions(final Configuration c, final HTable table,
1895 final byte[] columnFamily, byte [][] startKeys)
1896 throws IOException {
1897 Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
1898 HTable meta = new HTable(c, TableName.META_TABLE_NAME);
1899 HTableDescriptor htd = table.getTableDescriptor();
1900 if(!htd.hasFamily(columnFamily)) {
1901 HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
1902 htd.addFamily(hcd);
1903 }
1904
1905
1906
1907
1908 List<byte[]> rows = getMetaTableRows(htd.getTableName());
1909 String regionToDeleteInFS = table
1910 .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
1911 .getRegionInfo().getEncodedName();
1912 List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
1913
1914 int count = 0;
1915 for (int i = 0; i < startKeys.length; i++) {
1916 int j = (i + 1) % startKeys.length;
1917 HRegionInfo hri = new HRegionInfo(table.getName(),
1918 startKeys[i], startKeys[j]);
1919 MetaEditor.addRegionToMeta(meta, hri);
1920 newRegions.add(hri);
1921 count++;
1922 }
1923
1924 for (byte[] row : rows) {
1925 LOG.info("createMultiRegions: deleting meta row -> " +
1926 Bytes.toStringBinary(row));
1927 meta.delete(new Delete(row));
1928 }
1929
1930 Path tableDir = new Path(getDefaultRootDirPath().toString()
1931 + System.getProperty("file.separator") + htd.getTableName()
1932 + System.getProperty("file.separator") + regionToDeleteInFS);
1933 FileSystem.get(c).delete(tableDir);
1934
1935 HConnection conn = table.getConnection();
1936 conn.clearRegionCache();
1937
1938 HBaseAdmin admin = getHBaseAdmin();
1939 if (admin.isTableEnabled(table.getTableName())) {
1940 for(HRegionInfo hri : newRegions) {
1941 admin.assign(hri.getRegionName());
1942 }
1943 }
1944
1945 meta.close();
1946
1947 return count;
1948 }
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960 public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
1961 final HTableDescriptor htd, byte [][] startKeys)
1962 throws IOException {
1963 HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
1964 Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
1965 List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
1966
1967 for (int i = 0; i < startKeys.length; i++) {
1968 int j = (i + 1) % startKeys.length;
1969 HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
1970 startKeys[j]);
1971 MetaEditor.addRegionToMeta(meta, hri);
1972 newRegions.add(hri);
1973 }
1974
1975 meta.close();
1976 return newRegions;
1977 }
1978
1979
1980
1981
1982
1983
1984 public List<byte[]> getMetaTableRows() throws IOException {
1985
1986 HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
1987 List<byte[]> rows = new ArrayList<byte[]>();
1988 ResultScanner s = t.getScanner(new Scan());
1989 for (Result result : s) {
1990 LOG.info("getMetaTableRows: row -> " +
1991 Bytes.toStringBinary(result.getRow()));
1992 rows.add(result.getRow());
1993 }
1994 s.close();
1995 t.close();
1996 return rows;
1997 }
1998
1999
2000
2001
2002
2003
2004 public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2005
2006 HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2007 List<byte[]> rows = new ArrayList<byte[]>();
2008 ResultScanner s = t.getScanner(new Scan());
2009 for (Result result : s) {
2010 HRegionInfo info = HRegionInfo.getHRegionInfo(result);
2011 if (info == null) {
2012 LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2013
2014 continue;
2015 }
2016
2017 if (info.getTable().equals(tableName)) {
2018 LOG.info("getMetaTableRows: row -> " +
2019 Bytes.toStringBinary(result.getRow()) + info);
2020 rows.add(result.getRow());
2021 }
2022 }
2023 s.close();
2024 t.close();
2025 return rows;
2026 }
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039 public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
2040 throws IOException, InterruptedException {
2041 return getRSForFirstRegionInTable(TableName.valueOf(tableName));
2042 }
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053 public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2054 throws IOException, InterruptedException {
2055 List<byte[]> metaRows = getMetaTableRows(tableName);
2056 if (metaRows == null || metaRows.isEmpty()) {
2057 return null;
2058 }
2059 LOG.debug("Found " + metaRows.size() + " rows for table " +
2060 tableName);
2061 byte [] firstrow = metaRows.get(0);
2062 LOG.debug("FirstRow=" + Bytes.toString(firstrow));
2063 long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2064 HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2065 int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2066 HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
2067 RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MICROSECONDS);
2068 while(retrier.shouldRetry()) {
2069 int index = getMiniHBaseCluster().getServerWith(firstrow);
2070 if (index != -1) {
2071 return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2072 }
2073
2074 retrier.sleepUntilNextRetry();
2075 }
2076 return null;
2077 }
2078
2079
2080
2081
2082
2083
2084
2085 public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2086 startMiniMapReduceCluster(2);
2087 return mrCluster;
2088 }
2089
2090
2091
2092
2093
/**
 * Redirects Hadoop's static TaskLog.LOG_DIR field to a "userlogs" directory
 * under hadoopLogDir, by reflectively clearing the field's final modifier
 * and reassigning it. Any reflection failure is fatal for the test setup.
 */
private void forceChangeTaskLogDir() {
  Field logDirField;
  try {
    logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
    logDirField.setAccessible(true);

    // Strip the 'final' modifier so the static field can be reassigned.
    Field modifiersField = Field.class.getDeclaredField("modifiers");
    modifiersField.setAccessible(true);
    modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);

    logDirField.set(null, new File(hadoopLogDir, "userlogs"));
  } catch (SecurityException e) {
    throw new RuntimeException(e);
  } catch (NoSuchFieldException e) {
    // Field layout differs from the expected Hadoop version; fail loudly.
    throw new RuntimeException(e);
  } catch (IllegalArgumentException e) {
    throw new RuntimeException(e);
  } catch (IllegalAccessException e) {
    throw new RuntimeException(e);
  }
}
2116
2117
2118
2119
2120
2121
2122
/**
 * Starts a mini MapReduce cluster with the given number of servers and
 * copies the resulting job tracker / YARN endpoints back into this
 * utility's configuration so client jobs can find the cluster.
 * @throws IllegalStateException if a cluster is already running
 */
private void startMiniMapReduceCluster(final int servers) throws IOException {
  if (mrCluster != null) {
    throw new IllegalStateException("MiniMRCluster is already running");
  }
  LOG.info("Starting mini mapreduce cluster...");
  setupClusterTestDir();
  createDirsAndSetProperties();

  forceChangeTaskLogDir();

  // Allow the NodeManager to allocate more virtual than physical memory;
  // test JVMs routinely exceed the default ratio.
  conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

  // Speculative execution just wastes resources in a single-node test run.
  conf.setBoolean("mapreduce.map.speculative", false);
  conf.setBoolean("mapreduce.reduce.speculative", false);

  // Bring up the cluster against the test filesystem (FS_URI override wins).
  mrCluster = new MiniMRCluster(servers,
    FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
    null, null, new JobConf(this.conf));
  JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
  if (jobConf == null) {
    jobConf = mrCluster.createJobConf();
  }
  // Hadoop MiniMRCluster overwrites this; restore our value.
  jobConf.set("mapred.local.dir",
    conf.get("mapred.local.dir"));
  LOG.info("Mini mapreduce cluster started");

  // Publish the cluster's endpoints into our conf so submitted jobs can
  // locate the job tracker / resource manager / history server.
  conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
  // MiniMRCluster is YARN-backed; mark the framework accordingly.
  conf.set("mapreduce.framework.name", "yarn");
  conf.setBoolean("yarn.is.minicluster", true);
  String rmAddress = jobConf.get("yarn.resourcemanager.address");
  if (rmAddress != null) {
    conf.set("yarn.resourcemanager.address", rmAddress);
  }
  String historyAddress = jobConf.get("mapreduce.jobhistory.address");
  if (historyAddress != null) {
    conf.set("mapreduce.jobhistory.address", historyAddress);
  }
  String schedulerAddress =
    jobConf.get("yarn.resourcemanager.scheduler.address");
  if (schedulerAddress != null) {
    conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
  }
}
2178
2179
2180
2181
2182 public void shutdownMiniMapReduceCluster() {
2183 LOG.info("Stopping mini mapreduce cluster...");
2184 if (mrCluster != null) {
2185 mrCluster.shutdown();
2186 mrCluster = null;
2187 }
2188
2189 conf.set("mapred.job.tracker", "local");
2190 LOG.info("Mini mapreduce cluster stopped");
2191 }
2192
2193
2194
2195
2196 public RegionServerServices createMockRegionServerService() throws IOException {
2197 return createMockRegionServerService((ServerName)null);
2198 }
2199
2200
2201
2202
2203
2204 public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
2205 final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2206 rss.setFileSystem(getTestFileSystem());
2207 rss.setRpcServer(rpc);
2208 return rss;
2209 }
2210
2211
2212
2213
2214
2215 public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2216 final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2217 rss.setFileSystem(getTestFileSystem());
2218 return rss;
2219 }
2220
2221
2222
2223
2224
2225
2226 public void enableDebug(Class<?> clazz) {
2227 Log l = LogFactory.getLog(clazz);
2228 if (l instanceof Log4JLogger) {
2229 ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2230 } else if (l instanceof Jdk14Logger) {
2231 ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2232 }
2233 }
2234
2235
2236
2237
2238
2239 public void expireMasterSession() throws Exception {
2240 HMaster master = getMiniHBaseCluster().getMaster();
2241 expireSession(master.getZooKeeper(), false);
2242 }
2243
2244
2245
2246
2247
2248
2249 public void expireRegionServerSession(int index) throws Exception {
2250 HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2251 expireSession(rs.getZooKeeper(), false);
2252 decrementMinRegionServerCount();
2253 }
2254
2255 private void decrementMinRegionServerCount() {
2256
2257
2258 decrementMinRegionServerCount(getConfiguration());
2259
2260
2261 for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2262 decrementMinRegionServerCount(master.getMaster().getConfiguration());
2263 }
2264 }
2265
2266 private void decrementMinRegionServerCount(Configuration conf) {
2267 int currentCount = conf.getInt(
2268 ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2269 if (currentCount != -1) {
2270 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
2271 Math.max(currentCount - 1, 1));
2272 }
2273 }
2274
2275 public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
2276 expireSession(nodeZK, false);
2277 }
2278
/**
 * @deprecated the {@code server} argument is unused; call
 *   {@link #expireSession(ZooKeeperWatcher, boolean)} instead.
 */
@Deprecated
public void expireSession(ZooKeeperWatcher nodeZK, Server server)
  throws Exception {
  expireSession(nodeZK, false);
}
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
/**
 * Forces expiry of the ZooKeeper session behind the given watcher: a second
 * client is connected with the same session id and password, then closed,
 * which makes the quorum expire the shared session.
 * @param checkStatus if true, verify META is still reachable afterwards
 */
public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
  throws Exception {
  Configuration c = new Configuration(this.conf);
  String quorumServers = ZKConfig.getZKQuorumServersString(c);
  ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
  byte[] password = zk.getSessionPasswd();
  long sessionID = zk.getSessionId();

  // Extra client sharing the same session, kept open across the expiry.
  // NOTE(review): presumably this keeps a live handle so the expiry event
  // is observable/deterministic — confirm against the ZooKeeper docs.
  ZooKeeper monitor = new ZooKeeper(quorumServers,
    1000, new org.apache.zookeeper.Watcher(){
    @Override
    public void process(WatchedEvent watchedEvent) {
      LOG.info("Monitor ZKW received event="+watchedEvent);
    }
  } , sessionID, password);

  // Second client with the same credentials; closing it below kills the
  // shared session server-side.
  ZooKeeper newZK = new ZooKeeper(quorumServers,
    1000, EmptyWatcher.instance, sessionID, password);

  // Wait (up to ~1s) until the duplicate client actually connects.
  long start = System.currentTimeMillis();
  while (newZK.getState() != States.CONNECTED
      && System.currentTimeMillis() - start < 1000) {
    Thread.sleep(1);
  }
  newZK.close();
  LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

  monitor.close();

  if (checkStatus) {
    // Opening (and closing) a META handle proves the cluster still serves.
    new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
  }
}
2340
2341
2342
2343
2344
2345
2346
2347 public MiniHBaseCluster getHBaseCluster() {
2348 return getMiniHBaseCluster();
2349 }
2350
2351
2352
2353
2354
2355
2356
2357
2358
/**
 * Returns the cluster through the generic HBaseCluster interface; unlike
 * {@link #getHBaseCluster()} this does not assume a mini cluster, so it also
 * works when the utility is pointed at a distributed cluster.
 */
public HBaseCluster getHBaseClusterInterface() {
  return hbaseCluster;
}
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
/**
 * Returns a lazily-created, shared HBaseAdmin. Callers must NOT close it:
 * its close() is a no-op (see HBaseAdminForTests below) so the shared
 * instance survives defensive closes in test code.
 */
public synchronized HBaseAdmin getHBaseAdmin()
throws IOException {
  if (hbaseAdmin == null){
    hbaseAdmin = new HBaseAdminForTests(getConfiguration());
  }
  return hbaseAdmin;
}

// Shared admin handle; lazily created by getHBaseAdmin().
private HBaseAdminForTests hbaseAdmin = null;

// Admin whose public close() only warns, so that tests closing the shared
// instance do not break later tests. NOTE(review): close0() presumably is
// invoked reflectively or by the utility's shutdown path — confirm; it has
// no caller in this chunk.
private static class HBaseAdminForTests extends HBaseAdmin {
  public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
      ZooKeeperConnectionException, IOException {
    super(c);
  }

  @Override
  public synchronized void close() throws IOException {
    LOG.warn("close() called on HBaseAdmin instance returned from HBaseTestingUtility.getHBaseAdmin()");
  }

  // Really closes the underlying admin.
  private synchronized void close0() throws IOException {
    super.close();
  }
}
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
/**
 * Returns a lazily-created, shared ZooKeeperWatcher. Its Abortable turns any
 * abort into a RuntimeException so test failures surface immediately.
 */
public synchronized ZooKeeperWatcher getZooKeeperWatcher()
throws IOException {
  if (zooKeeperWatcher == null) {
    zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
        new Abortable() {
        @Override public void abort(String why, Throwable e) {
          throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
        }
        @Override public boolean isAborted() {return false;}
    });
  }
  return zooKeeperWatcher;
}
// Shared watcher; lazily created by getZooKeeperWatcher().
private ZooKeeperWatcher zooKeeperWatcher;
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431 public void closeRegion(String regionName) throws IOException {
2432 closeRegion(Bytes.toBytes(regionName));
2433 }
2434
2435
2436
2437
2438
2439
2440
2441 public void closeRegion(byte[] regionName) throws IOException {
2442 getHBaseAdmin().closeRegion(regionName, null);
2443 }
2444
2445
2446
2447
2448
2449
2450
2451
2452 public void closeRegionByRow(String row, HTable table) throws IOException {
2453 closeRegionByRow(Bytes.toBytes(row), table);
2454 }
2455
2456
2457
2458
2459
2460
2461
2462
2463 public void closeRegionByRow(byte[] row, HTable table) throws IOException {
2464 HRegionLocation hrl = table.getRegionLocation(row);
2465 closeRegion(hrl.getRegionInfo().getRegionName());
2466 }
2467
2468
2469
2470
2471
2472
2473
2474
2475 public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
2476 List<HRegion> regions = getHBaseCluster().getRegions(tableName);
2477 int regCount = regions.size();
2478 Set<Integer> attempted = new HashSet<Integer>();
2479 int idx;
2480 int attempts = 0;
2481 do {
2482 regions = getHBaseCluster().getRegions(tableName);
2483 if (regCount != regions.size()) {
2484
2485 attempted.clear();
2486 }
2487 regCount = regions.size();
2488
2489
2490 if (regCount > 0) {
2491 idx = random.nextInt(regCount);
2492
2493 if (attempted.contains(idx))
2494 continue;
2495 try {
2496 regions.get(idx).checkSplit();
2497 return regions.get(idx);
2498 } catch (Exception ex) {
2499 LOG.warn("Caught exception", ex);
2500 attempted.add(idx);
2501 }
2502 }
2503 attempts++;
2504 } while (maxAttempts == -1 || attempts < maxAttempts);
2505 return null;
2506 }
2507
2508 public MiniZooKeeperCluster getZkCluster() {
2509 return zkCluster;
2510 }
2511
2512 public void setZkCluster(MiniZooKeeperCluster zkCluster) {
2513 this.passedZkCluster = true;
2514 this.zkCluster = zkCluster;
2515 conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
2516 }
2517
2518 public MiniDFSCluster getDFSCluster() {
2519 return dfsCluster;
2520 }
2521
2522 public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
2523 if (dfsCluster != null && dfsCluster.isClusterUp()) {
2524 throw new IOException("DFSCluster is already running! Shut it down first.");
2525 }
2526 this.dfsCluster = cluster;
2527 }
2528
2529 public FileSystem getTestFileSystem() throws IOException {
2530 return HFileSystem.get(conf);
2531 }
2532
2533
2534
2535
2536
2537
2538
2539
2540 public void waitTableAvailable(byte[] table)
2541 throws InterruptedException, IOException {
2542 waitTableAvailable(getHBaseAdmin(), table, 30000);
2543 }
2544
2545 public void waitTableAvailable(HBaseAdmin admin, byte[] table)
2546 throws InterruptedException, IOException {
2547 waitTableAvailable(admin, table, 30000);
2548 }
2549
2550
2551
2552
2553
2554
2555
2556
2557 public void waitTableAvailable(byte[] table, long timeoutMillis)
2558 throws InterruptedException, IOException {
2559 waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
2560 }
2561
2562 public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
2563 throws InterruptedException, IOException {
2564 long startWait = System.currentTimeMillis();
2565 while (!admin.isTableAvailable(table)) {
2566 assertTrue("Timed out waiting for table to become available " +
2567 Bytes.toStringBinary(table),
2568 System.currentTimeMillis() - startWait < timeoutMillis);
2569 Thread.sleep(200);
2570 }
2571
2572
2573
2574
2575
2576 try {
2577 Canary.sniff(admin, TableName.valueOf(table));
2578 } catch (Exception e) {
2579 throw new IOException(e);
2580 }
2581 }
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592 public void waitTableEnabled(byte[] table)
2593 throws InterruptedException, IOException {
2594 waitTableEnabled(getHBaseAdmin(), table, 30000);
2595 }
2596
2597 public void waitTableEnabled(HBaseAdmin admin, byte[] table)
2598 throws InterruptedException, IOException {
2599 waitTableEnabled(admin, table, 30000);
2600 }
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611 public void waitTableEnabled(byte[] table, long timeoutMillis)
2612 throws InterruptedException, IOException {
2613 waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
2614 }
2615
2616 public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
2617 throws InterruptedException, IOException {
2618 long startWait = System.currentTimeMillis();
2619 waitTableAvailable(admin, table, timeoutMillis);
2620 long remainder = System.currentTimeMillis() - startWait;
2621 while (!admin.isTableEnabled(table)) {
2622 assertTrue("Timed out waiting for table to become available and enabled " +
2623 Bytes.toStringBinary(table),
2624 System.currentTimeMillis() - remainder < timeoutMillis);
2625 Thread.sleep(200);
2626 }
2627 LOG.debug("REMOVE AFTER table=" + Bytes.toString(table) + ", isTableAvailable=" +
2628 admin.isTableAvailable(table) +
2629 ", isTableEnabled=" + admin.isTableEnabled(table));
2630 }
2631
2632
2633
2634
2635
2636
2637
2638
2639 public boolean ensureSomeRegionServersAvailable(final int num)
2640 throws IOException {
2641 boolean startedServer = false;
2642 MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
2643 for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
2644 LOG.info("Started new server=" + hbaseCluster.startRegionServer());
2645 startedServer = true;
2646 }
2647
2648 return startedServer;
2649 }
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660 public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
2661 throws IOException {
2662 boolean startedServer = ensureSomeRegionServersAvailable(num);
2663
2664 int nonStoppedServers = 0;
2665 for (JVMClusterUtil.RegionServerThread rst :
2666 getMiniHBaseCluster().getRegionServerThreads()) {
2667
2668 HRegionServer hrs = rst.getRegionServer();
2669 if (hrs.isStopping() || hrs.isStopped()) {
2670 LOG.info("A region server is stopped or stopping:"+hrs);
2671 } else {
2672 nonStoppedServers++;
2673 }
2674 }
2675 for (int i=nonStoppedServers; i<num; ++i) {
2676 LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
2677 startedServer = true;
2678 }
2679 return startedServer;
2680 }
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692 public static User getDifferentUser(final Configuration c,
2693 final String differentiatingSuffix)
2694 throws IOException {
2695 FileSystem currentfs = FileSystem.get(c);
2696 if (!(currentfs instanceof DistributedFileSystem)) {
2697 return User.getCurrent();
2698 }
2699
2700
2701 String username = User.getCurrent().getName() +
2702 differentiatingSuffix;
2703 User user = User.createUserForTesting(c, username,
2704 new String[]{"supergroup"});
2705 return user;
2706 }
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721 public static void setMaxRecoveryErrorCount(final OutputStream stream,
2722 final int max) {
2723 try {
2724 Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
2725 for (Class<?> clazz: clazzes) {
2726 String className = clazz.getSimpleName();
2727 if (className.equals("DFSOutputStream")) {
2728 if (clazz.isInstance(stream)) {
2729 Field maxRecoveryErrorCountField =
2730 stream.getClass().getDeclaredField("maxRecoveryErrorCount");
2731 maxRecoveryErrorCountField.setAccessible(true);
2732 maxRecoveryErrorCountField.setInt(stream, max);
2733 break;
2734 }
2735 }
2736 }
2737 } catch (Exception e) {
2738 LOG.info("Could not set max recovery field", e);
2739 }
2740 }
2741
2742
2743
2744
2745
2746
2747
2748
2749
/**
 * Waits until every region of the given table has been assigned to some
 * server, with a default 60 second timeout.
 *
 * @param tableName table whose regions must all be assigned
 */
public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
  waitUntilAllRegionsAssigned(tableName, 60000);
}
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763 public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
2764 throws IOException {
2765 final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
2766 try {
2767 waitFor(timeout, 200, true, new Predicate<IOException>() {
2768 @Override
2769 public boolean evaluate() throws IOException {
2770 boolean allRegionsAssigned = true;
2771 Scan scan = new Scan();
2772 scan.addFamily(HConstants.CATALOG_FAMILY);
2773 ResultScanner s = meta.getScanner(scan);
2774 try {
2775 Result r;
2776 while ((r = s.next()) != null) {
2777 byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
2778 HRegionInfo info = HRegionInfo.parseFromOrNull(b);
2779 if (info != null && info.getTable().equals(tableName)) {
2780 b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
2781 allRegionsAssigned &= (b != null);
2782 }
2783 }
2784 } finally {
2785 s.close();
2786 }
2787 return allRegionsAssigned;
2788 }
2789 });
2790 } finally {
2791 meta.close();
2792 }
2793 }
2794
2795
2796
2797
2798
2799 public static List<Cell> getFromStoreFile(HStore store,
2800 Get get) throws IOException {
2801 MultiVersionConsistencyControl.resetThreadReadPoint();
2802 Scan scan = new Scan(get);
2803 InternalScanner scanner = (InternalScanner) store.getScanner(scan,
2804 scan.getFamilyMap().get(store.getFamily().getName()));
2805
2806 List<Cell> result = new ArrayList<Cell>();
2807 scanner.next(result);
2808 if (!result.isEmpty()) {
2809
2810 Cell kv = result.get(0);
2811 if (!CellUtil.matchingRow(kv, get.getRow())) {
2812 result.clear();
2813 }
2814 }
2815 scanner.close();
2816 return result;
2817 }
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827 public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
2828 assertTrue(numRegions>3);
2829 byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
2830 byte [][] result = new byte[tmpSplitKeys.length+1][];
2831 for (int i=0;i<tmpSplitKeys.length;i++) {
2832 result[i+1] = tmpSplitKeys[i];
2833 }
2834 result[0] = HConstants.EMPTY_BYTE_ARRAY;
2835 return result;
2836 }
2837
2838
2839
2840
2841
2842 public static List<Cell> getFromStoreFile(HStore store,
2843 byte [] row,
2844 NavigableSet<byte[]> columns
2845 ) throws IOException {
2846 Get get = new Get(row);
2847 Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
2848 s.put(store.getFamily().getName(), columns);
2849
2850 return getFromStoreFile(store,get);
2851 }
2852
2853
2854
2855
2856
2857 public static ZooKeeperWatcher getZooKeeperWatcher(
2858 HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
2859 IOException {
2860 ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
2861 "unittest", new Abortable() {
2862 boolean aborted = false;
2863
2864 @Override
2865 public void abort(String why, Throwable e) {
2866 aborted = true;
2867 throw new RuntimeException("Fatal ZK error, why=" + why, e);
2868 }
2869
2870 @Override
2871 public boolean isAborted() {
2872 return aborted;
2873 }
2874 });
2875 return zkw;
2876 }
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889 public static ZooKeeperWatcher createAndForceNodeToOpenedState(
2890 HBaseTestingUtility TEST_UTIL, HRegion region,
2891 ServerName serverName) throws ZooKeeperConnectionException,
2892 IOException, KeeperException, NodeExistsException {
2893 ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
2894 ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
2895 int version = ZKAssign.transitionNodeOpening(zkw, region
2896 .getRegionInfo(), serverName);
2897 ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
2898 version);
2899 return zkw;
2900 }
2901
2902 public static void assertKVListsEqual(String additionalMsg,
2903 final List<? extends Cell> expected,
2904 final List<? extends Cell> actual) {
2905 final int eLen = expected.size();
2906 final int aLen = actual.size();
2907 final int minLen = Math.min(eLen, aLen);
2908
2909 int i;
2910 for (i = 0; i < minLen
2911 && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
2912 ++i) {}
2913
2914 if (additionalMsg == null) {
2915 additionalMsg = "";
2916 }
2917 if (!additionalMsg.isEmpty()) {
2918 additionalMsg = ". " + additionalMsg;
2919 }
2920
2921 if (eLen != aLen || i != minLen) {
2922 throw new AssertionError(
2923 "Expected and actual KV arrays differ at position " + i + ": " +
2924 safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
2925 safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
2926 }
2927 }
2928
2929 private static <T> String safeGetAsStr(List<T> lst, int i) {
2930 if (0 <= i && i < lst.size()) {
2931 return lst.get(i).toString();
2932 } else {
2933 return "<out_of_range>";
2934 }
2935 }
2936
2937 public String getClusterKey() {
2938 return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
2939 + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
2940 + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
2941 HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
2942 }
2943
2944
/**
 * Creates a pre-split table and loads it with pseudo-random data: several
 * flushes' worth of puts and deletes over random rows, columns and
 * timestamps.  The RNG is seeded from the table name, so repeated runs with
 * the same arguments produce the same table contents.
 *
 * @param tableName name of the table to create
 * @param families column family names to create
 * @param maxVersions max versions kept per cell
 * @param numColsPerRow number of columns touched per generated row
 * @param numFlushes number of flush cycles (store files per region)
 * @param numRegions number of regions to pre-split into
 * @param numRowsPerFlush rows written per flush cycle
 * @return the created, loaded table
 */
public HTable createRandomTable(String tableName,
    final Collection<String> families,
    final int maxVersions,
    final int numColsPerRow,
    final int numFlushes,
    final int numRegions,
    final int numRowsPerFlush)
throws IOException, InterruptedException {

  LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
      " regions, " + numFlushes + " storefiles per region, " +
      numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
      "\n");

  // Deterministic seed derived from the table name: same name, same data.
  final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
  final int numCF = families.size();
  final byte[][] cfBytes = new byte[numCF][];
  {
    int cfIndex = 0;
    for (String cf : families) {
      cfBytes[cfIndex++] = Bytes.toBytes(cf);
    }
  }

  // Rows are 8-hex-digit keys over [0, Integer.MAX_VALUE); the first and
  // last key-per-region chunks are left outside the split range so the
  // first and last regions also receive data.
  final int actualStartKey = 0;
  final int actualEndKey = Integer.MAX_VALUE;
  final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
  final int splitStartKey = actualStartKey + keysPerRegion;
  final int splitEndKey = actualEndKey - keysPerRegion;
  final String keyFormat = "%08x";
  final HTable table = createTable(tableName, cfBytes,
      maxVersions,
      Bytes.toBytes(String.format(keyFormat, splitStartKey)),
      Bytes.toBytes(String.format(keyFormat, splitEndKey)),
      numRegions);

  if (hbaseCluster != null) {
    getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
  }

  for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
    for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
      final byte[] row = Bytes.toBytes(String.format(keyFormat,
          actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

      Put put = new Put(row);
      Delete del = new Delete(row);
      for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
        final byte[] cf = cfBytes[rand.nextInt(numCF)];
        final long ts = rand.nextInt();
        final byte[] qual = Bytes.toBytes("col" + iCol);
        // Roughly 50% puts, 40% single-version deletes, 10% all-version
        // deletes (0.8 of the remaining half).
        if (rand.nextBoolean()) {
          final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
              "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
              ts + "_random_" + rand.nextLong());
          put.add(cf, qual, ts, value);
        } else if (rand.nextDouble() < 0.8) {
          del.deleteColumn(cf, qual, ts);
        } else {
          del.deleteColumns(cf, qual, ts);
        }
      }

      if (!put.isEmpty()) {
        table.put(put);
      }

      if (!del.isEmpty()) {
        table.delete(del);
      }
    }
    LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
    table.flushCommits();
    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(table.getName());
    }
  }

  return table;
}
3025
// Range of ports handed out by randomPort(): the dynamic/private port range
// [0xc000, 0xfffe], which cannot be registered with IANA.
private static final int MIN_RANDOM_PORT = 0xc000;
private static final int MAX_RANDOM_PORT = 0xfffe;
// Shared source of randomness for the port/address helpers below.
private static Random random = new Random();
3029
3030
3031
3032
3033
3034 public static int randomPort() {
3035 return MIN_RANDOM_PORT
3036 + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
3037 }
3038
3039
3040
3041
3042
3043 public static int randomFreePort() {
3044 int port = 0;
3045 do {
3046 port = randomPort();
3047 if (takenRandomPorts.contains(port)) {
3048 continue;
3049 }
3050 takenRandomPorts.add(port);
3051
3052 try {
3053 ServerSocket sock = new ServerSocket(port);
3054 sock.close();
3055 } catch (IOException ex) {
3056 port = 0;
3057 }
3058 } while (port == 0);
3059 return port;
3060 }
3061
3062
3063 public static String randomMultiCastAddress() {
3064 return "226.1.1." + random.nextInt(254);
3065 }
3066
3067
3068
3069 public static void waitForHostPort(String host, int port)
3070 throws IOException {
3071 final int maxTimeMs = 10000;
3072 final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3073 IOException savedException = null;
3074 LOG.info("Waiting for server at " + host + ":" + port);
3075 for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3076 try {
3077 Socket sock = new Socket(InetAddress.getByName(host), port);
3078 sock.close();
3079 savedException = null;
3080 LOG.info("Server at " + host + ":" + port + " is available");
3081 break;
3082 } catch (UnknownHostException e) {
3083 throw new IOException("Failed to look up " + host, e);
3084 } catch (IOException e) {
3085 savedException = e;
3086 }
3087 Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3088 }
3089
3090 if (savedException != null) {
3091 throw savedException;
3092 }
3093 }
3094
3095
3096
3097
3098
3099
3100 public static int createPreSplitLoadTestTable(Configuration conf,
3101 TableName tableName, byte[] columnFamily, Algorithm compression,
3102 DataBlockEncoding dataBlockEncoding) throws IOException {
3103 HTableDescriptor desc = new HTableDescriptor(tableName);
3104 HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
3105 hcd.setDataBlockEncoding(dataBlockEncoding);
3106 hcd.setCompressionType(compression);
3107 return createPreSplitLoadTestTable(conf, desc, hcd);
3108 }
3109
3110
3111
3112
3113
3114
3115 public static int createPreSplitLoadTestTable(Configuration conf,
3116 HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
3117 if (!desc.hasFamily(hcd.getName())) {
3118 desc.addFamily(hcd);
3119 }
3120
3121 int totalNumberOfRegions = 0;
3122 HBaseAdmin admin = new HBaseAdmin(conf);
3123 try {
3124
3125
3126
3127 int numberOfServers = admin.getClusterStatus().getServers().size();
3128 if (numberOfServers == 0) {
3129 throw new IllegalStateException("No live regionservers");
3130 }
3131
3132 totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
3133 LOG.info("Number of live regionservers: " + numberOfServers + ", " +
3134 "pre-splitting table into " + totalNumberOfRegions + " regions " +
3135 "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");
3136
3137 byte[][] splits = new RegionSplitter.HexStringSplit().split(
3138 totalNumberOfRegions);
3139
3140 admin.createTable(desc, splits);
3141 } catch (MasterNotRunningException e) {
3142 LOG.error("Master not running", e);
3143 throw new IOException(e);
3144 } catch (TableExistsException e) {
3145 LOG.warn("Table " + desc.getTableName() +
3146 " already exists, continuing");
3147 } finally {
3148 admin.close();
3149 }
3150 return totalNumberOfRegions;
3151 }
3152
3153 public static int getMetaRSPort(Configuration conf) throws IOException {
3154 HTable table = new HTable(conf, TableName.META_TABLE_NAME);
3155 HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
3156 table.close();
3157 return hloc.getPort();
3158 }
3159
3160
3161
3162
3163
3164
3165
3166 public void assertRegionOnServer(
3167 final HRegionInfo hri, final ServerName server,
3168 final long timeout) throws IOException, InterruptedException {
3169 long timeoutTime = System.currentTimeMillis() + timeout;
3170 while (true) {
3171 List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3172 if (regions.contains(hri)) return;
3173 long now = System.currentTimeMillis();
3174 if (now > timeoutTime) break;
3175 Thread.sleep(10);
3176 }
3177 fail("Could not find region " + hri.getRegionNameAsString()
3178 + " on server " + server);
3179 }
3180
3181
3182
3183
3184
3185 public void assertRegionOnlyOnServer(
3186 final HRegionInfo hri, final ServerName server,
3187 final long timeout) throws IOException, InterruptedException {
3188 long timeoutTime = System.currentTimeMillis() + timeout;
3189 while (true) {
3190 List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3191 if (regions.contains(hri)) {
3192 List<JVMClusterUtil.RegionServerThread> rsThreads =
3193 getHBaseCluster().getLiveRegionServerThreads();
3194 for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
3195 HRegionServer rs = rsThread.getRegionServer();
3196 if (server.equals(rs.getServerName())) {
3197 continue;
3198 }
3199 Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
3200 for (HRegion r: hrs) {
3201 assertTrue("Region should not be double assigned",
3202 r.getRegionId() != hri.getRegionId());
3203 }
3204 }
3205 return;
3206 }
3207 long now = System.currentTimeMillis();
3208 if (now > timeoutTime) break;
3209 Thread.sleep(10);
3210 }
3211 fail("Could not find region " + hri.getRegionNameAsString()
3212 + " on server " + server);
3213 }
3214
3215 public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
3216 throws IOException {
3217 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
3218 htd.addFamily(hcd);
3219 HRegionInfo info =
3220 new HRegionInfo(TableName.valueOf(tableName), null, null, false);
3221 HRegion region =
3222 HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
3223 return region;
3224 }
3225
/**
 * Overrides the filesystem URI this utility uses when setting up clusters.
 *
 * @param fsURI the filesystem URI, e.g. an hdfs:// address
 */
public void setFileSystemURI(String fsURI) {
  FS_URI = fsURI;
}
3229
3230
3231
3232
/**
 * Waits up to {@code timeout} ms for {@code predicate} to become true,
 * using this utility's configuration for Waiter defaults.
 *
 * @return time in ms the wait took, or a negative value on failure/timeout
 *         as defined by {@link Waiter#waitFor(Configuration, long, Predicate)}
 */
public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
throws E {
  return Waiter.waitFor(this.conf, timeout, predicate);
}
3237
3238
3239
3240
/**
 * Waits up to {@code timeout} ms, polling every {@code interval} ms, for
 * {@code predicate} to become true.
 */
public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
throws E {
  return Waiter.waitFor(this.conf, timeout, interval, predicate);
}
3245
3246
3247
3248
/**
 * Waits up to {@code timeout} ms, polling every {@code interval} ms, for
 * {@code predicate} to become true; when {@code failIfTimeout} is set a
 * timeout fails the current test instead of merely returning.
 */
public <E extends Exception> long waitFor(long timeout, long interval,
    boolean failIfTimeout, Predicate<E> predicate) throws E {
  return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
}
3253
3254
3255
3256
3257 public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
3258 return new Waiter.Predicate<Exception>() {
3259 @Override
3260 public boolean evaluate() throws Exception {
3261 final RegionStates regionStates = getMiniHBaseCluster().getMaster()
3262 .getAssignmentManager().getRegionStates();
3263 return !regionStates.isRegionsInTransition();
3264 }
3265 };
3266 }
3267
3268
3269
3270
3271 public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
3272 return new Waiter.Predicate<Exception>() {
3273 @Override
3274 public boolean evaluate() throws Exception {
3275 return getHBaseAdmin().isTableEnabled(tableName);
3276 }
3277 };
3278 }
3279 }