1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18 package org.apache.hadoop.hbase;
19
20 import static org.junit.Assert.assertTrue;
21 import static org.junit.Assert.fail;
22
23 import java.io.File;
24 import java.io.IOException;
25 import java.io.OutputStream;
26 import java.lang.reflect.Field;
27 import java.lang.reflect.Modifier;
28 import java.net.InetAddress;
29 import java.net.ServerSocket;
30 import java.net.Socket;
31 import java.net.UnknownHostException;
32 import java.security.MessageDigest;
33 import java.util.ArrayList;
34 import java.util.Arrays;
35 import java.util.Collection;
36 import java.util.Collections;
37 import java.util.HashSet;
38 import java.util.List;
39 import java.util.Map;
40 import java.util.NavigableSet;
41 import java.util.Random;
42 import java.util.Set;
43 import java.util.UUID;
44 import java.util.concurrent.TimeUnit;
45
46 import org.apache.commons.logging.Log;
47 import org.apache.commons.logging.LogFactory;
48 import org.apache.commons.logging.impl.Jdk14Logger;
49 import org.apache.commons.logging.impl.Log4JLogger;
50 import org.apache.hadoop.classification.InterfaceAudience;
51 import org.apache.hadoop.classification.InterfaceStability;
52 import org.apache.hadoop.conf.Configuration;
53 import org.apache.hadoop.fs.FileSystem;
54 import org.apache.hadoop.fs.Path;
55 import org.apache.hadoop.hbase.Waiter.Predicate;
56 import org.apache.hadoop.hbase.catalog.MetaEditor;
57 import org.apache.hadoop.hbase.client.Delete;
58 import org.apache.hadoop.hbase.client.Durability;
59 import org.apache.hadoop.hbase.client.Get;
60 import org.apache.hadoop.hbase.client.HBaseAdmin;
61 import org.apache.hadoop.hbase.client.HConnection;
62 import org.apache.hadoop.hbase.client.HTable;
63 import org.apache.hadoop.hbase.client.Put;
64 import org.apache.hadoop.hbase.client.Result;
65 import org.apache.hadoop.hbase.client.ResultScanner;
66 import org.apache.hadoop.hbase.client.Scan;
67 import org.apache.hadoop.hbase.fs.HFileSystem;
68 import org.apache.hadoop.hbase.io.compress.Compression;
69 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
70 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
71 import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
72 import org.apache.hadoop.hbase.io.hfile.HFile;
73 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
74 import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
75 import org.apache.hadoop.hbase.master.HMaster;
76 import org.apache.hadoop.hbase.master.RegionStates;
77 import org.apache.hadoop.hbase.master.ServerManager;
78 import org.apache.hadoop.hbase.regionserver.BloomType;
79 import org.apache.hadoop.hbase.regionserver.HRegion;
80 import org.apache.hadoop.hbase.regionserver.HRegionServer;
81 import org.apache.hadoop.hbase.regionserver.HStore;
82 import org.apache.hadoop.hbase.regionserver.InternalScanner;
83 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
84 import org.apache.hadoop.hbase.regionserver.wal.HLog;
85 import org.apache.hadoop.hbase.security.User;
86 import org.apache.hadoop.hbase.tool.Canary;
87 import org.apache.hadoop.hbase.util.Bytes;
88 import org.apache.hadoop.hbase.util.FSUtils;
89 import org.apache.hadoop.hbase.util.JVMClusterUtil;
90 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
91 import org.apache.hadoop.hbase.util.RegionSplitter;
92 import org.apache.hadoop.hbase.util.RetryCounter;
93 import org.apache.hadoop.hbase.util.Threads;
94 import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
95 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
96 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
97 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
98 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
99 import org.apache.hadoop.hdfs.DFSClient;
100 import org.apache.hadoop.hdfs.DistributedFileSystem;
101 import org.apache.hadoop.hdfs.MiniDFSCluster;
102 import org.apache.hadoop.mapred.JobConf;
103 import org.apache.hadoop.mapred.MiniMRCluster;
104 import org.apache.hadoop.mapred.TaskLog;
105 import org.apache.zookeeper.KeeperException;
106 import org.apache.zookeeper.KeeperException.NodeExistsException;
107 import org.apache.zookeeper.WatchedEvent;
108 import org.apache.zookeeper.ZooKeeper;
109 import org.apache.zookeeper.ZooKeeper.States;
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125 @InterfaceAudience.Public
126 @InterfaceStability.Evolving
127 public class HBaseTestingUtility extends HBaseCommonTestingUtility {
128 private MiniZooKeeperCluster zkCluster = null;
129
130
131
132
133
134 private static int DEFAULT_REGIONS_PER_SERVER = 5;
135
136
137
138
139
140 private boolean passedZkCluster = false;
141 private MiniDFSCluster dfsCluster = null;
142
143 private HBaseCluster hbaseCluster = null;
144 private MiniMRCluster mrCluster = null;
145
146
147 private boolean miniClusterRunning;
148
149 private String hadoopLogDir;
150
151
152 private File clusterTestDir = null;
153
154
155
156 private Path dataTestDirOnTestFS = null;
157
158
159
160
161
162
163
164
165 @Deprecated
166 private static final String TEST_DIRECTORY_KEY = "test.build.data";
167
168
169 private static String FS_URI;
170
171
172 private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();
173
174
175 public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
176 Arrays.asList(new Object[][] {
177 { Compression.Algorithm.NONE },
178 { Compression.Algorithm.GZ }
179 });
180
181
182 public static final List<Object[]> BOOLEAN_PARAMETERIZED =
183 Arrays.asList(new Object[][] {
184 { new Boolean(false) },
185 { new Boolean(true) }
186 });
187
188
189 public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination() ;
190
191 public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS ={
192 Compression.Algorithm.NONE, Compression.Algorithm.GZ
193 };
194
195
196
197
198
199 private static List<Object[]> bloomAndCompressionCombinations() {
200 List<Object[]> configurations = new ArrayList<Object[]>();
201 for (Compression.Algorithm comprAlgo :
202 HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
203 for (BloomType bloomType : BloomType.values()) {
204 configurations.add(new Object[] { comprAlgo, bloomType });
205 }
206 }
207 return Collections.unmodifiableList(configurations);
208 }
209
210
211
212
213 private static List<Object[]> memStoreTSAndTagsCombination() {
214 List<Object[]> configurations = new ArrayList<Object[]>();
215 configurations.add(new Object[] { false, false });
216 configurations.add(new Object[] { false, true });
217 configurations.add(new Object[] { true, false });
218 configurations.add(new Object[] { true, true });
219 return Collections.unmodifiableList(configurations);
220 }
221
222 public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
223 bloomAndCompressionCombinations();
224
  /** Creates a testing utility backed by a fresh default HBase configuration. */
  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }
228
  /**
   * Creates a testing utility around the supplied configuration.
   * @param conf configuration used (and mutated) by the mini clusters this
   *   utility starts
   */
  public HBaseTestingUtility(Configuration conf) {
    super(conf);

    // Make HFile checksum failures throw during tests so corruption surfaces
    // immediately instead of being silently retried.
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }
235
236
237
238
239
240
241
242 public static HBaseTestingUtility createLocalHTU() {
243 Configuration c = HBaseConfiguration.create();
244 return createLocalHTU(c);
245 }
246
247
248
249
250
251
252
253
254 public static HBaseTestingUtility createLocalHTU(Configuration c) {
255 HBaseTestingUtility htu = new HBaseTestingUtility(c);
256 String dataTestDir = htu.getDataTestDir().toString();
257 htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
258 LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
259 return htu;
260 }
261
262
263
264
265
266
267
268
269
270
271
272
  /**
   * Returns this utility's configuration. Pure delegation to the superclass;
   * overridden here without behavior change.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }
277
  /** Sets the cluster reference this utility operates on (mini or distributed). */
  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298 @Override
299 protected Path setupDataTestDir() {
300 Path testPath = super.setupDataTestDir();
301 if (null == testPath) {
302 return null;
303 }
304
305 createSubDirAndSystemProperty(
306 "hadoop.log.dir",
307 testPath, "hadoop-log-dir");
308
309
310
311 createSubDirAndSystemProperty(
312 "hadoop.tmp.dir",
313 testPath, "hadoop-tmp-dir");
314
315
316 createSubDir(
317 "mapred.local.dir",
318 testPath, "mapred-local-dir");
319
320 return testPath;
321 }
322
323 private void createSubDirAndSystemProperty(
324 String propertyName, Path parent, String subDirName){
325
326 String sysValue = System.getProperty(propertyName);
327
328 if (sysValue != null) {
329
330
331 LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
332 sysValue + " so I do NOT create it in " + parent);
333 String confValue = conf.get(propertyName);
334 if (confValue != null && !confValue.endsWith(sysValue)){
335 LOG.warn(
336 propertyName + " property value differs in configuration and system: "+
337 "Configuration="+confValue+" while System="+sysValue+
338 " Erasing configuration value by system value."
339 );
340 }
341 conf.set(propertyName, sysValue);
342 } else {
343
344 createSubDir(propertyName, parent, subDirName);
345 System.setProperty(propertyName, conf.get(propertyName));
346 }
347 }
348
349
350
351
352
353
354
355 private Path getBaseTestDirOnTestFS() throws IOException {
356 FileSystem fs = getTestFileSystem();
357 return new Path(fs.getWorkingDirectory(), "test-data");
358 }
359
360
361
362
363
364
  /**
   * @return the directory used by the mini cluster, creating it lazily on
   *   first access
   */
  Path getClusterTestDir() {
    if (clusterTestDir == null){
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }
371
372
373
374
375 private void setupClusterTestDir() {
376 if (clusterTestDir != null) {
377 return;
378 }
379
380
381
382 Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
383 clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
384
385 boolean b = deleteOnExit();
386 if (b) clusterTestDir.deleteOnExit();
387 conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
388 LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
389 }
390
391
392
393
394
395
396
  /**
   * Returns the path on the test FileSystem where test data should be
   * written, creating it lazily on first access.
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }
404
405
406
407
408
409
410
411
412 public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
413 return new Path(getDataTestDirOnTestFS(), subdirName);
414 }
415
416
417
418
419 private void setupDataTestDirOnTestFS() throws IOException {
420 if (dataTestDirOnTestFS != null) {
421 LOG.warn("Data test on test fs dir already setup in "
422 + dataTestDirOnTestFS.toString());
423 return;
424 }
425
426
427
428
429
430 FileSystem fs = getTestFileSystem();
431 if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
432 File dataTestDir = new File(getDataTestDir().toString());
433 if (deleteOnExit()) dataTestDir.deleteOnExit();
434 dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
435 } else {
436 Path base = getBaseTestDirOnTestFS();
437 String randomStr = UUID.randomUUID().toString();
438 dataTestDirOnTestFS = new Path(base, randomStr);
439 if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
440 }
441 }
442
443
444
445
446
447
448 public boolean cleanupDataTestDirOnTestFS() throws IOException {
449 boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
450 if (ret)
451 dataTestDirOnTestFS = null;
452 return ret;
453 }
454
455
456
457
458
459
460 public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
461 Path cpath = getDataTestDirOnTestFS(subdirName);
462 return getTestFileSystem().delete(cpath, true);
463 }
464
465
466
467
468
469
470
471
  /**
   * Starts a MiniDFSCluster with {@code servers} DataNodes on unspecified hosts.
   * @param servers number of DataNodes to start
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }
475
476
477
478
479
480
481
482
483
484
485
486
487 public MiniDFSCluster startMiniDFSCluster(final String hosts[])
488 throws Exception {
489 if ( hosts != null && hosts.length != 0) {
490 return startMiniDFSCluster(hosts.length, hosts);
491 } else {
492 return startMiniDFSCluster(1, null);
493 }
494 }
495
496
497
498
499
500
501
502
503
504
505 public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
506 throws Exception {
507 createDirsAndSetProperties();
508
509
510 org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
511 setLevel(org.apache.log4j.Level.ERROR);
512 org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
513 setLevel(org.apache.log4j.Level.ERROR);
514
515
516 this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
517 true, null, null, hosts, null);
518
519
520 FileSystem fs = this.dfsCluster.getFileSystem();
521 FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
522
523
524 this.dfsCluster.waitClusterUp();
525
526
527 dataTestDirOnTestFS = null;
528
529 return this.dfsCluster;
530 }
531
532
  /**
   * Starts a MiniDFSCluster with explicit rack and host assignments.
   * @param servers number of DataNodes
   * @param racks rack name per DataNode, or null
   * @param hosts hostname per DataNode, or null
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, racks, hosts, null);

    // Make the mini DFS the default filesystem for everything that follows.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Block until the cluster is fully up.
    this.dfsCluster.waitClusterUp();

    // The default FS just changed; invalidate any cached test-FS data dir.
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }
551
  /**
   * Starts a 5-DataNode MiniDFSCluster on a fixed NameNode port for HLog
   * tests; does not format the cluster (format=false) and does not wait for
   * it to come up.
   * @param namenodePort port for the NameNode
   */
  public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }
558
559
  /**
   * Creates the per-test Hadoop scratch directories and publishes them via
   * system properties and the configuration, then points the various mapred
   * staging/system/output dirs at the test FS. Must run before any mini
   * cluster is started so Hadoop picks the redirected paths up.
   */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapred.local.dir");
    createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
    // Short-circuit reads are configured before DFS starts, if enabled.
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
  }
578
579
580
581
582
583
584
585 public boolean isReadShortCircuitOn(){
586 final String propName = "hbase.tests.use.shortcircuit.reads";
587 String readOnProp = System.getProperty(propName);
588 if (readOnProp != null){
589 return Boolean.parseBoolean(readOnProp);
590 } else {
591 return conf.getBoolean(propName, false);
592 }
593 }
594
595
596
597
598 private void enableShortCircuit() {
599 if (isReadShortCircuitOn()) {
600 String curUser = System.getProperty("user.name");
601 LOG.info("read short circuit is ON for user " + curUser);
602
603 conf.set("dfs.block.local-path-access.user", curUser);
604
605 conf.setBoolean("dfs.client.read.shortcircuit", true);
606
607 conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
608 } else {
609 LOG.info("read short circuit is OFF");
610 }
611 }
612
613 private String createDirAndSetProperty(final String relPath, String property) {
614 String path = getDataTestDir(relPath).toString();
615 System.setProperty(property, path);
616 conf.set(property, path);
617 new File(path).mkdirs();
618 LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
619 return path;
620 }
621
622
623
624
625
626
  /**
   * Shuts down the MiniDFSCluster if one is running, clears the cached
   * test-FS data dir, and restores the local filesystem as the default FS.
   */
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {

      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }
636
637
638
639
640
641
642
643
  /** Starts a single-server MiniZooKeeperCluster. */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }
647
648
649
650
651
652
653
654
655
  /**
   * Starts a MiniZooKeeperCluster with the given number of servers under the
   * cluster test directory.
   * @param zooKeeperServerNum number of ZooKeeper servers
   */
  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }
661
  /** Starts a single-server ZK cluster rooted at {@code dir}. */
  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
      throws Exception {
    return startMiniZKCluster(dir,1);
  }
666
667
668
669
670
  /**
   * Starts the ZK cluster in {@code dir}. Honors the
   * {@code test.hbase.zookeeper.property.clientPort} property as a fixed
   * client port and records the actual port in the configuration.
   * @throws IOException if a ZK cluster is already running
   */
  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
      throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    // This cluster is owned by us (not passed in), so we may shut it down.
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0){
      // A fixed client port was requested for this test run; use it.
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum);
    // Publish the actual port so clients created from this conf can connect.
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }
689
690
691
692
693
694
695
  /** Shuts down the MiniZooKeeperCluster if one is running. */
  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }
702
703
704
705
706
707
708
  /** Starts a minimal cluster: one master, one region server, one DataNode. */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }
712
713
714
715
716
717
718
719
720
721
722
723
724
725
  /**
   * Starts a mini cluster with one master and {@code numSlaves} region
   * servers (also {@code numSlaves} DataNodes).
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
      throws Exception {
    return startMiniCluster(1, numSlaves);
  }
730
731
732
733
734
735
736
737
  /**
   * Starts a mini cluster with the given master and slave counts on
   * unspecified DataNode hosts.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
  /**
   * Starts a mini cluster; DataNode count matches {@code numSlaves} unless
   * {@code dataNodeHosts} dictates otherwise.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
  }
772
773
774
775
776
  /**
   * Starts a mini cluster with explicit master, region-server, and DataNode
   * counts, default master/regionserver classes.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
  /**
   * Starts a mini cluster with custom master/regionserver classes; DataNode
   * count defaults to {@code numSlaves}.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    return startMiniCluster(
      numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }
816
817
818
819
820
821
  /**
   * Starts the full mini cluster: DFS first, then ZooKeeper (unless one was
   * already provided), then HBase itself. Only one mini cluster may run per
   * utility instance.
   * @param dataNodeHosts if non-empty, overrides {@code numDataNodes} with
   *   one DataNode per host
   * @throws IllegalStateException if a mini cluster is already running
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    // Explicit hosts win over the requested DataNode count.
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
      numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // Guard against two overlapping mini clusters in one JVM/instance.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // DFS must be up before HBase so the root dir can be created on it.
    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    // Start a ZK cluster only if the caller did not pass one in.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Finally bring up HBase on top of DFS + ZK.
    return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
  }
855
  /** Starts the HBase portion of the mini cluster with default classes. */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException{
    return startMiniHBaseCluster(numMasters, numSlaves, null, null);
  }
860
861
862
863
864
865
866
867
868
869
870
871
872 public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
873 final int numSlaves, Class<? extends HMaster> masterClass,
874 Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
875 throws IOException, InterruptedException {
876
877 createRootDir();
878
879
880
881 if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
882 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
883 }
884 if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
885 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
886 }
887
888 Configuration c = new Configuration(this.conf);
889 this.hbaseCluster =
890 new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
891
892 HTable t = new HTable(c, TableName.META_TABLE_NAME);
893 ResultScanner s = t.getScanner(new Scan());
894 while (s.next() != null) {
895 continue;
896 }
897 s.close();
898 t.close();
899
900 getHBaseAdmin();
901 LOG.info("Minicluster is up");
902 return (MiniHBaseCluster)this.hbaseCluster;
903 }
904
905
906
907
908
909
910
911 public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
912 this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
913
914 HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
915 ResultScanner s = t.getScanner(new Scan());
916 while (s.next() != null) {
917
918 }
919 LOG.info("HBase has been restarted");
920 s.close();
921 t.close();
922 }
923
924
925
926
927
928
929 public MiniHBaseCluster getMiniHBaseCluster() {
930 if (this.hbaseCluster instanceof MiniHBaseCluster) {
931 return (MiniHBaseCluster)this.hbaseCluster;
932 }
933 throw new RuntimeException(hbaseCluster + " not an instance of " +
934 MiniHBaseCluster.class.getName());
935 }
936
937
938
939
940
941
  /**
   * Shuts down everything in reverse start order: HBase, then ZK (only if we
   * started it ourselves), then DFS, then the test directory.
   */
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    // Only stop ZK if it was not handed to us by the caller.
    if (!this.passedZkCluster){
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }
954
955
956
957
958
  /**
   * Shuts down the HBase portion of the mini cluster: closes the cached
   * admin and ZK watcher, resets the region-server wait bounds, and waits
   * for the cluster threads to exit.
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    // Unset the region-server wait bounds we may have set at startup so the
    // next cluster start recomputes them.
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shut down zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }
980
981
982
983
984
985
986
987 public Path getDefaultRootDirPath() throws IOException {
988 FileSystem fs = FileSystem.get(this.conf);
989 return new Path(fs.makeQualified(fs.getHomeDirectory()),"hbase");
990 }
991
992
993
994
995
996
997
998
999
  /**
   * Creates the hbase root dir on the configured filesystem, records it in
   * the configuration, and stamps the FS version file.
   * @return the created root dir path
   */
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    // Write the hbase.version file so the master accepts the directory.
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }
1008
1009
1010
1011
1012
  /** Flushes all caches in the mini hbase cluster. */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }
1016
1017
1018
1019
1020
  /** Flushes all caches of the given table in the mini hbase cluster. */
  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }
1024
1025
1026
1027
1028
  /** Compacts all regions in the mini hbase cluster (major if requested). */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }
1032
1033
1034
1035
1036
  /** Compacts all regions of the given table (major if requested). */
  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }
1040
1041
1042
1043
1044
1045
1046
1047
  /**
   * Creates a single-family table.
   * @return an HTable for the new table
   */
  public HTable createTable(String tableName, String family)
      throws IOException{
    return createTable(TableName.valueOf(tableName), new String[]{family});
  }
1052
1053
1054
1055
1056
1057
1058
1059
  /**
   * Creates a single-family table.
   * @return an HTable for the new table
   */
  public HTable createTable(byte[] tableName, byte[] family)
      throws IOException{
    return createTable(TableName.valueOf(tableName), new byte[][]{family});
  }
1064
1065
1066
1067
1068
1069
1070
1071
1072 public HTable createTable(TableName tableName, String[] families)
1073 throws IOException {
1074 List<byte[]> fams = new ArrayList<byte[]>(families.length);
1075 for (String family : families) {
1076 fams.add(Bytes.toBytes(family));
1077 }
1078 return createTable(tableName, fams.toArray(new byte[0][]));
1079 }
1080
1081
1082
1083
1084
1085
1086
1087
  /**
   * Creates a single-family table.
   * @return an HTable for the new table
   */
  public HTable createTable(TableName tableName, byte[] family)
      throws IOException{
    return createTable(tableName, new byte[][]{family});
  }
1092
1093
1094
1095
1096
1097
1098
1099
1100
  /**
   * Creates a multi-family table using a copy of this utility's configuration
   * for the returned HTable.
   */
  public HTable createTable(byte[] tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }
1106
1107
1108
1109
1110
1111
1112
1113
  /**
   * Creates a multi-family table using a copy of this utility's configuration
   * for the returned HTable.
   */
  public HTable createTable(TableName tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }
1119
  /** Creates a pre-split multi-family table; delegates to the TableName overload. */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }
1125
  /** Creates a pre-split multi-family table; delegates to the TableName overload. */
  public HTable createTable(String tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }
1131
1132 public HTable createTable(TableName tableName, byte[][] families,
1133 int numVersions, byte[] startKey, byte[] endKey, int numRegions)
1134 throws IOException{
1135 HTableDescriptor desc = new HTableDescriptor(tableName);
1136 for (byte[] family : families) {
1137 HColumnDescriptor hcd = new HColumnDescriptor(family)
1138 .setMaxVersions(numVersions);
1139 desc.addFamily(hcd);
1140 }
1141 getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
1142
1143 waitUntilAllRegionsAssigned(tableName);
1144 return new HTable(getConfiguration(), tableName);
1145 }
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155 public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
1156 throws IOException {
1157 for(byte[] family : families) {
1158 HColumnDescriptor hcd = new HColumnDescriptor(family);
1159
1160
1161
1162 hcd.setBloomFilterType(BloomType.NONE);
1163 htd.addFamily(hcd);
1164 }
1165 getHBaseAdmin().createTable(htd);
1166
1167 waitUntilAllRegionsAssigned(htd.getTableName());
1168 return new HTable(c, htd.getTableName());
1169 }
1170
1171
1172
1173
1174
1175
1176
1177
1178
  /**
   * Creates a multi-family table using {@code c} for the returned HTable;
   * delegates to the HTableDescriptor overload (which waits for assignment).
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    return createTable(new HTableDescriptor(tableName), families, c);
  }
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193 public HTable createTable(byte[] tableName, byte[][] families,
1194 final Configuration c)
1195 throws IOException {
1196 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1197 for(byte[] family : families) {
1198 HColumnDescriptor hcd = new HColumnDescriptor(family);
1199
1200
1201
1202 hcd.setBloomFilterType(BloomType.NONE);
1203 desc.addFamily(hcd);
1204 }
1205 getHBaseAdmin().createTable(desc);
1206 return new HTable(c, tableName);
1207 }
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
  /**
   * Creates a multi-family table with the given max versions, using {@code c}
   * for the returned HTable; waits for all regions to be assigned.
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242 public HTable createTable(byte[] tableName, byte[][] families,
1243 final Configuration c, int numVersions)
1244 throws IOException {
1245 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1246 for(byte[] family : families) {
1247 HColumnDescriptor hcd = new HColumnDescriptor(family)
1248 .setMaxVersions(numVersions);
1249 desc.addFamily(hcd);
1250 }
1251 getHBaseAdmin().createTable(desc);
1252 return new HTable(c, tableName);
1253 }
1254
1255
1256
1257
1258
1259
1260
1261
1262
  /** Creates a single-family table with the given max versions. */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }
1267
1268
1269
1270
1271
1272
1273
1274
1275
  /** Creates a single-family table with the given max versions. */
  public HTable createTable(TableName tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }
1280
1281
1282
1283
1284
1285
1286
1287
1288
  /** Creates a multi-family table with the given max versions. */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
      throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303 public HTable createTable(TableName tableName, byte[][] families,
1304 int numVersions)
1305 throws IOException {
1306 HTableDescriptor desc = new HTableDescriptor(tableName);
1307 for (byte[] family : families) {
1308 HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1309 desc.addFamily(hcd);
1310 }
1311 getHBaseAdmin().createTable(desc);
1312
1313 waitUntilAllRegionsAssigned(tableName);
1314 return new HTable(new Configuration(getConfiguration()), tableName);
1315 }
1316
1317
1318
1319
1320
1321
1322
1323
1324
  /** Creates a multi-family table with the given max versions and block size. */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    return createTable(TableName.valueOf(tableName),
        families, numVersions, blockSize);
  }
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339 public HTable createTable(TableName tableName, byte[][] families,
1340 int numVersions, int blockSize) throws IOException {
1341 HTableDescriptor desc = new HTableDescriptor(tableName);
1342 for (byte[] family : families) {
1343 HColumnDescriptor hcd = new HColumnDescriptor(family)
1344 .setMaxVersions(numVersions)
1345 .setBlocksize(blockSize);
1346 desc.addFamily(hcd);
1347 }
1348 getHBaseAdmin().createTable(desc);
1349
1350 waitUntilAllRegionsAssigned(tableName);
1351 return new HTable(new Configuration(getConfiguration()), tableName);
1352 }
1353
1354
1355
1356
1357
1358
1359
1360
1361
  /** Creates a table where family i keeps {@code numVersions[i]} versions. */
  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376 public HTable createTable(TableName tableName, byte[][] families,
1377 int[] numVersions)
1378 throws IOException {
1379 HTableDescriptor desc = new HTableDescriptor(tableName);
1380 int i = 0;
1381 for (byte[] family : families) {
1382 HColumnDescriptor hcd = new HColumnDescriptor(family)
1383 .setMaxVersions(numVersions[i]);
1384 desc.addFamily(hcd);
1385 i++;
1386 }
1387 getHBaseAdmin().createTable(desc);
1388
1389 waitUntilAllRegionsAssigned(tableName);
1390 return new HTable(new Configuration(getConfiguration()), tableName);
1391 }
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401 public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
1402 throws IOException{
1403 return createTable(TableName.valueOf(tableName), family, splitRows);
1404 }
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414 public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
1415 throws IOException {
1416 HTableDescriptor desc = new HTableDescriptor(tableName);
1417 HColumnDescriptor hcd = new HColumnDescriptor(family);
1418 desc.addFamily(hcd);
1419 getHBaseAdmin().createTable(desc, splitRows);
1420
1421 waitUntilAllRegionsAssigned(tableName);
1422 return new HTable(getConfiguration(), tableName);
1423 }
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433 public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
1434 throws IOException {
1435 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1436 for(byte[] family:families) {
1437 HColumnDescriptor hcd = new HColumnDescriptor(family);
1438 desc.addFamily(hcd);
1439 }
1440 getHBaseAdmin().createTable(desc, splitRows);
1441
1442 waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
1443 return new HTable(getConfiguration(), tableName);
1444 }
1445
1446
1447
1448
1449
1450 public void deleteTable(String tableName) throws IOException {
1451 deleteTable(TableName.valueOf(tableName));
1452 }
1453
1454
1455
1456
1457
1458 public void deleteTable(byte[] tableName) throws IOException {
1459 deleteTable(TableName.valueOf(tableName));
1460 }
1461
1462
1463
1464
1465
1466 public void deleteTable(TableName tableName) throws IOException {
1467 try {
1468 getHBaseAdmin().disableTable(tableName);
1469 } catch (TableNotEnabledException e) {
1470 LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1471 }
1472 getHBaseAdmin().deleteTable(tableName);
1473 }
1474
1475
1476
1477
1478
// Canonical column family names shared by the descriptor/table helpers below.
public final static byte [] fam1 = Bytes.toBytes("colfamily11");
public final static byte [] fam2 = Bytes.toBytes("colfamily21");
public final static byte [] fam3 = Bytes.toBytes("colfamily31");
// All three families, in declaration order.
public static final byte[][] COLUMNS = {fam1, fam2, fam3};
// Default max-versions used by createTableDescriptor(String).
private static final int MAXVERSIONS = 3;

// Row-key alphabet: keys produced by the load helpers run 'aaa' .. 'zzz'.
public static final char FIRST_CHAR = 'a';
public static final char LAST_CHAR = 'z';
public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1489
1490
1491
1492
1493
1494
1495
1496
1497 public HTableDescriptor createTableDescriptor(final String name,
1498 final int minVersions, final int versions, final int ttl, boolean keepDeleted) {
1499 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
1500 for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1501 htd.addFamily(new HColumnDescriptor(cfName)
1502 .setMinVersions(minVersions)
1503 .setMaxVersions(versions)
1504 .setKeepDeletedCells(keepDeleted)
1505 .setBlockCacheEnabled(false)
1506 .setTimeToLive(ttl)
1507 );
1508 }
1509 return htd;
1510 }
1511
1512
1513
1514
1515
1516
1517
1518 public HTableDescriptor createTableDescriptor(final String name) {
1519 return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1520 MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1521 }
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531 public HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey,
1532 byte [] endKey)
1533 throws IOException {
1534 HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
1535 return createLocalHRegion(hri, desc);
1536 }
1537
1538
1539
1540
1541
1542
1543
1544
1545 public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
1546 return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
1547 }
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557 public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog) throws IOException {
1558 return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
1559 }
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574 public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
1575 String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
1576 HLog hlog, byte[]... families) throws IOException {
1577 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
1578 htd.setReadOnly(isReadOnly);
1579 for (byte[] family : families) {
1580 HColumnDescriptor hcd = new HColumnDescriptor(family);
1581
1582 hcd.setMaxVersions(Integer.MAX_VALUE);
1583 htd.addFamily(hcd);
1584 }
1585 htd.setDurability(durability);
1586 HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
1587 return createLocalHRegion(info, htd, hlog);
1588 }
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598 public HTable truncateTable(byte[] tableName) throws IOException {
1599 return truncateTable(TableName.valueOf(tableName));
1600 }
1601
1602
1603
1604
1605
1606
1607
1608 public HTable truncateTable(TableName tableName) throws IOException {
1609 HTable table = new HTable(getConfiguration(), tableName);
1610 Scan scan = new Scan();
1611 ResultScanner resScan = table.getScanner(scan);
1612 for(Result res : resScan) {
1613 Delete del = new Delete(res.getRow());
1614 table.delete(del);
1615 }
1616 resScan = table.getScanner(scan);
1617 resScan.close();
1618 return table;
1619 }
1620
1621
1622
1623
1624
1625
1626
1627
1628 public int loadTable(final HTable t, final byte[] f) throws IOException {
1629 return loadTable(t, new byte[][] {f});
1630 }
1631
1632
1633
1634
1635
1636
1637
1638
1639 public int loadTable(final HTable t, final byte[][] f) throws IOException {
1640 return loadTable(t, f, null);
1641 }
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651 public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
1652 t.setAutoFlush(false);
1653 int rowCount = 0;
1654 for (byte[] row : HBaseTestingUtility.ROWS) {
1655 Put put = new Put(row);
1656 for (int i = 0; i < f.length; i++) {
1657 put.add(f[i], null, value != null ? value : row);
1658 }
1659 t.put(put);
1660 rowCount++;
1661 }
1662 t.flushCommits();
1663 return rowCount;
1664 }
1665
1666
1667
1668
1669 public static class SeenRowTracker {
1670 int dim = 'z' - 'a' + 1;
1671 int[][][] seenRows = new int[dim][dim][dim];
1672 byte[] startRow;
1673 byte[] stopRow;
1674
1675 public SeenRowTracker(byte[] startRow, byte[] stopRow) {
1676 this.startRow = startRow;
1677 this.stopRow = stopRow;
1678 }
1679
1680 void reset() {
1681 for (byte[] row : ROWS) {
1682 seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
1683 }
1684 }
1685
1686 int i(byte b) {
1687 return b - 'a';
1688 }
1689
1690 public void addRow(byte[] row) {
1691 seenRows[i(row[0])][i(row[1])][i(row[2])]++;
1692 }
1693
1694
1695
1696
1697 public void validate() {
1698 for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1699 for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1700 for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1701 int count = seenRows[i(b1)][i(b2)][i(b3)];
1702 int expectedCount = 0;
1703 if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
1704 && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
1705 expectedCount = 1;
1706 }
1707 if (count != expectedCount) {
1708 String row = new String(new byte[] {b1,b2,b3});
1709 throw new RuntimeException("Row:" + row + " has a seen count of " + count + " instead of " + expectedCount);
1710 }
1711 }
1712 }
1713 }
1714 }
1715 }
1716
1717 public int loadRegion(final HRegion r, final byte[] f) throws IOException {
1718 return loadRegion(r, f, false);
1719 }
1720
1721
1722
1723
1724
1725
1726
1727
1728
/**
 * Load the region with every 3-letter row 'aaa'..'zzz', writing the row key
 * as the cell value. Each put is retried with exponential backoff while the
 * region reports itself too busy.
 * @param r region to load
 * @param f column family to write to
 * @param flush if true, flush the region cache after each first-letter batch
 * @return number of rows written
 * @throws IOException if a put fails for a reason other than a busy region
 */
public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
    throws IOException {
  byte[] k = new byte[3];
  int rowCount = 0;
  for (byte b1 = 'a'; b1 <= 'z'; b1++) {
    for (byte b2 = 'a'; b2 <= 'z'; b2++) {
      for (byte b3 = 'a'; b3 <= 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        Put put = new Put(k);
        put.add(f, null, k);
        // NOTE(review): skips the WAL when the region has no log attached —
        // presumably required for regions created without an HLog; confirm.
        if (r.getLog() == null) put.setDurability(Durability.SKIP_WAL);

        // Retry until the put lands: doubles the pause (capped at maxPause
        // ms) each time the region throws RegionTooBusyException. The outer
        // while exits only once rowCount advances past preRowCount, i.e.
        // once the put succeeded.
        int preRowCount = rowCount;
        int pause = 10;
        int maxPause = 1000;
        while (rowCount == preRowCount) {
          try {
            r.put(put);
            rowCount++;
          } catch (RegionTooBusyException e) {
            pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
            Threads.sleep(pause);
          }
        }
      }
    }
    // Flush once per first-letter batch (26 flushes max), not per row.
    if (flush) {
      r.flushcache();
    }
  }
  return rowCount;
}
1763
1764 public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow) throws IOException {
1765 for (int i = startRow; i < endRow; i++) {
1766 byte[] data = Bytes.toBytes(String.valueOf(i));
1767 Put put = new Put(data);
1768 put.add(f, null, data);
1769 t.put(put);
1770 }
1771 }
1772
1773
1774
1775
1776 public int countRows(final HTable table) throws IOException {
1777 Scan scan = new Scan();
1778 ResultScanner results = table.getScanner(scan);
1779 int count = 0;
1780 for (@SuppressWarnings("unused") Result res : results) {
1781 count++;
1782 }
1783 results.close();
1784 return count;
1785 }
1786
1787 public int countRows(final HTable table, final byte[]... families) throws IOException {
1788 Scan scan = new Scan();
1789 for (byte[] family: families) {
1790 scan.addFamily(family);
1791 }
1792 ResultScanner results = table.getScanner(scan);
1793 int count = 0;
1794 for (@SuppressWarnings("unused") Result res : results) {
1795 count++;
1796 }
1797 results.close();
1798 return count;
1799 }
1800
1801
1802
1803
1804 public String checksumRows(final HTable table) throws Exception {
1805 Scan scan = new Scan();
1806 ResultScanner results = table.getScanner(scan);
1807 MessageDigest digest = MessageDigest.getInstance("MD5");
1808 for (Result res : results) {
1809 digest.update(res.getRow());
1810 }
1811 results.close();
1812 return digest.toString();
1813 }
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823 public int createMultiRegions(HTable table, byte[] columnFamily)
1824 throws IOException {
1825 return createMultiRegions(getConfiguration(), table, columnFamily);
1826 }
1827
1828
/** Every 3-letter lowercase row key 'aaa' .. 'zzz', in lexicographic order. */
public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3];
static {
  int idx = 0;
  for (byte c1 = 'a'; c1 <= 'z'; c1++) {
    for (byte c2 = 'a'; c2 <= 'z'; c2++) {
      for (byte c3 = 'a'; c3 <= 'z'; c3++) {
        ROWS[idx][0] = c1;
        ROWS[idx][1] = c2;
        ROWS[idx][2] = c3;
        idx++;
      }
    }
  }
}
1843
// Default region start keys used by createMultiRegions: the empty key for the
// first region, then "bbb" .. "yyy".
public static final byte[][] KEYS = {
HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
};
1855
// Same split points as KEYS but without the leading empty key and with "zzz"
// appended — shaped for passing as explicit split keys to
// HBaseAdmin.createTable (the "HBA" in the name).
public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
Bytes.toBytes("bbb"),
Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
};
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876 public int createMultiRegions(final Configuration c, final HTable table,
1877 final byte[] columnFamily)
1878 throws IOException {
1879 return createMultiRegions(c, table, columnFamily, KEYS);
1880 }
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891 public int createMultiRegions(final Configuration c, final HTable table,
1892 final byte [] family, int numRegions)
1893 throws IOException {
1894 if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1895 byte [] startKey = Bytes.toBytes("aaaaa");
1896 byte [] endKey = Bytes.toBytes("zzzzz");
1897 byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1898 byte [][] regionStartKeys = new byte[splitKeys.length+1][];
1899 for (int i=0;i<splitKeys.length;i++) {
1900 regionStartKeys[i+1] = splitKeys[i];
1901 }
1902 regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
1903 return createMultiRegions(c, table, family, regionStartKeys);
1904 }
1905
/**
 * Rewrite the region layout of {@code table} directly in the meta table: one
 * region per entry of {@code startKeys} (after sorting), with region i ending
 * where region i+1 begins and the last region wrapping to the first key.
 * The table's pre-existing meta rows are deleted, the original region's
 * directory is removed from the filesystem, client caches are cleared, and
 * the new regions are assigned if the table is enabled.
 * @param c configuration used to reach the meta table and filesystem
 * @param table table whose regions are being rewritten
 * @param columnFamily family added to the descriptor if not already present
 * @param startKeys start keys of the desired regions
 * @return number of regions written to meta
 * @throws IOException on meta or filesystem failures
 */
@SuppressWarnings("deprecation")
public int createMultiRegions(final Configuration c, final HTable table,
    final byte[] columnFamily, byte [][] startKeys)
    throws IOException {
  // Sort so consecutive entries form contiguous [start, end) ranges.
  Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
  HTable meta = new HTable(c, TableName.META_TABLE_NAME);
  HTableDescriptor htd = table.getTableDescriptor();
  // Make sure the requested family exists on the descriptor.
  if(!htd.hasFamily(columnFamily)) {
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    htd.addFamily(hcd);
  }

  // Capture the table's current meta rows and the encoded name of its single
  // existing region (full-range lookup) BEFORE adding the new regions, so the
  // old state can be removed afterwards.
  List<byte[]> rows = getMetaTableRows(htd.getTableName());
  String regionToDeleteInFS = table
      .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
      .getRegionInfo().getEncodedName();
  List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

  // Write one meta row per new region; region i spans
  // [startKeys[i], startKeys[(i+1) % n]) — the modulo makes the last region's
  // end key the (empty, post-sort) first key.
  int count = 0;
  for (int i = 0; i < startKeys.length; i++) {
    int j = (i + 1) % startKeys.length;
    HRegionInfo hri = new HRegionInfo(table.getName(),
        startKeys[i], startKeys[j]);
    MetaEditor.addRegionToMeta(meta, hri);
    newRegions.add(hri);
    count++;
  }

  // Remove the table's old meta rows captured above.
  for (byte[] row : rows) {
    LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
    meta.delete(new Delete(row));
  }

  // Remove the old region's directory from the filesystem. Uses the
  // deprecated single-argument FileSystem.delete — hence the
  // @SuppressWarnings("deprecation") on this method.
  Path tableDir = new Path(getDefaultRootDirPath().toString()
      + System.getProperty("file.separator") + htd.getTableName()
      + System.getProperty("file.separator") + regionToDeleteInFS);
  FileSystem.get(c).delete(tableDir);

  // Drop any cached region locations so clients re-read the rewritten meta.
  HConnection conn = table.getConnection();
  conn.clearRegionCache();

  // Assign the new regions if the table is currently enabled.
  HBaseAdmin admin = getHBaseAdmin();
  if (admin.isTableEnabled(table.getTableName())) {
    for(HRegionInfo hri : newRegions) {
      admin.assign(hri.getRegionName());
    }
  }

  meta.close();

  return count;
}
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973 public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
1974 final HTableDescriptor htd, byte [][] startKeys)
1975 throws IOException {
1976 HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
1977 Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
1978 List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
1979
1980 for (int i = 0; i < startKeys.length; i++) {
1981 int j = (i + 1) % startKeys.length;
1982 HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
1983 startKeys[j]);
1984 MetaEditor.addRegionToMeta(meta, hri);
1985 newRegions.add(hri);
1986 }
1987
1988 meta.close();
1989 return newRegions;
1990 }
1991
1992
1993
1994
1995
1996
1997 public List<byte[]> getMetaTableRows() throws IOException {
1998
1999 HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2000 List<byte[]> rows = new ArrayList<byte[]>();
2001 ResultScanner s = t.getScanner(new Scan());
2002 for (Result result : s) {
2003 LOG.info("getMetaTableRows: row -> " +
2004 Bytes.toStringBinary(result.getRow()));
2005 rows.add(result.getRow());
2006 }
2007 s.close();
2008 t.close();
2009 return rows;
2010 }
2011
2012
2013
2014
2015
2016
2017 public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2018
2019 HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2020 List<byte[]> rows = new ArrayList<byte[]>();
2021 ResultScanner s = t.getScanner(new Scan());
2022 for (Result result : s) {
2023 HRegionInfo info = HRegionInfo.getHRegionInfo(result);
2024 if (info == null) {
2025 LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2026
2027 continue;
2028 }
2029
2030 if (info.getTable().equals(tableName)) {
2031 LOG.info("getMetaTableRows: row -> " +
2032 Bytes.toStringBinary(result.getRow()) + info);
2033 rows.add(result.getRow());
2034 }
2035 }
2036 s.close();
2037 t.close();
2038 return rows;
2039 }
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052 public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
2053 throws IOException, InterruptedException {
2054 return getRSForFirstRegionInTable(TableName.valueOf(tableName));
2055 }
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066 public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2067 throws IOException, InterruptedException {
2068 List<byte[]> metaRows = getMetaTableRows(tableName);
2069 if (metaRows == null || metaRows.isEmpty()) {
2070 return null;
2071 }
2072 LOG.debug("Found " + metaRows.size() + " rows for table " +
2073 tableName);
2074 byte [] firstrow = metaRows.get(0);
2075 LOG.debug("FirstRow=" + Bytes.toString(firstrow));
2076 long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2077 HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2078 int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2079 HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
2080 RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MICROSECONDS);
2081 while(retrier.shouldRetry()) {
2082 int index = getMiniHBaseCluster().getServerWith(firstrow);
2083 if (index != -1) {
2084 return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2085 }
2086
2087 retrier.sleepUntilNextRetry();
2088 }
2089 return null;
2090 }
2091
2092
2093
2094
2095
2096
2097
2098 public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2099 startMiniMapReduceCluster(2);
2100 return mrCluster;
2101 }
2102
2103
2104
2105
2106
/**
 * Redirect MapReduce task logs under {@code hadoopLogDir} by reflectively
 * rewriting the private static final field {@code TaskLog.LOG_DIR}. The
 * field's FINAL modifier is cleared first (via Field.modifiers) so the
 * assignment is permitted. Any reflection failure is fatal and rethrown as a
 * RuntimeException.
 */
private void forceChangeTaskLogDir() {
  Field logDirField;
  try {
    logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
    logDirField.setAccessible(true);

    // Strip the FINAL bit from the field's modifiers so set() below works.
    Field modifiersField = Field.class.getDeclaredField("modifiers");
    modifiersField.setAccessible(true);
    modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);

    // Point task logs at <hadoopLogDir>/userlogs.
    logDirField.set(null, new File(hadoopLogDir, "userlogs"));
  } catch (SecurityException e) {
    throw new RuntimeException(e);
  } catch (NoSuchFieldException e) {
    // Field name changed in this Hadoop version — nothing we can do.
    throw new RuntimeException(e);
  } catch (IllegalArgumentException e) {
    throw new RuntimeException(e);
  } catch (IllegalAccessException e) {
    throw new RuntimeException(e);
  }
}
2129
2130
2131
2132
2133
2134
2135
/**
 * Start a mini MapReduce cluster with the given number of servers, wiring the
 * relevant job-tracker/YARN addresses from the cluster's JobConf back into
 * this utility's Configuration so subsequently submitted jobs find it.
 * @param servers number of MR servers to start
 * @throws IOException if startup fails
 * @throws IllegalStateException if a mini MR cluster is already running
 */
private void startMiniMapReduceCluster(final int servers) throws IOException {
  if (mrCluster != null) {
    throw new IllegalStateException("MiniMRCluster is already running");
  }
  LOG.info("Starting mini mapreduce cluster...");
  setupClusterTestDir();
  createDirsAndSetProperties();

  // Route task logs under our test log dir (reflection hack; see helper).
  forceChangeTaskLogDir();

  // Allow generous virtual memory relative to physical so containers are not
  // killed on test machines.
  conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

  // Deterministic test runs: no speculative task execution.
  conf.setBoolean("mapreduce.map.speculative", false);
  conf.setBoolean("mapreduce.reduce.speculative", false);

  // Use the test filesystem URI when one was set, else whatever the
  // configuration resolves to.
  mrCluster = new MiniMRCluster(servers,
      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
      null, null, new JobConf(this.conf));
  JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
  if (jobConf == null) {
    jobConf = mrCluster.createJobConf();
  }

  jobConf.set("mapred.local.dir",
      conf.get("mapred.local.dir")); //Hadoop MiniMR overwrites this while it should not
  LOG.info("Mini mapreduce cluster started");

  // Copy the addresses the mini cluster chose back into our Configuration so
  // clients built from it can reach the cluster.
  conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
  // YARN-mode settings for Hadoop 2.
  conf.set("mapreduce.framework.name", "yarn");
  conf.setBoolean("yarn.is.minicluster", true);
  String rmAddress = jobConf.get("yarn.resourcemanager.address");
  if (rmAddress != null) {
    conf.set("yarn.resourcemanager.address", rmAddress);
  }
  String schedulerAddress =
      jobConf.get("yarn.resourcemanager.scheduler.address");
  if (schedulerAddress != null) {
    conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
  }
}
2187
2188
2189
2190
2191 public void shutdownMiniMapReduceCluster() {
2192 LOG.info("Stopping mini mapreduce cluster...");
2193 if (mrCluster != null) {
2194 mrCluster.shutdown();
2195 mrCluster = null;
2196 }
2197
2198 conf.set("mapred.job.tracker", "local");
2199 LOG.info("Mini mapreduce cluster stopped");
2200 }
2201
2202
2203
2204
2205 public RegionServerServices createMockRegionServerService() throws IOException {
2206 return createMockRegionServerService((ServerName)null);
2207 }
2208
2209
2210
2211
2212
2213 public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
2214 final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2215 rss.setFileSystem(getTestFileSystem());
2216 rss.setRpcServer(rpc);
2217 return rss;
2218 }
2219
2220
2221
2222
2223
2224 public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2225 final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2226 rss.setFileSystem(getTestFileSystem());
2227 return rss;
2228 }
2229
2230
2231
2232
2233
2234
2235 public void enableDebug(Class<?> clazz) {
2236 Log l = LogFactory.getLog(clazz);
2237 if (l instanceof Log4JLogger) {
2238 ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2239 } else if (l instanceof Jdk14Logger) {
2240 ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2241 }
2242 }
2243
2244
2245
2246
2247
2248 public void expireMasterSession() throws Exception {
2249 HMaster master = getMiniHBaseCluster().getMaster();
2250 expireSession(master.getZooKeeper(), false);
2251 }
2252
2253
2254
2255
2256
2257
2258 public void expireRegionServerSession(int index) throws Exception {
2259 HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2260 expireSession(rs.getZooKeeper(), false);
2261 decrementMinRegionServerCount();
2262 }
2263
2264 private void decrementMinRegionServerCount() {
2265
2266
2267 decrementMinRegionServerCount(getConfiguration());
2268
2269
2270 for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2271 decrementMinRegionServerCount(master.getMaster().getConfiguration());
2272 }
2273 }
2274
2275 private void decrementMinRegionServerCount(Configuration conf) {
2276 int currentCount = conf.getInt(
2277 ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2278 if (currentCount != -1) {
2279 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
2280 Math.max(currentCount - 1, 1));
2281 }
2282 }
2283
2284 public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
2285 expireSession(nodeZK, false);
2286 }
2287
/**
 * Expire the session behind the given watcher. The {@code server} argument is
 * ignored.
 * @deprecated use {@link #expireSession(ZooKeeperWatcher, boolean)} instead.
 */
@Deprecated
public void expireSession(ZooKeeperWatcher nodeZK, Server server)
throws Exception {
expireSession(nodeZK, false);
}
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
/**
 * Force-expire the ZooKeeper session behind {@code nodeZK}. A second client
 * handle is opened with the same session id and password and then closed;
 * closing a handle bound to that session causes the server to end it, so the
 * original watcher observes a session expiration.
 * @param nodeZK watcher whose underlying session should be expired
 * @param checkStatus if true, verify afterwards that meta is still reachable
 *   by opening (and closing) a connection to it
 * @throws Exception on ZooKeeper or connection errors
 */
public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
    throws Exception {
  Configuration c = new Configuration(this.conf);
  String quorumServers = ZKConfig.getZKQuorumServersString(c);
  ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
  // Credentials needed to impersonate the existing session.
  byte[] password = zk.getSessionPasswd();
  long sessionID = zk.getSessionId();

  // A monitor handle on the same session, used here only to log the events
  // it receives while the expiry plays out.
  ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher(){
    @Override
    public void process(WatchedEvent watchedEvent) {
      LOG.info("Monitor ZKW received event="+watchedEvent);
    }
  } , sessionID, password);

  // Second handle bound to the same session id/password; closing it below is
  // what ends the session server-side.
  ZooKeeper newZK = new ZooKeeper(quorumServers,
      1000, EmptyWatcher.instance, sessionID, password);

  // Give the new handle up to 1s to actually connect before closing it.
  long start = System.currentTimeMillis();
  while (newZK.getState() != States.CONNECTED
      && System.currentTimeMillis() - start < 1000) {
    Thread.sleep(1);
  }
  newZK.close();
  LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

  // Done observing — release the monitor handle.
  monitor.close();

  if (checkStatus) {
    // Sanity check: the cluster should still serve meta after the expiry.
    new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
  }
}
2349
2350
2351
2352
2353
2354
2355
2356 public MiniHBaseCluster getHBaseCluster() {
2357 return getMiniHBaseCluster();
2358 }
2359
2360
2361
2362
2363
2364
2365
2366
2367
/**
 * Get the cluster as its abstract {@code HBaseCluster} type rather than the
 * concrete mini-cluster class.
 * @return the current cluster instance (may be null if none was started)
 */
public HBaseCluster getHBaseClusterInterface() {
// Return the field directly — no lazy initialization here.
return hbaseCluster;
}
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383 public synchronized HBaseAdmin getHBaseAdmin()
2384 throws IOException {
2385 if (hbaseAdmin == null){
2386 hbaseAdmin = new HBaseAdminForTests(getConfiguration());
2387 }
2388 return hbaseAdmin;
2389 }
2390
// Shared admin instance, lazily created by getHBaseAdmin().
private HBaseAdminForTests hbaseAdmin = null;
/**
 * HBaseAdmin subclass whose public {@code close()} only logs a warning
 * instead of closing, so tests cannot accidentally shut down the shared
 * instance handed out by {@link #getHBaseAdmin()}. The real close is exposed
 * privately as {@code close0()}.
 */
private static class HBaseAdminForTests extends HBaseAdmin {
public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
ZooKeeperConnectionException, IOException {
super(c);
}

@Override
public synchronized void close() throws IOException {
// Deliberately NOT closing — warn the caller instead.
LOG.warn("close() called on HBaseAdmin instance returned from HBaseTestingUtility.getHBaseAdmin()");
}

// The actual close, for this utility's own teardown.
private synchronized void close0() throws IOException {
super.close();
}
}
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
/**
 * Lazily create and return a shared ZooKeeperWatcher. Its Abortable converts
 * any abort into a RuntimeException, since an abort during a test is
 * unexpected and should fail loudly.
 * @return the shared watcher
 * @throws IOException if the watcher cannot be created
 */
public synchronized ZooKeeperWatcher getZooKeeperWatcher()
throws IOException {
if (zooKeeperWatcher == null) {
zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
new Abortable() {
@Override public void abort(String why, Throwable e) {
throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
}
@Override public boolean isAborted() {return false;}
});
}
return zooKeeperWatcher;
}
// Lazily initialized by getZooKeeperWatcher(); guarded by its synchronization.
private ZooKeeperWatcher zooKeeperWatcher;
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440 public void closeRegion(String regionName) throws IOException {
2441 closeRegion(Bytes.toBytes(regionName));
2442 }
2443
2444
2445
2446
2447
2448
2449
2450 public void closeRegion(byte[] regionName) throws IOException {
2451 getHBaseAdmin().closeRegion(regionName, null);
2452 }
2453
2454
2455
2456
2457
2458
2459
2460
2461 public void closeRegionByRow(String row, HTable table) throws IOException {
2462 closeRegionByRow(Bytes.toBytes(row), table);
2463 }
2464
2465
2466
2467
2468
2469
2470
2471
2472 public void closeRegionByRow(byte[] row, HTable table) throws IOException {
2473 HRegionLocation hrl = table.getRegionLocation(row);
2474 closeRegion(hrl.getRegionInfo().getRegionName());
2475 }
2476
2477
2478
2479
2480
2481
2482
2483
2484 public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
2485 List<HRegion> regions = getHBaseCluster().getRegions(tableName);
2486 int regCount = regions.size();
2487 Set<Integer> attempted = new HashSet<Integer>();
2488 int idx;
2489 int attempts = 0;
2490 do {
2491 regions = getHBaseCluster().getRegions(tableName);
2492 if (regCount != regions.size()) {
2493
2494 attempted.clear();
2495 }
2496 regCount = regions.size();
2497
2498
2499 if (regCount > 0) {
2500 idx = random.nextInt(regCount);
2501
2502 if (attempted.contains(idx))
2503 continue;
2504 try {
2505 regions.get(idx).checkSplit();
2506 return regions.get(idx);
2507 } catch (Exception ex) {
2508 LOG.warn("Caught exception", ex);
2509 attempted.add(idx);
2510 }
2511 }
2512 attempts++;
2513 } while (maxAttempts == -1 || attempts < maxAttempts);
2514 return null;
2515 }
2516
/**
 * Get the mini ZooKeeper cluster, whether started here or passed in via
 * {@link #setZkCluster(MiniZooKeeperCluster)}.
 * @return the ZK cluster (may be null if none is running)
 */
public MiniZooKeeperCluster getZkCluster() {
return zkCluster;
}
2520
2521 public void setZkCluster(MiniZooKeeperCluster zkCluster) {
2522 this.passedZkCluster = true;
2523 this.zkCluster = zkCluster;
2524 conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
2525 }
2526
/**
 * Get the mini DFS cluster backing this utility.
 * @return the DFS cluster (may be null if none is running)
 */
public MiniDFSCluster getDFSCluster() {
return dfsCluster;
}
2530
2531 public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
2532 if (dfsCluster != null && dfsCluster.isClusterUp()) {
2533 throw new IOException("DFSCluster is already running! Shut it down first.");
2534 }
2535 this.dfsCluster = cluster;
2536 }
2537
2538 public FileSystem getTestFileSystem() throws IOException {
2539 return HFileSystem.get(conf);
2540 }
2541
2542
2543
2544
2545
2546
2547
2548
2549 public void waitTableAvailable(byte[] table)
2550 throws InterruptedException, IOException {
2551 waitTableAvailable(getHBaseAdmin(), table, 30000);
2552 }
2553
2554 public void waitTableAvailable(HBaseAdmin admin, byte[] table)
2555 throws InterruptedException, IOException {
2556 waitTableAvailable(admin, table, 30000);
2557 }
2558
2559
2560
2561
2562
2563
2564
2565
2566 public void waitTableAvailable(byte[] table, long timeoutMillis)
2567 throws InterruptedException, IOException {
2568 waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
2569 }
2570
2571 public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
2572 throws InterruptedException, IOException {
2573 long startWait = System.currentTimeMillis();
2574 while (!admin.isTableAvailable(table)) {
2575 assertTrue("Timed out waiting for table to become available " +
2576 Bytes.toStringBinary(table),
2577 System.currentTimeMillis() - startWait < timeoutMillis);
2578 Thread.sleep(200);
2579 }
2580 }
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591 public void waitTableEnabled(byte[] table)
2592 throws InterruptedException, IOException {
2593 waitTableEnabled(getHBaseAdmin(), table, 30000);
2594 }
2595
2596 public void waitTableEnabled(HBaseAdmin admin, byte[] table)
2597 throws InterruptedException, IOException {
2598 waitTableEnabled(admin, table, 30000);
2599 }
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610 public void waitTableEnabled(byte[] table, long timeoutMillis)
2611 throws InterruptedException, IOException {
2612 waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
2613 }
2614
2615 public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
2616 throws InterruptedException, IOException {
2617 long startWait = System.currentTimeMillis();
2618 waitTableAvailable(admin, table, timeoutMillis);
2619 long remainder = System.currentTimeMillis() - startWait;
2620 while (!admin.isTableEnabled(table)) {
2621 assertTrue("Timed out waiting for table to become available and enabled " +
2622 Bytes.toStringBinary(table),
2623 System.currentTimeMillis() - remainder < timeoutMillis);
2624 Thread.sleep(200);
2625 }
2626
2627
2628
2629
2630
2631 try {
2632 Canary.sniff(admin, TableName.valueOf(table));
2633 } catch (Exception e) {
2634 throw new IOException(e);
2635 }
2636 }
2637
2638
2639
2640
2641
2642
2643
2644
2645 public boolean ensureSomeRegionServersAvailable(final int num)
2646 throws IOException {
2647 boolean startedServer = false;
2648 MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
2649 for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
2650 LOG.info("Started new server=" + hbaseCluster.startRegionServer());
2651 startedServer = true;
2652 }
2653
2654 return startedServer;
2655 }
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666 public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
2667 throws IOException {
2668 boolean startedServer = ensureSomeRegionServersAvailable(num);
2669
2670 int nonStoppedServers = 0;
2671 for (JVMClusterUtil.RegionServerThread rst :
2672 getMiniHBaseCluster().getRegionServerThreads()) {
2673
2674 HRegionServer hrs = rst.getRegionServer();
2675 if (hrs.isStopping() || hrs.isStopped()) {
2676 LOG.info("A region server is stopped or stopping:"+hrs);
2677 } else {
2678 nonStoppedServers++;
2679 }
2680 }
2681 for (int i=nonStoppedServers; i<num; ++i) {
2682 LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
2683 startedServer = true;
2684 }
2685 return startedServer;
2686 }
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698 public static User getDifferentUser(final Configuration c,
2699 final String differentiatingSuffix)
2700 throws IOException {
2701 FileSystem currentfs = FileSystem.get(c);
2702 if (!(currentfs instanceof DistributedFileSystem)) {
2703 return User.getCurrent();
2704 }
2705
2706
2707 String username = User.getCurrent().getName() +
2708 differentiatingSuffix;
2709 User user = User.createUserForTesting(c, username,
2710 new String[]{"supergroup"});
2711 return user;
2712 }
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727 public static void setMaxRecoveryErrorCount(final OutputStream stream,
2728 final int max) {
2729 try {
2730 Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
2731 for (Class<?> clazz: clazzes) {
2732 String className = clazz.getSimpleName();
2733 if (className.equals("DFSOutputStream")) {
2734 if (clazz.isInstance(stream)) {
2735 Field maxRecoveryErrorCountField =
2736 stream.getClass().getDeclaredField("maxRecoveryErrorCount");
2737 maxRecoveryErrorCountField.setAccessible(true);
2738 maxRecoveryErrorCountField.setInt(stream, max);
2739 break;
2740 }
2741 }
2742 }
2743 } catch (Exception e) {
2744 LOG.info("Could not set max recovery field", e);
2745 }
2746 }
2747
2748
2749
2750
2751
2752
2753
2754
2755
  /**
   * Waits up to 60 seconds (default timeout) until every region of
   * {@code tableName} has a server assigned in the meta table.
   * @param tableName table whose region assignments to wait on
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769 public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
2770 throws IOException {
2771 final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
2772 try {
2773 waitFor(timeout, 200, true, new Predicate<IOException>() {
2774 @Override
2775 public boolean evaluate() throws IOException {
2776 boolean allRegionsAssigned = true;
2777 Scan scan = new Scan();
2778 scan.addFamily(HConstants.CATALOG_FAMILY);
2779 ResultScanner s = meta.getScanner(scan);
2780 try {
2781 Result r;
2782 while ((r = s.next()) != null) {
2783 byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
2784 HRegionInfo info = HRegionInfo.parseFromOrNull(b);
2785 if (info != null && info.getTable().equals(tableName)) {
2786 b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
2787 allRegionsAssigned &= (b != null);
2788 }
2789 }
2790 } finally {
2791 s.close();
2792 }
2793 return allRegionsAssigned;
2794 }
2795 });
2796 } finally {
2797 meta.close();
2798 }
2799 }
2800
2801
2802
2803
2804
2805 public static List<Cell> getFromStoreFile(HStore store,
2806 Get get) throws IOException {
2807 Scan scan = new Scan(get);
2808 InternalScanner scanner = (InternalScanner) store.getScanner(scan,
2809 scan.getFamilyMap().get(store.getFamily().getName()),
2810
2811
2812 0);
2813
2814 List<Cell> result = new ArrayList<Cell>();
2815 scanner.next(result);
2816 if (!result.isEmpty()) {
2817
2818 Cell kv = result.get(0);
2819 if (!CellUtil.matchingRow(kv, get.getRow())) {
2820 result.clear();
2821 }
2822 }
2823 scanner.close();
2824 return result;
2825 }
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835 public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
2836 assertTrue(numRegions>3);
2837 byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
2838 byte [][] result = new byte[tmpSplitKeys.length+1][];
2839 for (int i=0;i<tmpSplitKeys.length;i++) {
2840 result[i+1] = tmpSplitKeys[i];
2841 }
2842 result[0] = HConstants.EMPTY_BYTE_ARRAY;
2843 return result;
2844 }
2845
2846
2847
2848
2849
2850 public static List<Cell> getFromStoreFile(HStore store,
2851 byte [] row,
2852 NavigableSet<byte[]> columns
2853 ) throws IOException {
2854 Get get = new Get(row);
2855 Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
2856 s.put(store.getFamily().getName(), columns);
2857
2858 return getFromStoreFile(store,get);
2859 }
2860
2861
2862
2863
2864
2865 public static ZooKeeperWatcher getZooKeeperWatcher(
2866 HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
2867 IOException {
2868 ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
2869 "unittest", new Abortable() {
2870 boolean aborted = false;
2871
2872 @Override
2873 public void abort(String why, Throwable e) {
2874 aborted = true;
2875 throw new RuntimeException("Fatal ZK error, why=" + why, e);
2876 }
2877
2878 @Override
2879 public boolean isAborted() {
2880 return aborted;
2881 }
2882 });
2883 return zkw;
2884 }
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
  /**
   * Drives the region's ZooKeeper assignment znode through the full
   * OFFLINE -> OPENING -> OPENED transition for the given server, as if the
   * region had been opened there. The transition order is mandated by the
   * ZK assignment protocol and must not be changed.
   * @param TEST_UTIL utility supplying the ZK connection
   * @param region region whose znode to transition
   * @param serverName server the region is "opened" on
   * @return the watcher used; caller is responsible for closing it
   */
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    // transitionNodeOpening returns the znode version, which OPENED requires
    // to guard against concurrent modification.
    int version = ZKAssign.transitionNodeOpening(zkw, region
      .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
      version);
    return zkw;
  }
2909
2910 public static void assertKVListsEqual(String additionalMsg,
2911 final List<? extends Cell> expected,
2912 final List<? extends Cell> actual) {
2913 final int eLen = expected.size();
2914 final int aLen = actual.size();
2915 final int minLen = Math.min(eLen, aLen);
2916
2917 int i;
2918 for (i = 0; i < minLen
2919 && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
2920 ++i) {}
2921
2922 if (additionalMsg == null) {
2923 additionalMsg = "";
2924 }
2925 if (!additionalMsg.isEmpty()) {
2926 additionalMsg = ". " + additionalMsg;
2927 }
2928
2929 if (eLen != aLen || i != minLen) {
2930 throw new AssertionError(
2931 "Expected and actual KV arrays differ at position " + i + ": " +
2932 safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
2933 safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
2934 }
2935 }
2936
2937 private static <T> String safeGetAsStr(List<T> lst, int i) {
2938 if (0 <= i && i < lst.size()) {
2939 return lst.get(i).toString();
2940 } else {
2941 return "<out_of_range>";
2942 }
2943 }
2944
2945 public String getClusterKey() {
2946 return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
2947 + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
2948 + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
2949 HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
2950 }
2951
2952
  /**
   * Creates a pre-split table and fills it with pseudo-random puts and
   * deletes, flushing between batches so each region accumulates multiple
   * store files. The RNG is seeded from the table name, so repeated runs with
   * the same arguments produce the same data; the exact sequence of
   * {@code rand} calls is therefore significant and must not be reordered.
   * @param tableName name of the table to create
   * @param families column family names to create
   * @param maxVersions max versions per column family
   * @param numColsPerRow columns touched per generated row
   * @param numFlushes number of load+flush rounds
   * @param numRegions number of regions to pre-split into
   * @param numRowsPerFlush rows written per round
   * @return the created, populated table
   */
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
        " regions, " + numFlushes + " storefiles per region, " +
        numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
        "\n");

    // Deterministic seed derived from the table name for reproducible data.
    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    // Rows are 8-hex-digit keys drawn uniformly from [0, Integer.MAX_VALUE).
    // Split points leave one region's worth of keyspace at each end.
    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableName, cfBytes,
        maxVersions,
        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
        numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          // Per column: ~50% put, ~40% single-version delete, ~10% delete
          // of all versions at or before ts.
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }

    return table;
  }
3033
  // Random ports are drawn from the IANA ephemeral/dynamic range
  // [0xc000, 0xfffe) = [49152, 65534) to avoid well-known service ports.
  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;
  // Shared RNG for port and multicast-address generation.
  private static Random random = new Random();
3037
3038
3039
3040
3041
3042 public static int randomPort() {
3043 return MIN_RANDOM_PORT
3044 + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
3045 }
3046
3047
3048
3049
3050
3051 public static int randomFreePort() {
3052 int port = 0;
3053 do {
3054 port = randomPort();
3055 if (takenRandomPorts.contains(port)) {
3056 continue;
3057 }
3058 takenRandomPorts.add(port);
3059
3060 try {
3061 ServerSocket sock = new ServerSocket(port);
3062 sock.close();
3063 } catch (IOException ex) {
3064 port = 0;
3065 }
3066 } while (port == 0);
3067 return port;
3068 }
3069
3070
3071 public static String randomMultiCastAddress() {
3072 return "226.1.1." + random.nextInt(254);
3073 }
3074
3075
3076
3077 public static void waitForHostPort(String host, int port)
3078 throws IOException {
3079 final int maxTimeMs = 10000;
3080 final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3081 IOException savedException = null;
3082 LOG.info("Waiting for server at " + host + ":" + port);
3083 for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3084 try {
3085 Socket sock = new Socket(InetAddress.getByName(host), port);
3086 sock.close();
3087 savedException = null;
3088 LOG.info("Server at " + host + ":" + port + " is available");
3089 break;
3090 } catch (UnknownHostException e) {
3091 throw new IOException("Failed to look up " + host, e);
3092 } catch (IOException e) {
3093 savedException = e;
3094 }
3095 Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3096 }
3097
3098 if (savedException != null) {
3099 throw savedException;
3100 }
3101 }
3102
3103
3104
3105
3106
3107
3108 public static int createPreSplitLoadTestTable(Configuration conf,
3109 TableName tableName, byte[] columnFamily, Algorithm compression,
3110 DataBlockEncoding dataBlockEncoding) throws IOException {
3111 HTableDescriptor desc = new HTableDescriptor(tableName);
3112 HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
3113 hcd.setDataBlockEncoding(dataBlockEncoding);
3114 hcd.setCompressionType(compression);
3115 return createPreSplitLoadTestTable(conf, desc, hcd);
3116 }
3117
3118
3119
3120
3121
3122
3123 public static int createPreSplitLoadTestTable(Configuration conf,
3124 HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
3125 if (!desc.hasFamily(hcd.getName())) {
3126 desc.addFamily(hcd);
3127 }
3128
3129 int totalNumberOfRegions = 0;
3130 HBaseAdmin admin = new HBaseAdmin(conf);
3131 try {
3132
3133
3134
3135 int numberOfServers = admin.getClusterStatus().getServers().size();
3136 if (numberOfServers == 0) {
3137 throw new IllegalStateException("No live regionservers");
3138 }
3139
3140 totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
3141 LOG.info("Number of live regionservers: " + numberOfServers + ", " +
3142 "pre-splitting table into " + totalNumberOfRegions + " regions " +
3143 "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");
3144
3145 byte[][] splits = new RegionSplitter.HexStringSplit().split(
3146 totalNumberOfRegions);
3147
3148 admin.createTable(desc, splits);
3149 } catch (MasterNotRunningException e) {
3150 LOG.error("Master not running", e);
3151 throw new IOException(e);
3152 } catch (TableExistsException e) {
3153 LOG.warn("Table " + desc.getTableName() +
3154 " already exists, continuing");
3155 } finally {
3156 admin.close();
3157 }
3158 return totalNumberOfRegions;
3159 }
3160
3161 public static int getMetaRSPort(Configuration conf) throws IOException {
3162 HTable table = new HTable(conf, TableName.META_TABLE_NAME);
3163 HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
3164 table.close();
3165 return hloc.getPort();
3166 }
3167
3168
3169
3170
3171
3172
3173
3174 public void assertRegionOnServer(
3175 final HRegionInfo hri, final ServerName server,
3176 final long timeout) throws IOException, InterruptedException {
3177 long timeoutTime = System.currentTimeMillis() + timeout;
3178 while (true) {
3179 List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3180 if (regions.contains(hri)) return;
3181 long now = System.currentTimeMillis();
3182 if (now > timeoutTime) break;
3183 Thread.sleep(10);
3184 }
3185 fail("Could not find region " + hri.getRegionNameAsString()
3186 + " on server " + server);
3187 }
3188
3189
3190
3191
3192
3193 public void assertRegionOnlyOnServer(
3194 final HRegionInfo hri, final ServerName server,
3195 final long timeout) throws IOException, InterruptedException {
3196 long timeoutTime = System.currentTimeMillis() + timeout;
3197 while (true) {
3198 List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3199 if (regions.contains(hri)) {
3200 List<JVMClusterUtil.RegionServerThread> rsThreads =
3201 getHBaseCluster().getLiveRegionServerThreads();
3202 for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
3203 HRegionServer rs = rsThread.getRegionServer();
3204 if (server.equals(rs.getServerName())) {
3205 continue;
3206 }
3207 Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
3208 for (HRegion r: hrs) {
3209 assertTrue("Region should not be double assigned",
3210 r.getRegionId() != hri.getRegionId());
3211 }
3212 }
3213 return;
3214 }
3215 long now = System.currentTimeMillis();
3216 if (now > timeoutTime) break;
3217 Thread.sleep(10);
3218 }
3219 fail("Could not find region " + hri.getRegionNameAsString()
3220 + " on server " + server);
3221 }
3222
3223 public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
3224 throws IOException {
3225 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
3226 htd.addFamily(hcd);
3227 HRegionInfo info =
3228 new HRegionInfo(TableName.valueOf(tableName), null, null, false);
3229 HRegion region =
3230 HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
3231 return region;
3232 }
3233
  /**
   * Overrides the filesystem URI this utility uses.
   * @param fsURI new filesystem URI
   */
  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }
3237
3238
3239
3240
  /**
   * Waits up to {@code timeout} ms for {@code predicate} to evaluate true,
   * delegating to {@link Waiter#waitFor} with this utility's configuration.
   * @return time waited, per {@link Waiter#waitFor} semantics
   */
  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, predicate);
  }
3245
3246
3247
3248
  /**
   * Waits up to {@code timeout} ms, re-checking every {@code interval} ms,
   * for {@code predicate} to evaluate true.
   * @return time waited, per {@link Waiter#waitFor} semantics
   */
  public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, interval, predicate);
  }
3253
3254
3255
3256
  /**
   * Waits up to {@code timeout} ms, re-checking every {@code interval} ms,
   * for {@code predicate} to evaluate true; {@code failIfTimeout} controls
   * whether a timeout fails the test or merely returns.
   * @return time waited, per {@link Waiter#waitFor} semantics
   */
  public <E extends Exception> long waitFor(long timeout, long interval,
      boolean failIfTimeout, Predicate<E> predicate) throws E {
    return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
  }
3261
3262
3263
3264
  /**
   * Returns a predicate (for use with the {@code waitFor} overloads) that is
   * true when the master's assignment manager reports no regions in
   * transition.
   */
  public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
            .getAssignmentManager().getRegionStates();
        return !regionStates.isRegionsInTransition();
      }
    };
  }
3275
3276
3277
3278
  /**
   * Returns a predicate (for use with the {@code waitFor} overloads) that is
   * true when the given table is reported enabled by the admin.
   * @param tableName table whose enabled state to check
   */
  public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return getHBaseAdmin().isTableEnabled(tableName);
      }
    };
  }
3287
3288
3289
3290
3291
3292
  /**
   * Generates column descriptors covering every combination of supported
   * compression, data-block encoding, and bloom type, with no name prefix.
   * @return list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }
3296
3297
3298
3299
3300
3301
3302
3303 public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
3304 List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
3305 long familyId = 0;
3306 for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
3307 for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
3308 for (BloomType bloomType: BloomType.values()) {
3309 String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
3310 HColumnDescriptor htd = new HColumnDescriptor(name);
3311 htd.setCompressionType(compressionType);
3312 htd.setDataBlockEncoding(encodingType);
3313 htd.setBloomFilterType(bloomType);
3314 htds.add(htd);
3315 familyId++;
3316 }
3317 }
3318 }
3319 return htds;
3320 }
3321
3322
3323
3324
3325
3326 public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
3327 String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
3328 List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
3329 for (String algoName : allAlgos) {
3330 try {
3331 Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
3332 algo.getCompressor();
3333 supportedAlgos.add(algo);
3334 } catch (Throwable t) {
3335
3336 }
3337 }
3338 return supportedAlgos.toArray(new Compression.Algorithm[0]);
3339 }
3340 }