package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.tool.Canary;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;

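/**
 * Facility for testing HBase. Stands up in-process HBase, ZooKeeper, DFS and
 * MapReduce miniclusters and hands out {@link Configuration} instances wired
 * to talk to them. Test data is written under a directory derived from the
 * "test.build.data" system property, so parallel test runs do not collide.
 */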
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HBaseTestingUtility extends HBaseCommonTestingUtility {
  private MiniZooKeeperCluster zkCluster = null;

  private static int DEFAULT_REGIONS_PER_SERVER = 5;

  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  private boolean miniClusterRunning;

  private String hadoopLogDir;

  private File clusterTestDir = null;

  private Path dataTestDirOnTestFS = null;

  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  private static String FS_URI;

  private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();

  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Boolean.FALSE },
      { Boolean.TRUE }
    });

  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();

  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
    Compression.Algorithm.NONE, Compression.Algorithm.GZ
  };

  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
    bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    super(conf);
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }

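  /**
   * Creates an instance for running tests against a local (standalone)
   * HBase: points {@link HConstants#HBASE_DIR} at this utility's data test
   * directory.
   */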
  public static HBaseTestingUtility createLocalHTU() {
    Configuration c = HBaseConfiguration.create();
    return createLocalHTU(c);
  }

  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    HBaseTestingUtility htu = new HBaseTestingUtility(c);
    String dataTestDir = htu.getDataTestDir().toString();
    htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
    return htu;
  }

  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

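  /**
   * Homes the test data directory and redirects the Hadoop-side properties
   * (hadoop.log.dir, hadoop.tmp.dir, mapred.local.dir) beneath it so the
   * miniclusters write under the test directory rather than /tmp.
   */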
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    createSubDir(
      "mapred.local.dir",
      testPath, "mapred-local-dir");

    return testPath;
  }

  private void createSubDirAndSystemProperty(
    String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          ". Overriding the configuration value with the system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

  Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      return;
    }

    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();

    boolean b = deleteOnExit();
    if (b) clusterTestDir.deleteOnExit();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
  }

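  /**
   * @return a unique path in the test filesystem to write test data to;
   * set up lazily on first call.
   */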
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test on test fs dir already setup in "
        + dataTestDirOnTestFS.toString());
      return;
    }

    FileSystem fs = getTestFileSystem();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      File dataTestDir = new File(getDataTestDir().toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
      dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
    } else {
      Path base = getBaseTestDirOnTestFS();
      String randomStr = UUID.randomUUID().toString();
      dataTestDirOnTestFS = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
    }
  }

  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret)
      dataTestDirOnTestFS = null;
    return ret;
  }

  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }

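  /**
   * Start a minidfscluster.
   * @param servers how many DataNodes to start.
   * @see #shutdownMiniDFSCluster()
   */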
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
  throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
  throws Exception {
    createDirsAndSetProperties();

    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
      setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
      setLevel(org.apache.log4j.Level.ERROR);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    this.dfsCluster.waitClusterUp();

    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, racks, hosts, null);

    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    this.dfsCluster.waitClusterUp();

    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
      null, null, null);
    return dfsCluster;
  }

  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapred.local.dir");
    createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
  }

  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }

  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      conf.set("dfs.block.local-path-access.user", curUser);
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }

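  /**
   * Start a mini ZooKeeper cluster backed by the cluster test directory.
   * This utility owns the cluster and shuts it down in
   * {@link #shutdownMiniZKCluster()}.
   */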
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
  throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
  throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
    int zooKeeperServerNum)
  throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0) {
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }

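  /**
   * Start up a minicluster of HBase, DFS and ZooKeeper with one master and
   * one regionserver. The slave count is used for both regionservers and
   * datanodes.
   * @see #shutdownMiniCluster()
   */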
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  public MiniHBaseCluster startMiniCluster(final int numSlaves)
  throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves)
  throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
  throws Exception {
    return startMiniCluster(
      numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
    Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
  throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
      numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
  }

  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
  throws IOException, InterruptedException {
    return startMiniHBaseCluster(numMasters, numSlaves, null, null);
  }

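  /**
   * Starts up the HBase portion only; DFS and ZooKeeper must already be
   * running (usually via {@link #startMiniCluster()}). Creates the hbase
   * root dir and waits until hbase:meta is scannable before returning.
   */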
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
    final int numSlaves, Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
  throws IOException, InterruptedException {
    createRootDir();

    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster =
      new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);

    HTable t = new HTable(c, TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin();
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster)this.hbaseCluster;
  }

  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);

    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster)this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
      MiniHBaseCluster.class.getName());
  }

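  /**
   * Stops the mini hbase, zk, and hdfs clusters, in that order, then deletes
   * the test directory. ZooKeeper is left running if it was passed in from
   * outside.
   */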
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }

  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }

  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }

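  /**
   * Creates the hbase rootdir in the test filesystem and stamps it with an
   * hbase.version file, as the master would on first start.
   * @return the full path to the hbase root dir.
   */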
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

  public HTable createTable(String tableName, String family)
  throws IOException {
    return createTable(TableName.valueOf(tableName), new String[]{family});
  }

  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException {
    return createTable(TableName.valueOf(tableName), new byte[][]{family});
  }

  public HTable createTable(TableName tableName, String[] families)
  throws IOException {
    List<byte[]> fams = new ArrayList<byte[]>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  public HTable createTable(TableName tableName, byte[] family)
  throws IOException {
    return createTable(tableName, new byte[][]{family});
  }

  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
      new Configuration(getConfiguration()));
  }

  public HTable createTable(TableName tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
      new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
      startKey, endKey, numRegions);
  }

  public HTable createTable(String tableName, byte[][] families,
    int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
      startKey, endKey, numRegions);
  }

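  /**
   * Creates a pre-split table: numRegions regions with boundaries spread
   * between startKey and endKey, blocking until all regions are assigned
   * before returning the HTable.
   */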
  public HTable createTable(TableName tableName, byte[][] families,
    int numVersions, byte[] startKey, byte[] endKey, int numRegions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
  throws IOException {
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setBloomFilterType(BloomType.NONE);
      htd.addFamily(hcd);
    }
    getHBaseAdmin().createTable(htd);
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(c, htd.getTableName());
  }

  public HTable createTable(TableName tableName, byte[][] families,
    final Configuration c)
  throws IOException {
    return createTable(new HTableDescriptor(tableName), families, c);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    final Configuration c)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setBloomFilterType(BloomType.NONE);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  public HTable createTable(TableName tableName, byte[][] families,
    final Configuration c, int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    final Configuration c, int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  public HTable createTable(TableName tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions)
  throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
    int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions, int blockSize) throws IOException {
    return createTable(TableName.valueOf(tableName),
      families, numVersions, blockSize);
  }

  public HTable createTable(TableName tableName, byte[][] families,
    int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions)
        .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    int[] numVersions)
  throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }

  public HTable createTable(TableName tableName, byte[][] families,
    int[] numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
  throws IOException {
    return createTable(TableName.valueOf(tableName), family, splitRows);
  }

  public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    desc.addFamily(hcd);
    getHBaseAdmin().createTable(desc, splitRows);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, splitRows);
    waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
    return new HTable(getConfiguration(), tableName);
  }

  public void deleteTable(String tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  public void deleteTable(byte[] tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }

  public void deleteTable(TableName tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }

  public final static byte [] fam1 = Bytes.toBytes("colfamily11");
  public final static byte [] fam2 = Bytes.toBytes("colfamily21");
  public final static byte [] fam3 = Bytes.toBytes("colfamily31");
  public static final byte[][] COLUMNS = {fam1, fam2, fam3};
  private static final int MAXVERSIONS = 3;

  public static final char FIRST_CHAR = 'a';
  public static final char LAST_CHAR = 'z';
  public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);

  public HTableDescriptor createTableDescriptor(final String name,
    final int minVersions, final int versions, final int ttl, boolean keepDeleted) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
    for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
      htd.addFamily(new HColumnDescriptor(cfName)
        .setMinVersions(minVersions)
        .setMaxVersions(versions)
        .setKeepDeletedCells(keepDeleted)
        .setBlockCacheEnabled(false)
        .setTimeToLive(ttl)
      );
    }
    return htd;
  }

  public HTableDescriptor createTableDescriptor(final String name) {
    return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
      MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
  }

  public HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey,
      byte [] endKey)
  throws IOException {
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
    return createLocalHRegion(hri, desc);
  }

  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
  }

  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog)
      throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
  }

  public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
      String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
      HLog hlog, byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.setReadOnly(isReadOnly);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setMaxVersions(Integer.MAX_VALUE);
      htd.addFamily(hcd);
    }
    htd.setDurability(durability);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    return createLocalHRegion(info, htd, hlog);
  }

  public HTable truncateTable(byte[] tableName) throws IOException {
    return truncateTable(TableName.valueOf(tableName));
  }

  public HTable truncateTable(TableName tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan = table.getScanner(scan);
    resScan.close();
    return table;
  }

  public int loadTable(final HTable t, final byte[] f) throws IOException {
    return loadTable(t, new byte[][] {f});
  }

  public int loadTable(final HTable t, final byte[] f, boolean writeToWAL) throws IOException {
    return loadTable(t, new byte[][] {f}, null, writeToWAL);
  }

  public int loadTable(final HTable t, final byte[][] f) throws IOException {
    return loadTable(t, f, null);
  }

  public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
    return loadTable(t, f, value, true);
  }

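  /**
   * Load a table of multiple column families with rows from 'aaa' to 'zzz',
   * one cell per family per row; the value defaults to the row key.
   * @return count of rows loaded.
   */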
  public int loadTable(final HTable t, final byte[][] f, byte[] value, boolean writeToWAL)
      throws IOException {
    t.setAutoFlush(false);
    int rowCount = 0;
    for (byte[] row : HBaseTestingUtility.ROWS) {
      Put put = new Put(row);
      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
      for (int i = 0; i < f.length; i++) {
        put.add(f[i], null, value != null ? value : row);
      }
      t.put(put);
      rowCount++;
    }
    t.flushCommits();
    return rowCount;
  }

  public static class SeenRowTracker {
    int dim = 'z' - 'a' + 1;
    int[][][] seenRows = new int[dim][dim][dim];
    byte[] startRow;
    byte[] stopRow;

    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
      this.startRow = startRow;
      this.stopRow = stopRow;
    }

    void reset() {
      for (byte[] row : ROWS) {
        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
      }
    }

    int i(byte b) {
      return b - 'a';
    }

    public void addRow(byte[] row) {
      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
    }

    public void validate() {
      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
            int count = seenRows[i(b1)][i(b2)][i(b3)];
            int expectedCount = 0;
            if (Bytes.compareTo(new byte[] {b1, b2, b3}, startRow) >= 0
                && Bytes.compareTo(new byte[] {b1, b2, b3}, stopRow) < 0) {
              expectedCount = 1;
            }
            if (count != expectedCount) {
              String row = new String(new byte[] {b1, b2, b3});
              throw new RuntimeException("Row:" + row + " has a seen count of " + count +
                " instead of " + expectedCount);
            }
          }
        }
      }
    }
  }

  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
    return loadRegion(r, f, false);
  }

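  /**
   * Load a region with rows from 'aaa' to 'zzz', backing off and retrying
   * when the region is too busy; optionally flushes after each first-byte
   * batch of 26*26 rows.
   * @return count of rows loaded.
   */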
  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
  throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.setDurability(Durability.SKIP_WAL);
          put.add(f, null, k);

          int preRowCount = rowCount;
          int pause = 10;
          int maxPause = 1000;
          while (rowCount == preRowCount) {
            try {
              r.put(put);
              rowCount++;
            } catch (RegionTooBusyException e) {
              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
              Threads.sleep(pause);
            }
          }
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }

  public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow)
      throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Put put = new Put(data);
      put.add(f, null, data);
      t.put(put);
    }
  }

  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  public int countRows(final HTable table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    return Bytes.toStringBinary(digest.digest());
  }

  public int createMultiRegions(HTable table, byte[] columnFamily)
  throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily);
  }

  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3];
  static {
    int i = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          ROWS[i][0] = b1;
          ROWS[i][1] = b2;
          ROWS[i][2] = b3;
          i++;
        }
      }
    }
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };

  public int createMultiRegions(final Configuration c, final HTable table,
    final byte[] columnFamily)
  throws IOException {
    return createMultiRegions(c, table, columnFamily, KEYS);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
    final byte [] family, int numRegions)
  throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte [] startKey = Bytes.toBytes("aaaaa");
    byte [] endKey = Bytes.toBytes("zzzzz");
    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] regionStartKeys = new byte[splitKeys.length + 1][];
    for (int i = 0; i < splitKeys.length; i++) {
      regionStartKeys[i + 1] = splitKeys[i];
    }
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }

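  /**
   * Inserts hbase:meta rows carving the table into one region per start key,
   * deletes the table's original single region (in meta and on the
   * filesystem), then asks the master to assign the new regions.
   * @return number of regions created.
   */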
  @SuppressWarnings("deprecation")
  public int createMultiRegions(final Configuration c, final HTable table,
    final byte[] columnFamily, byte [][] startKeys)
  throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, TableName.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }

    List<byte[]> rows = getMetaTableRows(htd.getTableName());
    String regionToDeleteInFS = table
      .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
      .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getName(),
        startKeys[i], startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
      count++;
    }

    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }

    Path tableDir = new Path(getDefaultRootDirPath().toString()
      + System.getProperty("file.separator") + htd.getTableName()
      + System.getProperty("file.separator") + regionToDeleteInFS);
    FileSystem.get(c).delete(tableDir);

    HConnection conn = table.getConnection();
    conn.clearRegionCache();

    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for (HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }

  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
    final HTableDescriptor htd, byte [][] startKeys)
  throws IOException {
    HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
        startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

  public List<byte[]> getMetaTableRows() throws IOException {
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      HRegionInfo info = HRegionInfo.getHRegionInfo(result);
      if (info == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        continue;
      }

      if (info.getTable().equals(tableName)) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
  throws IOException, InterruptedException {
    return getRSForFirstRegionInTable(TableName.valueOf(tableName));
  }

  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
  throws IOException, InterruptedException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + metaRows.size() + " rows for table " +
      tableName);
    byte [] firstrow = metaRows.get(0);
    LOG.debug("FirstRow=" + Bytes.toString(firstrow));
    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    RetryCounter retrier = new RetryCounter(numRetries + 1, (int)pause, TimeUnit.MILLISECONDS);
2106 while(retrier.shouldRetry()) {
2107 int index = getMiniHBaseCluster().getServerWith(firstrow);
2108 if (index != -1) {
2109 return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2110 }
2111
2112 retrier.sleepUntilNextRetry();
2113 }
2114 return null;
2115 }
2116
2117
2118
2119
2120
2121
2122
2123 public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2124 startMiniMapReduceCluster(2);
2125 return mrCluster;
2126 }
2127
2128
2129
2130
2131
2132 private void forceChangeTaskLogDir() {
2133 Field logDirField;
2134 try {
2135 logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2136 logDirField.setAccessible(true);
2137
2138 Field modifiersField = Field.class.getDeclaredField("modifiers");
2139 modifiersField.setAccessible(true);
2140 modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2141
2142 logDirField.set(null, new File(hadoopLogDir, "userlogs"));
2143 } catch (SecurityException e) {
2144 throw new RuntimeException(e);
2145 } catch (NoSuchFieldException e) {
2146
2147 throw new RuntimeException(e);
2148 } catch (IllegalArgumentException e) {
2149 throw new RuntimeException(e);
2150 } catch (IllegalAccessException e) {
2151 throw new RuntimeException(e);
2152 }
2153 }
2154
2155
2156
2157
2158
2159
2160
2161 private void startMiniMapReduceCluster(final int servers) throws IOException {
2162 if (mrCluster != null) {
2163 throw new IllegalStateException("MiniMRCluster is already running");
2164 }
2165 LOG.info("Starting mini mapreduce cluster...");
2166 setupClusterTestDir();
2167 createDirsAndSetProperties();
2168
2169 forceChangeTaskLogDir();
2170
2171
2172
2173
2174 conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2175
2176
2177
2178 conf.setBoolean("mapreduce.map.speculative", false);
2179 conf.setBoolean("mapreduce.reduce.speculative", false);
2180
2181
2182
2183 mrCluster = new MiniMRCluster(servers,
2184 FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
2185 null, null, new JobConf(this.conf));
2186 JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2187 if (jobConf == null) {
2188 jobConf = mrCluster.createJobConf();
2189 }
2190
2191 jobConf.set("mapred.local.dir",
2192 conf.get("mapred.local.dir"));
2193 LOG.info("Mini mapreduce cluster started");
2194
2195
2196
2197
2198 conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
2199
2200 conf.set("mapreduce.framework.name", "yarn");
2201 conf.setBoolean("yarn.is.minicluster", true);
2202 String rmAddress = jobConf.get("yarn.resourcemanager.address");
2203 if (rmAddress != null) {
2204 conf.set("yarn.resourcemanager.address", rmAddress);
2205 }
2206 String historyAddress = jobConf.get("mapreduce.jobhistory.address");
2207 if (historyAddress != null) {
2208 conf.set("mapreduce.jobhistory.address", historyAddress);
2209 }
2210 String schedulerAddress =
2211 jobConf.get("yarn.resourcemanager.scheduler.address");
2212 if (schedulerAddress != null) {
2213 conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2214 }
2215 }
2216
2217
2218
2219
2220 public void shutdownMiniMapReduceCluster() {
2221 LOG.info("Stopping mini mapreduce cluster...");
2222 if (mrCluster != null) {
2223 mrCluster.shutdown();
2224 mrCluster = null;
2225 }
2226
2227 conf.set("mapred.job.tracker", "local");
2228 LOG.info("Mini mapreduce cluster stopped");
2229 }
2230
2231
2232
2233
2234 public RegionServerServices createMockRegionServerService() throws IOException {
2235 return createMockRegionServerService((ServerName)null);
2236 }
2237
2238
2239
2240
2241
2242 public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
2243 final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2244 rss.setFileSystem(getTestFileSystem());
2245 rss.setRpcServer(rpc);
2246 return rss;
2247 }
2248
2249
2250
2251
2252
2253 public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2254 final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2255 rss.setFileSystem(getTestFileSystem());
2256 return rss;
2257 }
2258
2259
2260
2261
2262
2263
2264 public void enableDebug(Class<?> clazz) {
2265 Log l = LogFactory.getLog(clazz);
2266 if (l instanceof Log4JLogger) {
2267 ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2268 } else if (l instanceof Jdk14Logger) {
2269 ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2270 }
2271 }

  /**
   * Expire the Master's ZooKeeper session.
   * @throws Exception
   */
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }

  /**
   * Expire a region server's ZooKeeper session.
   * @param index which region server
   * @throws Exception
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    // Decrement the count in this.conf; the shared cluster configuration
    // reads from it too.
    decrementMinRegionServerCount(getConfiguration());

    // Each master thread keeps its own copy of the configuration.
    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
          Math.max(currentCount - 1, 1));
    }
  }

  public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
  }

  /**
   * @deprecated The <code>server</code> parameter is ignored; use
   *   {@link #expireSession(ZooKeeperWatcher, boolean)} instead.
   */
  @Deprecated
  public void expireSession(ZooKeeperWatcher nodeZK, Server server)
      throws Exception {
    expireSession(nodeZK, false);
  }

  /**
   * Expire a ZooKeeper session as recommended in the ZooKeeper documentation:
   * open a second client with the same session id and password, then close
   * it, which invalidates the session for the original client. Expiration is
   * asynchronous on the server side, so a monitor connection sharing the
   * session is kept open to observe the expiration event.
   *
   * @param nodeZK the watcher whose session should be expired
   * @param checkStatus if true, checks afterwards that an HTable against
   *          hbase:meta can still be created with the current configuration
   */
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
      throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // A "monitor" connection sharing the same session: it receives the
    // session events too, which makes the expiration visible in the test log.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher(){
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event=" + watchedEvent);
      }
    } , sessionID, password);

    // The second client on the same session; closing it expires the session.
    ZooKeeper newZK = new ZooKeeper(quorumServers,
      1000, EmptyWatcher.instance, sessionID, password);

    // Wait (up to a second) for the second client to actually connect;
    // the session is only expired once it has.
    long start = System.currentTimeMillis();
    while (newZK.getState() != States.CONNECTED
        && System.currentTimeMillis() - start < 1000) {
      Thread.sleep(1);
    }
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // The monitor's session is gone as well; release its resources.
    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
    }
  }
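
  // Illustrative usage (hypothetical test code, not part of the original
  // class): a failover test can kill the active master's session and then
  // wait for the cluster to settle:
  //
  //   TEST_UTIL.expireMasterSession();
  //   TEST_UTIL.waitFor(60000, TEST_UTIL.predicateNoRegionsInTransition());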

  /**
   * Get the mini HBase cluster.
   * @return the MiniHBaseCluster started by this utility
   * @see #getHBaseClusterInterface()
   */
  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

  /**
   * Returns the HBaseCluster instance. The returned object can be any
   * HBaseCluster subclass, so tests using this method should not assume a
   * mini cluster and should stick to the interface methods.
   * @see #getHBaseCluster()
   */
  public HBaseCluster getHBaseClusterInterface() {
    // Unlike getHBaseCluster(), this may also refer to a distributed cluster
    // when tests are run against a real deployment.
    return hbaseCluster;
  }

  /**
   * Returns an HBaseAdmin instance shared by all users of this testing
   * utility. Closing it has no effect; it is closed automatically when the
   * cluster shuts down.
   *
   * @return the shared HBaseAdmin instance
   * @throws IOException
   */
  public synchronized HBaseAdmin getHBaseAdmin()
      throws IOException {
    if (hbaseAdmin == null){
      hbaseAdmin = new HBaseAdminForTests(getConfiguration());
    }
    return hbaseAdmin;
  }

  private HBaseAdminForTests hbaseAdmin = null;
  private static class HBaseAdminForTests extends HBaseAdmin {
    public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
        ZooKeeperConnectionException, IOException {
      super(c);
    }

    @Override
    public synchronized void close() throws IOException {
      LOG.warn("close() called on HBaseAdmin instance returned from " +
        "HBaseTestingUtility.getHBaseAdmin(); it is a no-op");
    }

    private synchronized void close0() throws IOException {
      super.close();
    }
  }
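
  // Note (illustrative guidance, not from the original source): because
  // close() on the shared admin is deliberately a no-op, calling it from a
  // test's teardown is harmless; the real close happens through close0(),
  // presumably invoked when the testing utility shuts the cluster down.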

  /**
   * Returns a ZooKeeperWatcher instance shared by all users of this testing
   * utility. Do not close it; it is closed automatically when the cluster
   * shuts down.
   *
   * @return the shared ZooKeeperWatcher instance
   * @throws IOException
   */
  public synchronized ZooKeeperWatcher getZooKeeperWatcher()
      throws IOException {
    if (zooKeeperWatcher == null) {
      zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
        new Abortable() {
          @Override public void abort(String why, Throwable e) {
            throw new RuntimeException("Unexpected abort in HBaseTestingUtility:" + why, e);
          }
          @Override public boolean isAborted() {return false;}
        });
    }
    return zooKeeperWatcher;
  }
  private ZooKeeperWatcher zooKeeperWatcher;

  /**
   * Unassign the named region.
   *
   * @param regionName the region to unassign
   */
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  /**
   * Unassign the named region.
   *
   * @param regionName the region to unassign
   */
  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }

  /**
   * Close the region containing the given row.
   *
   * @param row the row whose hosting region should be closed
   * @param table the table the row belongs to
   */
  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  /**
   * Close the region containing the given row.
   *
   * @param row the row whose hosting region should be closed
   * @param table the table the row belongs to
   */
  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

  /**
   * Retrieves a splittable region randomly from the given table.
   *
   * @param tableName name of the table
   * @param maxAttempts maximum number of attempts, unlimited for -1
   * @return the chosen HRegion, or null if none was found within maxAttempts
   */
  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
    int regCount = regions.size();
    Set<Integer> attempted = new HashSet<Integer>();
    int idx;
    int attempts = 0;
    do {
      regions = getHBaseCluster().getRegions(tableName);
      if (regCount != regions.size()) {
        // If there was region movement, clear the attempted set.
        attempted.clear();
      }
      regCount = regions.size();
      // A region may be going for CLOSE between listing and checkSplit (for
      // example with online schema change enabled); tolerate that below.
      if (regCount > 0) {
        idx = random.nextInt(regCount);
        // If we have just tried this region, pick another one, and count the
        // re-draw as an attempt so a bounded maxAttempts cannot spin forever.
        if (attempted.contains(idx)) {
          attempts++;
          continue;
        }
        try {
          regions.get(idx).checkSplit();
          return regions.get(idx);
        } catch (Exception ex) {
          LOG.warn("Caught exception", ex);
          attempted.add(idx);
        }
      }
      attempts++;
    } while (maxAttempts == -1 || attempts < maxAttempts);
    return null;
  }
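
  // Illustrative usage (hypothetical, not from the original source): force a
  // split during a test, giving up after a bounded number of tries:
  //
  //   HRegion region = TEST_UTIL.getSplittableRegion(tableName, 10);
  //   if (region != null) {
  //     TEST_UTIL.getHBaseAdmin().split(region.getRegionName());
  //   }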

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
    if (dfsCluster != null && dfsCluster.isClusterUp()) {
      throw new IOException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }

  /**
   * Wait until all regions in a table have been assigned. Waits the default
   * timeout (30 seconds) before giving up.
   *
   * @param table table to wait on
   * @throws InterruptedException
   * @throws IOException
   */
  public void waitTableAvailable(byte[] table)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, 30000);
  }

  public void waitTableAvailable(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableAvailable(admin, table, 30000);
  }

  /**
   * Wait until all regions in a table have been assigned.
   *
   * @param table table to wait on
   * @param timeoutMillis how long to wait before failing the assertion
   */
  public void waitTableAvailable(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!admin.isTableAvailable(table)) {
      assertTrue("Timed out waiting for table to become available " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }
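
  // Typical usage (illustrative; desc and splits are assumed to come from
  // the calling test): block after table creation until every region can be
  // located before issuing reads:
  //
  //   admin.createTable(desc, splits);
  //   TEST_UTIL.waitTableAvailable(desc.getName(), 60000);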

  /**
   * Waits for a table to be 'enabled': the table must be marked enabled and
   * all of its regions assigned. Waits the default timeout (30 seconds)
   * before giving up.
   *
   * @param table table to wait on
   * @throws InterruptedException
   * @throws IOException
   */
  public void waitTableEnabled(byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, 30000);
  }

  public void waitTableEnabled(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(admin, table, 30000);
  }

  /**
   * Waits for a table to be 'enabled': the table must be marked enabled and
   * all of its regions assigned.
   *
   * @param table table to wait on
   * @param timeoutMillis how long to wait, shared across the availability
   *          and enabled checks
   */
  public void waitTableEnabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
  }

  public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    waitTableAvailable(admin, table, timeoutMillis);
    while (!admin.isTableEnabled(table)) {
      // Give up once the overall timeout has elapsed.
      assertTrue("Timed out waiting for table to become available and enabled " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
    // Even an enabled table may have regions that are not fully online yet;
    // sniff the table with the Canary tool to make sure every region can
    // actually serve a read before returning.
    try {
      Canary.sniff(admin, TableName.valueOf(table));
    } catch (Exception e) {
      throw new IOException(e);
    }
  }

  /**
   * Make sure that at least the specified number of region servers are
   * running.
   *
   * @param num minimum number of region servers that should be running
   * @return true if we started any servers
   * @throws IOException
   */
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }

    return startedServer;
  }

  /**
   * Make sure that at least the specified number of region servers are
   * running. Servers that are currently stopping or stopped are not counted.
   *
   * @param num minimum number of region servers that should be running
   * @return true if we started any servers
   * @throws IOException
   */
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
        getMiniHBaseCluster().getRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:" + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i=nonStoppedServers; i<num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }

  /**
   * Returns a User that differs from the current one by the given suffix.
   * Useful for getting a separate FileSystem (and so a separate DFSClient)
   * instance per daemon in tests. Only meaningful against a
   * DistributedFileSystem; for any other filesystem the current user is
   * returned unchanged.
   *
   * @param c configuration to consult for the filesystem
   * @param differentiatingSuffix suffix to append to the current user name
   * @return a test user distinct from the current one
   * @throws IOException
   */
  public static User getDifferentUser(final Configuration c,
      final String differentiatingSuffix)
      throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    // Else we are on a DistributedFileSystem: craft a new user name so each
    // caller gets its own DFSClient rather than a cached one.
    String username = User.getCurrent().getName() +
        differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
        new String[]{"supergroup"});
    return user;
  }
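
  // Illustrative usage (hypothetical): run filesystem operations as the new
  // user so HDFS hands out a distinct client instance:
  //
  //   User user = getDifferentUser(conf, ".hrs");
  //   user.runAs(new PrivilegedExceptionAction<Void>() {
  //     public Void run() throws Exception {
  //       FileSystem fs = FileSystem.get(conf);
  //       // ... exercise fs as the second user ...
  //       return null;
  //     }
  //   });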

  /**
   * Set the <code>maxRecoveryErrorCount</code> field on a
   * DFSClient.DFSOutputStream via reflection. The field is package-private
   * and hard-coded, which can make tests linger through repeated pipeline
   * recovery attempts; lowering it lets tests fail fast. If the field cannot
   * be found or set, the failure is logged and ignored.
   *
   * @param stream a DFSClient.DFSOutputStream instance
   * @param max the new maximum recovery error count
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz: clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }

  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server entry, i.e. all regions have been deployed and the master
   * has recorded their locations. Uses the default timeout of 60 seconds.
   *
   * @param tableName the table to wait on
   * @throws IOException
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }

  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server entry, i.e. all regions have been deployed and the master
   * has recorded their locations.
   *
   * @param tableName the table to wait on
   * @param timeout timeout in milliseconds
   * @throws IOException
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
      throws IOException {
    final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
    try {
      waitFor(timeout, 200, true, new Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          boolean allRegionsAssigned = true;
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          ResultScanner s = meta.getScanner(scan);
          try {
            Result r;
            while ((r = s.next()) != null) {
              byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
              HRegionInfo info = HRegionInfo.parseFromOrNull(b);
              if (info != null && info.getTable().equals(tableName)) {
                b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                allRegionsAssigned &= (b != null);
              }
            }
          } finally {
            s.close();
          }
          return allRegionsAssigned;
        }
      });
    } finally {
      meta.close();
    }
  }
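
  // Illustrative usage (hypothetical test code): after re-enabling a table,
  // wait until the master has recorded a hosting server for every region
  // before scanning it:
  //
  //   admin.enableTable(tableName);
  //   TEST_UTIL.waitUntilAllRegionsAssigned(tableName);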

  /**
   * Do a small get/scan against one store. This is required because the
   * store has no actual methods for querying itself, and relies on
   * StoreScanner.
   */
  public static List<Cell> getFromStoreFile(HStore store,
      Get get) throws IOException {
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()),
        // pass an explicit read point of 0
        0);

    List<Cell> result = new ArrayList<Cell>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // Verify that we are on the row we want.
      Cell kv = result.get(0);
      if (!CellUtil.matchingRow(kv, get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }

  /**
   * Create start keys for <code>numRegions</code> regions spanning
   * [startKey, endKey]. The first start key is always the empty byte array,
   * so the returned array covers the whole key space. Requires
   * numRegions &gt; 3.
   *
   * @return array of region start keys, of length numRegions
   */
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
    assertTrue("numRegions must be greater than 3", numRegions > 3);
    byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] result = new byte[tmpSplitKeys.length+1][];
    for (int i=0;i<tmpSplitKeys.length;i++) {
      result[i+1] = tmpSplitKeys[i];
    }
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }
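
  // Worked example (illustrative): with startKey "aaa", endKey "zzz" and
  // numRegions = 5, Bytes.split(startKey, endKey, 2) yields
  // { "aaa", k1, k2, "zzz" } (two computed intermediate keys plus the
  // endpoints), so the method returns the five region start keys
  // { EMPTY, "aaa", k1, k2, "zzz" }.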

  /**
   * Do a small get/scan against one store. This is required because the
   * store has no actual methods for querying itself, and relies on
   * StoreScanner.
   */
  public static List<Cell> getFromStoreFile(HStore store,
      byte [] row,
      NavigableSet<byte[]> columns
      ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store,get);
  }

  /**
   * Gets a ZooKeeperWatcher backed by an Abortable that turns aborts into
   * RuntimeExceptions, so that ZK problems fail the test loudly.
   *
   * @param TEST_UTIL the testing utility to take the configuration from
   */
  public static ZooKeeperWatcher getZooKeeperWatcher(
      HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
      IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
        "unittest", new Abortable() {
          boolean aborted = false;

          @Override
          public void abort(String why, Throwable e) {
            aborted = true;
            throw new RuntimeException("Fatal ZK error, why=" + why, e);
          }

          @Override
          public boolean isAborted() {
            return aborted;
          }
        });
    return zkw;
  }

  /**
   * Creates an unassigned znode for the given region and transitions it
   * through OFFLINE and OPENING to the OPENED state, mimicking what a region
   * server reports to the master during a region open.
   *
   * @return the ZooKeeperWatcher used to create the node
   */
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw, region
        .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
        version);
    return zkw;
  }

  public static void assertKVListsEqual(String additionalMsg,
      final List<? extends Cell> expected,
      final List<? extends Cell> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i;
    for (i = 0; i < minLen
        && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
        ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
          "Expected and actual KV arrays differ at position " + i + ": " +
          safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
          safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }

  private static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }

  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
        + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }
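
  // The returned key has the form "quorum:clientPort:znodeParent", for
  // example (illustrative) "localhost:21818:/hbase"; this is the format
  // other tooling, such as replication peer setup, expects when pointing at
  // a cluster.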

  /**
   * Creates a table with the given families, pre-split into
   * <code>numRegions</code> regions, and loads it with pseudo-random puts
   * and deletes, flushing <code>numFlushes</code> times. The Random is
   * seeded from the table name, so a given table name always produces the
   * same data.
   */
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
        " regions, " + numFlushes + " storefiles per region, " +
        numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
        "\n");

    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableName, cfBytes,
        maxVersions,
        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
        numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }

    return table;
  }

  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;
  private static Random random = new Random();

  /**
   * Returns a random port from the dynamic/private range. These ports cannot
   * be registered with IANA and are intended for dynamic allocation.
   */
  public static int randomPort() {
    return MIN_RANDOM_PORT
        + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
  }

  /**
   * Returns a random free port and marks that port as taken, so that
   * subsequent calls from this JVM do not hand out the same port. Not
   * thread-safe; expected to be called from single-threaded test setup code.
   */
  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      if (takenRandomPorts.contains(port)) {
        continue;
      }
      takenRandomPorts.add(port);

      // Probe the port by binding to it; on failure, loop and try another.
      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        port = 0;
      }
    } while (port == 0);
    return port;
  }
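
  // Note: the bind probe above is inherently racy; another process can grab
  // the port between the ServerSocket check and the caller's own bind. The
  // takenRandomPorts set only prevents collisions between callers within
  // this JVM.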

  /** Returns a random multicast address in the 226.1.1.0/24 range. */
  public static String randomMultiCastAddress() {
    return "226.1.1." + random.nextInt(254);
  }

  /**
   * Waits up to ten seconds for a server to come up on the given host and
   * port, rethrowing the last connect failure if it never does.
   */
  public static void waitForHostPort(String host, int port)
      throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }

  /**
   * Creates a pre-split table for load testing with a single column family
   * configured with the given compression and data block encoding. If the
   * table already exists, logs a warning and continues.
   *
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd);
  }

  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   *
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Pre-split the table: the number of regions is the number of live
      // region servers times the default number of regions per server.
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
          "pre-splitting table into " + totalNumberOfRegions + " regions " +
          "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
          totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + desc.getTableName() +
          " already exists, continuing");
    } finally {
      admin.close();
    }
    return totalNumberOfRegions;
  }
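
  // Illustrative usage (hypothetical): set up a load-test table with one
  // family using GZ compression and no block encoding:
  //
  //   int regions = HBaseTestingUtility.createPreSplitLoadTestTable(conf,
  //       TableName.valueOf("loadtest"), Bytes.toBytes("cf"),
  //       Compression.Algorithm.GZ, DataBlockEncoding.NONE);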

  public static int getMetaRSPort(Configuration conf) throws IOException {
    HTable table = new HTable(conf, TableName.META_TABLE_NAME);
    HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
    table.close();
    return hloc.getPort();
  }

  /**
   * Asserts that the region is deployed on the given server, retrying until
   * the timeout elapses: because assignment is asynchronous, a region may
   * not yet appear in a region server's online list even after the master
   * has recorded the new assignment.
   */
  public void assertRegionOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) return;
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
        + " on server " + server);
  }

  /**
   * Checks that the region is open on the specified region server, and on no
   * other live region server.
   */
  public void assertRegionOnlyOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) {
        List<JVMClusterUtil.RegionServerThread> rsThreads =
            getHBaseCluster().getLiveRegionServerThreads();
        for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
          HRegionServer rs = rsThread.getRegionServer();
          if (server.equals(rs.getServerName())) {
            continue;
          }
          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
          for (HRegion r: hrs) {
            assertTrue("Region should not be double assigned",
                r.getRegionId() != hri.getRegionId());
          }
        }
        return;
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
        + " on server " + server);
  }

  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.addFamily(hcd);
    HRegionInfo info =
        new HRegionInfo(TableName.valueOf(tableName), null, null, false);
    HRegion region =
        HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }

  /**
   * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, predicate);
  }

  /**
   * Wrapper method for
   * {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, interval, predicate);
  }

  /**
   * Wrapper method for
   * {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
   */
  public <E extends Exception> long waitFor(long timeout, long interval,
      boolean failIfTimeout, Predicate<E> predicate) throws E {
    return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
  }

  /**
   * Returns a {@link Predicate} for checking that there are no regions in
   * transition in the master.
   */
  public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
            .getAssignmentManager().getRegionStates();
        return !regionStates.isRegionsInTransition();
      }
    };
  }

  /**
   * Returns a {@link Predicate} for checking that the given table is enabled.
   */
  public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return getHBaseAdmin().isTableEnabled(tableName);
      }
    };
  }
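
  // Illustrative usage (hypothetical): the predicates above are intended to
  // be combined with the waitFor wrappers, e.g. to block until the cluster
  // has no regions in transition after a disruptive operation:
  //
  //   TEST_UTIL.waitFor(60000, TEST_UTIL.predicateNoRegionsInTransition());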

  /**
   * Creates column descriptors for every combination of the supported
   * compression, data block encoding, and bloom filter types.
   *
   * @return the list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

  /**
   * Creates column descriptors for every combination of the supported
   * compression, data block encoding, and bloom filter types.
   *
   * @param prefix family names prefix
   * @return the list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
    List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
    long familyId = 0;
    for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
        for (BloomType bloomType: BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          HColumnDescriptor htd = new HColumnDescriptor(name);
          htd.setCompressionType(compressionType);
          htd.setDataBlockEncoding(encodingType);
          htd.setBloomFilterType(bloomType);
          htds.add(htd);
          familyId++;
        }
      }
    }
    return htds;
  }
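
  // Illustrative usage (hypothetical): build a table descriptor exercising
  // every supported codec/encoding/bloom combination:
  //
  //   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("allCodecs"));
  //   for (HColumnDescriptor hcd : generateColumnDescriptors()) {
  //     htd.addFamily(hcd);
  //   }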

  /**
   * Get the compression algorithms that are actually usable on this machine,
   * i.e. those whose codecs (native or otherwise) can be loaded.
   *
   * @return supported compression algorithms
   */
  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // This algorithm is not available (e.g. missing native codec); skip it.
      }
    }
    return supportedAlgos.toArray(new Compression.Algorithm[0]);
  }
}