1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18 package org.apache.hadoop.hbase;
19
20 import static org.junit.Assert.assertTrue;
21 import static org.junit.Assert.fail;
22
23 import java.io.File;
24 import java.io.IOException;
25 import java.io.OutputStream;
26 import java.lang.reflect.Field;
27 import java.lang.reflect.Method;
28 import java.lang.reflect.Modifier;
29 import java.net.InetAddress;
30 import java.net.ServerSocket;
31 import java.net.Socket;
32 import java.net.UnknownHostException;
33 import java.security.MessageDigest;
34 import java.util.ArrayList;
35 import java.util.Arrays;
36 import java.util.Collection;
37 import java.util.Collections;
38 import java.util.HashSet;
39 import java.util.List;
40 import java.util.Map;
41 import java.util.NavigableSet;
42 import java.util.Random;
43 import java.util.Set;
44 import java.util.UUID;
45 import java.util.concurrent.TimeUnit;
46
47 import org.apache.commons.logging.Log;
48 import org.apache.commons.logging.LogFactory;
49 import org.apache.commons.logging.impl.Jdk14Logger;
50 import org.apache.commons.logging.impl.Log4JLogger;
51 import org.apache.hadoop.classification.InterfaceAudience;
52 import org.apache.hadoop.classification.InterfaceStability;
53 import org.apache.hadoop.conf.Configuration;
54 import org.apache.hadoop.fs.FileSystem;
55 import org.apache.hadoop.fs.Path;
56 import org.apache.hadoop.hbase.Waiter.Predicate;
57 import org.apache.hadoop.hbase.catalog.MetaEditor;
58 import org.apache.hadoop.hbase.client.Delete;
59 import org.apache.hadoop.hbase.client.Durability;
60 import org.apache.hadoop.hbase.client.Get;
61 import org.apache.hadoop.hbase.client.HBaseAdmin;
62 import org.apache.hadoop.hbase.client.HConnection;
63 import org.apache.hadoop.hbase.client.HTable;
64 import org.apache.hadoop.hbase.client.Put;
65 import org.apache.hadoop.hbase.client.Result;
66 import org.apache.hadoop.hbase.client.ResultScanner;
67 import org.apache.hadoop.hbase.client.Scan;
68 import org.apache.hadoop.hbase.fs.HFileSystem;
69 import org.apache.hadoop.hbase.io.compress.Compression;
70 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
71 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
72 import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
73 import org.apache.hadoop.hbase.io.hfile.HFile;
74 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
75 import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
76 import org.apache.hadoop.hbase.master.HMaster;
77 import org.apache.hadoop.hbase.master.RegionStates;
78 import org.apache.hadoop.hbase.master.ServerManager;
79 import org.apache.hadoop.hbase.regionserver.BloomType;
80 import org.apache.hadoop.hbase.regionserver.HRegion;
81 import org.apache.hadoop.hbase.regionserver.HRegionServer;
82 import org.apache.hadoop.hbase.regionserver.HStore;
83 import org.apache.hadoop.hbase.regionserver.InternalScanner;
84 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
85 import org.apache.hadoop.hbase.regionserver.wal.HLog;
86 import org.apache.hadoop.hbase.security.User;
87 import org.apache.hadoop.hbase.tool.Canary;
88 import org.apache.hadoop.hbase.util.Bytes;
89 import org.apache.hadoop.hbase.util.FSUtils;
90 import org.apache.hadoop.hbase.util.JVMClusterUtil;
91 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
92 import org.apache.hadoop.hbase.util.RegionSplitter;
93 import org.apache.hadoop.hbase.util.RetryCounter;
94 import org.apache.hadoop.hbase.util.Threads;
95 import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
96 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
97 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
98 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
99 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
100 import org.apache.hadoop.hdfs.DFSClient;
101 import org.apache.hadoop.hdfs.DistributedFileSystem;
102 import org.apache.hadoop.hdfs.MiniDFSCluster;
103 import org.apache.hadoop.mapred.JobConf;
104 import org.apache.hadoop.mapred.MiniMRCluster;
105 import org.apache.hadoop.mapred.TaskLog;
106 import org.apache.zookeeper.KeeperException;
107 import org.apache.zookeeper.KeeperException.NodeExistsException;
108 import org.apache.zookeeper.WatchedEvent;
109 import org.apache.zookeeper.ZooKeeper;
110 import org.apache.zookeeper.ZooKeeper.States;
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126 @InterfaceAudience.Public
127 @InterfaceStability.Evolving
128 public class HBaseTestingUtility extends HBaseCommonTestingUtility {
129 private MiniZooKeeperCluster zkCluster = null;
130
131 public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
132
133
134
135
136 public static final int DEFAULT_REGIONS_PER_SERVER = 5;
137
138
139
140
141
142 private boolean passedZkCluster = false;
143 private MiniDFSCluster dfsCluster = null;
144
145 private HBaseCluster hbaseCluster = null;
146 private MiniMRCluster mrCluster = null;
147
148
149 private boolean miniClusterRunning;
150
151 private String hadoopLogDir;
152
153
154 private File clusterTestDir = null;
155
156
157
158 private Path dataTestDirOnTestFS = null;
159
160
161
162
163
164
165
166
167 @Deprecated
168 private static final String TEST_DIRECTORY_KEY = "test.build.data";
169
170
171 private static String FS_URI;
172
173
174 private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();
175
176
177 public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
178 Arrays.asList(new Object[][] {
179 { Compression.Algorithm.NONE },
180 { Compression.Algorithm.GZ }
181 });
182
183
184 public static final List<Object[]> BOOLEAN_PARAMETERIZED =
185 Arrays.asList(new Object[][] {
186 { new Boolean(false) },
187 { new Boolean(true) }
188 });
189
190
191 public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination() ;
192
193 public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS ={
194 Compression.Algorithm.NONE, Compression.Algorithm.GZ
195 };
196
197
198
199
200
201 private static List<Object[]> bloomAndCompressionCombinations() {
202 List<Object[]> configurations = new ArrayList<Object[]>();
203 for (Compression.Algorithm comprAlgo :
204 HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
205 for (BloomType bloomType : BloomType.values()) {
206 configurations.add(new Object[] { comprAlgo, bloomType });
207 }
208 }
209 return Collections.unmodifiableList(configurations);
210 }
211
212
213
214
215 private static List<Object[]> memStoreTSAndTagsCombination() {
216 List<Object[]> configurations = new ArrayList<Object[]>();
217 configurations.add(new Object[] { false, false });
218 configurations.add(new Object[] { false, true });
219 configurations.add(new Object[] { true, false });
220 configurations.add(new Object[] { true, true });
221 return Collections.unmodifiableList(configurations);
222 }
223
  /** All bloom-type x compression-algorithm combinations, for parameterized tests. */
  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

  /** Creates the utility backed by a fresh default HBase configuration. */
  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  /** Creates the utility around the given configuration (the instance is used directly, not copied). */
  public HBaseTestingUtility(Configuration conf) {
    super(conf);

    // Make checksum-verification failures fatal so unit tests notice them.
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }
237
238
239
240
241
242
243
  /**
   * Creates an HBaseTestingUtility for local-mode tests, backed by a fresh
   * default configuration.
   * @return the new utility, with hbase.rootdir pointed at its data test dir
   */
  public static HBaseTestingUtility createLocalHTU() {
    Configuration c = HBaseConfiguration.create();
    return createLocalHTU(c);
  }

  /**
   * Creates an HBaseTestingUtility for local-mode tests around the supplied
   * configuration, pointing {@code hbase.rootdir} at the utility's data test dir.
   * @param c configuration to use (mutated in place)
   * @return the new utility
   */
  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    HBaseTestingUtility htu = new HBaseTestingUtility(c);
    String dataTestDir = htu.getDataTestDir().toString();
    htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
    return htu;
  }
263
264
265
266
267
268
269
270
271
272
273
274
  /**
   * Returns this utility's live configuration. Changes made to it are visible
   * to anything started from this instance afterwards.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  /** Injects an externally managed cluster (e.g. a distributed one) into this utility. */
  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
  /**
   * Sets up the base data test directory and, beneath it, the hadoop log/tmp
   * and mapred local directories, exporting them as system properties so the
   * hadoop/MapReduce pieces pick them up.
   * @return the base test path, or null if the superclass did not set one up
   */
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    // hadoop.tmp.dir defaults to /tmp/hadoop-${user.name}; use our own value
    // to keep concurrent test runs on one machine from colliding.
    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    // Read and modified by MiniMRCluster; configuration value only, no system property.
    createSubDir(
      "mapred.local.dir",
      testPath, "mapred-local-dir");

    return testPath;
  }
324
325 private void createSubDirAndSystemProperty(
326 String propertyName, Path parent, String subDirName){
327
328 String sysValue = System.getProperty(propertyName);
329
330 if (sysValue != null) {
331
332
333 LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
334 sysValue + " so I do NOT create it in " + parent);
335 String confValue = conf.get(propertyName);
336 if (confValue != null && !confValue.endsWith(sysValue)){
337 LOG.warn(
338 propertyName + " property value differs in configuration and system: "+
339 "Configuration="+confValue+" while System="+sysValue+
340 " Erasing configuration value by system value."
341 );
342 }
343 conf.set(propertyName, sysValue);
344 } else {
345
346 createSubDir(propertyName, parent, subDirName);
347 System.setProperty(propertyName, conf.get(propertyName));
348 }
349 }
350
351
352
353
354
355
356
357 private Path getBaseTestDirOnTestFS() throws IOException {
358 FileSystem fs = getTestFileSystem();
359 return new Path(fs.getWorkingDirectory(), "test-data");
360 }
361
362
363
364
365
366
367 Path getClusterTestDir() {
368 if (clusterTestDir == null){
369 setupClusterTestDir();
370 }
371 return new Path(clusterTestDir.getAbsolutePath());
372 }
373
374
375
376
  /**
   * Creates the directory the mini DFS cluster will use, under the data test
   * dir. A random UUID keeps concurrent clusters on one host from colliding.
   * Idempotent: a second call is a no-op.
   */
  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      return;
    }

    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
    // Have the directory removed on JVM exit when cleanup is enabled.
    boolean b = deleteOnExit();
    if (b) clusterTestDir.deleteOnExit();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
  }
392
393
394
395
396
397
398
  /**
   * Returns (lazily setting up) the path for test data on the test filesystem,
   * which may be a mini DFS rather than the local disk.
   * @throws IOException if the test filesystem cannot be set up
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  /**
   * Returns {@code subdirName} resolved under the test-FS data test dir.
   * @param subdirName relative name of the subdirectory
   * @throws IOException if the test filesystem cannot be set up
   */
  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }
417
418
419
420
  /**
   * Chooses the test-FS data directory. When the test FS is local, reuse the
   * on-disk data test dir; otherwise carve a random directory under the FS
   * working dir. Honors delete-on-exit in both cases. Idempotent.
   */
  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test on test fs dir already setup in "
        + dataTestDirOnTestFS.toString());
      return;
    }

    // Scheme comparison distinguishes local FS from a (mini) DFS.
    FileSystem fs = getTestFileSystem();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      File dataTestDir = new File(getDataTestDir().toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
      dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
    } else {
      Path base = getBaseTestDirOnTestFS();
      String randomStr = UUID.randomUUID().toString();
      dataTestDirOnTestFS = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
    }
  }
444
445
446
447
448
449
450 public boolean cleanupDataTestDirOnTestFS() throws IOException {
451 boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
452 if (ret)
453 dataTestDirOnTestFS = null;
454 return ret;
455 }
456
457
458
459
460
461
462 public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
463 Path cpath = getDataTestDirOnTestFS(subdirName);
464 return getTestFileSystem().delete(cpath, true);
465 }
466
467
468
469
470
471
472
473
  /**
   * Starts a mini DFS cluster with the given number of datanodes.
   * @see #shutdownMiniDFSCluster()
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  /**
   * Starts a mini DFS cluster with one datanode per supplied hostname, or a
   * single unnamed datanode when {@code hosts} is null/empty.
   * @see #shutdownMiniDFSCluster()
   */
  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
  throws Exception {
    if ( hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }
497
498
499
500
501
502
503
504
505
506
  /**
   * Starts a mini DFS cluster with {@code servers} datanodes on the given
   * hostnames, wires its filesystem into this utility's configuration, and
   * waits for the cluster to come up.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
  throws Exception {
    createDirsAndSetProperties();
    // Skip fsync on the namenode edit log: much faster for tests. Done via
    // reflection because the class is not present in every hadoop version.
    try {
      Method m = Class.forName("org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream")
        .getMethod("setShouldSkipFsyncForTesting", new Class<?> []{ boolean.class });
      m.invoke(null, new Object[] {true});
    } catch (ClassNotFoundException e) {
      // NOTE(review): only CNFE is handled; NoSuchMethodException and friends
      // propagate via `throws Exception` — confirm that is intended.
      LOG.info("EditLogFileOutputStream not found");
    }

    // Quiet noisy metrics warnings emitted during datanode start/stop.
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
        setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
        setLevel(org.apache.log4j.Level.ERROR);


    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    // Point hbase's fs.defaultFS at the mini cluster's filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // Force recomputation of the test-FS dir against the new filesystem.
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  /**
   * Same as {@link #startMiniDFSCluster(int, String[])} but also assigns
   * datanodes to racks. Intentionally duplicates the wiring steps above.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, racks, hosts, null);

    // Point hbase's fs.defaultFS at the mini cluster's filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // Force recomputation of the test-FS dir against the new filesystem.
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  /** Starts a 5-datanode DFS cluster on a fixed namenode port, for HLog tests. */
  public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }

  /** Sets up the directories and system/conf properties hadoop and MR expect. */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapred.local.dir");
    createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
  }
587
588
589
590
591
592
593
594 public boolean isReadShortCircuitOn(){
595 final String propName = "hbase.tests.use.shortcircuit.reads";
596 String readOnProp = System.getProperty(propName);
597 if (readOnProp != null){
598 return Boolean.parseBoolean(readOnProp);
599 } else {
600 return conf.getBoolean(propName, false);
601 }
602 }
603
604
605
606
607 private void enableShortCircuit() {
608 if (isReadShortCircuitOn()) {
609 String curUser = System.getProperty("user.name");
610 LOG.info("read short circuit is ON for user " + curUser);
611
612 conf.set("dfs.block.local-path-access.user", curUser);
613
614 conf.setBoolean("dfs.client.read.shortcircuit", true);
615
616 conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
617 } else {
618 LOG.info("read short circuit is OFF");
619 }
620 }
621
622 private String createDirAndSetProperty(final String relPath, String property) {
623 String path = getDataTestDir(relPath).toString();
624 System.setProperty(property, path);
625 conf.set(property, path);
626 new File(path).mkdirs();
627 LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
628 return path;
629 }
630
631
632
633
634
635
636 public void shutdownMiniDFSCluster() throws IOException {
637 if (this.dfsCluster != null) {
638
639 this.dfsCluster.shutdown();
640 dfsCluster = null;
641 dataTestDirOnTestFS = null;
642 FSUtils.setFsDefault(this.conf, new Path("file:///"));
643 }
644 }
645
646
647
648
649
650
651
652
  /**
   * Starts a single-node mini ZooKeeper cluster in the cluster test dir.
   * @see #shutdownMiniZKCluster()
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  /**
   * Starts a mini ZooKeeper cluster with the given number of servers, in the
   * cluster test dir (created if needed).
   */
  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }

  /** Starts a single-node mini ZooKeeper cluster in the given directory. */
  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
    throws Exception {
    return startMiniZKCluster(dir,1);
  }
675
676
677
678
679
  /**
   * Starts a mini ZK cluster of {@code zooKeeperServerNum} servers in
   * {@code dir}, honoring a test-supplied client port if configured, and
   * records the actual client port in the configuration.
   * @throws IOException if a zk cluster is already running from this utility
   */
  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
  throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    // This cluster is ours, so shutdownMiniCluster() will stop it.
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0){
      // If a port was requested by the test, attempt to use it.
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }
698
699
700
701
702
703
704
705 public void shutdownMiniZKCluster() throws IOException {
706 if (this.zkCluster != null) {
707 this.zkCluster.shutdown();
708 this.zkCluster = null;
709 }
710 }
711
712
713
714
715
716
717
  /**
   * Starts a mini HBase cluster (plus DFS and ZK) with one master and one
   * region server.
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  /**
   * Starts a mini cluster with one master and {@code numSlaves} region servers
   * (also used as the datanode count).
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
  throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  /**
   * Starts a mini cluster with the given master and region-server counts.
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves)
  throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }

  /**
   * Starts a mini cluster; when {@code dataNodeHosts} is non-empty the number
   * of datanodes is forced to its length (see the primary overload).
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
  }

  /**
   * Starts a mini cluster with separately specified datanode count.
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }

  /**
   * Starts a mini cluster using custom master/regionserver implementation
   * classes.
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
          throws Exception {
    return startMiniCluster(
        numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }
825
826
827
828
829
830
  /**
   * Primary mini-cluster start: brings up DFS, then ZK (unless one was passed
   * in), then HBase. When {@code dataNodeHosts} is non-empty it overrides
   * {@code numDataNodes}.
   * @throws IllegalStateException if a mini cluster is already running
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
    Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
  throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // Guard against two concurrent clusters from the same utility instance.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    // Start up a zk cluster, unless one was injected via setZkCluster.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Start the MiniHBaseCluster
    return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
  }

  /** Starts only the HBase portion with default master/regionserver classes. */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException{
    return startMiniHBaseCluster(numMasters, numSlaves, null, null);
  }
869
870
871
872
873
874
875
876
877
878
879
880
881 public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
882 final int numSlaves, Class<? extends HMaster> masterClass,
883 Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
884 throws IOException, InterruptedException {
885
886 createRootDir();
887
888
889
890 if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
891 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
892 }
893 if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
894 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
895 }
896
897 Configuration c = new Configuration(this.conf);
898 this.hbaseCluster =
899 new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
900
901 HTable t = new HTable(c, TableName.META_TABLE_NAME);
902 ResultScanner s = t.getScanner(new Scan());
903 while (s.next() != null) {
904 continue;
905 }
906 s.close();
907 t.close();
908
909 getHBaseAdmin();
910 LOG.info("Minicluster is up");
911 return (MiniHBaseCluster)this.hbaseCluster;
912 }
913
914
915
916
917
918
919
920 public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
921 this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
922
923 HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
924 ResultScanner s = t.getScanner(new Scan());
925 while (s.next() != null) {
926
927 }
928 LOG.info("HBase has been restarted");
929 s.close();
930 t.close();
931 }
932
933
934
935
936
937
938 public MiniHBaseCluster getMiniHBaseCluster() {
939 if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
940 return (MiniHBaseCluster)this.hbaseCluster;
941 }
942 throw new RuntimeException(hbaseCluster + " not an instance of " +
943 MiniHBaseCluster.class.getName());
944 }
945
946
947
948
949
950
  /**
   * Stops HBase, then ZK (only if this utility started it), then DFS, and
   * cleans the test directories, in that order.
   * @see #startMiniCluster(int)
   */
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster){
      // Only shut down a zk cluster we started ourselves.
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }
963
964
965
966
967
968 @Override
969 public boolean cleanupTestDir() throws IOException {
970 boolean ret = super.cleanupTestDir();
971 if (deleteDir(this.clusterTestDir)) {
972 this.clusterTestDir = null;
973 return ret & true;
974 }
975 return false;
976 }
977
978
979
980
981
  /**
   * Shuts down the HBase portion of the mini cluster: closes the cached admin,
   * resets the regionserver-wait thresholds, stops the cluster and waits for
   * it, and closes the shared zk watcher.
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    // Unset the configuration for MIN and MAX RS to start, so the next start
    // derives them from its own slave count again.
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shut down zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }
1003
1004
1005
1006
1007
1008
1009
1010 public Path getDefaultRootDirPath() throws IOException {
1011 FileSystem fs = FileSystem.get(this.conf);
1012 return new Path(fs.makeQualified(fs.getHomeDirectory()),"hbase");
1013 }
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023 public Path createRootDir() throws IOException {
1024 FileSystem fs = FileSystem.get(this.conf);
1025 Path hbaseRootdir = getDefaultRootDirPath();
1026 FSUtils.setRootDir(this.conf, hbaseRootdir);
1027 fs.mkdirs(hbaseRootdir);
1028 FSUtils.setVersion(fs, hbaseRootdir);
1029 return hbaseRootdir;
1030 }
1031
1032
1033
1034
1035
  /** Flushes all region memstores in the mini cluster. */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /** Flushes the memstores of all regions of the named table. */
  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /** Compacts all regions in the mini cluster; {@code major} selects a major compaction. */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /** Compacts all regions of the named table; {@code major} selects a major compaction. */
  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }
1063
1064
1065
1066
1067
1068
1069
1070
  /**
   * Creates a table with a single column family.
   * @return an HTable handle on the new table
   */
  public HTable createTable(String tableName, String family)
  throws IOException{
    return createTable(TableName.valueOf(tableName), new String[]{family});
  }

  /**
   * Creates a table with a single column family.
   * @return an HTable handle on the new table
   */
  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException{
    return createTable(TableName.valueOf(tableName), new byte[][]{family});
  }

  /**
   * Creates a table with the given string-named families.
   * @return an HTable handle on the new table
   */
  public HTable createTable(TableName tableName, String[] families)
  throws IOException {
    List<byte[]> fams = new ArrayList<byte[]>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  /**
   * Creates a table with a single column family.
   * @return an HTable handle on the new table
   */
  public HTable createTable(TableName tableName, byte[] family)
  throws IOException{
    return createTable(tableName, new byte[][]{family});
  }

  /**
   * Creates a table with the given families, using a private copy of this
   * utility's configuration for the returned handle.
   * @return an HTable handle on the new table
   */
  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  /**
   * Creates a table with the given families, using a private copy of this
   * utility's configuration for the returned handle.
   * @return an HTable handle on the new table
   */
  public HTable createTable(TableName tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  /** Creates a pre-split table with {@code numRegions} regions over the given key range. */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  /** Creates a pre-split table with {@code numRegions} regions over the given key range. */
  public HTable createTable(String tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }
1154
1155 public HTable createTable(TableName tableName, byte[][] families,
1156 int numVersions, byte[] startKey, byte[] endKey, int numRegions)
1157 throws IOException{
1158 HTableDescriptor desc = new HTableDescriptor(tableName);
1159 for (byte[] family : families) {
1160 HColumnDescriptor hcd = new HColumnDescriptor(family)
1161 .setMaxVersions(numVersions);
1162 desc.addFamily(hcd);
1163 }
1164 getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
1165
1166 waitUntilAllRegionsAssigned(tableName);
1167 return new HTable(getConfiguration(), tableName);
1168 }
1169
1170
1171
1172
1173
1174
1175
1176
1177
  /**
   * Creates a table from the supplied descriptor plus the given families
   * (blooms disabled) and waits until all regions are assigned.
   * @return an HTable handle bound to configuration {@code c}
   */
  public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
  throws IOException {
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms (they are on by default as of 0.95) but don't disable
      // block caching, so tests that rely on caching still work.
      hcd.setBloomFilterType(BloomType.NONE);
      htd.addFamily(hcd);
    }
    getHBaseAdmin().createTable(htd);
    // HBaseAdmin only waits for regions to appear in hbase:meta; wait until
    // they are actually assigned.
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(c, htd.getTableName());
  }

  /**
   * Creates a table with the given families, bound to configuration {@code c}.
   * @return an HTable handle on the new table
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c)
  throws IOException {
    return createTable(new HTableDescriptor(tableName), families, c);
  }
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216 public HTable createTable(byte[] tableName, byte[][] families,
1217 final Configuration c)
1218 throws IOException {
1219 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1220 for(byte[] family : families) {
1221 HColumnDescriptor hcd = new HColumnDescriptor(family);
1222
1223
1224
1225 hcd.setBloomFilterType(BloomType.NONE);
1226 desc.addFamily(hcd);
1227 }
1228 getHBaseAdmin().createTable(desc);
1229 return new HTable(c, tableName);
1230 }
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
  /**
   * Creates a table whose families each keep {@code numVersions} versions,
   * bound to configuration {@code c}, and waits for region assignment.
   * @return an HTable handle on the new table
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c, int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in hbase:meta; wait until
    // they are actually assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
  /**
   * Creates a table whose families each keep {@code numVersions} versions,
   * bound to configuration {@code c}.
   * NOTE(review): unlike the TableName-based overloads, this one does not call
   * waitUntilAllRegionsAssigned before returning — confirm whether intended.
   * @return an HTable handle on the new table
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c, int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }
1277
1278
1279
1280
1281
1282
1283
1284
1285
  /**
   * Creates a single-family table keeping {@code numVersions} versions.
   * @return an HTable handle on the new table
   */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /**
   * Creates a single-family table keeping {@code numVersions} versions.
   * @return an HTable handle on the new table
   */
  public HTable createTable(TableName tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /**
   * Creates a table whose families each keep {@code numVersions} versions.
   * @return an HTable handle on the new table
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
  throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326 public HTable createTable(TableName tableName, byte[][] families,
1327 int numVersions)
1328 throws IOException {
1329 HTableDescriptor desc = new HTableDescriptor(tableName);
1330 for (byte[] family : families) {
1331 HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1332 desc.addFamily(hcd);
1333 }
1334 getHBaseAdmin().createTable(desc);
1335
1336 waitUntilAllRegionsAssigned(tableName);
1337 return new HTable(new Configuration(getConfiguration()), tableName);
1338 }
1339
1340
1341
1342
1343
1344
1345
1346
1347
  /**
   * Creates a table with per-family max versions and HFile block size.
   * @return an HTable handle on the new table
   */
  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions, int blockSize) throws IOException {
    return createTable(TableName.valueOf(tableName),
        families, numVersions, blockSize);
  }
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362 public HTable createTable(TableName tableName, byte[][] families,
1363 int numVersions, int blockSize) throws IOException {
1364 HTableDescriptor desc = new HTableDescriptor(tableName);
1365 for (byte[] family : families) {
1366 HColumnDescriptor hcd = new HColumnDescriptor(family)
1367 .setMaxVersions(numVersions)
1368 .setBlocksize(blockSize);
1369 desc.addFamily(hcd);
1370 }
1371 getHBaseAdmin().createTable(desc);
1372
1373 waitUntilAllRegionsAssigned(tableName);
1374 return new HTable(new Configuration(getConfiguration()), tableName);
1375 }
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385 public HTable createTable(byte[] tableName, byte[][] families,
1386 int[] numVersions)
1387 throws IOException {
1388 return createTable(TableName.valueOf(tableName), families, numVersions);
1389 }
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399 public HTable createTable(TableName tableName, byte[][] families,
1400 int[] numVersions)
1401 throws IOException {
1402 HTableDescriptor desc = new HTableDescriptor(tableName);
1403 int i = 0;
1404 for (byte[] family : families) {
1405 HColumnDescriptor hcd = new HColumnDescriptor(family)
1406 .setMaxVersions(numVersions[i]);
1407 desc.addFamily(hcd);
1408 i++;
1409 }
1410 getHBaseAdmin().createTable(desc);
1411
1412 waitUntilAllRegionsAssigned(tableName);
1413 return new HTable(new Configuration(getConfiguration()), tableName);
1414 }
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424 public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
1425 throws IOException{
1426 return createTable(TableName.valueOf(tableName), family, splitRows);
1427 }
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437 public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
1438 throws IOException {
1439 HTableDescriptor desc = new HTableDescriptor(tableName);
1440 HColumnDescriptor hcd = new HColumnDescriptor(family);
1441 desc.addFamily(hcd);
1442 getHBaseAdmin().createTable(desc, splitRows);
1443
1444 waitUntilAllRegionsAssigned(tableName);
1445 return new HTable(getConfiguration(), tableName);
1446 }
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456 public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
1457 throws IOException {
1458 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1459 for(byte[] family:families) {
1460 HColumnDescriptor hcd = new HColumnDescriptor(family);
1461 desc.addFamily(hcd);
1462 }
1463 getHBaseAdmin().createTable(desc, splitRows);
1464
1465 waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
1466 return new HTable(getConfiguration(), tableName);
1467 }
1468
1469
1470
1471
1472
1473 public void deleteTable(String tableName) throws IOException {
1474 deleteTable(TableName.valueOf(tableName));
1475 }
1476
1477
1478
1479
1480
1481 public void deleteTable(byte[] tableName) throws IOException {
1482 deleteTable(TableName.valueOf(tableName));
1483 }
1484
1485
1486
1487
1488
1489 public void deleteTable(TableName tableName) throws IOException {
1490 try {
1491 getHBaseAdmin().disableTable(tableName);
1492 } catch (TableNotEnabledException e) {
1493 LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1494 }
1495 getHBaseAdmin().deleteTable(tableName);
1496 }
1497
1498
1499
1500
1501
  // Column family names shared by many tests that use this utility.
  public final static byte [] fam1 = Bytes.toBytes("colfamily11");
  public final static byte [] fam2 = Bytes.toBytes("colfamily21");
  public final static byte [] fam3 = Bytes.toBytes("colfamily31");
  // All three shared families, in declaration order.
  public static final byte[][] COLUMNS = {fam1, fam2, fam3};
  // Default max versions used by createTableDescriptor(String).
  private static final int MAXVERSIONS = 3;

  // The standard test keyspace is three-letter row keys from 'aaa' to 'zzz'.
  public static final char FIRST_CHAR = 'a';
  public static final char LAST_CHAR = 'z';
  public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1512
1513
1514
1515
1516
1517
1518
1519
1520 public HTableDescriptor createTableDescriptor(final String name,
1521 final int minVersions, final int versions, final int ttl, boolean keepDeleted) {
1522 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
1523 for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1524 htd.addFamily(new HColumnDescriptor(cfName)
1525 .setMinVersions(minVersions)
1526 .setMaxVersions(versions)
1527 .setKeepDeletedCells(keepDeleted)
1528 .setBlockCacheEnabled(false)
1529 .setTimeToLive(ttl)
1530 );
1531 }
1532 return htd;
1533 }
1534
1535
1536
1537
1538
1539
1540
1541 public HTableDescriptor createTableDescriptor(final String name) {
1542 return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1543 MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1544 }
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554 public HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey,
1555 byte [] endKey)
1556 throws IOException {
1557 HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
1558 return createLocalHRegion(hri, desc);
1559 }
1560
1561
1562
1563
1564
1565
1566
1567
1568 public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
1569 return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
1570 }
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580 public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog) throws IOException {
1581 return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
1582 }
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597 public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
1598 String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
1599 HLog hlog, byte[]... families) throws IOException {
1600 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
1601 htd.setReadOnly(isReadOnly);
1602 for (byte[] family : families) {
1603 HColumnDescriptor hcd = new HColumnDescriptor(family);
1604
1605 hcd.setMaxVersions(Integer.MAX_VALUE);
1606 htd.addFamily(hcd);
1607 }
1608 htd.setDurability(durability);
1609 HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
1610 return createLocalHRegion(info, htd, hlog);
1611 }
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621 public HTable truncateTable(byte[] tableName) throws IOException {
1622 return truncateTable(TableName.valueOf(tableName));
1623 }
1624
1625
1626
1627
1628
1629
1630
1631 public HTable truncateTable(TableName tableName) throws IOException {
1632 HTable table = new HTable(getConfiguration(), tableName);
1633 Scan scan = new Scan();
1634 ResultScanner resScan = table.getScanner(scan);
1635 for(Result res : resScan) {
1636 Delete del = new Delete(res.getRow());
1637 table.delete(del);
1638 }
1639 resScan = table.getScanner(scan);
1640 resScan.close();
1641 return table;
1642 }
1643
1644
1645
1646
1647
1648
1649
1650
1651 public int loadTable(final HTable t, final byte[] f) throws IOException {
1652 return loadTable(t, new byte[][] {f});
1653 }
1654
1655
1656
1657
1658
1659
1660
1661
1662 public int loadTable(final HTable t, final byte[] f, boolean writeToWAL) throws IOException {
1663 return loadTable(t, new byte[][] {f}, null, writeToWAL);
1664 }
1665
1666
1667
1668
1669
1670
1671
1672
1673 public int loadTable(final HTable t, final byte[][] f) throws IOException {
1674 return loadTable(t, f, null);
1675 }
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685 public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
1686 return loadTable(t, f, value, true);
1687 }
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697 public int loadTable(final HTable t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException {
1698 t.setAutoFlush(false);
1699 int rowCount = 0;
1700 for (byte[] row : HBaseTestingUtility.ROWS) {
1701 Put put = new Put(row);
1702 put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
1703 for (int i = 0; i < f.length; i++) {
1704 put.add(f[i], null, value != null ? value : row);
1705 }
1706 t.put(put);
1707 rowCount++;
1708 }
1709 t.flushCommits();
1710 return rowCount;
1711 }
1712
1713
1714
1715
1716 public static class SeenRowTracker {
1717 int dim = 'z' - 'a' + 1;
1718 int[][][] seenRows = new int[dim][dim][dim];
1719 byte[] startRow;
1720 byte[] stopRow;
1721
1722 public SeenRowTracker(byte[] startRow, byte[] stopRow) {
1723 this.startRow = startRow;
1724 this.stopRow = stopRow;
1725 }
1726
1727 void reset() {
1728 for (byte[] row : ROWS) {
1729 seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
1730 }
1731 }
1732
1733 int i(byte b) {
1734 return b - 'a';
1735 }
1736
1737 public void addRow(byte[] row) {
1738 seenRows[i(row[0])][i(row[1])][i(row[2])]++;
1739 }
1740
1741
1742
1743
1744 public void validate() {
1745 for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1746 for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1747 for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1748 int count = seenRows[i(b1)][i(b2)][i(b3)];
1749 int expectedCount = 0;
1750 if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
1751 && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
1752 expectedCount = 1;
1753 }
1754 if (count != expectedCount) {
1755 String row = new String(new byte[] {b1,b2,b3});
1756 throw new RuntimeException("Row:" + row + " has a seen count of " + count + " instead of " + expectedCount);
1757 }
1758 }
1759 }
1760 }
1761 }
1762 }
1763
1764 public int loadRegion(final HRegion r, final byte[] f) throws IOException {
1765 return loadRegion(r, f, false);
1766 }
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776 public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
1777 throws IOException {
1778 byte[] k = new byte[3];
1779 int rowCount = 0;
1780 for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1781 for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1782 for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1783 k[0] = b1;
1784 k[1] = b2;
1785 k[2] = b3;
1786 Put put = new Put(k);
1787 put.setDurability(Durability.SKIP_WAL);
1788 put.add(f, null, k);
1789 if (r.getLog() == null) put.setDurability(Durability.SKIP_WAL);
1790
1791 int preRowCount = rowCount;
1792 int pause = 10;
1793 int maxPause = 1000;
1794 while (rowCount == preRowCount) {
1795 try {
1796 r.put(put);
1797 rowCount++;
1798 } catch (RegionTooBusyException e) {
1799 pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
1800 Threads.sleep(pause);
1801 }
1802 }
1803 }
1804 }
1805 if (flush) {
1806 r.flushcache();
1807 }
1808 }
1809 return rowCount;
1810 }
1811
1812 public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow) throws IOException {
1813 for (int i = startRow; i < endRow; i++) {
1814 byte[] data = Bytes.toBytes(String.valueOf(i));
1815 Put put = new Put(data);
1816 put.add(f, null, data);
1817 t.put(put);
1818 }
1819 }
1820
1821
1822
1823
1824 public int countRows(final HTable table) throws IOException {
1825 Scan scan = new Scan();
1826 ResultScanner results = table.getScanner(scan);
1827 int count = 0;
1828 for (@SuppressWarnings("unused") Result res : results) {
1829 count++;
1830 }
1831 results.close();
1832 return count;
1833 }
1834
1835 public int countRows(final HTable table, final byte[]... families) throws IOException {
1836 Scan scan = new Scan();
1837 for (byte[] family: families) {
1838 scan.addFamily(family);
1839 }
1840 ResultScanner results = table.getScanner(scan);
1841 int count = 0;
1842 for (@SuppressWarnings("unused") Result res : results) {
1843 count++;
1844 }
1845 results.close();
1846 return count;
1847 }
1848
1849
1850
1851
1852 public String checksumRows(final HTable table) throws Exception {
1853 Scan scan = new Scan();
1854 ResultScanner results = table.getScanner(scan);
1855 MessageDigest digest = MessageDigest.getInstance("MD5");
1856 for (Result res : results) {
1857 digest.update(res.getRow());
1858 }
1859 results.close();
1860 return digest.toString();
1861 }
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871 public int createMultiRegions(HTable table, byte[] columnFamily)
1872 throws IOException {
1873 return createMultiRegions(getConfiguration(), table, columnFamily);
1874 }
1875
1876
  // All 26^3 three-letter row keys from "aaa" through "zzz", in sorted order.
  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3];
  static {
    // Fill ROWS by iterating the alphabet in each of the three positions.
    int i = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          ROWS[i][0] = b1;
          ROWS[i][1] = b2;
          ROWS[i][2] = b3;
          i++;
        }
      }
    }
  }
1891
  // Default region start keys used by createMultiRegions: the empty key plus
  // "bbb".."yyy", yielding one region per letter band.
  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };
1903
  // Split keys suitable for HBaseAdmin.createTable, which expects explicit
  // split points (no leading empty key) -- "bbb".."zzz".
  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
      Bytes.toBytes("bbb"),
      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
      Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
      Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
      Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
      Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
      Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924 public int createMultiRegions(final Configuration c, final HTable table,
1925 final byte[] columnFamily)
1926 throws IOException {
1927 return createMultiRegions(c, table, columnFamily, KEYS);
1928 }
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939 public int createMultiRegions(final Configuration c, final HTable table,
1940 final byte [] family, int numRegions)
1941 throws IOException {
1942 if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1943 byte [] startKey = Bytes.toBytes("aaaaa");
1944 byte [] endKey = Bytes.toBytes("zzzzz");
1945 byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1946 byte [][] regionStartKeys = new byte[splitKeys.length+1][];
1947 for (int i=0;i<splitKeys.length;i++) {
1948 regionStartKeys[i+1] = splitKeys[i];
1949 }
1950 regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
1951 return createMultiRegions(c, table, family, regionStartKeys);
1952 }
1953
  /**
   * Re-split an existing (typically single-region) table into one region per
   * start key by rewriting its meta rows directly: new region entries are
   * added to meta, the old entries (and the old region's directory on the
   * filesystem) are removed, the client region cache is cleared, and the new
   * regions are assigned if the table is enabled.
   * @param c configuration used to reach meta and the filesystem
   * @param table the table to re-split
   * @param columnFamily family to add to the descriptor if missing
   * @param startKeys sorted region start keys; one region is created per key
   * @return the number of regions created
   * @throws IOException on any meta or filesystem failure
   */
  @SuppressWarnings("deprecation")
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte [][] startKeys)
  throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, TableName.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if(!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }
    // Capture the table's current meta rows and the encoded name of its
    // existing region BEFORE adding the new entries, so both can be removed
    // afterwards.
    List<byte[]> rows = getMetaTableRows(htd.getTableName());
    String regionToDeleteInFS = table
        .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
        .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // Add one meta entry per start key; each region ends where the next
    // begins, with the last wrapping around to startKeys[0].
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getName(),
          startKeys[i], startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
      count++;
    }
    // Remove the pre-existing meta rows for this table.
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    // Remove the old region's directory from the filesystem as well.
    Path tableDir = new Path(getDefaultRootDirPath().toString()
        + System.getProperty("file.separator") + htd.getTableName()
        + System.getProperty("file.separator") + regionToDeleteInFS);
    FileSystem.get(c).delete(tableDir);
    // Drop stale cached region locations so clients re-read meta.
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    // Assign the freshly-added regions if the table is currently enabled.
    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for(HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }
    meta.close();
    return count;
  }
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021 public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
2022 final HTableDescriptor htd, byte [][] startKeys)
2023 throws IOException {
2024 HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
2025 Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2026 List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
2027
2028 for (int i = 0; i < startKeys.length; i++) {
2029 int j = (i + 1) % startKeys.length;
2030 HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
2031 startKeys[j]);
2032 MetaEditor.addRegionToMeta(meta, hri);
2033 newRegions.add(hri);
2034 }
2035
2036 meta.close();
2037 return newRegions;
2038 }
2039
2040
2041
2042
2043
2044
2045 public List<byte[]> getMetaTableRows() throws IOException {
2046
2047 HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2048 List<byte[]> rows = new ArrayList<byte[]>();
2049 ResultScanner s = t.getScanner(new Scan());
2050 for (Result result : s) {
2051 LOG.info("getMetaTableRows: row -> " +
2052 Bytes.toStringBinary(result.getRow()));
2053 rows.add(result.getRow());
2054 }
2055 s.close();
2056 t.close();
2057 return rows;
2058 }
2059
2060
2061
2062
2063
2064
2065 public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2066
2067 HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2068 List<byte[]> rows = new ArrayList<byte[]>();
2069 ResultScanner s = t.getScanner(new Scan());
2070 for (Result result : s) {
2071 HRegionInfo info = HRegionInfo.getHRegionInfo(result);
2072 if (info == null) {
2073 LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2074
2075 continue;
2076 }
2077
2078 if (info.getTable().equals(tableName)) {
2079 LOG.info("getMetaTableRows: row -> " +
2080 Bytes.toStringBinary(result.getRow()) + info);
2081 rows.add(result.getRow());
2082 }
2083 }
2084 s.close();
2085 t.close();
2086 return rows;
2087 }
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100 public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
2101 throws IOException, InterruptedException {
2102 return getRSForFirstRegionInTable(TableName.valueOf(tableName));
2103 }
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114 public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2115 throws IOException, InterruptedException {
2116 List<byte[]> metaRows = getMetaTableRows(tableName);
2117 if (metaRows == null || metaRows.isEmpty()) {
2118 return null;
2119 }
2120 LOG.debug("Found " + metaRows.size() + " rows for table " +
2121 tableName);
2122 byte [] firstrow = metaRows.get(0);
2123 LOG.debug("FirstRow=" + Bytes.toString(firstrow));
2124 long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2125 HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2126 int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2127 HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
2128 RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MICROSECONDS);
2129 while(retrier.shouldRetry()) {
2130 int index = getMiniHBaseCluster().getServerWith(firstrow);
2131 if (index != -1) {
2132 return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2133 }
2134
2135 retrier.sleepUntilNextRetry();
2136 }
2137 return null;
2138 }
2139
2140
2141
2142
2143
2144
2145
  /**
   * Start a mini mapreduce cluster with the default of two servers.
   * @return the started MiniMRCluster
   * @throws IOException if startup fails
   */
  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
    return mrCluster;
  }
2150
2151
2152
2153
2154
2155 private void forceChangeTaskLogDir() {
2156 Field logDirField;
2157 try {
2158 logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2159 logDirField.setAccessible(true);
2160
2161 Field modifiersField = Field.class.getDeclaredField("modifiers");
2162 modifiersField.setAccessible(true);
2163 modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2164
2165 logDirField.set(null, new File(hadoopLogDir, "userlogs"));
2166 } catch (SecurityException e) {
2167 throw new RuntimeException(e);
2168 } catch (NoSuchFieldException e) {
2169
2170 throw new RuntimeException(e);
2171 } catch (IllegalArgumentException e) {
2172 throw new RuntimeException(e);
2173 } catch (IllegalAccessException e) {
2174 throw new RuntimeException(e);
2175 }
2176 }
2177
2178
2179
2180
2181
2182
2183
  /**
   * Start a mini mapreduce cluster with the given number of servers and wire
   * its addresses back into this utility's configuration so clients submit
   * jobs to it rather than to the local job runner.
   * @param servers number of task servers to start
   * @throws IOException if startup fails
   * @throws IllegalStateException if a cluster is already running
   */
  private void startMiniMapReduceCluster(final int servers) throws IOException {
    if (mrCluster != null) {
      throw new IllegalStateException("MiniMRCluster is already running");
    }
    LOG.info("Starting mini mapreduce cluster...");
    setupClusterTestDir();
    createDirsAndSetProperties();

    forceChangeTaskLogDir();

    // Give the node manager generous virtual-memory headroom so tasks are
    // not killed on vmem limits in the test environment.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    // Disable speculative execution so tests see deterministic task runs.
    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);

    // Back the MR cluster by the mini DFS if one is running (FS_URI set).
    mrCluster = new MiniMRCluster(servers,
      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
      null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
    }

    jobConf.set("mapred.local.dir",
      conf.get("mapred.local.dir")); // Hadoop MiniMR overwrites this while it should not
    LOG.info("Mini mapreduce cluster started");

    // Copy the cluster's job tracker address into our conf so jobs go to the
    // mini cluster instead of the local runner.
    conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));

    // Mirror the YARN/MR2 addresses the mini cluster picked, when present.
    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = jobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
    if (historyAddress != null) {
      conf.set("mapreduce.jobhistory.address", historyAddress);
    }
    String schedulerAddress =
      jobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }
2239
2240
2241
2242
2243 public void shutdownMiniMapReduceCluster() {
2244 LOG.info("Stopping mini mapreduce cluster...");
2245 if (mrCluster != null) {
2246 mrCluster.shutdown();
2247 mrCluster = null;
2248 }
2249
2250 conf.set("mapred.job.tracker", "local");
2251 LOG.info("Mini mapreduce cluster stopped");
2252 }
2253
2254
2255
2256
2257 public RegionServerServices createMockRegionServerService() throws IOException {
2258 return createMockRegionServerService((ServerName)null);
2259 }
2260
2261
2262
2263
2264
2265 public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
2266 final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2267 rss.setFileSystem(getTestFileSystem());
2268 rss.setRpcServer(rpc);
2269 return rss;
2270 }
2271
2272
2273
2274
2275
2276 public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2277 final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2278 rss.setFileSystem(getTestFileSystem());
2279 return rss;
2280 }
2281
2282
2283
2284
2285
2286
2287 public void enableDebug(Class<?> clazz) {
2288 Log l = LogFactory.getLog(clazz);
2289 if (l instanceof Log4JLogger) {
2290 ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2291 } else if (l instanceof Jdk14Logger) {
2292 ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2293 }
2294 }
2295
2296
2297
2298
2299
2300 public void expireMasterSession() throws Exception {
2301 HMaster master = getMiniHBaseCluster().getMaster();
2302 expireSession(master.getZooKeeper(), false);
2303 }
2304
2305
2306
2307
2308
2309
2310 public void expireRegionServerSession(int index) throws Exception {
2311 HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2312 expireSession(rs.getZooKeeper(), false);
2313 decrementMinRegionServerCount();
2314 }
2315
2316 private void decrementMinRegionServerCount() {
2317
2318
2319 decrementMinRegionServerCount(getConfiguration());
2320
2321
2322 for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2323 decrementMinRegionServerCount(master.getMaster().getConfiguration());
2324 }
2325 }
2326
2327 private void decrementMinRegionServerCount(Configuration conf) {
2328 int currentCount = conf.getInt(
2329 ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2330 if (currentCount != -1) {
2331 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
2332 Math.max(currentCount - 1, 1));
2333 }
2334 }
2335
  /**
   * Expire the given watcher's ZooKeeper session without checking cluster
   * status afterwards.
   */
  public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
  }
2339
  /**
   * Expire the given watcher's ZooKeeper session.
   * @deprecated the {@code server} argument is ignored; use
   *   {@code expireSession(ZooKeeperWatcher, boolean)} instead.
   */
  @Deprecated
  public void expireSession(ZooKeeperWatcher nodeZK, Server server)
    throws Exception {
    expireSession(nodeZK, false);
  }
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
  /**
   * Force-expire the ZooKeeper session behind the given watcher. The trick:
   * open a second ZooKeeper connection with the same session id and password
   * and then close it -- the server expires the shared session, which the
   * original client observes as a session expiration event.
   * @param nodeZK watcher whose session is to be expired
   * @param checkStatus if true, verify afterwards that meta is still reachable
   * @throws Exception on ZooKeeper connection failure
   */
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
    throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    // Borrow the existing session's credentials so the new connection joins
    // the SAME session rather than creating a fresh one.
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // A side connection used purely to log session events during the dance.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher(){
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event="+watchedEvent);
      }
    } , sessionID, password);

    // The connection whose close() will expire the shared session.
    ZooKeeper newZK = new ZooKeeper(quorumServers,
      1000, EmptyWatcher.instance, sessionID, password);

    // Wait (up to ~1s) for the new connection to actually establish before
    // closing it, or the expiration may not take effect.
    long start = System.currentTimeMillis();
    while (newZK.getState() != States.CONNECTED
        && System.currentTimeMillis() - start < 1000) {
      Thread.sleep(1);
    }
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    monitor.close();

    if (checkStatus) {
      // Opening (and closing) meta proves the cluster survived the expiry.
      new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
    }
  }
2401
2402
2403
2404
2405
2406
2407
  /**
   * Get the running mini HBase cluster.
   * @return the MiniHBaseCluster instance
   */
  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }
2411
2412
2413
2414
2415
2416
2417
2418
2419
  /**
   * Get the cluster as the generic HBaseCluster interface, which may be a
   * mini cluster or a distributed one depending on how it was started.
   * @return the current cluster, possibly null if none has been started
   */
  public HBaseCluster getHBaseClusterInterface() {
    return hbaseCluster;
  }
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
  /**
   * Lazily create and return an HBaseAdmin shared across tests. The
   * returned instance's close() is a no-op (see HBaseAdminForTests) so one
   * test cannot close it out from under another.
   * @return the shared admin instance
   * @throws IOException if the admin cannot be created
   */
  public synchronized HBaseAdmin getHBaseAdmin()
  throws IOException {
    if (hbaseAdmin == null){
      hbaseAdmin = new HBaseAdminForTests(getConfiguration());
    }
    return hbaseAdmin;
  }

  // Shared admin instance, created on first use.
  private HBaseAdminForTests hbaseAdmin = null;
  /**
   * HBaseAdmin whose public close() is a warning-only no-op, so the instance
   * shared via getHBaseAdmin() survives tests that try to close it. The real
   * close is exposed privately as close0().
   */
  private static class HBaseAdminForTests extends HBaseAdmin {
    public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
        ZooKeeperConnectionException, IOException {
      super(c);
    }

    @Override
    public synchronized void close() throws IOException {
      // Intentionally does NOT close; warn so callers notice.
      LOG.warn("close() called on HBaseAdmin instance returned from HBaseTestingUtility.getHBaseAdmin()");
    }

    // Actually close the underlying admin -- presumably invoked by the
    // owning utility during shutdown (caller not visible in this chunk).
    private synchronized void close0() throws IOException {
      super.close();
    }
  }
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
  /**
   * Lazily create and return a ZooKeeperWatcher shared by this testing
   * utility. An abort is never expected here, so the Abortable simply
   * rethrows as a RuntimeException.
   * @return the shared watcher
   * @throws IOException if the watcher cannot be created
   */
  public synchronized ZooKeeperWatcher getZooKeeperWatcher()
    throws IOException {
    if (zooKeeperWatcher == null) {
      zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
          new Abortable() {
        @Override public void abort(String why, Throwable e) {
          throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
        }
        @Override public boolean isAborted() {return false;}
      });
    }
    return zooKeeperWatcher;
  }
  // Shared watcher, created on first use.
  private ZooKeeperWatcher zooKeeperWatcher;
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492 public void closeRegion(String regionName) throws IOException {
2493 closeRegion(Bytes.toBytes(regionName));
2494 }
2495
2496
2497
2498
2499
2500
2501
2502 public void closeRegion(byte[] regionName) throws IOException {
2503 getHBaseAdmin().closeRegion(regionName, null);
2504 }
2505
2506
2507
2508
2509
2510
2511
2512
2513 public void closeRegionByRow(String row, HTable table) throws IOException {
2514 closeRegionByRow(Bytes.toBytes(row), table);
2515 }
2516
2517
2518
2519
2520
2521
2522
2523
2524 public void closeRegionByRow(byte[] row, HTable table) throws IOException {
2525 HRegionLocation hrl = table.getRegionLocation(row);
2526 closeRegion(hrl.getRegionInfo().getRegionName());
2527 }
2528
2529
2530
2531
2532
2533
2534
2535
2536 public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
2537 List<HRegion> regions = getHBaseCluster().getRegions(tableName);
2538 int regCount = regions.size();
2539 Set<Integer> attempted = new HashSet<Integer>();
2540 int idx;
2541 int attempts = 0;
2542 do {
2543 regions = getHBaseCluster().getRegions(tableName);
2544 if (regCount != regions.size()) {
2545
2546 attempted.clear();
2547 }
2548 regCount = regions.size();
2549
2550
2551 if (regCount > 0) {
2552 idx = random.nextInt(regCount);
2553
2554 if (attempted.contains(idx))
2555 continue;
2556 try {
2557 regions.get(idx).checkSplit();
2558 return regions.get(idx);
2559 } catch (Exception ex) {
2560 LOG.warn("Caught exception", ex);
2561 attempted.add(idx);
2562 }
2563 }
2564 attempts++;
2565 } while (maxAttempts == -1 || attempts < maxAttempts);
2566 return null;
2567 }
2568
  /**
   * Get the mini ZooKeeper cluster, possibly null if none has been set up.
   */
  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }
2572
2573 public void setZkCluster(MiniZooKeeperCluster zkCluster) {
2574 this.passedZkCluster = true;
2575 this.zkCluster = zkCluster;
2576 conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
2577 }
2578
  /**
   * Get the mini DFS cluster, possibly null if none has been started.
   */
  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }
2582
2583 public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
2584 if (dfsCluster != null && dfsCluster.isClusterUp()) {
2585 throw new IOException("DFSCluster is already running! Shut it down first.");
2586 }
2587 this.dfsCluster = cluster;
2588 }
2589
  /**
   * Get an HFileSystem view over this utility's configured filesystem.
   */
  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }
2593
2594
2595
2596
2597
2598
2599
2600
2601 public void waitTableAvailable(byte[] table)
2602 throws InterruptedException, IOException {
2603 waitTableAvailable(getHBaseAdmin(), table, 30000);
2604 }
2605
2606 public void waitTableAvailable(HBaseAdmin admin, byte[] table)
2607 throws InterruptedException, IOException {
2608 waitTableAvailable(admin, table, 30000);
2609 }
2610
2611
2612
2613
2614
2615
2616
2617
2618 public void waitTableAvailable(byte[] table, long timeoutMillis)
2619 throws InterruptedException, IOException {
2620 waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
2621 }
2622
2623 public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
2624 throws InterruptedException, IOException {
2625 long startWait = System.currentTimeMillis();
2626 while (!admin.isTableAvailable(table)) {
2627 assertTrue("Timed out waiting for table to become available " +
2628 Bytes.toStringBinary(table),
2629 System.currentTimeMillis() - startWait < timeoutMillis);
2630 Thread.sleep(200);
2631 }
2632 }
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
  /**
   * Wait until the given table is available and enabled, with a default
   * 30-second timeout.
   * @param table name of the table to wait on
   */
  public void waitTableEnabled(byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, 30000);
  }
2647
  /**
   * Wait until the given table is available and enabled, polling via the
   * given admin, with a default 30-second timeout.
   * @param admin admin instance used to poll table state
   * @param table name of the table to wait on
   */
  public void waitTableEnabled(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(admin, table, 30000);
  }
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
  /**
   * Wait until the given table is available and enabled.
   * @param table name of the table to wait on
   * @param timeoutMillis how long to wait before failing, in milliseconds
   */
  public void waitTableEnabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
  }
2666
2667 public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
2668 throws InterruptedException, IOException {
2669 long startWait = System.currentTimeMillis();
2670 waitTableAvailable(admin, table, timeoutMillis);
2671 long remainder = System.currentTimeMillis() - startWait;
2672 while (!admin.isTableEnabled(table)) {
2673 assertTrue("Timed out waiting for table to become available and enabled " +
2674 Bytes.toStringBinary(table),
2675 System.currentTimeMillis() - remainder < timeoutMillis);
2676 Thread.sleep(200);
2677 }
2678
2679
2680
2681
2682
2683 try {
2684 Canary.sniff(admin, TableName.valueOf(table));
2685 } catch (Exception e) {
2686 throw new IOException(e);
2687 }
2688 }
2689
2690
2691
2692
2693
2694
2695
2696
2697 public boolean ensureSomeRegionServersAvailable(final int num)
2698 throws IOException {
2699 boolean startedServer = false;
2700 MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
2701 for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
2702 LOG.info("Started new server=" + hbaseCluster.startRegionServer());
2703 startedServer = true;
2704 }
2705
2706 return startedServer;
2707 }
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718 public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
2719 throws IOException {
2720 boolean startedServer = ensureSomeRegionServersAvailable(num);
2721
2722 int nonStoppedServers = 0;
2723 for (JVMClusterUtil.RegionServerThread rst :
2724 getMiniHBaseCluster().getRegionServerThreads()) {
2725
2726 HRegionServer hrs = rst.getRegionServer();
2727 if (hrs.isStopping() || hrs.isStopped()) {
2728 LOG.info("A region server is stopped or stopping:"+hrs);
2729 } else {
2730 nonStoppedServers++;
2731 }
2732 }
2733 for (int i=nonStoppedServers; i<num; ++i) {
2734 LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
2735 startedServer = true;
2736 }
2737 return startedServer;
2738 }
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750 public static User getDifferentUser(final Configuration c,
2751 final String differentiatingSuffix)
2752 throws IOException {
2753 FileSystem currentfs = FileSystem.get(c);
2754 if (!(currentfs instanceof DistributedFileSystem)) {
2755 return User.getCurrent();
2756 }
2757
2758
2759 String username = User.getCurrent().getName() +
2760 differentiatingSuffix;
2761 User user = User.createUserForTesting(c, username,
2762 new String[]{"supergroup"});
2763 return user;
2764 }
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
  /**
   * Uses reflection to set the private {@code maxRecoveryErrorCount} field on
   * an HDFS DFSOutputStream. This is a test-only hack against HDFS internals;
   * the field is not part of any public API, so failures are logged and
   * swallowed rather than propagated.
   *
   * @param stream the output stream; only acted on if it is a DFSOutputStream
   * @param max the new maximum recovery error count
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      // DFSOutputStream is a (package-)private inner class of DFSClient, so
      // we have to locate it among DFSClient's declared classes.
      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz: clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      // Best effort only: the field may not exist in this Hadoop version.
      LOG.info("Could not set max recovery field", e);
    }
  }
2799
2800
2801
2802
2803
2804
2805
2806
2807
  /**
   * Wait until all regions of the given table are assigned, with a default
   * 60-second timeout.
   * @param tableName the table whose regions we wait on
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
  /**
   * Waits until every region of the given table appears assigned, as observed
   * by scanning the catalog (meta) table: a region counts as assigned when its
   * meta row carries a server column.
   *
   * @param tableName the table whose regions we wait on
   * @param timeout timeout in milliseconds; the wait fails when it elapses
   * @throws IOException if scanning meta fails or the wait times out
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
      throws IOException {
    final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
    try {
      // Re-scan meta every 200ms until every region row of this table has a
      // server column, or the timeout elapses (failIfTimeout=true).
      waitFor(timeout, 200, true, new Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          boolean allRegionsAssigned = true;
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          ResultScanner s = meta.getScanner(scan);
          try {
            Result r;
            while ((r = s.next()) != null) {
              byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
              HRegionInfo info = HRegionInfo.parseFromOrNull(b);
              if (info != null && info.getTable().equals(tableName)) {
                // A region is assigned iff its row has a server column.
                b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                allRegionsAssigned &= (b != null);
              }
            }
          } finally {
            s.close();
          }
          return allRegionsAssigned;
        }
      });
    } finally {
      meta.close();
    }
  }
2852
2853
2854
2855
2856
  /**
   * Does a small get/scan against a single store. A store has no direct query
   * method of its own, so this goes through its scanner.
   *
   * @param store the store to read from
   * @param get the get describing the row/columns wanted
   * @return the cells read, or an empty list if the first row returned was
   *         not the requested row
   * @throws IOException on scanner failure
   */
  public static List<Cell> getFromStoreFile(HStore store,
                                                Get get) throws IOException {
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()),
        // readPt 0 — no MVCC coordination needed for this test helper.
        0);

    List<Cell> result = new ArrayList<Cell>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // Verify that we are on the row we asked for; the scanner may have
      // surfaced a different row, in which case return nothing.
      Cell kv = result.get(0);
      if (!CellUtil.matchingRow(kv, get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887 public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
2888 assertTrue(numRegions>3);
2889 byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
2890 byte [][] result = new byte[tmpSplitKeys.length+1][];
2891 for (int i=0;i<tmpSplitKeys.length;i++) {
2892 result[i+1] = tmpSplitKeys[i];
2893 }
2894 result[0] = HConstants.EMPTY_BYTE_ARRAY;
2895 return result;
2896 }
2897
2898
2899
2900
2901
2902 public static List<Cell> getFromStoreFile(HStore store,
2903 byte [] row,
2904 NavigableSet<byte[]> columns
2905 ) throws IOException {
2906 Get get = new Get(row);
2907 Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
2908 s.put(store.getFamily().getName(), columns);
2909
2910 return getFromStoreFile(store,get);
2911 }
2912
2913
2914
2915
2916
  /**
   * Creates a ZooKeeperWatcher against the given utility's configuration.
   * The watcher aborts by throwing a RuntimeException, so any fatal ZK error
   * surfaces immediately in the test.
   *
   * @param TEST_UTIL utility whose configuration supplies the ZK quorum
   * @return a connected watcher named "unittest"
   */
  public static ZooKeeperWatcher getZooKeeperWatcher(
      HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
      IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
        "unittest", new Abortable() {
          boolean aborted = false;

          @Override
          public void abort(String why, Throwable e) {
            aborted = true;
            // Fail loudly: tests must not continue on a fatal ZK error.
            throw new RuntimeException("Fatal ZK error, why=" + why, e);
          }

          @Override
          public boolean isAborted() {
            return aborted;
          }
        });
    return zkw;
  }
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
  /**
   * Creates the region's unassigned znode and walks it through the
   * OFFLINE -> OPENING -> OPENED transitions, as if the given server had
   * opened the region.
   *
   * @param TEST_UTIL utility supplying the ZK configuration
   * @param region the region whose znode is forced to OPENED
   * @param serverName the server recorded as having opened the region
   * @return the watcher used, so callers can continue to interact with ZK
   */
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw, region
        .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
        version);
    return zkw;
  }
2961
2962 public static void assertKVListsEqual(String additionalMsg,
2963 final List<? extends Cell> expected,
2964 final List<? extends Cell> actual) {
2965 final int eLen = expected.size();
2966 final int aLen = actual.size();
2967 final int minLen = Math.min(eLen, aLen);
2968
2969 int i;
2970 for (i = 0; i < minLen
2971 && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
2972 ++i) {}
2973
2974 if (additionalMsg == null) {
2975 additionalMsg = "";
2976 }
2977 if (!additionalMsg.isEmpty()) {
2978 additionalMsg = ". " + additionalMsg;
2979 }
2980
2981 if (eLen != aLen || i != minLen) {
2982 throw new AssertionError(
2983 "Expected and actual KV arrays differ at position " + i + ": " +
2984 safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
2985 safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
2986 }
2987 }
2988
2989 private static <T> String safeGetAsStr(List<T> lst, int i) {
2990 if (0 <= i && i < lst.size()) {
2991 return lst.get(i).toString();
2992 } else {
2993 return "<out_of_range>";
2994 }
2995 }
2996
2997 public String getClusterKey() {
2998 return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
2999 + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
3000 + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
3001 HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
3002 }
3003
3004
  /**
   * Creates a pre-split table and loads it with randomized data: numFlushes
   * rounds of numRowsPerFlush rows each, mixing puts and (column / versions)
   * deletes, flushing between rounds when a mini cluster is running. The RNG
   * is seeded from the table name, so content is repeatable per table name.
   *
   * @param tableName name of the table to create
   * @param families column family names to create
   * @param maxVersions max versions kept per cell
   * @param numColsPerRow columns touched per generated row
   * @param numFlushes number of load-and-flush rounds
   * @param numRegions number of pre-split regions
   * @param numRowsPerFlush rows generated per round
   * @return the created, loaded table
   */
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
        " regions, " + numFlushes + " storefiles per region, " +
        numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
        "\n");

    // Deterministic per table name so re-runs generate identical data.
    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    // Row keys are 8-hex-digit strings over [0, Integer.MAX_VALUE); split
    // points leave one region's worth of keyspace at each end.
    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableName, cfBytes,
        maxVersions,
        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
        numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          // ~50% puts, ~40% single-version deletes, ~10% all-version deletes.
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }

    return table;
  }
3085
  // Bounds for randomly generated ports: 0xc000 (49152) is the start of the
  // IANA dynamic/private range, so generated ports avoid registered services.
  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;
  // Shared RNG for port/address generation; deliberately unseeded.
  private static Random random = new Random();
3089
3090
3091
3092
3093
3094 public static int randomPort() {
3095 return MIN_RANDOM_PORT
3096 + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
3097 }
3098
3099
3100
3101
3102
3103 public static int randomFreePort() {
3104 int port = 0;
3105 do {
3106 port = randomPort();
3107 if (takenRandomPorts.contains(port)) {
3108 continue;
3109 }
3110 takenRandomPorts.add(port);
3111
3112 try {
3113 ServerSocket sock = new ServerSocket(port);
3114 sock.close();
3115 } catch (IOException ex) {
3116 port = 0;
3117 }
3118 } while (port == 0);
3119 return port;
3120 }
3121
3122
  /** @return a random multicast address of the form 226.1.1.N with N in [0, 254). */
  public static String randomMultiCastAddress() {
    return "226.1.1." + random.nextInt(254);
  }
3126
3127
3128
3129 public static void waitForHostPort(String host, int port)
3130 throws IOException {
3131 final int maxTimeMs = 10000;
3132 final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3133 IOException savedException = null;
3134 LOG.info("Waiting for server at " + host + ":" + port);
3135 for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3136 try {
3137 Socket sock = new Socket(InetAddress.getByName(host), port);
3138 sock.close();
3139 savedException = null;
3140 LOG.info("Server at " + host + ":" + port + " is available");
3141 break;
3142 } catch (UnknownHostException e) {
3143 throw new IOException("Failed to look up " + host, e);
3144 } catch (IOException e) {
3145 savedException = e;
3146 }
3147 Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3148 }
3149
3150 if (savedException != null) {
3151 throw savedException;
3152 }
3153 }
3154
3155
3156
3157
3158
3159
  /**
   * Creates a pre-split table for load testing with the given compression and
   * block encoding on a single column family.
   *
   * @return the number of regions the table was created with
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd);
  }
3169
3170
3171
3172
3173
3174
  /**
   * Creates a pre-split table for load testing. The number of splits is
   * derived from the live region server count times a configurable
   * regions-per-server value. If the table already exists, that is logged and
   * tolerated.
   *
   * @return the number of regions the table was created with, or 0 if the
   *         table already existed
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Size the split count from the live cluster: regionsPerServer regions
      // for each live region server.
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      int regionsPerServer = conf.getInt(REGIONS_PER_SERVER_KEY, DEFAULT_REGIONS_PER_SERVER);
      totalNumberOfRegions = numberOfServers * regionsPerServer;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
          "pre-splitting table into " + totalNumberOfRegions + " regions " +
          "(default regions per server: " + regionsPerServer + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
          totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      // Pre-existing table is fine for load testing; keep going.
      LOG.warn("Table " + desc.getTableName() +
          " already exists, continuing");
    } finally {
      admin.close();
    }
    return totalNumberOfRegions;
  }
3213
3214 public static int getMetaRSPort(Configuration conf) throws IOException {
3215 HTable table = new HTable(conf, TableName.META_TABLE_NAME);
3216 HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
3217 table.close();
3218 return hloc.getPort();
3219 }
3220
3221
3222
3223
3224
3225
3226
3227 public void assertRegionOnServer(
3228 final HRegionInfo hri, final ServerName server,
3229 final long timeout) throws IOException, InterruptedException {
3230 long timeoutTime = System.currentTimeMillis() + timeout;
3231 while (true) {
3232 List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3233 if (regions.contains(hri)) return;
3234 long now = System.currentTimeMillis();
3235 if (now > timeoutTime) break;
3236 Thread.sleep(10);
3237 }
3238 fail("Could not find region " + hri.getRegionNameAsString()
3239 + " on server " + server);
3240 }
3241
3242
3243
3244
3245
  /**
   * Blocks until the given region is online on the given server AND verifies
   * it is not simultaneously online on any other live region server (i.e. not
   * double-assigned). Fails the test if the region does not appear on the
   * expected server within the timeout.
   *
   * @param hri region expected only on the server
   * @param server server expected to host the region
   * @param timeout how long to keep polling, in milliseconds
   */
  public void assertRegionOnlyOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) {
        // Found on the expected server; now make sure no other server also
        // claims it.
        List<JVMClusterUtil.RegionServerThread> rsThreads =
            getHBaseCluster().getLiveRegionServerThreads();
        for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
          HRegionServer rs = rsThread.getRegionServer();
          if (server.equals(rs.getServerName())) {
            continue;
          }
          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
          for (HRegion r: hrs) {
            assertTrue("Region should not be double assigned",
                r.getRegionId() != hri.getRegionId());
          }
        }
        return;
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
        + " on server " + server);
  }
3275
3276 public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
3277 throws IOException {
3278 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
3279 htd.addFamily(hcd);
3280 HRegionInfo info =
3281 new HRegionInfo(TableName.valueOf(tableName), null, null, false);
3282 HRegion region =
3283 HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
3284 return region;
3285 }
3286
  /** Overrides the filesystem URI this utility uses. */
  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }
3290
3291
3292
3293
  /**
   * Wrapper around {@link Waiter#waitFor(Configuration, long, Predicate)}
   * using this utility's configuration.
   * @return the elapsed time, or -1 if the predicate never became true
   */
  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, predicate);
  }
3298
3299
3300
3301
  /**
   * Wrapper around {@link Waiter#waitFor(Configuration, long, long, Predicate)}
   * using this utility's configuration.
   * @return the elapsed time, or -1 if the predicate never became true
   */
  public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, interval, predicate);
  }
3306
3307
3308
3309
  /**
   * Wrapper around {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}
   * using this utility's configuration.
   * @return the elapsed time, or -1 if the predicate never became true
   */
  public <E extends Exception> long waitFor(long timeout, long interval,
      boolean failIfTimeout, Predicate<E> predicate) throws E {
    return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
  }
3314
3315
3316
3317
  /**
   * Returns a {@link Predicate} that is true once the master's assignment
   * manager reports no regions in transition.
   */
  public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
            .getAssignmentManager().getRegionStates();
        return !regionStates.isRegionsInTransition();
      }
    };
  }
3328
3329
3330
3331
  /**
   * Returns a {@link Predicate} that is true once the given table is enabled.
   * @param tableName table to check on each evaluation
   */
  public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return getHBaseAdmin().isTableEnabled(tableName);
      }
    };
  }
3340
3341
3342
3343
3344
3345
  /**
   * Creates column descriptors covering every combination of supported
   * compression, data block encoding and bloom filter type, with no name
   * prefix.
   * @return the generated column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }
3349
3350
3351
3352
3353
3354
3355
3356 public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
3357 List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
3358 long familyId = 0;
3359 for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
3360 for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
3361 for (BloomType bloomType: BloomType.values()) {
3362 String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
3363 HColumnDescriptor htd = new HColumnDescriptor(name);
3364 htd.setCompressionType(compressionType);
3365 htd.setDataBlockEncoding(encodingType);
3366 htd.setBloomFilterType(bloomType);
3367 htds.add(htd);
3368 familyId++;
3369 }
3370 }
3371 }
3372 return htds;
3373 }
3374
3375
3376
3377
3378
  /**
   * Probes which of the known compression algorithms are actually usable in
   * this JVM by attempting to obtain a compressor for each.
   * @return the algorithms whose native/library support is present
   */
  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // Deliberately swallowed: any failure (missing native lib, link
        // error, etc.) just means this algorithm is unavailable here.
      }
    }
    return supportedAlgos.toArray(new Compression.Algorithm[0]);
  }
3393 }