/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

/**
 * Facility for testing HBase. Create an instance and keep it around while
 * testing; it is meant to be your one-stop shop for anything you might need
 * while testing. Manages one cluster (zookeeper, dfs, hbase) at a time only.
 */
public class HBaseTestingUtility {
  private static final Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
  private Configuration conf;
  private MiniZooKeeperCluster zkCluster = null;

  /**
   * The default number of regions per regionserver when creating a
   * pre-split table.
   */
  private static int DEFAULT_REGIONS_PER_SERVER = 5;

  /**
   * Set if we were passed a zkCluster.  If so, we won't shut down zk as
   * part of general shutdown.
   */
  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** Directory where this instance of HBaseTestingUtility puts its data. */
  private File dataTestDir = null;

  /**
   * Directory (a subdirectory of dataTestDir) used by the dfs cluster, if any.
   */
  private File clusterTestDir = null;

  /**
   * System property key to get test directory value.
   * Name is as it is because mini dfs has hard-codings to put test data here.
   * It should NOT be used directly in HBase, as it's a property used in
   * mini dfs.
   * @deprecated can be used only with mini dfs
   */
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  /**
   * System property key to get base test directory value
   */
  public static final String BASE_TEST_DIRECTORY_KEY =
    "test.build.data.basedirectory";

  /**
   * Default base directory for test output.
   */
  public static final String DEFAULT_BASE_TEST_DIRECTORY = "target/test-data";

  /** Compression algorithms to use in parameterized JUnit 4 tests */
  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  /** This is for unit tests parameterized with a single boolean. */
  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Boolean.FALSE },
      { Boolean.TRUE }
    });

  /** Compression algorithms to use in testing */
  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
      Compression.Algorithm.NONE, Compression.Algorithm.GZ
    };

  /**
   * Create all combinations of Bloom filters and compression algorithms for
   * testing.
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    this.conf = conf;

    // An hbase checksum verification failure will cause unit tests to fail.
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
    setHDFSClientRetryProperty();
  }

  private void setHDFSClientRetryProperty() {
    this.conf.setInt("hdfs.client.retries.number", 1);
    HBaseFileSystem.setRetryCounts(conf);
  }
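
  /*
   * A minimal sketch of typical use from a JUnit 4 test (illustrative only;
   * the test class layout and method names below are assumptions, not part
   * of this utility):
   *
   *   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   *
   *   @BeforeClass
   *   public static void setUpBeforeClass() throws Exception {
   *     TEST_UTIL.startMiniCluster();
   *   }
   *
   *   @AfterClass
   *   public static void tearDownAfterClass() throws Exception {
   *     TEST_UTIL.shutdownMiniCluster();
   *   }
   */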

  /**
   * Returns this class's instance of {@link Configuration}.  Be careful how
   * you use the returned Configuration since {@link HConnection} instances
   * can be shared; the map of HConnections is keyed by the Configuration.
   * Rather than using the return value directly, it is usually best to make
   * a copy and use that:
   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
   * @return Instance of Configuration.
   */
  public Configuration getConfiguration() {
    return this.conf;
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  /**
   * @return Where to write test data on the local filesystem; usually
   * {@link #DEFAULT_BASE_TEST_DIRECTORY}.  Should not be used by the unit
   * tests, hence it's private.  Unit tests use a subdirectory of this
   * directory.
   * @see #setupDataTestDir()
   */
  private Path getBaseTestDir() {
    String pathName = System.getProperty(
        BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);

    return new Path(pathName);
  }

  /**
   * @return Where to write test data on the local filesystem, specific to
   * this test.  Useful for tests that do not use a cluster.
   * Creates it if it does not exist already.
   * @see #getTestFileSystem()
   */
  public Path getDataTestDir() {
    if (dataTestDir == null) {
      setupDataTestDir();
    }
    return new Path(dataTestDir.getAbsolutePath());
  }

  /**
   * @return Where the DFS cluster will write data on the local filesystem.
   * Creates it if it does not exist already.
   * @see #getTestFileSystem()
   */
  public Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  /**
   * @param subdirName the name of the subdirectory wanted
   * @return Path to a subdirectory of the test data directory, specific to
   * this test.  Useful for tests that do not use a cluster.
   */
  public Path getDataTestDir(final String subdirName) {
    return new Path(getDataTestDir(), subdirName);
  }

  /**
   * Home our test data in a directory under {@link #getBaseTestDir()}.
   * The directory gets a random name so many concurrent tests can run.
   * This method also amends the {@link #TEST_DIRECTORY_KEY} System property
   * since that is what the mini dfs cluster bases its data dir on.  Modifying
   * a System property is not the way to support concurrent instances, but
   * the mini dfs cluster works single-instance only, so nothing can be done
   * about it at the moment.
   */
  private void setupDataTestDir() {
    if (dataTestDir != null) {
      LOG.warn("Data test dir already setup in " +
          dataTestDir.getAbsolutePath());
      return;
    }

    String randomStr = UUID.randomUUID().toString();
    Path testPath = new Path(getBaseTestDir(), randomStr);

    dataTestDir = new File(testPath.toString()).getAbsoluteFile();
    dataTestDir.deleteOnExit();

    createSubDirAndSystemProperty(
        "hadoop.log.dir",
        testPath, "hadoop-log-dir");

    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
    // we want our own value to ensure uniqueness on the same machine.
    createSubDirAndSystemProperty(
        "hadoop.tmp.dir",
        testPath, "hadoop-tmp-dir");

    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
    createSubDir(
        "mapred.local.dir",
        testPath, "mapred-local-dir");

    createSubDirAndSystemProperty(
        "mapred.working.dir",
        testPath, "mapred-working-dir");

    createSubDir(
        "hbase.local.dir",
        testPath, "hbase-local-dir");
  }

  private void createSubDir(String propertyName, Path parent, String subDirName) {
    Path newPath = new Path(parent, subDirName);
    File newDir = new File(newPath.toString()).getAbsoluteFile();
    newDir.deleteOnExit();
    conf.set(propertyName, newDir.getAbsolutePath());
  }

  private void createSubDirAndSystemProperty(
      String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // There is already a value set, for example by surefire; use it as-is.
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
          sysValue + " so I do NOT create it in " + dataTestDir.getAbsolutePath());
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
            propertyName + " property value differs in configuration and system: " +
            "Configuration=" + confValue + " while System=" + sysValue +
            " Erasing configuration value by system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      // Not set, so create it as a subdirectory of the test dir.
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  /**
   * Creates a directory for the DFS cluster, under the test data directory.
   */
  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      LOG.warn("Cluster test dir already setup in " +
          clusterTestDir.getAbsolutePath());
      return;
    }

    // Using randomUUID ensures that multiple clusters can be launched by
    // the same test, if it stops and starts them.
    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
    // Have it cleaned up on exit
    clusterTestDir.deleteOnExit();
  }

  /**
   * @throws IOException if a cluster is already running.
   */
  public void isRunningCluster() throws IOException {
    if (dfsCluster == null) return;
    throw new IOException("Cluster already running at " +
        this.clusterTestDir);
  }

  /**
   * Start a minidfscluster.
   * @param servers How many DNs to start.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  /**
   * Start a minidfscluster.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.  If you start MiniDFSCluster
   * without host names, all instances of the datanodes will have the same
   * host name.
   * @param hosts hostnames DNs to run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
      throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  /**
   * Start a minidfscluster.  Can only create one.
   * @param servers How many DNs to start.
   * @param hosts hostnames DNs to run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
      throws Exception {

    // Check that there is not already a cluster running
    isRunningCluster();

    // Initialize the local directory used by the MiniDFS
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }

    // We have to set this property as it is used by MiniCluster
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.toString());

    // "test.cache.data" is another deprecated data-dir property the mini
    // cluster may consult; point it at the same place.
    System.setProperty("test.cache.data", this.clusterTestDir.toString());

    // Ok, now we can start.
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, null, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());
    // Do the old-style key too, just to be safe.
    this.conf.set("fs.default.name", fs.getUri().toString());

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    return this.dfsCluster;
  }

  /**
   * Shuts down the instance created by a call to
   * {@link #startMiniDFSCluster(int)}, or does nothing.
   * @throws Exception
   */
  public void shutdownMiniDFSCluster() throws Exception {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
    }
  }

  /**
   * Call this if you only want a zk cluster.
   * @throws Exception
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  /**
   * Call this if you only want a zk cluster.
   * @param zooKeeperServerNum number of zk servers to start
   * @throws Exception
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    File zkClusterFile = new File(getClusterTestDir().toString());
    return startMiniZKCluster(zkClusterFile, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
      throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
      throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
        Integer.toString(clientPort));
    return this.zkCluster;
  }

  /**
   * Shuts down the zk cluster created by a call to
   * {@link #startMiniZKCluster()}, or does nothing.
   * @throws IOException
   * @see #startMiniZKCluster()
   */
  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @throws Exception
   * @return Mini hbase cluster instance created.
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }
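
  /*
   * A sketch of the full lifecycle this method is normally part of (the
   * table and family names are illustrative assumptions):
   *
   *   HBaseTestingUtility util = new HBaseTestingUtility();
   *   util.startMiniCluster();                       // zk + dfs + hbase
   *   HTable table = util.createTable(Bytes.toBytes("t"), Bytes.toBytes("f"));
   *   util.loadTable(table, Bytes.toBytes("f"));     // 26^3 rows, aaa..zzz
   *   table.close();
   *   util.shutdownMiniCluster();                    // always shut down last
   */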

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under System property test.build.data.
   * Directory is cleaned up on exit.
   * @param numSlaves Number of slaves to start up.  We'll start this many
   * datanodes and regionservers.  If numSlaves is &gt; 1, then make sure
   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver);
   * otherwise you get bind errors.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
      throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @param numMasters Number of masters to start up.
   * @param numSlaves Number of slaves (regionservers and datanodes) to start.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * Modifies Configuration.  Homes the cluster data directory under a random
   * subdirectory in a directory under System property test.build.data.
   * Directory is cleaned up on exit.
   * @param numMasters Number of masters to start up.  If numMasters &gt; 1,
   * you can find the active/primary master with
   * {@link MiniHBaseCluster#getMaster()}.
   * @param numSlaves Number of regionservers to start up.  If dataNodeHosts
   * is null, this also indicates the number of datanodes to start; otherwise
   * the number of datanodes is dataNodeHosts.length.
   * If numSlaves is &gt; 1, then make sure hbase.regionserver.info.port is -1
   * (i.e. no ui per regionserver); otherwise you get bind errors.
   * @param dataNodeHosts hostnames DNs to run on.  Useful if you want to run
   * datanodes on distinct hosts for things like HDFS block location
   * verification.  If you start MiniDFSCluster without host names, all
   * instances of the datanodes will have the same host name.
   * @throws Exception
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts)
      throws Exception {
    int numDataNodes = numSlaves;
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // If we already put up a cluster, fail.
    isRunningCluster();

    // Bring up mini dfs cluster first.
    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    // Start up a zk cluster if one was not passed in.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Start the MiniHBaseCluster
    return startMiniHBaseCluster(numMasters, numSlaves);
  }

  /**
   * Starts up the mini hbase cluster.  Usually used after a call to
   * {@link #startMiniCluster(int, int)} when doing stepped startup of
   * clusters.  Usually you won't want this; you'll usually want
   * {@link #startMiniCluster()}.
   * @param numMasters
   * @param numSlaves
   * @return Reference to the mini hbase cluster.
   * @throws IOException
   * @throws InterruptedException
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
      final int numSlaves)
      throws IOException, InterruptedException {
    // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
    createRootDir();

    // These settings make the master wait until this exact number of
    // regionservers has connected.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster = new MiniHBaseCluster(c, numMasters, numSlaves);
    // Don't leave here till we've done a successful scan of the .META.
    HTable t = new HTable(c, HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin(); // create the hbaseAdmin immediately
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster)this.hbaseCluster;
  }

  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
   * @param servers number of region servers
   * @throws IOException
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the .META.
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  /**
   * @return Current mini hbase cluster.  Only has something in it after a
   * call to {@link #startMiniCluster()}.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster)this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
        MiniHBaseCluster.class.getName());
  }

  /**
   * Stops mini hbase, zk, and hdfs clusters.
   * @throws Exception
   * @see #startMiniCluster(int)
   */
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    // Clean up our directory.
    if (this.clusterTestDir != null && this.clusterTestDir.exists()) {
      // Need to use deleteDirectory because File.delete requires the dir be empty.
      if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
          new Path(this.clusterTestDir.toString()))) {
        LOG.warn("Failed delete of " + this.clusterTestDir.toString());
      }
      this.clusterTestDir = null;
    }
    LOG.info("Minicluster is down");
  }

  /**
   * Shutdown the HBase mini cluster.  Does not shut down zk or dfs if running.
   * @throws IOException
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close();
      hbaseAdmin = null;
    }
    // Unset the configuration for MIN and MAX RS to start.
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shut down zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }
  }

  /**
   * Returns the path to the default root dir the minicluster uses.
   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }

  /**
   * Creates an hbase rootdir in the user's home directory, plus the hbase
   * version file.  Normally you won't need this; the root dir is created for
   * you as part of mini cluster startup.  Only use it when doing manual
   * operation.
   * @return Fully qualified path to the hbase root dir
   * @throws IOException
   */
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  /**
   * Flushes all caches in the mini hbase cluster.
   * @throws IOException
   */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /**
   * Flushes all caches of the given table in the mini hbase cluster.
   * @throws IOException
   */
  public void flush(byte [] tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /**
   * Compact all regions in the mini hbase cluster.
   * @throws IOException
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /**
   * Compact all of a table's regions in the mini hbase cluster.
   * @throws IOException
   */
  public void compact(byte [] tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family)
      throws IOException {
    return createTable(tableName, new byte[][]{family});
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    // HBaseAdmin only waits for regions to appear in META; wait until they
    // are actually assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }
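
  /*
   * A sketch of creating a pre-split table with the overload above (names
   * and the key range are illustrative assumptions):
   *
   *   byte[][] fams = { Bytes.toBytes("f1"), Bytes.toBytes("f2") };
   *   HTable t = util.createTable(Bytes.toBytes("presplit"), fams,
   *       3,                            // max versions per family
   *       Bytes.toBytes("aaa"),         // first split boundary
   *       Bytes.toBytes("zzz"),         // last split boundary
   *       10);                          // total number of regions
   */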

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param c Configuration to use
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      desc.addFamily(new HColumnDescriptor(family));
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in META; wait until they
    // are actually assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param c Configuration to use
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in META; wait until they
    // are actually assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in META; wait until they
    // are actually assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @param blockSize
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in META; wait until they
    // are actually assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions max versions, per family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    // HBaseAdmin only waits for regions to appear in META; wait until they
    // are actually assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Drop an existing table, disabling it first if necessary.
   * @param tableName existing table
   */
  public void deleteTable(byte[] tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + Bytes.toString(tableName) + " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }

  /**
   * Truncate an existing table by scanning it and issuing a delete for each
   * row read.
   * @param tableName existing table
   * @return HTable for the table
   * @throws IOException
   */
  public HTable truncateTable(byte [] tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    // Close the scanner we iterated; the original code leaked it by opening
    // and closing a second, unused scanner instead.
    resScan.close();
    return table;
  }

  /**
   * Load table with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Family
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }
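
  /*
   * A sketch of pairing loadTable with countRows in a test assertion (the
   * table handle "t" is assumed to come from createTable above):
   *
   *   int loaded = util.loadTable(t, Bytes.toBytes("f"));
   *   assertEquals(loaded, util.countRows(t));   // expects 26 * 26 * 26 rows
   */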

  /**
   * Load a table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Array of families to load
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final HTable t, final byte[][] f) throws IOException {
    return loadTable(t, f, null);
  }

  /**
   * Load a table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Array of families to load
   * @param value the value of the cells.  If null is passed, the row key is
   * used as value.
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          for (int i = 0; i < f.length; i++) {
            put.add(f[i], null, value != null ? value : k);
          }
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

  /**
   * A tracker for tracking and validating table rows generated with
   * {@link HBaseTestingUtility#loadTable(HTable, byte[])}.
   */
  public static class SeenRowTracker {
    int dim = 'z' - 'a' + 1;
    int[][][] seenRows = new int[dim][dim][dim]; // count of times each row is seen
    byte[] startRow;
    byte[] stopRow;

    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
      this.startRow = startRow;
      this.stopRow = stopRow;
    }

    int i(byte b) {
      return b - 'a';
    }

    public void addRow(byte[] row) {
      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
    }

    /**
     * Validate that every row in [startRow, stopRow) was seen exactly once,
     * and that all other rows were not seen at all.
     */
    public void validate() {
      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
            int count = seenRows[i(b1)][i(b2)][i(b3)];
            int expectedCount = 0;
            if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
                && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
              expectedCount = 1;
            }
            if (count != expectedCount) {
              String row = new String(new byte[] {b1,b2,b3});
              throw new RuntimeException("Row:" + row + " has a seen count of "
                  + count + " instead of " + expectedCount);
            }
          }
        }
      }
    }
  }
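
  /*
   * A sketch of how SeenRowTracker is typically driven by a scan over a
   * table loaded with loadTable (the scan bounds are illustrative
   * assumptions):
   *
   *   SeenRowTracker tracker =
   *       new SeenRowTracker(Bytes.toBytes("bbb"), Bytes.toBytes("yyy"));
   *   ResultScanner rs = t.getScanner(new Scan(Bytes.toBytes("bbb"),
   *       Bytes.toBytes("yyy")));
   *   for (Result r : rs) {
   *     tracker.addRow(r.getRow());
   *   }
   *   rs.close();
   *   tracker.validate();   // throws if any row was missed or seen twice
   */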

  /**
   * Load a region with rows from 'aaa' to 'zzz'.
   * @param r Region
   * @param f Family
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadRegion(final HRegion r, final byte[] f)
      throws IOException {
    return loadRegion(r, f, false);
  }

  /**
   * Load a region with rows from 'aaa' to 'zzz'.
   * @param r Region
   * @param f Family
   * @param flush flush the cache after each first-letter batch if true
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
      throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          if (r.getLog() == null) put.setWriteToWAL(false);
          r.put(put);
          rowCount++;
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }

  /**
   * Return the number of rows in the given table.
   */
  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  public int countRows(final HTable table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  /**
   * Return an md5 digest of the entire contents of a table.
   */
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // Render the digest bytes themselves; MessageDigest#toString() would only
    // describe the digest object, not its value.
    return Bytes.toStringBinary(digest.digest());
  }

  /**
   * Creates many regions with names "aaa" to "zzz".
   *
   * @param table  The table to use for the data.
   * @param columnFamily  The family to insert the data into.
   * @return count of regions created.
   * @throws IOException When creating the regions fails.
   */
  public int createMultiRegions(HTable table, byte[] columnFamily)
      throws IOException {
    return createMultiRegions(table, columnFamily, true);
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };

  /**
   * Creates many regions with names "aaa" to "zzz".
   * @param table  The table to use for the data.
   * @param columnFamily  The family to insert the data into.
   * @param cleanupFS  True if the original region should be removed from the FS.
   * @return count of regions created.
   * @throws IOException When creating the regions fails.
   */
  public int createMultiRegions(HTable table, byte[] columnFamily, boolean cleanupFS)
      throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily, KEYS, cleanupFS);
  }

  /**
   * Creates the specified number of regions in the specified table.
   * @param c
   * @param table
   * @param family
   * @param numRegions
   * @return count of regions created.
   * @throws IOException
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte [] family, int numRegions)
      throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte [] startKey = Bytes.toBytes("aaaaa");
    byte [] endKey = Bytes.toBytes("zzzzz");
    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] regionStartKeys = new byte[splitKeys.length + 1][];
    for (int i = 0; i < splitKeys.length; i++) {
      regionStartKeys[i + 1] = splitKeys[i];
    }
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte [][] startKeys) throws IOException {
    return createMultiRegions(c, table, columnFamily, startKeys, true);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte [][] startKeys, boolean cleanupFS)
      throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }
    // Collect the pre-existing meta rows for this table (the single region
    // the table was created with); we replace them wholesale below.
    List<byte[]> rows = getMetaTableRows(htd.getName());
    String regionToDeleteInFS = table
        .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
        .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // Add the custom regions.
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getTableName(),
          startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
          Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegions: inserted " + hri.toString());
      newRegions.add(hri);
      count++;
    }
    // Now delete the meta rows for the original region.
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
          Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    if (cleanupFS) {
      // Remove the original region's directory from the filesystem too.
      Path tableDir = new Path(getDefaultRootDirPath().toString()
          + System.getProperty("file.separator") + htd.getNameAsString()
          + System.getProperty("file.separator") + regionToDeleteInFS);
      FileSystem.get(c).delete(tableDir, true);
    }
    // Flush the cache of region locations.
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    // Assign all the new regions IF the table is enabled.
    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for (HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }

  /**
   * Create rows in META for regions of the specified table with the specified
   * start keys.  The first startKey should be a 0 length byte array if you
   * want to form a proper range of regions.
   * @param conf
   * @param htd
   * @param startKeys
   * @return list of region info for the regions added to meta
   * @throws IOException
   */
  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final HTableDescriptor htd, byte [][] startKeys)
      throws IOException {
    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // Add the custom regions.
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
          startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
          Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegionsInMeta: inserted " + hri.toString());
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

  /**
   * Returns all rows from the .META. table.
   *
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows() throws IOException {
    // TODO: Redo using MetaReader class.
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

  /**
   * Returns all rows from the .META. table for a given user table.
   *
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
    // TODO: Redo using MetaReader class.
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      byte[] val = result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
      if (val == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        // TODO figure out what to do for this hosed case.
        continue;
      }
      HRegionInfo info = Writables.getHRegionInfo(val);
      if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
        LOG.info("getMetaTableRows: row -> " +
            Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

  /**
   * Tool to get the reference to the region server object that holds the
   * region of the specified user table.
   * It first searches for the meta rows that contain the region of the
   * specified table, then gets the index of that RS, and finally retrieves
   * the RS's reference.
   * @param tableName user table to look up in .META.
   * @return region server that holds it, null if the row doesn't exist
   * @throws IOException
   */
  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
      throws IOException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + metaRows.size() + " rows for table " +
        Bytes.toString(tableName));
    byte [] firstrow = metaRows.get(0);
    LOG.debug("FirstRow=" + Bytes.toString(firstrow));
    int index = getMiniHBaseCluster().getServerWith(firstrow);
    return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
  }

  /**
   * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>'s.
   *
   * @throws IOException When starting the cluster fails.
   */
  public void startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
  }

  /**
   * Starts a <code>MiniMRCluster</code>.
   *
   * @param servers  The number of <code>TaskTracker</code>'s to start.
   * @throws IOException When starting the cluster fails.
   */
  public void startMiniMapReduceCluster(final int servers) throws IOException {
    LOG.info("Starting mini mapreduce cluster...");
    if (dataTestDir == null) {
      setupDataTestDir();
    }
    // These are needed for the new and improved Map/Reduce framework.
    Configuration c = getConfiguration();
    String logDir = c.get("hadoop.log.dir");
    String tmpDir = c.get("hadoop.tmp.dir");
    if (logDir == null) {
      logDir = tmpDir;
    }
    System.setProperty("hadoop.log.dir", logDir);
    c.set("mapred.output.dir", tmpDir);

    // Tasks were getting killed for high virtual memory usage; raise the
    // allowed vmem/pmem ratio so test processes survive.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    mrCluster = new MiniMRCluster(0, 0, servers,
        FileSystem.get(conf).getUri().toString(), 1, null, null, null, new JobConf(conf));

    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
    }
    jobConf.set("mapred.local.dir",
        conf.get("mapred.local.dir")); // Hadoop MiniMR overwrites this while it should not

    LOG.info("Mini mapreduce cluster started");
    JobConf mrClusterJobConf = mrCluster.createJobConf();
    c.set("mapred.job.tracker", mrClusterJobConf.get("mapred.job.tracker"));
    // The settings below are for MRv2 (YARN) support.
    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = mrClusterJobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
    if (historyAddress != null) {
      conf.set("mapreduce.jobhistory.address", historyAddress);
    }
    String schedulerAddress =
        mrClusterJobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }

  /**
   * Stops the previously started <code>MiniMRCluster</code>.
   */
  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }
    // Restore configuration to point to local jobtracker.
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }

  /**
   * Switches the logger for the given class to DEBUG level.
   *
   * @param clazz  The class for which to switch to debug logging.
   */
  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }

  /**
   * Expire the Master's ZooKeeper session.
   * @throws Exception
   */
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }

  /**
   * Expire a region server's ZooKeeper session.
   * @param index which RS
   * @throws Exception
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    // Decrement the count for this.conf, for a newly spawned master;
    // this.hbaseCluster shares this configuration too.
    decrementMinRegionServerCount(getConfiguration());

    // Each master thread keeps its own copy of the configuration.
    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
          Math.max(currentCount - 1, 1));
    }
  }

  /**
   * Expire a ZooKeeper session as recommended in the ZooKeeper documentation
   * http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A4
   *
   * @param nodeZK the ZK watcher to expire
   * @param checkStatus true to check afterwards that we can create an HTable
   *                    with the current configuration
   */
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
      throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    int sessionTimeout = 500;
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Expiry is asynchronous, so we create a monitor watcher first to be
    // sure the expiration event has actually been sent.  If our monitor
    // receives the event, other watchers on the same machine will get it
    // as well.  Closing the expired connection is asynchronous too, hence
    // the extra monitor connection below.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
        1000, new org.apache.zookeeper.Watcher() {
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event=" + watchedEvent);
      }
    }, sessionID, password);

    // Make the session expire by opening and closing a second connection
    // with the same session id and password.
    ZooKeeper newZK = new ZooKeeper(quorumServers,
        sessionTimeout, EmptyWatcher.instance, sessionID, password);
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // Now close the monitor and wait, to be sure the clients saw the event.
    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), HConstants.META_TABLE_NAME).close();
    }
  }
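
  /*
   * A sketch of how a test typically forces a regionserver to lose its ZK
   * session and then waits for recovery (the server index and table name are
   * illustrative assumptions):
   *
   *   util.expireRegionServerSession(0);
   *   // ... then assert the cluster reassigns that server's regions, e.g.
   *   util.waitTableAvailable(Bytes.toBytes("myTable"), 30000);
   */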

  /**
   * Get the Mini HBase cluster.
   *
   * @return hbase cluster
   * @see #getHBaseClusterInterface()
   */
  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

  /**
   * Returns the HBaseCluster instance.
   * <p>The returned object can be any of the subclasses of HBaseCluster, and
   * tests referring to this should not assume that the cluster is a mini
   * cluster or a distributed one.  If the test only works on a mini cluster,
   * use {@link #getMiniHBaseCluster()} instead, without the need to type-cast.
   */
  public HBaseCluster getHBaseClusterInterface() {
    // Implementation note: we should rename this method #getHBaseCluster(),
    // but that would require refactoring 90+ calls.
    return hbaseCluster;
  }

  /**
   * Returns an HBaseAdmin instance.
   * This instance is shared between users of this HBaseTestingUtility
   * instance.  Don't close it; it is closed automatically when the cluster
   * shuts down.
   *
   * @return The HBaseAdmin instance.
   * @throws IOException
   */
  public synchronized HBaseAdmin getHBaseAdmin()
      throws IOException {
    if (hbaseAdmin == null) {
      hbaseAdmin = new HBaseAdmin(new Configuration(getConfiguration()));
    }
    return hbaseAdmin;
  }

  private HBaseAdmin hbaseAdmin = null;

  /**
   * Closes the named region.
   *
   * @param regionName  The region to close.
   * @throws IOException
   */
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  /**
   * Closes the named region.
   *
   * @param regionName  The region to close.
   * @throws IOException
   */
  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }

  /**
   * Closes the region containing the given row.
   *
   * @param row  The row to find the containing region.
   * @param table  The table to find the region in.
   * @throws IOException
   */
  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  /**
   * Closes the region containing the given row.
   *
   * @param row  The row to find the containing region.
   * @param table  The table to find the region in.
   * @throws IOException
   */
  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
    if (dfsCluster != null && dfsCluster.isClusterUp()) {
      throw new IOException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }

  /**
   * @return True if we removed the test dirs
   * @throws IOException
   */
  public boolean cleanupTestDir() throws IOException {
    if (dataTestDir == null) {
      return false;
    } else {
      boolean ret = deleteDir(getDataTestDir());
      dataTestDir = null;
      return ret;
    }
  }

  /**
   * @param subdir Test subdir name.
   * @return True if we removed the test dir
   * @throws IOException
   */
  public boolean cleanupTestDir(final String subdir) throws IOException {
    if (dataTestDir == null) {
      return false;
    }
    return deleteDir(getDataTestDir(subdir));
  }

  /**
   * @param dir Directory to delete
   * @return True if we deleted it.
   * @throws IOException
   */
  public boolean deleteDir(final Path dir) throws IOException {
    FileSystem fs = getTestFileSystem();
    if (fs.exists(dir)) {
      // Delete the requested directory, not the whole test data dir.
      return fs.delete(dir, true);
    }
    return false;
  }

  public void waitTableAvailable(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!getHBaseAdmin().isTableAvailable(table)) {
      assertTrue("Timed out waiting for table to become available " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

  public void waitTableEnabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    // Keep waiting until the table is both available and enabled.
    while (!getHBaseAdmin().isTableAvailable(table) ||
           !getHBaseAdmin().isTableEnabled(table)) {
      assertTrue("Timed out waiting for table to become available and enabled " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

  /**
   * Make sure that at least the specified number of region servers
   * are running.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   * @throws IOException
   */
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }

    return startedServer;
  }

  /**
   * Make sure that at least the specified number of region servers
   * are running.  We don't count the ones that are currently stopping or
   * are stopped.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   * @throws IOException
   */
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
         getMiniHBaseCluster().getRegionServerThreads()) {

      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:" + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }

  /**
   * This method clones the passed <code>c</code> configuration, setting a new
   * user into the clone.  Use it when getting new instances of FileSystem.
   * Only works for DistributedFileSystem.
   * @param c Initial configuration
   * @param differentiatingSuffix Suffix to differentiate this user from others.
   * @return A new configuration instance with a different user set into it.
   * @throws IOException
   */
  public static User getDifferentUser(final Configuration c,
      final String differentiatingSuffix)
      throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    // Else distributed filesystem.  Make a new instance per daemon.
    String username = User.getCurrent().getName() +
        differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
        new String[]{"supergroup"});
    return user;
  }

  /**
   * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append it is
   * hard-coded to 5 and makes tests linger while block recovery is retried.
   * Done via reflection because the field is not publicly accessible.
   * @param stream A DFSClient.DFSOutputStream.
   * @param max the new maximum recovery error count
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
                stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }

  /**
   * Wait until all regions of a table have a server entry in .META., i.e.
   * are assigned.  Waits up to a default timeout of 60 seconds.
   * @param tableName the table name
   * @throws IOException
   */
  public void waitUntilAllRegionsAssigned(final byte[] tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }

  /**
   * Wait until all regions of a table are assigned, or fail after the given
   * timeout.
   * @param tableName the table name
   * @param timeout timeout, in milliseconds
   * @throws IOException
   */
  public void waitUntilAllRegionsAssigned(final byte[] tableName, final long timeout)
      throws IOException {
    long deadline = System.currentTimeMillis() + timeout;
    HTable meta = new HTable(getConfiguration(), HConstants.META_TABLE_NAME);
    try {
      while (true) {
        boolean allRegionsAssigned = true;
        Scan scan = new Scan();
        scan.addFamily(HConstants.CATALOG_FAMILY);
        ResultScanner s = meta.getScanner(scan);
        try {
          Result r;
          while ((r = s.next()) != null) {
            byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
            HRegionInfo info = Writables.getHRegionInfoOrNull(b);
            if (info != null && Bytes.equals(info.getTableName(), tableName)) {
              b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
              allRegionsAssigned &= (b != null);
            }
          }
        } finally {
          s.close();
        }
        if (allRegionsAssigned) {
          return;
        }
        long now = System.currentTimeMillis();
        if (now > deadline) {
          throw new IOException("Timeout waiting for all regions of " +
              Bytes.toStringBinary(tableName) + " to be assigned");
        }
        try {
          Thread.sleep(deadline - now < 200 ? deadline - now : 200);
        } catch (InterruptedException e) {
          throw new IOException(e);
        }
      }
    } finally {
      meta.close();
    }
  }

  /**
   * Do a small get/scan against one store.  This is required because Store
   * has no methods for querying itself directly and relies on StoreScanner.
   */
  public static List<KeyValue> getFromStoreFile(Store store,
      Get get) throws IOException {
    MultiVersionConsistencyControl.resetThreadReadPoint();
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()));

    List<KeyValue> result = new ArrayList<KeyValue>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // Verify that we are on the row we want.
      KeyValue kv = result.get(0);
      if (!Bytes.equals(kv.getRow(), get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }

  /**
   * Do a small get/scan against one store.  This is required because Store
   * has no methods for querying itself directly and relies on StoreScanner.
   */
  public static List<KeyValue> getFromStoreFile(Store store,
      byte [] row,
      NavigableSet<byte[]> columns
      ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }

  /**
   * Gets a ZooKeeperWatcher wired to an Abortable that fails fast on any
   * fatal ZK error.
   * @param TEST_UTIL the utility whose configuration to use
   */
  public static ZooKeeperWatcher getZooKeeperWatcher(
      HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
      IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
        "unittest", new Abortable() {
          boolean aborted = false;

          @Override
          public void abort(String why, Throwable e) {
            aborted = true;
            throw new RuntimeException("Fatal ZK error, why=" + why, e);
          }

          @Override
          public boolean isAborted() {
            return aborted;
          }
        });
    return zkw;
  }

  /**
   * Creates an assignment znode for the region and transitions it to the
   * OPENED state.
   * @param TEST_UTIL
   * @param region
   * @param serverName
   * @return the ZooKeeperWatcher used
   * @throws ZooKeeperConnectionException
   * @throws IOException
   * @throws KeeperException
   * @throws NodeExistsException
   */
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw, region
        .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
        version);
    return zkw;
  }

  /**
   * Assert that two sorted KeyValue lists are equal, reporting the first
   * position at which they differ.
   */
  public static void assertKVListsEqual(String additionalMsg,
      final List<KeyValue> expected,
      final List<KeyValue> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i;
    for (i = 0; i < minLen
        && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
        ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
          "Expected and actual KV arrays differ at position " + i + ": " +
          safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
          safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }

  private static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }

  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
        + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }

  /** Creates a random table with the given parameters */
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
        " regions, " + numFlushes + " storefiles per region, " +
        numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
        "\n");

    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    final byte[] tableNameBytes = Bytes.toBytes(tableName);

    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableNameBytes, cfBytes,
        maxVersions,
        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
        numRegions);
    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(HConstants.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(tableNameBytes);
      }
    }

    return table;
  }

  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;

  /**
   * Returns a random port.  These ports cannot be registered with IANA and
   * are intended for dynamic allocation.
   */
  public static int randomPort() {
    return MIN_RANDOM_PORT
        + new Random().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
  }

  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        port = 0;
      }
    } while (port == 0);
    return port;
  }
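
  /*
   * A sketch of using randomFreePort to avoid bind collisions between
   * concurrently running test JVMs (the property below is the standard
   * regionserver info-port key):
   *
   *   conf.setInt("hbase.regionserver.info.port",
   *       HBaseTestingUtility.randomFreePort());
   *
   * Note the inherent race: the port is only known to be free at probe time,
   * so callers should tolerate an occasional retry.
   */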

  public static void waitForHostPort(String host, int port)
      throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }

  /**
   * Creates a pre-split table for load testing.  If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      byte[] tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd);
  }

  /**
   * Creates a pre-split table for load testing.  If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Create a table with pre-split regions.  The number of splits is:
      //   region servers * regions per region server.
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
          "pre-splitting table into " + totalNumberOfRegions + " regions " +
          "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
          totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + Bytes.toStringBinary(desc.getName()) +
          " already exists, continuing");
    } finally {
      // Single close point for the admin; the old extra close after
      // createTable was redundant.
      admin.close();
    }
    return totalNumberOfRegions;
  }
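
  /*
   * A sketch of driving createPreSplitLoadTestTable (the table and family
   * names are illustrative assumptions):
   *
   *   int regions = HBaseTestingUtility.createPreSplitLoadTestTable(
   *       util.getConfiguration(),
   *       Bytes.toBytes("loadtest"), Bytes.toBytes("cf"),
   *       Compression.Algorithm.NONE, DataBlockEncoding.NONE);
   *   // regions == live regionservers * DEFAULT_REGIONS_PER_SERVER
   */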

  public static int getMetaRSPort(Configuration conf) throws IOException {
    HTable table = new HTable(conf, HConstants.META_TABLE_NAME);
    HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
    table.close();
    return hloc.getPort();
  }

  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(hcd);
    HRegionInfo info =
        new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
    HRegion region =
        HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

  /**
   * Create region split start keys between startKey and endKey.
   *
   * @param startKey
   * @param endKey
   * @param numRegions the number of regions to be created.  It has to be
   * greater than 3.
   * @return the region split start keys, beginning with the empty byte array
   */
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue(numRegions > 3);
    byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] result = new byte[tmpSplitKeys.length + 1][];
    for (int i = 0; i < tmpSplitKeys.length; i++) {
      result[i + 1] = tmpSplitKeys[i];
    }
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }

  /**
   * Create column descriptors for all combinations of the supported
   * compression algorithms, data block encodings, and Bloom filter types.
   * @return the list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

  /**
   * Create column descriptors for all combinations of the supported
   * compression algorithms, data block encodings, and Bloom filter types.
   * @param prefix family names prefix
   * @return the list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
    List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
    long familyId = 0;
    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
        for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          HColumnDescriptor htd = new HColumnDescriptor(name);
          htd.setCompressionType(compressionType);
          htd.setDataBlockEncoding(encodingType);
          htd.setBloomFilterType(bloomType);
          htds.add(htd);
          familyId++;
        }
      }
    }
    return htds;
  }

  /**
   * Get supported compression algorithms.
   * @return supported compression algorithms.
   */
  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // This algorithm is not available.
      }
    }
    return supportedAlgos.toArray(new Compression.Algorithm[0]);
  }
}