package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
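/**
 * Facility for tests: spins up mini DFS, ZooKeeper, MapReduce and HBase
 * clusters under a per-test data directory and offers helpers for creating,
 * loading and inspecting tables. A typical test might drive it like the
 * sketch below (all methods shown are defined in this class):
 *
 * <pre>
 *   HBaseTestingUtility util = new HBaseTestingUtility();
 *   util.startMiniCluster();
 *   HTable table = util.createTable(Bytes.toBytes("t"), Bytes.toBytes("f"));
 *   // ... exercise the table ...
 *   util.shutdownMiniCluster();
 * </pre>
 */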
public class HBaseTestingUtility {
  private static final Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
  private Configuration conf;
  private MiniZooKeeperCluster zkCluster = null;

  private static final int DEFAULT_REGIONS_PER_SERVER = 5;

  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  // Directory where we put the data for this instance of HBaseTestingUtility.
  private File dataTestDir = null;

  // Directory (usually a subdirectory of dataTestDir) used by the dfs cluster.
  private File clusterTestDir = null;

  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  // System property key for the base test directory.
  public static final String BASE_TEST_DIRECTORY_KEY =
    "test.build.data.basedirectory";

  // Default base directory for test output.
  public static final String DEFAULT_BASE_TEST_DIRECTORY = "target/test-data";

  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Boolean.FALSE },
      { Boolean.TRUE }
    });

  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
    Compression.Algorithm.NONE, Compression.Algorithm.GZ
  };

  // Produce the cross product of supported compression algorithms and bloom
  // filter types for parameterized tests.
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
    bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    this.conf = conf;

    // Make tests throw, rather than silently continue, on checksum failures.
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
    setHDFSClientRetryProperty();
  }

  private void setHDFSClientRetryProperty() {
    this.conf.setInt("hdfs.client.retries.number", 1);
    HBaseFileSystem.setRetryCounts(conf);
  }

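  /**
   * Returns this utility's shared {@link Configuration}. Changes made to it
   * are visible to clusters started later from this utility.
   */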
  public Configuration getConfiguration() {
    return this.conf;
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  private Path getBaseTestDir() {
    String pathName = System.getProperty(
      BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);

    return new Path(pathName);
  }

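  /**
   * Returns the per-test data directory, lazily creating a new random
   * subdirectory of the base test directory on first use.
   */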
  public Path getDataTestDir() {
    if (dataTestDir == null) {
      setupDataTestDir();
    }
    return new Path(dataTestDir.getAbsolutePath());
  }

  public Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  public Path getDataTestDir(final String subdirName) {
    return new Path(getDataTestDir(), subdirName);
  }

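  /**
   * Homes the test data under a random subdirectory of the base test
   * directory and points the Hadoop and MapReduce working directories at it,
   * so that parallel test runs do not collide.
   */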
  private void setupDataTestDir() {
    if (dataTestDir != null) {
      LOG.warn("Data test dir already setup in " +
        dataTestDir.getAbsolutePath());
      return;
    }

    String randomStr = UUID.randomUUID().toString();
    Path testPath = new Path(getBaseTestDir(), randomStr);

    dataTestDir = new File(testPath.toString()).getAbsoluteFile();
    dataTestDir.deleteOnExit();

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    createSubDir(
      "mapred.local.dir",
      testPath, "mapred-local-dir");

    createSubDirAndSystemProperty(
      "mapred.working.dir",
      testPath, "mapred-working-dir");

    createSubDir(
      "hbase.local.dir",
      testPath, "hbase-local-dir");
  }

  private void createSubDir(String propertyName, Path parent, String subDirName) {
    Path newPath = new Path(parent, subDirName);
    File newDir = new File(newPath.toString()).getAbsoluteFile();
    newDir.deleteOnExit();
    conf.set(propertyName, newDir.getAbsolutePath());
  }

  private void createSubDirAndSystemProperty(
    String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // The property is already set by the environment, so honor it rather
      // than creating a directory of our own.
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + ", so not creating it in " + dataTestDir.getAbsolutePath());
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          ". Overriding the configuration value with the system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      // Not set; create the subdirectory and propagate it as a system
      // property as well.
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  // Creates the directory used by the mini DFS cluster.
  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      LOG.warn("Cluster test dir already setup in " +
        clusterTestDir.getAbsolutePath());
      return;
    }

    // Using a random subdirectory means multiple clusters started by a single
    // test cannot collide on the same directory.
    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();

    clusterTestDir.deleteOnExit();
  }

  // Throws if a mini DFS cluster is already running; no-op otherwise.
  public void isRunningCluster() throws IOException {
    if (dfsCluster == null) return;
    throw new IOException("Cluster already running at " +
      this.clusterTestDir);
  }

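  /**
   * Starts a mini DFS cluster with the given number of datanodes; also called
   * by {@link #startMiniCluster()}. Shut it down again with
   * {@link #shutdownMiniDFSCluster()}.
   */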
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  public MiniDFSCluster startMiniDFSCluster(final String[] hosts)
    throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String[] hosts)
    throws Exception {
    // Fail fast if a cluster is already running.
    isRunningCluster();

    // Initialize the local directory used by the MiniDFSCluster.
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }

    // MiniDFSCluster and friends read their directories from these system
    // properties.
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.toString());
    System.setProperty("test.cache.data", this.clusterTestDir.toString());

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    // Set the just-started cluster as our filesystem, under both the new and
    // the old configuration key.
    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());
    this.conf.set("fs.default.name", fs.getUri().toString());

    // Wait for the cluster to be totally up.
    this.dfsCluster.waitClusterUp();

    return this.dfsCluster;
  }

  public void shutdownMiniDFSCluster() throws Exception {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
    }
  }

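  /**
   * Starts a mini ZooKeeper cluster with one server, rooted at the cluster
   * test directory. The chosen client port is written back into the
   * configuration under {@link HConstants#ZOOKEEPER_CLIENT_PORT}.
   */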
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
    throws Exception {
    File zkClusterFile = new File(getClusterTestDir().toString());
    return startMiniZKCluster(zkClusterFile, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
    throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
    int zooKeeperServerNum)
    throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }

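  /**
   * Starts up a minicluster of HBase, DFS and ZooKeeper; defaults to one
   * master and one regionserver/datanode.
   */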
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  public MiniHBaseCluster startMiniCluster(final int numSlaves)
    throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves)
    throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, final String[] dataNodeHosts)
    throws Exception {
    // If datanode hosts are given, they dictate the datanode count.
    int numDataNodes = numSlaves;
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
      numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // Fail fast if a cluster is already running.
    isRunningCluster();

    // Bring up the mini DFS cluster first, then ZooKeeper, then HBase.
    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    return startMiniHBaseCluster(numMasters, numSlaves);
  }

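  /**
   * Starts the mini HBase cluster on top of the already-running DFS and
   * ZooKeeper clusters, then blocks until the meta table is fully scannable
   * and an admin connection has been established.
   */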
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
    final int numSlaves)
    throws IOException, InterruptedException {
    // Now do the mini hbase cluster. Set the hbase.rootdir in config.
    createRootDir();

    // Unless the caller has set them already, make the master block until
    // exactly numSlaves region servers have checked in.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster = new MiniHBaseCluster(c, numMasters, numSlaves);
    // Don't leave here till we've done a successful scan of the meta table.
    HTable t = new HTable(c, HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin();
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster) this.hbaseCluster;
  }

  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the meta table.
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // Drain the scanner.
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster) this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
      MiniHBaseCluster.class.getName());
  }

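  /**
   * Stops the mini HBase cluster, the ZooKeeper cluster (if this utility
   * started it) and the DFS cluster, then removes the cluster test directory.
   */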
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    // Clean up our directory.
    if (this.clusterTestDir != null && this.clusterTestDir.exists()) {
      // Use deleteDirectory: File#delete requires that the directory is empty.
      if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
        new Path(this.clusterTestDir.toString()))) {
        LOG.warn("Failed delete of " + this.clusterTestDir.toString());
      }
      this.clusterTestDir = null;
    }
    LOG.info("Minicluster is down");
  }

  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close();
      hbaseAdmin = null;
    }
    // Unset the configuration for MIN and MAX RS to start.
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shut down zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }
  }

  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }

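  /**
   * Creates the HBase root directory on the test filesystem, stamps it with
   * the current file-format version and points hbase.rootdir at it.
   */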
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  public void flush(byte[] tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  public void compact(byte[] tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

  public HTable createTable(byte[] tableName, byte[] family)
    throws IOException {
    return createTable(tableName, new byte[][] { family });
  }

  public HTable createTable(byte[] tableName, byte[][] families)
    throws IOException {
    return createTable(tableName, families,
      new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions, byte[] startKey, byte[] endKey, int numRegions)
    throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    final Configuration c)
    throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      desc.addFamily(new HColumnDescriptor(family));
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    final Configuration c, int numVersions)
    throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
    throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions)
    throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions)
        .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    int[] numVersions)
    throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public void deleteTable(byte[] tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + Bytes.toString(tableName) + " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }

  public HTable truncateTable(byte[] tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan = table.getScanner(scan);
    resScan.close();
    return table;
  }

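  /**
   * Loads the table with rows keyed 'aaa' through 'zzz', one value per row,
   * and returns the number of rows written.
   */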
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

  public int loadTable(final HTable t, final byte[][] f) throws IOException {
    return loadTable(t, f, null);
  }

  public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          for (int i = 0; i < f.length; i++) {
            put.add(f[i], null, value != null ? value : k);
          }
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

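  /**
   * Tracks which of the three-letter rows in [startRow, stopRow) have been
   * seen, so a scan over data written by {@link #loadTable(HTable, byte[])}
   * can be validated for completeness and duplicates.
   */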
  public static class SeenRowTracker {
    int dim = 'z' - 'a' + 1;
    int[][][] seenRows = new int[dim][dim][dim];
    byte[] startRow;
    byte[] stopRow;

    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
      this.startRow = startRow;
      this.stopRow = stopRow;
    }

    int i(byte b) {
      return b - 'a';
    }

    public void addRow(byte[] row) {
      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
    }

    public void validate() {
      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
            int count = seenRows[i(b1)][i(b2)][i(b3)];
            int expectedCount = 0;
            if (Bytes.compareTo(new byte[] {b1, b2, b3}, startRow) >= 0
                && Bytes.compareTo(new byte[] {b1, b2, b3}, stopRow) < 0) {
              expectedCount = 1;
            }
            if (count != expectedCount) {
              String row = new String(new byte[] {b1, b2, b3});
              throw new RuntimeException("Row:" + row + " has a seen count of "
                + count + " instead of " + expectedCount);
            }
          }
        }
      }
    }
  }

  public int loadRegion(final HRegion r, final byte[] f)
    throws IOException {
    return loadRegion(r, f, false);
  }

  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
    throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          if (r.getLog() == null) put.setWriteToWAL(false);
          r.put(put);
          rowCount++;
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }

  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  public int countRows(final HTable table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

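  /**
   * Returns an MD5 digest over all row keys in the table, usable for checking
   * that two tables hold the same rows.
   */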
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // Return the digest bytes, not MessageDigest#toString(): toString() is
    // the same for every digest instance and would make all tables "equal".
    return Bytes.toStringBinary(digest.digest());
  }


  public int createMultiRegions(HTable table, byte[] columnFamily)
    throws IOException {
    return createMultiRegions(table, columnFamily, true);
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };

  public int createMultiRegions(HTable table, byte[] columnFamily, boolean cleanupFS)
    throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily, KEYS, cleanupFS);
  }


  public int createMultiRegions(final Configuration c, final HTable table,
    final byte[] family, int numRegions)
    throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] regionStartKeys = new byte[splitKeys.length + 1][];
    for (int i = 0; i < splitKeys.length; i++) {
      regionStartKeys[i + 1] = splitKeys[i];
    }
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
    final byte[] columnFamily, byte[][] startKeys) throws IOException {
    return createMultiRegions(c, table, columnFamily, startKeys, true);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
    final byte[] columnFamily, byte[][] startKeys, boolean cleanupFS)
    throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }
    // Find the current meta rows for the table; we are about to replace them
    // with hand-built regions below.
    List<byte[]> rows = getMetaTableRows(htd.getName());
    String regionToDeleteInFS = table
      .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
      .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // Insert the custom regions into meta.
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getTableName(),
        startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegions: inserted " + hri.toString());
      newRegions.add(hri);
      count++;
    }
    // Delete the original meta rows.
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    if (cleanupFS) {
      // Remove the original region's directory so scans do not trip over
      // stale store files from the pre-split region.
      Path tableDir = new Path(new Path(getDefaultRootDirPath(),
        htd.getNameAsString()), regionToDeleteInFS);
      FileSystem.get(c).delete(tableDir, true);
    }
    // Flush the client's cache of region locations.
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    // Assign all the new regions, if the table is enabled.
    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for (HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }


  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
    final HTableDescriptor htd, byte[][] startKeys)
    throws IOException {
    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
        startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegionsInMeta: inserted " + hri.toString());
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

  public List<byte[]> getMetaTableRows() throws IOException {
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

  public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      byte[] val = result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
      if (val == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        continue;
      }
      HRegionInfo info = Writables.getHRegionInfo(val);
      if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
    throws IOException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + metaRows.size() + " rows for table " +
      Bytes.toString(tableName));
    byte[] firstrow = metaRows.get(0);
    LOG.debug("FirstRow=" + Bytes.toString(firstrow));
    int index = getMiniHBaseCluster().getServerWith(firstrow);
    return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
  }

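  /**
   * Starts a mini MapReduce cluster (two TaskTrackers by default), homed
   * under the data test directory, and wires its job tracker address into the
   * configuration.
   */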
  public void startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
  }

  public void startMiniMapReduceCluster(final int servers) throws IOException {
    LOG.info("Starting mini mapreduce cluster...");
    if (dataTestDir == null) {
      setupDataTestDir();
    }

    // Route MapReduce logs and output under the test's Hadoop directories.
    Configuration c = getConfiguration();
    String logDir = c.get("hadoop.log.dir");
    String tmpDir = c.get("hadoop.tmp.dir");
    if (logDir == null) {
      logDir = tmpDir;
    }
    System.setProperty("hadoop.log.dir", logDir);
    c.set("mapred.output.dir", tmpDir);

    // Allow generous virtual memory headroom so MR child processes are not
    // killed by the NodeManager's vmem check.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    mrCluster = new MiniMRCluster(0, 0, servers,
      FileSystem.get(conf).getUri().toString(), 1, null, null, null, new JobConf(conf));

    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
    }
    // MiniMRCluster overwrites mapred.local.dir; restore our setting.
    jobConf.set("mapred.local.dir",
      conf.get("mapred.local.dir"));

    LOG.info("Mini mapreduce cluster started");
    JobConf mrClusterJobConf = mrCluster.createJobConf();
    c.set("mapred.job.tracker", mrClusterJobConf.get("mapred.job.tracker"));
    // Point clients at the YARN framework and the addresses used by the mini
    // MR cluster, where present.
    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = mrClusterJobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String schedulerAddress =
      mrClusterJobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }

  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }
    // Restore configuration to point to the local jobtracker.
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }


  // Switches the logger for the given class to DEBUG level.
  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }

  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }

  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    // Decrement the count in this.conf, for newly spawned masters, as well as
    // in the configuration of the existing masters.
    decrementMinRegionServerCount(getConfiguration());
    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
      ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
        Math.max(currentCount - 1, 1));
    }
  }

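  /**
   * Expires the given watcher's ZooKeeper session: opens a second connection
   * with the same session id and password and closes it, which invalidates
   * the original session. A monitor connection on the same session logs the
   * events generated while this happens.
   */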
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
    throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    int sessionTimeout = 500;
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Extra connection on the same session: its watcher lets us observe the
    // events generated while the session is being expired.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher() {
        @Override
        public void process(WatchedEvent watchedEvent) {
          LOG.info("Monitor ZKW received event=" + watchedEvent);
        }
      }, sessionID, password);

    // Connecting with the same session id and password and then closing the
    // connection expires the original session.
    ZooKeeper newZK = new ZooKeeper(quorumServers,
      sessionTimeout, EmptyWatcher.instance, sessionID, password);
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), HConstants.META_TABLE_NAME).close();
    }
  }

  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

  public HBaseCluster getHBaseClusterInterface() {
    // Return the cluster through its most basic interface; the backing
    // instance may or may not be a MiniHBaseCluster.
    return hbaseCluster;
  }

  public synchronized HBaseAdmin getHBaseAdmin()
    throws IOException {
    if (hbaseAdmin == null) {
      hbaseAdmin = new HBaseAdmin(new Configuration(getConfiguration()));
    }
    return hbaseAdmin;
  }

  private HBaseAdmin hbaseAdmin = null;

  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }

  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
    if (dfsCluster != null && dfsCluster.isClusterUp()) {
      throw new IOException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }

  public boolean cleanupTestDir() throws IOException {
    if (dataTestDir == null) {
      return false;
    } else {
      boolean ret = deleteDir(getDataTestDir());
      dataTestDir = null;
      return ret;
    }
  }

  public boolean cleanupTestDir(final String subdir) throws IOException {
    if (dataTestDir == null) {
      return false;
    }
    return deleteDir(getDataTestDir(subdir));
  }

  public boolean deleteDir(final Path dir) throws IOException {
    FileSystem fs = getTestFileSystem();
    if (fs.exists(dir)) {
      // Delete the directory that was passed in, not the whole data test dir.
      return fs.delete(dir, true);
    }
    return false;
  }

  public void waitTableAvailable(byte[] table, long timeoutMillis)
    throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!getHBaseAdmin().isTableAvailable(table)) {
      assertTrue("Timed out waiting for table to become available " +
        Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

  public void waitTableEnabled(byte[] table, long timeoutMillis)
    throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    // Keep waiting until the table is both available and enabled.
    while (!(getHBaseAdmin().isTableAvailable(table)
        && getHBaseAdmin().isTableEnabled(table))) {
      assertTrue("Timed out waiting for table to become available and enabled " +
        Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

  public boolean ensureSomeRegionServersAvailable(final int num)
    throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }

    return startedServer;
  }

  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
    throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
         getMiniHBaseCluster().getRegionServerThreads()) {

      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping: " + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }

  public static User getDifferentUser(final Configuration c,
    final String differentiatingSuffix)
    throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    // On a distributed filesystem, create a fresh test user per caller so
    // daemons do not share a filesystem instance.
    String username = User.getCurrent().getName() +
      differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
      new String[] {"supergroup"});
    return user;
  }

  public static void setMaxRecoveryErrorCount(final OutputStream stream,
    final int max) {
    try {
      Class<?>[] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            // The field is not publicly accessible, so set it via reflection.
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }

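  /**
   * Waits until every region of the given table has a server assigned in the
   * meta table, or the default 60-second timeout elapses.
   */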
  public void waitUntilAllRegionsAssigned(final byte[] tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }

  public void waitUntilAllRegionsAssigned(final byte[] tableName, final long timeout)
    throws IOException {
    long deadline = System.currentTimeMillis() + timeout;
    HTable meta = new HTable(getConfiguration(), HConstants.META_TABLE_NAME);
    try {
      while (true) {
        boolean allRegionsAssigned = true;
        Scan scan = new Scan();
        scan.addFamily(HConstants.CATALOG_FAMILY);
        ResultScanner s = meta.getScanner(scan);
        try {
          Result r;
          while ((r = s.next()) != null) {
            byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
            HRegionInfo info = Writables.getHRegionInfoOrNull(b);
            if (info != null && Bytes.equals(info.getTableName(), tableName)) {
              b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
              allRegionsAssigned &= (b != null);
            }
          }
        } finally {
          s.close();
        }
        if (allRegionsAssigned) {
          return;
        }
        long now = System.currentTimeMillis();
        if (now > deadline) {
          throw new IOException("Timeout waiting for all regions of " +
            Bytes.toStringBinary(tableName) + " to be assigned");
        }
        try {
          Thread.sleep(deadline - now < 200 ? deadline - now : 200);
        } catch (InterruptedException e) {
          throw new IOException(e);
        }
      }
    } finally {
      meta.close();
    }
  }

  public static List<KeyValue> getFromStoreFile(Store store,
    Get get) throws IOException {
    MultiVersionConsistencyControl.resetThreadReadPoint();
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
      scan.getFamilyMap().get(store.getFamily().getName()));

    List<KeyValue> result = new ArrayList<KeyValue>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // Verify that we got the row we asked for.
      KeyValue kv = result.get(0);
      if (!Bytes.equals(kv.getRow(), get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }

  public static List<KeyValue> getFromStoreFile(Store store,
    byte[] row,
    NavigableSet<byte[]> columns
    ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }

  public static ZooKeeperWatcher getZooKeeperWatcher(
    HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
    IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
      "unittest", new Abortable() {
        boolean aborted = false;

        @Override
        public void abort(String why, Throwable e) {
          aborted = true;
          throw new RuntimeException("Fatal ZK error, why=" + why, e);
        }

        @Override
        public boolean isAborted() {
          return aborted;
        }
      });
    return zkw;
  }

  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
    HBaseTestingUtility TEST_UTIL, HRegion region,
    ServerName serverName) throws ZooKeeperConnectionException,
    IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw, region
      .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
      version);
    return zkw;
  }

  public static void assertKVListsEqual(String additionalMsg,
    final List<KeyValue> expected,
    final List<KeyValue> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i;
    // Find the first position where the two lists diverge.
    for (i = 0; i < minLen
         && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
         ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
        "Expected and actual KV arrays differ at position " + i + ": " +
        safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
        safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }

  private static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }

  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
      + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
      + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
        HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }

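  /**
   * Creates a pre-split table and fills it with randomly generated puts and
   * deletes, flushing between rounds, so the resulting regions hold a mix of
   * store files and delete markers; handy for scan and compaction tests.
   */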
  public HTable createRandomTable(String tableName,
    final Collection<String> families,
    final int maxVersions,
    final int numColsPerRow,
    final int numFlushes,
    final int numRegions,
    final int numRowsPerFlush)
    throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
      " regions, " + numFlushes + " storefiles per region, " +
      numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
      "\n");

    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    final byte[] tableNameBytes = Bytes.toBytes(tableName);

    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableNameBytes, cfBytes,
      maxVersions,
      Bytes.toBytes(String.format(keyFormat, splitStartKey)),
      Bytes.toBytes(String.format(keyFormat, splitEndKey)),
      numRegions);
    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(HConstants.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
          actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
              "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
              ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(tableNameBytes);
      }
    }

    return table;
  }

  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;

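  /**
   * Returns a random port in the [0xc000, 0xfffe) range. The port is not
   * guaranteed to be free; use {@link #randomFreePort()} to also probe that
   * it can be bound.
   */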
  public static int randomPort() {
    return MIN_RANDOM_PORT
      + new Random().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
  }

  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        port = 0;
      }
    } while (port == 0);
    return port;
  }

  public static void waitForHostPort(String host, int port)
    throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }

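  /**
   * Creates a table pre-split into {@link #DEFAULT_REGIONS_PER_SERVER}
   * regions per live region server, intended for load testing.
   */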
  public static int createPreSplitLoadTestTable(Configuration conf,
    byte[] tableName, byte[] columnFamily, Algorithm compression,
    DataBlockEncoding dataBlockEncoding) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd);
  }

  public static int createPreSplitLoadTestTable(Configuration conf,
    HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Size the number of regions by the number of live region servers.
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
        "pre-splitting table into " + totalNumberOfRegions + " regions " +
        "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
        totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + Bytes.toStringBinary(desc.getName()) +
        " already exists, continuing");
    } finally {
      // Close once, here; an additional close inside the try block would be
      // redundant.
      admin.close();
    }
    return totalNumberOfRegions;
  }

  public static int getMetaRSPort(Configuration conf) throws IOException {
    HTable table = new HTable(conf, HConstants.META_TABLE_NAME);
    HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
    table.close();
    return hloc.getPort();
  }

  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
    throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(hcd);
    HRegionInfo info =
      new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
    HRegion region =
      HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue(numRegions > 3);
    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] result = new byte[tmpSplitKeys.length + 1][];
    for (int i = 0; i < tmpSplitKeys.length; i++) {
      result[i + 1] = tmpSplitKeys[i];
    }
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }

  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
    List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
    long familyId = 0;
    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
        for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          HColumnDescriptor htd = new HColumnDescriptor(name);
          htd.setCompressionType(compressionType);
          htd.setDataBlockEncoding(encodingType);
          htd.setBloomFilterType(bloomType);
          htds.add(htd);
          familyId++;
        }
      }
    }
    return htds;
  }

  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // This compression algorithm is not available on this platform;
        // skip it.
      }
    }
    return supportedAlgos.toArray(new Compression.Algorithm[0]);
  }
}