/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

/**
 * Facility for testing HBase. Replacement for old HBaseTestCase and
 * HBaseClusterTestCase functionality. Create an instance and keep it around
 * while testing HBase. This class is meant to be your one-stop shop for
 * anything you might need for testing. Manages one cluster at a time only.
 * Depends on log4j being on the classpath and hbase-site.xml for logging and
 * test-run configuration. It does not set logging levels nor make changes to
 * configuration parameters.
 */
public class HBaseTestingUtility {
  private static final Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
  private Configuration conf;
  private MiniZooKeeperCluster zkCluster = null;

  /**
   * The default number of regions per regionserver when creating a
   * pre-split table.
   */
  private static final int DEFAULT_REGIONS_PER_SERVER = 5;

  /**
   * Set if we were passed a zkCluster. If so, we won't shut it down in
   * {@link #shutdownMiniZKCluster()}.
   */
  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** Directory where we put the test data for this instance of
   * HBaseTestingUtility. */
  private File dataTestDir = null;

  /** Directory (a subdirectory of dataTestDir) used by the dfs cluster,
   * if any. */
  private File clusterTestDir = null;

  /**
   * System property key to get the test directory value. The name is what it
   * is because the mini dfs cluster has hard-codings to put its test data
   * here. It should NOT be used directly by HBase code; it is a mini dfs
   * property.
   */
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  /**
   * System property key to get the base test directory value.
   */
  public static final String BASE_TEST_DIRECTORY_KEY =
    "test.build.data.basedirectory";

  /**
   * Default base directory for test output.
   */
  public static final String DEFAULT_BASE_TEST_DIRECTORY = "target/test-data";

  /** Compression algorithms to use in parameterized JUnit 4 tests */
  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  /** This is for unit tests parameterized with a single boolean. */
  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Boolean.FALSE },
      { Boolean.TRUE }
    });

  /** Compression algorithms to use in testing */
  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
    Compression.Algorithm.NONE, Compression.Algorithm.GZ
  };

  /**
   * Create all combinations of Bloom filters and compression algorithms for
   * testing.
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    this.conf = conf;
    // Make HBase checksum verification failures fatal in tests instead of
    // being silently swallowed.
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
    setHDFSClientRetry(1);
  }

  /**
   * Set the number of retries for the HDFS client. When set to 0, also
   * patches a running mini dfs cluster's client to skip retries entirely.
   * @param retries retry count for the HDFS client
   */
  public void setHDFSClientRetry(final int retries) {
    this.conf.setInt("hdfs.client.retries.number", retries);
    HBaseFileSystem.setRetryCounts(conf);
    if (0 == retries) {
      makeDFSClientNonRetrying();
    }
  }

  /**
   * Returns this class's instance of {@link Configuration}. Be careful how
   * you use the returned Configuration, since {@link HConnection} instances
   * can be shared: the map of HConnections is keyed by the Configuration. If,
   * say, a Connection was being used against a cluster that had been shut
   * down (see {@link #shutdownMiniCluster()}), the Connection will no longer
   * be wholesome. Rather than use the return directly, it is usually best to
   * make a copy and use that:
   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
   * @return Instance of Configuration.
   */
  public Configuration getConfiguration() {
    return this.conf;
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  /**
   * @return Where to write test data on the local filesystem; usually
   * {@link #DEFAULT_BASE_TEST_DIRECTORY}. Should not be used by unit tests
   * directly, hence it is private.
   * @see #setupDataTestDir()
   */
  private Path getBaseTestDir() {
    String pathName = System.getProperty(
        BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);

    return new Path(pathName);
  }

  /**
   * @return Where to write test data on the local filesystem, specific to
   * this instance. Sets the directory up if it does not exist yet.
   */
  public Path getDataTestDir() {
    if (dataTestDir == null) {
      setupDataTestDir();
    }
    return new Path(dataTestDir.getAbsolutePath());
  }

  /**
   * @return Where the DFS cluster will write data on the local filesystem.
   * Sets the directory up if it does not exist yet.
   */
  public Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  /**
   * @param subdirName name of the subdirectory
   * @return Path to a subdirectory named <code>subdirName</code> under
   * {@link #getDataTestDir()}.
   */
  public Path getDataTestDir(final String subdirName) {
    return new Path(getDataTestDir(), subdirName);
  }

  /**
   * Sets up a directory for this instance to write its test data into, and
   * points the various Hadoop and HBase "local dir" properties at
   * subdirectories of it. A random UUID component keeps concurrent test runs
   * from colliding.
   */
  private void setupDataTestDir() {
    if (dataTestDir != null) {
      LOG.warn("Data test dir already setup in " +
          dataTestDir.getAbsolutePath());
      return;
    }

    String randomStr = UUID.randomUUID().toString();
    Path testPath = new Path(getBaseTestDir(), randomStr);

    dataTestDir = new File(testPath.toString()).getAbsoluteFile();
    dataTestDir.deleteOnExit();

    createSubDirAndSystemProperty(
        "hadoop.log.dir",
        testPath, "hadoop-log-dir");

    // This defaults to /tmp/hadoop-${user.name}; we want our own value to
    // ensure uniqueness on the same machine.
    createSubDirAndSystemProperty(
        "hadoop.tmp.dir",
        testPath, "hadoop-tmp-dir");

    // Read and modified by MiniMRCluster.
    createSubDir(
        "mapred.local.dir",
        testPath, "mapred-local-dir");

    createSubDirAndSystemProperty(
        "mapred.working.dir",
        testPath, "mapred-working-dir");

    createSubDir(
        "hbase.local.dir",
        testPath, "hbase-local-dir");
  }
340
341 private void createSubDir(String propertyName, Path parent, String subDirName){
342 Path newPath= new Path(parent, subDirName);
343 File newDir = new File(newPath.toString()).getAbsoluteFile();
344 newDir.deleteOnExit();
345 conf.set(propertyName, newDir.getAbsolutePath());
346 }
347
348 private void createSubDirAndSystemProperty(
349 String propertyName, Path parent, String subDirName){
350
351 String sysValue = System.getProperty(propertyName);
352
353 if (sysValue != null) {
354
355
356 LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
357 sysValue + " so I do NOT create it in "+dataTestDir.getAbsolutePath());
358 String confValue = conf.get(propertyName);
359 if (confValue != null && !confValue.endsWith(sysValue)){
360 LOG.warn(
361 propertyName + " property value differs in configuration and system: "+
362 "Configuration="+confValue+" while System="+sysValue+
363 " Erasing configuration value by system value."
364 );
365 }
366 conf.set(propertyName, sysValue);
367 } else {
368
369 createSubDir(propertyName, parent, subDirName);
370 System.setProperty(propertyName, conf.get(propertyName));
371 }
372 }

  /**
   * Creates a directory for the DFS cluster, under the test data directory.
   */
  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      LOG.warn("Cluster test dir already setup in " +
          clusterTestDir.getAbsolutePath());
      return;
    }

    // Using randomUUID ensures that multiple clusters can be launched by
    // a single test without path collisions.
    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();

    clusterTestDir.deleteOnExit();
  }

  /**
   * Despite the name, this is a check: it throws if a cluster is already
   * running and returns silently otherwise.
   * @throws IOException if a cluster is already running
   */
  public void isRunningCluster() throws IOException {
    if (dfsCluster == null) return;
    throw new IOException("Cluster already running at " +
        this.clusterTestDir);
  }
400
401
402
403
404
405
406
407
408 public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
409 return startMiniDFSCluster(servers, null);
410 }
411
412
413
414
415
416
417
418
419
420
421
422
423 public MiniDFSCluster startMiniDFSCluster(final String hosts[])
424 throws Exception {
425 if ( hosts != null && hosts.length != 0) {
426 return startMiniDFSCluster(hosts.length, hosts);
427 } else {
428 return startMiniDFSCluster(1, null);
429 }
430 }
431
432
433
434
435
436
437
438
439
440
441 public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
442 throws Exception {
443
444
445 isRunningCluster();
446
447
448 if (clusterTestDir == null) {
449 setupClusterTestDir();
450 }
451
452
453 System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.toString());
454
455
456
457
458 System.setProperty("test.cache.data", this.clusterTestDir.toString());
459
460
461 this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
462 true, null, null, hosts, null);
463
464
465 FileSystem fs = this.dfsCluster.getFileSystem();
466 this.conf.set("fs.defaultFS", fs.getUri().toString());
467
468 this.conf.set("fs.default.name", fs.getUri().toString());
469
470
471 this.dfsCluster.waitClusterUp();
472
473 return this.dfsCluster;
474 }
475
476
477
478
479
480
481 public void shutdownMiniDFSCluster() throws Exception {
482 if (this.dfsCluster != null) {
483
484 this.dfsCluster.shutdown();
485 dfsCluster = null;
486 }
487
488 }
489
490
491
492
493
494
495
496
497 public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
498 return startMiniZKCluster(1);
499 }
500
501
502
503
504
505
506
507
508
509 public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
510 throws Exception {
511 File zkClusterFile = new File(getClusterTestDir().toString());
512 return startMiniZKCluster(zkClusterFile, zooKeeperServerNum);
513 }
514
515 private MiniZooKeeperCluster startMiniZKCluster(final File dir)
516 throws Exception {
517 return startMiniZKCluster(dir,1);
518 }
519
520 private MiniZooKeeperCluster startMiniZKCluster(final File dir,
521 int zooKeeperServerNum)
522 throws Exception {
523 if (this.zkCluster != null) {
524 throw new IOException("Cluster already running at " + dir);
525 }
526 this.passedZkCluster = false;
527 this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
528 int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum);
529 this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
530 Integer.toString(clientPort));
531 return this.zkCluster;
532 }
533
534
535
536
537
538
539
540 public void shutdownMiniZKCluster() throws IOException {
541 if (this.zkCluster != null) {
542 this.zkCluster.shutdown();
543 this.zkCluster = null;
544 }
545 }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @return Mini hbase cluster instance created.
   * @throws Exception
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }
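
  // Typical lifecycle in a JUnit test (an illustrative sketch only; the
  // TEST_UTIL field name below is a convention, not part of this class):
  //
  //   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  //
  //   @BeforeClass public static void setUpBeforeClass() throws Exception {
  //     TEST_UTIL.startMiniCluster();
  //   }
  //
  //   @AfterClass public static void tearDownAfterClass() throws Exception {
  //     TEST_UTIL.shutdownMiniCluster();
  //   }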
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570 public MiniHBaseCluster startMiniCluster(final int numSlaves)
571 throws Exception {
572 return startMiniCluster(1, numSlaves);
573 }
574
575
576
577
578
579
580
581
582 public MiniHBaseCluster startMiniCluster(final int numMasters,
583 final int numSlaves)
584 throws Exception {
585 return startMiniCluster(numMasters, numSlaves, null);
586 }
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613 public MiniHBaseCluster startMiniCluster(final int numMasters,
614 final int numSlaves, final String[] dataNodeHosts)
615 throws Exception {
616 int numDataNodes = numSlaves;
617 if ( dataNodeHosts != null && dataNodeHosts.length != 0) {
618 numDataNodes = dataNodeHosts.length;
619 }
620
621 LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
622 numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");
623
624
625 isRunningCluster();
626
627
628
629 startMiniDFSCluster(numDataNodes, dataNodeHosts);
630
631
632 if (this.zkCluster == null) {
633 startMiniZKCluster(clusterTestDir);
634 }
635
636
637 return startMiniHBaseCluster(numMasters, numSlaves);
638 }
639
640
641
642
643
644
645
646
647
648
649
650
651 public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
652 final int numSlaves)
653 throws IOException, InterruptedException {
654
655 createRootDir();
656
657
658
659 if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
660 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
661 }
662 if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
663 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
664 }
665
666 Configuration c = new Configuration(this.conf);
667 this.hbaseCluster = new MiniHBaseCluster(c, numMasters, numSlaves);
    // Don't leave here till we've done a successful scan of the .META. table.
    HTable t = new HTable(c, HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
674 s.close();
675 t.close();
676
677 getHBaseAdmin();
678 LOG.info("Minicluster is up");
679 return (MiniHBaseCluster)this.hbaseCluster;
680 }
681
682
683
684
685
686
687
688 public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
689 this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
690
    // Don't leave here till we've done a successful scan of the .META. table.
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // Do nothing: draining the scanner is the point.
    }
696 LOG.info("HBase has been restarted");
697 s.close();
698 t.close();
699 }
700
701
702
703
704
705
706 public MiniHBaseCluster getMiniHBaseCluster() {
707 if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
708 return (MiniHBaseCluster)this.hbaseCluster;
709 }
710 throw new RuntimeException(hbaseCluster + " not an instance of " +
711 MiniHBaseCluster.class.getName());
712 }
713
714
715
716
717
718
719 public void shutdownMiniCluster() throws Exception {
720 LOG.info("Shutting down minicluster");
721 shutdownMiniHBaseCluster();
722 if (!this.passedZkCluster){
723 shutdownMiniZKCluster();
724 }
725 shutdownMiniDFSCluster();
726
727
728 if (this.clusterTestDir != null && this.clusterTestDir.exists()) {
729
730 if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
731 new Path(this.clusterTestDir.toString()))) {
732 LOG.warn("Failed delete of " + this.clusterTestDir.toString());
733 }
734 this.clusterTestDir = null;
735 }
736 LOG.info("Minicluster is down");
737 }
738
739
740
741
742
743 public void shutdownMiniHBaseCluster() throws IOException {
744 if (hbaseAdmin != null) {
745 hbaseAdmin.close();
746 hbaseAdmin = null;
747 }
748
749 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
750 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
751 if (this.hbaseCluster != null) {
752 this.hbaseCluster.shutdown();
753
754 this.hbaseCluster.waitUntilShutDown();
755 this.hbaseCluster = null;
756 }
757 }
758
759
760
761
762
763
764
765 public Path getDefaultRootDirPath() throws IOException {
766 FileSystem fs = FileSystem.get(this.conf);
767 return new Path(fs.makeQualified(fs.getHomeDirectory()),"hbase");
768 }
769
770
771
772
773
774
775
776
777
778 public Path createRootDir() throws IOException {
779 FileSystem fs = FileSystem.get(this.conf);
780 Path hbaseRootdir = getDefaultRootDirPath();
781 this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
782 fs.mkdirs(hbaseRootdir);
783 FSUtils.setVersion(fs, hbaseRootdir);
784 return hbaseRootdir;
785 }
786
787
788
789
790
791 public void flush() throws IOException {
792 getMiniHBaseCluster().flushcache();
793 }
794
795
796
797
798
799 public void flush(byte [] tableName) throws IOException {
800 getMiniHBaseCluster().flushcache(tableName);
801 }
802
803
804
805
806
807 public void compact(boolean major) throws IOException {
808 getMiniHBaseCluster().compact(major);
809 }
810
811
812
813
814
815 public void compact(byte [] tableName, boolean major) throws IOException {
816 getMiniHBaseCluster().compact(tableName, major);
817 }
818
819
820
821
822
823
824
825
826
827 public HTable createTable(byte[] tableName, byte[] family)
828 throws IOException{
829 return createTable(tableName, new byte[][]{family});
830 }
831
832
833
834
835
836
837
838
839 public HTable createTable(byte[] tableName, byte[][] families)
840 throws IOException {
841 return createTable(tableName, families,
842 new Configuration(getConfiguration()));
843 }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    // HBaseAdmin only waits for regions to appear in .META.; wait until
    // they are actually assigned.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }
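
  // Example call (all values illustrative): create a table pre-split into
  // ten regions covering eight-hex-digit keys, keeping three versions:
  //
  //   HTable t = TEST_UTIL.createTable(Bytes.toBytes("t"),
  //       new byte[][] { Bytes.toBytes("f") }, 3,
  //       Bytes.toBytes("00000000"), Bytes.toBytes("ffffffff"), 10);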
859
860
861
862
863
864
865
866
867
868 public HTable createTable(byte[] tableName, byte[][] families,
869 final Configuration c)
870 throws IOException {
871 HTableDescriptor desc = new HTableDescriptor(tableName);
872 for(byte[] family : families) {
873 desc.addFamily(new HColumnDescriptor(family));
874 }
875 getHBaseAdmin().createTable(desc);
876
877 waitUntilAllRegionsAssigned(tableName);
878 return new HTable(c, tableName);
879 }
880
881
882
883
884
885
886
887
888
889
890 public HTable createTable(byte[] tableName, byte[][] families,
891 final Configuration c, int numVersions)
892 throws IOException {
893 HTableDescriptor desc = new HTableDescriptor(tableName);
894 for(byte[] family : families) {
895 HColumnDescriptor hcd = new HColumnDescriptor(family)
896 .setMaxVersions(numVersions);
897 desc.addFamily(hcd);
898 }
899 getHBaseAdmin().createTable(desc);
900
901 waitUntilAllRegionsAssigned(tableName);
902 return new HTable(c, tableName);
903 }
904
905
906
907
908
909
910
911
912
913 public HTable createTable(byte[] tableName, byte[] family, int numVersions)
914 throws IOException {
915 return createTable(tableName, new byte[][]{family}, numVersions);
916 }
917
918
919
920
921
922
923
924
925
926 public HTable createTable(byte[] tableName, byte[][] families,
927 int numVersions)
928 throws IOException {
929 HTableDescriptor desc = new HTableDescriptor(tableName);
930 for (byte[] family : families) {
931 HColumnDescriptor hcd = new HColumnDescriptor(family)
932 .setMaxVersions(numVersions);
933 desc.addFamily(hcd);
934 }
935 getHBaseAdmin().createTable(desc);
936
937 waitUntilAllRegionsAssigned(tableName);
938 return new HTable(new Configuration(getConfiguration()), tableName);
939 }
940
941
942
943
944
945
946
947
948
949 public HTable createTable(byte[] tableName, byte[][] families,
950 int numVersions, int blockSize) throws IOException {
951 HTableDescriptor desc = new HTableDescriptor(tableName);
952 for (byte[] family : families) {
953 HColumnDescriptor hcd = new HColumnDescriptor(family)
954 .setMaxVersions(numVersions)
955 .setBlocksize(blockSize);
956 desc.addFamily(hcd);
957 }
958 getHBaseAdmin().createTable(desc);
959
960 waitUntilAllRegionsAssigned(tableName);
961 return new HTable(new Configuration(getConfiguration()), tableName);
962 }
963
964
965
966
967
968
969
970
971
972 public HTable createTable(byte[] tableName, byte[][] families,
973 int[] numVersions)
974 throws IOException {
975 HTableDescriptor desc = new HTableDescriptor(tableName);
976 int i = 0;
977 for (byte[] family : families) {
978 HColumnDescriptor hcd = new HColumnDescriptor(family)
979 .setMaxVersions(numVersions[i]);
980 desc.addFamily(hcd);
981 i++;
982 }
983 getHBaseAdmin().createTable(desc);
984
985 waitUntilAllRegionsAssigned(tableName);
986 return new HTable(new Configuration(getConfiguration()), tableName);
987 }
988
989
990
991
992
993 public void deleteTable(byte[] tableName) throws IOException {
994 try {
995 getHBaseAdmin().disableTable(tableName);
996 } catch (TableNotEnabledException e) {
997 LOG.debug("Table: " + Bytes.toString(tableName) + " already disabled, so just deleting it.");
998 }
999 getHBaseAdmin().deleteTable(tableName);
1000 }

  /**
   * Truncate a table, i.e. delete every row. The table itself is left in
   * place.
   * @param tableName existing table
   * @return HTable for the passed tableName
   * @throws IOException
   */
  public HTable truncateTable(byte[] tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      table.delete(new Delete(res.getRow()));
    }
    resScan.close();
    return table;
  }

  /**
   * Load a table with rows keyed 'aaa' through 'zzz', one column per row.
   * @param t Table to load
   * @param f Family to write to
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final HTable t, final byte[] f) throws IOException {
1029 t.setAutoFlush(false);
1030 byte[] k = new byte[3];
1031 int rowCount = 0;
1032 for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1033 for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1034 for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1035 k[0] = b1;
1036 k[1] = b2;
1037 k[2] = b3;
1038 Put put = new Put(k);
1039 put.add(f, null, k);
1040 t.put(put);
1041 rowCount++;
1042 }
1043 }
1044 }
1045 t.flushCommits();
1046 return rowCount;
1047 }
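
  // Example pairing with countRows() (names illustrative): loadTable writes
  // one row per three-letter key 'aaa'..'zzz', i.e. 26^3 = 17576 rows.
  //
  //   HTable t = TEST_UTIL.createTable(Bytes.toBytes("t"), Bytes.toBytes("f"));
  //   int rows = TEST_UTIL.loadTable(t, Bytes.toBytes("f"));
  //   assertEquals(17576, rows);
  //   assertEquals(rows, TEST_UTIL.countRows(t));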
1048
1049
1050
1051
1052
1053
1054
1055
1056 public int loadTable(final HTable t, final byte[][] f) throws IOException {
1057 return loadTable(t, f, null);
1058 }
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068 public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
1069 t.setAutoFlush(false);
1070 byte[] k = new byte[3];
1071 int rowCount = 0;
1072 for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1073 for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1074 for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1075 k[0] = b1;
1076 k[1] = b2;
1077 k[2] = b3;
1078 Put put = new Put(k);
1079 for (int i = 0; i < f.length; i++) {
1080 put.add(f[i], null, value != null ? value : k);
1081 }
1082 t.put(put);
1083 rowCount++;
1084 }
1085 }
1086 }
1087 t.flushCommits();
1088 return rowCount;
1089 }
1090

  /**
   * A tracker for seen rows, for validating scans over tables loaded with
   * {@link #loadTable(HTable, byte[])}-style three-letter keys.
   */
  public static class SeenRowTracker {
1095 int dim = 'z' - 'a' + 1;
1096 int[][][] seenRows = new int[dim][dim][dim];
1097 byte[] startRow;
1098 byte[] stopRow;
1099
1100 public SeenRowTracker(byte[] startRow, byte[] stopRow) {
1101 this.startRow = startRow;
1102 this.stopRow = stopRow;
1103 }
1104
1105 int i(byte b) {
1106 return b - 'a';
1107 }
1108
1109 public void addRow(byte[] row) {
1110 seenRows[i(row[0])][i(row[1])][i(row[2])]++;
1111 }
1112
    /**
     * Validate that every row in [startRow, stopRow) was seen exactly once,
     * and that all other rows were not seen at all.
     */
    public void validate() {
1117 for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1118 for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1119 for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1120 int count = seenRows[i(b1)][i(b2)][i(b3)];
1121 int expectedCount = 0;
1122 if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
1123 && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
1124 expectedCount = 1;
1125 }
1126 if (count != expectedCount) {
1127 String row = new String(new byte[] {b1,b2,b3});
1128 throw new RuntimeException("Row:" + row + " has a seen count of " + count + " instead of " + expectedCount);
1129 }
1130 }
1131 }
1132 }
1133 }
1134 }
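
  // Sketch of how a scan test might use SeenRowTracker (the table and key
  // range are illustrative):
  //
  //   SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("aaa"),
  //       Bytes.toBytes("abc"));
  //   for (Result r : table.getScanner(new Scan(Bytes.toBytes("aaa"),
  //       Bytes.toBytes("abc")))) {
  //     tracker.addRow(r.getRow());
  //   }
  //   tracker.validate(); // throws if a row was missed or seen twice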
1135
1136
1137
1138
1139
1140
1141
1142
1143 public int loadRegion(final HRegion r, final byte[] f)
1144 throws IOException {
1145 return loadRegion(r, f, false);
1146 }
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156 public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
1157 throws IOException {
1158 byte[] k = new byte[3];
1159 int rowCount = 0;
1160 for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1161 for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1162 for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1163 k[0] = b1;
1164 k[1] = b2;
1165 k[2] = b3;
1166 Put put = new Put(k);
1167 put.add(f, null, k);
1168 if (r.getLog() == null) put.setWriteToWAL(false);
1169 r.put(put);
1170 rowCount++;
1171 }
1172 }
1173 if (flush) {
1174 r.flushcache();
1175 }
1176 }
1177 return rowCount;
1178 }
1179
1180
1181
1182
1183 public int countRows(final HTable table) throws IOException {
1184 Scan scan = new Scan();
1185 ResultScanner results = table.getScanner(scan);
1186 int count = 0;
1187 for (@SuppressWarnings("unused") Result res : results) {
1188 count++;
1189 }
1190 results.close();
1191 return count;
1192 }
1193
1194 public int countRows(final HTable table, final byte[]... families) throws IOException {
1195 Scan scan = new Scan();
1196 for (byte[] family: families) {
1197 scan.addFamily(family);
1198 }
1199 ResultScanner results = table.getScanner(scan);
1200 int count = 0;
1201 for (@SuppressWarnings("unused") Result res : results) {
1202 count++;
1203 }
1204 results.close();
1205 return count;
1206 }

  /**
   * Return an MD5 digest over all the row keys of a table, usable for cheap
   * equality checks between two tables.
   */
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // MessageDigest.toString() does not render the hash, so hex-encode the
    // digest bytes ourselves.
    StringBuilder sb = new StringBuilder();
    for (byte b : digest.digest()) {
      sb.append(String.format("%02x", b));
    }
    return sb.toString();
  }
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230 public int createMultiRegions(HTable table, byte[] columnFamily)
1231 throws IOException {
1232 return createMultiRegions(table, columnFamily, true);
1233 }
1234
1235 public static final byte[][] KEYS = {
1236 HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
1237 Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
1238 Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
1239 Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
1240 Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
1241 Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
1242 Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
1243 Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
1244 Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
1245 };
1246
1247 public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
1248 Bytes.toBytes("bbb"),
1249 Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
1250 Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
1251 Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
1252 Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
1253 Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
1254 Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
1255 Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
1256 Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
1257 };
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269 public int createMultiRegions(HTable table, byte[] columnFamily, boolean cleanupFS)
1270 throws IOException {
1271 return createMultiRegions(getConfiguration(), table, columnFamily, KEYS, cleanupFS);
1272 }
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283 public int createMultiRegions(final Configuration c, final HTable table,
1284 final byte [] family, int numRegions)
1285 throws IOException {
1286 if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1287 byte [] startKey = Bytes.toBytes("aaaaa");
1288 byte [] endKey = Bytes.toBytes("zzzzz");
1289 byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1290 byte [][] regionStartKeys = new byte[splitKeys.length+1][];
1291 for (int i=0;i<splitKeys.length;i++) {
1292 regionStartKeys[i+1] = splitKeys[i];
1293 }
1294 regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
1295 return createMultiRegions(c, table, family, regionStartKeys);
1296 }
1297
1298 public int createMultiRegions(final Configuration c, final HTable table,
1299 final byte[] columnFamily, byte [][] startKeys) throws IOException {
1300 return createMultiRegions(c, table, columnFamily, startKeys, true);
1301 }
1302
1303 public int createMultiRegions(final Configuration c, final HTable table,
1304 final byte[] columnFamily, byte [][] startKeys, boolean cleanupFS)
1305 throws IOException {
1306 Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
1307 HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
1308 HTableDescriptor htd = table.getTableDescriptor();
1309 if(!htd.hasFamily(columnFamily)) {
1310 HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
1311 htd.addFamily(hcd);
1312 }
1313
1314
1315
1316
1317 List<byte[]> rows = getMetaTableRows(htd.getName());
1318 String regionToDeleteInFS = table
1319 .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
1320 .getRegionInfo().getEncodedName();
1321 List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
1322
1323 int count = 0;
1324 for (int i = 0; i < startKeys.length; i++) {
1325 int j = (i + 1) % startKeys.length;
1326 HRegionInfo hri = new HRegionInfo(table.getTableName(),
1327 startKeys[i], startKeys[j]);
1328 Put put = new Put(hri.getRegionName());
1329 put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
1330 Writables.getBytes(hri));
1331 meta.put(put);
1332 LOG.info("createMultiRegions: inserted " + hri.toString());
1333 newRegions.add(hri);
1334 count++;
1335 }
1336
1337 for (byte[] row : rows) {
1338 LOG.info("createMultiRegions: deleting meta row -> " +
1339 Bytes.toStringBinary(row));
1340 meta.delete(new Delete(row));
1341 }
    if (cleanupFS) {
      // Remove the original region's directory on disk; its .META. row was
      // deleted above and nothing refers to it anymore.
      Path tableDir = new Path(getDefaultRootDirPath().toString()
          + System.getProperty("file.separator") + htd.getNameAsString()
          + System.getProperty("file.separator") + regionToDeleteInFS);
      FileSystem.get(c).delete(tableDir, true);
    }
1350
1351 HConnection conn = table.getConnection();
1352 conn.clearRegionCache();
1353
1354 HBaseAdmin admin = getHBaseAdmin();
1355 if (admin.isTableEnabled(table.getTableName())) {
1356 for(HRegionInfo hri : newRegions) {
1357 admin.assign(hri.getRegionName());
1358 }
1359 }
1360
1361 meta.close();
1362
1363 return count;
1364 }
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376 public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
1377 final HTableDescriptor htd, byte [][] startKeys)
1378 throws IOException {
1379 HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
1380 Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
1381 List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
1382
1383 for (int i = 0; i < startKeys.length; i++) {
1384 int j = (i + 1) % startKeys.length;
1385 HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
1386 startKeys[j]);
1387 Put put = new Put(hri.getRegionName());
1388 put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
1389 Writables.getBytes(hri));
1390 meta.put(put);
1391 LOG.info("createMultiRegionsInMeta: inserted " + hri.toString());
1392 newRegions.add(hri);
1393 }
1394
1395 meta.close();
1396 return newRegions;
1397 }
1398
1399
1400
1401
1402
1403
1404 public List<byte[]> getMetaTableRows() throws IOException {
1405
1406 HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
1407 List<byte[]> rows = new ArrayList<byte[]>();
1408 ResultScanner s = t.getScanner(new Scan());
1409 for (Result result : s) {
1410 LOG.info("getMetaTableRows: row -> " +
1411 Bytes.toStringBinary(result.getRow()));
1412 rows.add(result.getRow());
1413 }
1414 s.close();
1415 t.close();
1416 return rows;
1417 }
1418
1419
1420
1421
1422
1423
1424 public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
1425
1426 HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
1427 List<byte[]> rows = new ArrayList<byte[]>();
1428 ResultScanner s = t.getScanner(new Scan());
1429 for (Result result : s) {
1430 byte[] val = result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
1431 if (val == null) {
1432 LOG.error("No region info for row " + Bytes.toString(result.getRow()));
1433
1434 continue;
1435 }
1436 HRegionInfo info = Writables.getHRegionInfo(val);
1437 if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
1438 LOG.info("getMetaTableRows: row -> " +
1439 Bytes.toStringBinary(result.getRow()) + info);
1440 rows.add(result.getRow());
1441 }
1442 }
1443 s.close();
1444 t.close();
1445 return rows;
1446 }
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458 public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
1459 throws IOException {
1460 List<byte[]> metaRows = getMetaTableRows(tableName);
1461 if (metaRows == null || metaRows.isEmpty()) {
1462 return null;
1463 }
1464 LOG.debug("Found " + metaRows.size() + " rows for table " +
1465 Bytes.toString(tableName));
1466 byte [] firstrow = metaRows.get(0);
1467 LOG.debug("FirstRow=" + Bytes.toString(firstrow));
1468 int index = getMiniHBaseCluster().getServerWith(firstrow);
1469 return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
1470 }

  /**
   * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>'s.
   * @throws IOException When starting the cluster fails.
   */
1478 public void startMiniMapReduceCluster() throws IOException {
1479 startMiniMapReduceCluster(2);
1480 }

  /**
   * Starts a <code>MiniMRCluster</code>.
   * @param servers The number of <code>TaskTracker</code>'s to start.
   * @throws IOException When starting the cluster fails.
   */
1488 public void startMiniMapReduceCluster(final int servers) throws IOException {
1489 LOG.info("Starting mini mapreduce cluster...");
1490 if (dataTestDir == null) {
1491 setupDataTestDir();
1492 }
1493
1494 Configuration c = getConfiguration();
1495 String logDir = c.get("hadoop.log.dir");
1496 String tmpDir = c.get("hadoop.tmp.dir");
1497 if (logDir == null) {
1498 logDir = tmpDir;
1499 }
1500 System.setProperty("hadoop.log.dir", logDir);
1501 c.set("mapred.output.dir", tmpDir);
    // Allow mini MR child tasks more virtual than physical memory; without
    // this the NodeManager's vmem check can kill them on some machines.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
1506
1507 mrCluster = new MiniMRCluster(0, 0, servers,
1508 FileSystem.get(conf).getUri().toString(), 1, null, null, null, new JobConf(conf));
1509
1510 JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
1511 if (jobConf == null) {
1512 jobConf = mrCluster.createJobConf();
1513 }
1514 jobConf.set("mapred.local.dir",
1515 conf.get("mapred.local.dir"));
1516
1517 LOG.info("Mini mapreduce cluster started");
1518 JobConf mrClusterJobConf = mrCluster.createJobConf();
1519 c.set("mapred.job.tracker", mrClusterJobConf.get("mapred.job.tracker"));
1520
    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = mrClusterJobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
    if (historyAddress != null) {
      conf.set("mapreduce.jobhistory.address", historyAddress);
    }
    String schedulerAddress =
      mrClusterJobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
1536 }
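
  // Sketch (illustrative) of wrapping a MapReduce test with the mini MR
  // cluster; the job setup itself is whatever the test needs:
  //
  //   TEST_UTIL.startMiniMapReduceCluster();
  //   try {
  //     // configure and run a Job against TEST_UTIL.getConfiguration()
  //   } finally {
  //     TEST_UTIL.shutdownMiniMapReduceCluster();
  //   }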
1537
1538
1539
1540
1541 public void shutdownMiniMapReduceCluster() {
1542 LOG.info("Stopping mini mapreduce cluster...");
1543 if (mrCluster != null) {
1544 mrCluster.shutdown();
1545 mrCluster = null;
1546 }
1547
1548 conf.set("mapred.job.tracker", "local");
1549 LOG.info("Mini mapreduce cluster stopped");
1550 }
1551
1552
1553
1554
1555
1556
1557 public void enableDebug(Class<?> clazz) {
1558 Log l = LogFactory.getLog(clazz);
1559 if (l instanceof Log4JLogger) {
1560 ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
1561 } else if (l instanceof Jdk14Logger) {
1562 ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
1563 }
1564 }
1565
1566
1567
1568
1569
1570 public void expireMasterSession() throws Exception {
1571 HMaster master = getMiniHBaseCluster().getMaster();
1572 expireSession(master.getZooKeeper(), false);
1573 }
1574
1575
1576
1577
1578
1579
1580 public void expireRegionServerSession(int index) throws Exception {
1581 HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
1582 expireSession(rs.getZooKeeper(), false);
1583 decrementMinRegionServerCount();
1584 }
1585
1586 private void decrementMinRegionServerCount() {
1587
1588
1589 decrementMinRegionServerCount(getConfiguration());
1590
1591
1592 for (MasterThread master : getHBaseCluster().getMasterThreads()) {
1593 decrementMinRegionServerCount(master.getMaster().getConfiguration());
1594 }
1595 }
1596
1597 private void decrementMinRegionServerCount(Configuration conf) {
1598 int currentCount = conf.getInt(
1599 ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
1600 if (currentCount != -1) {
1601 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
1602 Math.max(currentCount - 1, 1));
1603 }
1604 }

  /**
   * Expire a ZooKeeper session, as recommended in the ZooKeeper
   * documentation: connect a second client with the same session id and
   * password, then close it.
   * @param nodeZK the watcher whose session should be expired
   * @param checkStatus if true, open (and close) an HTable against .META.
   *          afterwards to verify the cluster still answers
   */
1617 public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
1618 throws Exception {
1619 Configuration c = new Configuration(this.conf);
1620 String quorumServers = ZKConfig.getZKQuorumServersString(c);
1621 int sessionTimeout = 500;
1622 ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
1623 byte[] password = zk.getSessionPasswd();
1624 long sessionID = zk.getSessionId();
    // Expiry seems to be asynchronous (unit tests fail intermittently
    // without this): open a first "monitor" connection on the same session
    // so we can observe that the expiration event was actually sent.
1633 ZooKeeper monitor = new ZooKeeper(quorumServers,
1634 1000, new org.apache.zookeeper.Watcher(){
1635 @Override
1636 public void process(WatchedEvent watchedEvent) {
1637 LOG.info("Monitor ZKW received event="+watchedEvent);
1638 }
1639 } , sessionID, password);
    // Make the session expire: connect with the same session id and
    // password, then close; this invalidates the original session.
1642 ZooKeeper newZK = new ZooKeeper(quorumServers,
1643 sessionTimeout, EmptyWatcher.instance, sessionID, password);
1644 newZK.close();
1645 LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
    // Close the monitor connection now that the session is gone.
1648 monitor.close();
1649
1650 if (checkStatus) {
1651 new HTable(new Configuration(conf), HConstants.META_TABLE_NAME).close();
1652 }
1653 }
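
  // Sketch of a recovery test (illustrative): expire a region server's ZK
  // session and wait for the master to reassign its regions.
  //
  //   TEST_UTIL.expireRegionServerSession(0);
  //   TEST_UTIL.waitUntilAllRegionsAssigned(tableName, 60000);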
1654
1655
1656
1657
1658
1659
1660
1661 public MiniHBaseCluster getHBaseCluster() {
1662 return getMiniHBaseCluster();
1663 }
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673 public HBaseCluster getHBaseClusterInterface() {
1674
1675
1676 return hbaseCluster;
1677 }
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688 public synchronized HBaseAdmin getHBaseAdmin()
1689 throws IOException {
1690 if (hbaseAdmin == null){
1691 hbaseAdmin = new HBaseAdmin(new Configuration(getConfiguration()));
1692 }
1693 return hbaseAdmin;
1694 }
1695 private HBaseAdmin hbaseAdmin = null;
1696
1697
1698
1699
1700
1701
1702
1703 public void closeRegion(String regionName) throws IOException {
1704 closeRegion(Bytes.toBytes(regionName));
1705 }
1706
1707
1708
1709
1710
1711
1712
1713 public void closeRegion(byte[] regionName) throws IOException {
1714 getHBaseAdmin().closeRegion(regionName, null);
1715 }
1716
1717
1718
1719
1720
1721
1722
1723
1724 public void closeRegionByRow(String row, HTable table) throws IOException {
1725 closeRegionByRow(Bytes.toBytes(row), table);
1726 }
1727
1728
1729
1730
1731
1732
1733
1734
1735 public void closeRegionByRow(byte[] row, HTable table) throws IOException {
1736 HRegionLocation hrl = table.getRegionLocation(row);
1737 closeRegion(hrl.getRegionInfo().getRegionName());
1738 }
1739
1740 public MiniZooKeeperCluster getZkCluster() {
1741 return zkCluster;
1742 }
1743
1744 public void setZkCluster(MiniZooKeeperCluster zkCluster) {
1745 this.passedZkCluster = true;
1746 this.zkCluster = zkCluster;
1747 conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
1748 }
1749
1750 public MiniDFSCluster getDFSCluster() {
1751 return dfsCluster;
1752 }
1753
1754 public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
1755 if (dfsCluster != null && dfsCluster.isClusterUp()) {
1756 throw new IOException("DFSCluster is already running! Shut it down first.");
1757 }
1758 this.dfsCluster = cluster;
1759 }
1760
1761 public FileSystem getTestFileSystem() throws IOException {
1762 return HFileSystem.get(conf);
1763 }
1764
1765
1766
1767
1768
1769 public boolean cleanupTestDir() throws IOException {
1770 if (dataTestDir == null ){
1771 return false;
1772 } else {
1773 boolean ret = deleteDir(getDataTestDir());
1774 dataTestDir = null;
1775 return ret;
1776 }
1777 }
1778
1779
1780
1781
1782
1783
1784 public boolean cleanupTestDir(final String subdir) throws IOException {
1785 if (dataTestDir == null){
1786 return false;
1787 }
1788 return deleteDir(getDataTestDir(subdir));
1789 }

  /**
   * Delete the given directory from the test filesystem.
   * @param dir Directory to delete
   * @return True if we deleted it.
   * @throws IOException
   */
  public boolean deleteDir(final Path dir) throws IOException {
    FileSystem fs = getTestFileSystem();
    if (fs.exists(dir)) {
      return fs.delete(dir, true);
    }
    return false;
  }
1803
1804 public void waitTableAvailable(byte[] table, long timeoutMillis)
1805 throws InterruptedException, IOException {
1806 long startWait = System.currentTimeMillis();
1807 while (!getHBaseAdmin().isTableAvailable(table)) {
1808 assertTrue("Timed out waiting for table to become available " +
1809 Bytes.toStringBinary(table),
1810 System.currentTimeMillis() - startWait < timeoutMillis);
1811 Thread.sleep(200);
1812 }
1813 }
1814
  public void waitTableEnabled(byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    // Wait until the table is both available and enabled.
    while (!getHBaseAdmin().isTableAvailable(table) ||
           !getHBaseAdmin().isTableEnabled(table)) {
      assertTrue("Timed out waiting for table to become available and enabled " +
         Bytes.toStringBinary(table),
         System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }
1826
1827
1828
1829
1830
1831
1832
1833
1834 public boolean ensureSomeRegionServersAvailable(final int num)
1835 throws IOException {
1836 boolean startedServer = false;
1837 MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
1838 for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
1839 LOG.info("Started new server=" + hbaseCluster.startRegionServer());
1840 startedServer = true;
1841 }
1842
1843 return startedServer;
1844 }
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854 public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
1855 throws IOException {
1856 boolean startedServer = ensureSomeRegionServersAvailable(num);
1857
1858 int nonStoppedServers = 0;
1859 for (JVMClusterUtil.RegionServerThread rst :
1860 getMiniHBaseCluster().getRegionServerThreads()) {
1861
1862 HRegionServer hrs = rst.getRegionServer();
1863 if (hrs.isStopping() || hrs.isStopped()) {
1864 LOG.info("A region server is stopped or stopping:"+hrs);
1865 } else {
1866 nonStoppedServers++;
1867 }
1868 }
1869 for (int i=nonStoppedServers; i<num; ++i) {
1870 LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
1871 startedServer = true;
1872 }
1873 return startedServer;
1874 }
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886 public static User getDifferentUser(final Configuration c,
1887 final String differentiatingSuffix)
1888 throws IOException {
1889 FileSystem currentfs = FileSystem.get(c);
1890 if (!(currentfs instanceof DistributedFileSystem)) {
1891 return User.getCurrent();
1892 }
1893
1894
1895 String username = User.getCurrent().getName() +
1896 differentiatingSuffix;
1897 User user = User.createUserForTesting(c, username,
1898 new String[]{"supergroup"});
1899 return user;
1900 }
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915 public static void setMaxRecoveryErrorCount(final OutputStream stream,
1916 final int max) {
1917 try {
1918 Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
1919 for (Class<?> clazz: clazzes) {
1920 String className = clazz.getSimpleName();
1921 if (className.equals("DFSOutputStream")) {
1922 if (clazz.isInstance(stream)) {
1923 Field maxRecoveryErrorCountField =
1924 stream.getClass().getDeclaredField("maxRecoveryErrorCount");
1925 maxRecoveryErrorCountField.setAccessible(true);
1926 maxRecoveryErrorCountField.setInt(stream, max);
1927 break;
1928 }
1929 }
1930 }
1931 } catch (Exception e) {
1932 LOG.info("Could not set max recovery field", e);
1933 }
1934 }
1935
1936 void makeDFSClientNonRetrying() {
1937 if (null == this.dfsCluster) {
1938 LOG.debug("dfsCluster has not started, can't make client non-retrying.");
1939 return;
1940 }
1941 try {
1942 final FileSystem filesystem = this.dfsCluster.getFileSystem();
1943 if (!(filesystem instanceof DistributedFileSystem)) {
1944 LOG.debug("dfsCluster is not backed by a DistributedFileSystem, can't make client non-retrying.");
1945 return;
1946 }
1947
1948 final DistributedFileSystem fs = (DistributedFileSystem)filesystem;
1949
1950 final Field dfsField = fs.getClass().getDeclaredField("dfs");
1951 dfsField.setAccessible(true);
1952 final Class<?> dfsClazz = dfsField.getType();
1953 final DFSClient dfs = DFSClient.class.cast(dfsField.get(fs));
1954
1955
1956 final Method createRPCNamenode = dfsClazz.getDeclaredMethod("createRPCNamenode", InetSocketAddress.class, Configuration.class, UserGroupInformation.class);
1957 createRPCNamenode.setAccessible(true);
1958
1959
1960 final Field nnField = dfsClazz.getDeclaredField("nnAddress");
1961 nnField.setAccessible(true);
1962 final InetSocketAddress nnAddress = InetSocketAddress.class.cast(nnField.get(dfs));
1963 final Field confField = dfsClazz.getDeclaredField("conf");
1964 confField.setAccessible(true);
1965 final Configuration conf = Configuration.class.cast(confField.get(dfs));
1966 final Field ugiField = dfsClazz.getDeclaredField("ugi");
1967 ugiField.setAccessible(true);
1968 final UserGroupInformation ugi = UserGroupInformation.class.cast(ugiField.get(dfs));
1969
1970
1971 final Field namenodeField = dfsClazz.getDeclaredField("namenode");
1972 namenodeField.setAccessible(true);
1973 namenodeField.set(dfs, createRPCNamenode.invoke(null, nnAddress, conf, ugi));
      LOG.debug("Set DFSClient namenode to bare RPC");
1975 } catch (Exception exception) {
1976 LOG.info("Could not alter DFSClient to be non-retrying.", exception);
1977 }
1978 }

  /**
   * Wait until all regions for a table are in the .META. table and assigned
   * to a region server. Waits up to 60 seconds.
   * @param tableName the table name
   * @throws IOException
   */
1988 public void waitUntilAllRegionsAssigned(final byte[] tableName) throws IOException {
1989 waitUntilAllRegionsAssigned(tableName, 60000);
1990 }

  /**
   * Wait until all regions for a table are in the .META. table and assigned
   * to a region server.
   * @param tableName the table name
   * @param timeout timeout, in milliseconds
   * @throws IOException
   */
2001 public void waitUntilAllRegionsAssigned(final byte[] tableName, final long timeout)
2002 throws IOException {
2003 long deadline = System.currentTimeMillis() + timeout;
2004 HTable meta = new HTable(getConfiguration(), HConstants.META_TABLE_NAME);
2005 try {
2006 while (true) {
2007 boolean allRegionsAssigned = true;
2008 Scan scan = new Scan();
2009 scan.addFamily(HConstants.CATALOG_FAMILY);
2010 ResultScanner s = meta.getScanner(scan);
2011 try {
2012 Result r;
2013 while ((r = s.next()) != null) {
2014 byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
2015 HRegionInfo info = Writables.getHRegionInfoOrNull(b);
2016 if (info != null && Bytes.equals(info.getTableName(), tableName)) {
2017 b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
2018 allRegionsAssigned &= (b != null);
2019 }
2020 }
2021 } finally {
2022 s.close();
2023 }
2024 if (allRegionsAssigned) {
2025 return;
2026 }
2027 long now = System.currentTimeMillis();
2028 if (now > deadline) {
2029 throw new IOException("Timeout waiting for all regions of " +
2030 Bytes.toStringBinary(tableName) + " to be assigned");
2031 }
2032 try {
2033 Thread.sleep(deadline - now < 200 ? deadline - now : 200);
2034 } catch (InterruptedException e) {
2035 throw new IOException(e);
2036 }
2037 }
2038 } finally {
2039 meta.close();
2040 }
2041 }

  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
   */
2047 public static List<KeyValue> getFromStoreFile(Store store,
2048 Get get) throws IOException {
2049 MultiVersionConsistencyControl.resetThreadReadPoint();
2050 Scan scan = new Scan(get);
2051 InternalScanner scanner = (InternalScanner) store.getScanner(scan,
2052 scan.getFamilyMap().get(store.getFamily().getName()));
2053
2054 List<KeyValue> result = new ArrayList<KeyValue>();
2055 scanner.next(result);
2056 if (!result.isEmpty()) {
2057
2058 KeyValue kv = result.get(0);
2059 if (!Bytes.equals(kv.getRow(), get.getRow())) {
2060 result.clear();
2061 }
2062 }
2063 scanner.close();
2064 return result;
2065 }

  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
   */
2071 public static List<KeyValue> getFromStoreFile(Store store,
2072 byte [] row,
2073 NavigableSet<byte[]> columns
2074 ) throws IOException {
2075 Get get = new Get(row);
2076 Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
2077 s.put(store.getFamily().getName(), columns);
2078
2079 return getFromStoreFile(store,get);
2080 }
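
  // Illustrative use against a region opened by the test (names made up):
  //
  //   Store store = region.getStore(Bytes.toBytes("f"));
  //   NavigableSet<byte[]> cols = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
  //   cols.add(Bytes.toBytes("q"));
  //   List<KeyValue> kvs = getFromStoreFile(store, Bytes.toBytes("row"), cols);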
2081
2082
2083
2084
2085
2086 public static ZooKeeperWatcher getZooKeeperWatcher(
2087 HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
2088 IOException {
2089 ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
2090 "unittest", new Abortable() {
2091 boolean aborted = false;
2092
2093 @Override
2094 public void abort(String why, Throwable e) {
2095 aborted = true;
2096 throw new RuntimeException("Fatal ZK error, why=" + why, e);
2097 }
2098
2099 @Override
2100 public boolean isAborted() {
2101 return aborted;
2102 }
2103 });
2104 return zkw;
2105 }
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118 public static ZooKeeperWatcher createAndForceNodeToOpenedState(
2119 HBaseTestingUtility TEST_UTIL, HRegion region,
2120 ServerName serverName) throws ZooKeeperConnectionException,
2121 IOException, KeeperException, NodeExistsException {
2122 ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
2123 ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
2124 int version = ZKAssign.transitionNodeOpening(zkw, region
2125 .getRegionInfo(), serverName);
2126 ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
2127 version);
2128 return zkw;
2129 }
2130
2131 public static void assertKVListsEqual(String additionalMsg,
2132 final List<KeyValue> expected,
2133 final List<KeyValue> actual) {
2134 final int eLen = expected.size();
2135 final int aLen = actual.size();
2136 final int minLen = Math.min(eLen, aLen);
2137
2138 int i;
2139 for (i = 0; i < minLen
2140 && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
2141 ++i) {}
2142
2143 if (additionalMsg == null) {
2144 additionalMsg = "";
2145 }
2146 if (!additionalMsg.isEmpty()) {
2147 additionalMsg = ". " + additionalMsg;
2148 }
2149
2150 if (eLen != aLen || i != minLen) {
2151 throw new AssertionError(
2152 "Expected and actual KV arrays differ at position " + i + ": " +
2153 safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
2154 safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
2155 }
2156 }
2157
2158 private static <T> String safeGetAsStr(List<T> lst, int i) {
2159 if (0 <= i && i < lst.size()) {
2160 return lst.get(i).toString();
2161 } else {
2162 return "<out_of_range>";
2163 }
2164 }
2165
2166 public String getClusterKey() {
2167 return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
2168 + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
2169 + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
2170 HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
2171 }
2172
2173
2174 public HTable createRandomTable(String tableName,
2175 final Collection<String> families,
2176 final int maxVersions,
2177 final int numColsPerRow,
2178 final int numFlushes,
2179 final int numRegions,
2180 final int numRowsPerFlush)
2181 throws IOException, InterruptedException {
2182
2183 LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
2184 " regions, " + numFlushes + " storefiles per region, " +
2185 numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
2186 "\n");
2187
2188 final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
2189 final int numCF = families.size();
2190 final byte[][] cfBytes = new byte[numCF][];
2191 final byte[] tableNameBytes = Bytes.toBytes(tableName);
2192
2193 {
2194 int cfIndex = 0;
2195 for (String cf : families) {
2196 cfBytes[cfIndex++] = Bytes.toBytes(cf);
2197 }
2198 }
2199
2200 final int actualStartKey = 0;
2201 final int actualEndKey = Integer.MAX_VALUE;
2202 final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
2203 final int splitStartKey = actualStartKey + keysPerRegion;
2204 final int splitEndKey = actualEndKey - keysPerRegion;
2205 final String keyFormat = "%08x";
2206 final HTable table = createTable(tableNameBytes, cfBytes,
2207 maxVersions,
2208 Bytes.toBytes(String.format(keyFormat, splitStartKey)),
2209 Bytes.toBytes(String.format(keyFormat, splitEndKey)),
2210 numRegions);
2211 if (hbaseCluster != null) {
2212 getMiniHBaseCluster().flushcache(HConstants.META_TABLE_NAME);
2213 }
2214
2215 for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
2216 for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
2217 final byte[] row = Bytes.toBytes(String.format(keyFormat,
2218 actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
2219
2220 Put put = new Put(row);
2221 Delete del = new Delete(row);
2222 for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
2223 final byte[] cf = cfBytes[rand.nextInt(numCF)];
2224 final long ts = rand.nextInt();
2225 final byte[] qual = Bytes.toBytes("col" + iCol);
2226 if (rand.nextBoolean()) {
2227 final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
2228 "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
2229 ts + "_random_" + rand.nextLong());
2230 put.add(cf, qual, ts, value);
2231 } else if (rand.nextDouble() < 0.8) {
2232 del.deleteColumn(cf, qual, ts);
2233 } else {
2234 del.deleteColumns(cf, qual, ts);
2235 }
2236 }
2237
2238 if (!put.isEmpty()) {
2239 table.put(put);
2240 }
2241
2242 if (!del.isEmpty()) {
2243 table.delete(del);
2244 }
2245 }
2246 LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
2247 table.flushCommits();
2248 if (hbaseCluster != null) {
2249 getMiniHBaseCluster().flushcache(tableNameBytes);
2250 }
2251 }
2252
2253 return table;
2254 }
2255
2256 private static final int MIN_RANDOM_PORT = 0xc000;
2257 private static final int MAX_RANDOM_PORT = 0xfffe;

  /**
   * Returns a random port in the dynamic/private range. These ports cannot
   * be registered with IANA and are intended for dynamic allocation.
   */
2263 public static int randomPort() {
2264 return MIN_RANDOM_PORT
2265 + new Random().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
2266 }
2267
2268 public static int randomFreePort() {
2269 int port = 0;
2270 do {
2271 port = randomPort();
2272 try {
2273 ServerSocket sock = new ServerSocket(port);
2274 sock.close();
2275 } catch (IOException ex) {
2276 port = 0;
2277 }
2278 } while (port == 0);
2279 return port;
2280 }
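
  // Example (illustrative): pick a free port for an embedded test server.
  // Note the port is only known to be free at probe time; another process
  // could still grab it before the caller binds.
  //
  //   int port = HBaseTestingUtility.randomFreePort();
  //   conf.setInt("hbase.regionserver.info.port", port);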
2281
2282 public static void waitForHostPort(String host, int port)
2283 throws IOException {
2284 final int maxTimeMs = 10000;
2285 final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
2286 IOException savedException = null;
2287 LOG.info("Waiting for server at " + host + ":" + port);
2288 for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
2289 try {
2290 Socket sock = new Socket(InetAddress.getByName(host), port);
2291 sock.close();
2292 savedException = null;
2293 LOG.info("Server at " + host + ":" + port + " is available");
2294 break;
2295 } catch (UnknownHostException e) {
2296 throw new IOException("Failed to look up " + host, e);
2297 } catch (IOException e) {
2298 savedException = e;
2299 }
2300 Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
2301 }
2302
2303 if (savedException != null) {
2304 throw savedException;
2305 }
2306 }

  /**
   * Creates a pre-split table for load testing, with one column family.
   * @return the number of regions the table was split into
   */
2313 public static int createPreSplitLoadTestTable(Configuration conf,
2314 byte[] tableName, byte[] columnFamily, Algorithm compression,
2315 DataBlockEncoding dataBlockEncoding) throws IOException {
2316 HTableDescriptor desc = new HTableDescriptor(tableName);
2317 HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
2318 hcd.setDataBlockEncoding(dataBlockEncoding);
2319 hcd.setCompressionType(compression);
2320 return createPreSplitLoadTestTable(conf, desc, hcd);
2321 }

  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
2328 public static int createPreSplitLoadTestTable(Configuration conf,
2329 HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
2330 if (!desc.hasFamily(hcd.getName())) {
2331 desc.addFamily(hcd);
2332 }
2333
2334 int totalNumberOfRegions = 0;
2335 HBaseAdmin admin = new HBaseAdmin(conf);
2336 try {
2337
2338
2339
2340 int numberOfServers = admin.getClusterStatus().getServers().size();
2341 if (numberOfServers == 0) {
2342 throw new IllegalStateException("No live regionservers");
2343 }
2344
2345 totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
2346 LOG.info("Number of live regionservers: " + numberOfServers + ", " +
2347 "pre-splitting table into " + totalNumberOfRegions + " regions " +
2348 "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");
2349
2350 byte[][] splits = new RegionSplitter.HexStringSplit().split(
2351 totalNumberOfRegions);
2352
      admin.createTable(desc, splits);
2355 } catch (MasterNotRunningException e) {
2356 LOG.error("Master not running", e);
2357 throw new IOException(e);
2358 } catch (TableExistsException e) {
2359 LOG.warn("Table " + Bytes.toStringBinary(desc.getName()) +
2360 " already exists, continuing");
2361 } finally {
2362 admin.close();
2363 }
2364 return totalNumberOfRegions;
2365 }
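
  // Illustrative call (values made up): pre-split a load test table with a
  // GZ-compressed, FAST_DIFF-encoded family across all live region servers.
  //
  //   int regions = HBaseTestingUtility.createPreSplitLoadTestTable(conf,
  //       Bytes.toBytes("loadtest"), Bytes.toBytes("cf"),
  //       Compression.Algorithm.GZ, DataBlockEncoding.FAST_DIFF);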
2366
2367 public static int getMetaRSPort(Configuration conf) throws IOException {
2368 HTable table = new HTable(conf, HConstants.META_TABLE_NAME);
2369 HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
2370 table.close();
2371 return hloc.getPort();
2372 }
2373
2374 public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
2375 throws IOException {
2376 HTableDescriptor htd = new HTableDescriptor(tableName);
2377 htd.addFamily(hcd);
2378 HRegionInfo info =
2379 new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
2380 HRegion region =
2381 HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
2382 return region;
2383 }

  /**
   * Create region split keys between startKey and endKey.
   * @param startKey
   * @param endKey
   * @param numRegions the number of regions to be created. Has to be greater
   *          than 3.
   * @return region split keys
   */
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue(numRegions > 3);
    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] result = new byte[tmpSplitKeys.length + 1][];
    for (int i = 0; i < tmpSplitKeys.length; i++) {
      result[i + 1] = tmpSplitKeys[i];
    }
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }
2403

  /**
   * Create a set of column descriptors with the combination of compression,
   * encoding, and bloom filter types.
   */
2409 public static List<HColumnDescriptor> generateColumnDescriptors() {
2410 return generateColumnDescriptors("");
2411 }

  /**
   * Create a set of column descriptors with the combination of compression,
   * encoding, and bloom filter types.
   * @param prefix family names prefix
   */
2419 public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
2420 List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
2421 long familyId = 0;
2422 for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
2423 for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
2424 for (StoreFile.BloomType bloomType: StoreFile.BloomType.values()) {
2425 String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
2426 HColumnDescriptor htd = new HColumnDescriptor(name);
2427 htd.setCompressionType(compressionType);
2428 htd.setDataBlockEncoding(encodingType);
2429 htd.setBloomFilterType(bloomType);
2430 htds.add(htd);
2431 familyId++;
2432 }
2433 }
2434 }
2435 return htds;
2436 }

  /**
   * Get supported compression algorithms.
   * @return supported compression algorithms.
   */
2442 public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
2443 String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
2444 List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
2445 for (String algoName : allAlgos) {
2446 try {
2447 Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
2448 algo.getCompressor();
2449 supportedAlgos.add(algo);
      } catch (Throwable t) {
        // This algorithm is not available in this JVM; skip it.
      }
2453 }
2454 return supportedAlgos.toArray(new Compression.Algorithm[0]);
2455 }
2456 }