/**
 * Copyright 2011 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
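
/**
 * Facility for testing HBase. Spins up mini DFS, ZooKeeper, HBase, and
 * MapReduce clusters on demand, manages the temporary directories they write
 * to, and offers helpers for creating tables, loading data, and manipulating
 * regions from tests.
 */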
public class HBaseTestingUtility {
  private static final Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
  private Configuration conf;
  private MiniZooKeeperCluster zkCluster = null;

  private static final int DEFAULT_REGIONS_PER_SERVER = 5;

  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  private File dataTestDir = null;

  private File clusterTestDir = null;

  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  public static final String BASE_TEST_DIRECTORY_KEY =
      "test.build.data.basedirectory";

  public static final String DEFAULT_BASE_TEST_DIRECTORY = "target/test-data";

  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
      Arrays.asList(new Object[][] {
        { Compression.Algorithm.NONE },
        { Compression.Algorithm.GZ }
      });

  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
      Arrays.asList(new Object[][] {
        { Boolean.FALSE },
        { Boolean.TRUE }
      });

  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
      Compression.Algorithm.NONE, Compression.Algorithm.GZ
  };

  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
        HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    this.conf = conf;

    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
    setHDFSClientRetryProperty();
  }

  private void setHDFSClientRetryProperty() {
    this.conf.setInt("hdfs.client.retries.number", 1);
    HBaseFileSystem.setRetryCounts(conf);
  }

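  /**
   * @return the shared {@link Configuration} backing this testing utility;
   * mini clusters started afterwards pick up settings made on it.
   */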
  public Configuration getConfiguration() {
    return this.conf;
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  private Path getBaseTestDir() {
    String pathName = System.getProperty(
        BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);

    return new Path(pathName);
  }

  public Path getDataTestDir() {
    if (dataTestDir == null) {
      setupDataTestDir();
    }
    return new Path(dataTestDir.getAbsolutePath());
  }

  public Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  public Path getDataTestDir(final String subdirName) {
    return new Path(getDataTestDir(), subdirName);
  }

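  /**
   * Sets up a directory for the test to write its data to. Creates a new
   * randomly-named subdirectory of the base test directory and points the
   * Hadoop and HBase scratch-directory properties at it.
   */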
  private void setupDataTestDir() {
    if (dataTestDir != null) {
      LOG.warn("Data test dir already setup in " +
          dataTestDir.getAbsolutePath());
      return;
    }

    String randomStr = UUID.randomUUID().toString();
    Path testPath = new Path(getBaseTestDir(), randomStr);

    dataTestDir = new File(testPath.toString()).getAbsoluteFile();
    dataTestDir.deleteOnExit();

    createSubDirAndSystemProperty(
        "hadoop.log.dir",
        testPath, "hadoop-log-dir");

    createSubDirAndSystemProperty(
        "hadoop.tmp.dir",
        testPath, "hadoop-tmp-dir");

    createSubDir(
        "mapred.local.dir",
        testPath, "mapred-local-dir");

    createSubDirAndSystemProperty(
        "mapred.working.dir",
        testPath, "mapred-working-dir");

    createSubDir(
        "hbase.local.dir",
        testPath, "hbase-local-dir");
  }

  private void createSubDir(String propertyName, Path parent, String subDirName) {
    Path newPath = new Path(parent, subDirName);
    File newDir = new File(newPath.toString()).getAbsoluteFile();
    newDir.deleteOnExit();
    conf.set(propertyName, newDir.getAbsolutePath());
  }

  private void createSubDirAndSystemProperty(
      String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
          sysValue + " so I do NOT create it in " + dataTestDir.getAbsolutePath());
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
            propertyName + " property value differs in configuration and system: " +
            "Configuration=" + confValue + " while System=" + sysValue +
            " Overriding the configuration value with the system value.");
      }
      conf.set(propertyName, sysValue);
    } else {
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      LOG.warn("Cluster test dir already setup in " +
          clusterTestDir.getAbsolutePath());
      return;
    }

    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();

    clusterTestDir.deleteOnExit();
  }

  public void isRunningCluster() throws IOException {
    if (dfsCluster == null) return;
    throw new IOException("Cluster already running at " +
        this.clusterTestDir);
  }

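  /**
   * Start a minidfscluster.
   * @param servers how many datanodes to start
   * @return the mini dfs cluster created
   * @see #shutdownMiniDFSCluster()
   */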
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
      throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
      throws Exception {
    isRunningCluster();

    if (clusterTestDir == null) {
      setupClusterTestDir();
    }

    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.toString());

    System.setProperty("test.cache.data", this.clusterTestDir.toString());

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, null, hosts, null);

    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());

    this.conf.set("fs.default.name", fs.getUri().toString());

    this.dfsCluster.waitClusterUp();

    return this.dfsCluster;
  }

  public void shutdownMiniDFSCluster() throws Exception {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
    }
  }

  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    File zkClusterFile = new File(getClusterTestDir().toString());
    return startMiniZKCluster(zkClusterFile, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
      throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
      throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
        Integer.toString(clientPort));
    return this.zkCluster;
  }

  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }

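  /**
   * Start up a minicluster of hbase, dfs, and zookeeper with one master and
   * one region server.
   * @return the mini hbase cluster created
   * @see #shutdownMiniCluster()
   */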
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  public MiniHBaseCluster startMiniCluster(final int numSlaves)
      throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }

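  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @param numMasters number of masters to start
   * @param numSlaves number of region servers to start; also the number of
   * datanodes, unless dataNodeHosts is non-empty, in which case its length
   * determines the datanode count
   * @param dataNodeHosts hostnames to use for the datanodes, or null
   * @return the mini hbase cluster created
   */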
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts)
      throws Exception {
    int numDataNodes = numSlaves;
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    isRunningCluster();

    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    return startMiniHBaseCluster(numMasters, numSlaves);
  }

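  /**
   * Starts the HBase portion of the mini cluster only. DFS and ZooKeeper
   * must already be running.
   * @param numMasters number of masters to start
   * @param numSlaves number of region servers to start
   * @return reference to the mini hbase cluster
   */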
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
      final int numSlaves)
      throws IOException, InterruptedException {
    createRootDir();

    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster = new MiniHBaseCluster(c, numMasters, numSlaves);

    HTable t = new HTable(c, HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin();
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster) this.hbaseCluster;
  }

  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);

    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster) this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
        MiniHBaseCluster.class.getName());
  }

  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    if (this.clusterTestDir != null && this.clusterTestDir.exists()) {
      if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
          new Path(this.clusterTestDir.toString()))) {
        LOG.warn("Failed delete of " + this.clusterTestDir.toString());
      }
      this.clusterTestDir = null;
    }
    LOG.info("Minicluster is down");
  }

  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close();
      hbaseAdmin = null;
    }

    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }
  }

  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }

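  /**
   * Creates the hbase root directory in the default filesystem, points
   * hbase.rootdir at it, and stamps it with the current file-system-layout
   * version.
   * @return the path to the hbase root directory
   */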
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  public void flush(byte[] tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  public void compact(byte[] tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

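  /**
   * Create a table with a single column family.
   * @return an HTable instance for the created table
   */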
  public HTable createTable(byte[] tableName, byte[] family)
      throws IOException {
    return createTable(tableName, new byte[][] { family });
  }

  public HTable createTable(byte[] tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      desc.addFamily(new HColumnDescriptor(family));
    }
    getHBaseAdmin().createTable(desc);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);

    waitUntilAllRegionsAssigned(tableName);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public void deleteTable(byte[] tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + Bytes.toString(tableName) + " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }

  public HTable truncateTable(byte[] tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan.close();
    return table;
  }

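  /**
   * Load a table with rows keyed 'aaa' through 'zzz', each row carrying its
   * own key as the value.
   * @return the number of rows loaded
   */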
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

  public int loadTable(final HTable t, final byte[][] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          for (int i = 0; i < f.length; i++) {
            put.add(f[i], null, k);
          }
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

  public int loadRegion(final HRegion r, final byte[] f)
      throws IOException {
    return loadRegion(r, f, false);
  }

  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
      throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          if (r.getLog() == null) put.setWriteToWAL(false);
          r.put(put);
          rowCount++;
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }

  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  public int countRows(final HTable table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

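  /**
   * Return an MD5 digest over all the row keys in the given table, usable as
   * a cheap fingerprint when comparing table contents.
   */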
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    return Bytes.toStringBinary(digest.digest());
  }

  public int createMultiRegions(HTable table, byte[] columnFamily)
      throws IOException {
    return createMultiRegions(table, columnFamily, true);
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };

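  /**
   * Creates regions keyed "aaa" through "zzz" for the given table by writing
   * region rows directly into .META..
   * @param cleanupFS whether to also remove the original single region's
   * directory from the filesystem
   * @return the number of regions created
   */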
  public int createMultiRegions(HTable table, byte[] columnFamily, boolean cleanupFS)
      throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily, KEYS, cleanupFS);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] family, int numRegions)
      throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] regionStartKeys = new byte[splitKeys.length + 1][];
    for (int i = 0; i < splitKeys.length; i++) {
      regionStartKeys[i + 1] = splitKeys[i];
    }
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte[][] startKeys) throws IOException {
    return createMultiRegions(c, table, columnFamily, startKeys, true);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte[][] startKeys, boolean cleanupFS)
      throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }

    List<byte[]> rows = getMetaTableRows(htd.getName());
    String regionToDeleteInFS = table
        .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
        .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getTableName(),
          startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
          Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegions: inserted " + hri.toString());
      newRegions.add(hri);
      count++;
    }

    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
          Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    if (cleanupFS) {
      Path tableDir = new Path(getDefaultRootDirPath().toString()
          + System.getProperty("file.separator") + htd.getNameAsString()
          + System.getProperty("file.separator") + regionToDeleteInFS);
      FileSystem.get(c).delete(tableDir, true);
    }

    HConnection conn = table.getConnection();
    conn.clearRegionCache();

    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for (HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }

  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final HTableDescriptor htd, byte[][] startKeys)
      throws IOException {
    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
          startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
          Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegionsInMeta: inserted " + hri.toString());
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

  public List<byte[]> getMetaTableRows() throws IOException {
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

  public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      byte[] val = result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
      if (val == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        continue;
      }
      HRegionInfo info = Writables.getHRegionInfo(val);
      if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
        LOG.info("getMetaTableRows: row -> " +
            Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
      throws IOException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + metaRows.size() + " rows for table " +
        Bytes.toString(tableName));
    byte[] firstrow = metaRows.get(0);
    LOG.debug("FirstRow=" + Bytes.toString(firstrow));
    int index = getMiniHBaseCluster().getServerWith(firstrow);
    return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
  }

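  /**
   * Starts a MiniMRCluster with two task trackers.
   * @throws IOException if the cluster fails to start
   */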
  public void startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
  }

  public void startMiniMapReduceCluster(final int servers) throws IOException {
    LOG.info("Starting mini mapreduce cluster...");

    Configuration c = getConfiguration();
    String logDir = c.get("hadoop.log.dir");
    String tmpDir = c.get("hadoop.tmp.dir");
    if (logDir == null) {
      logDir = tmpDir;
    }
    System.setProperty("hadoop.log.dir", logDir);
    c.set("mapred.output.dir", tmpDir);

    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    mrCluster = new MiniMRCluster(servers,
        FileSystem.get(conf).getUri().toString(), 1);
    LOG.info("Mini mapreduce cluster started");
    JobConf mrClusterJobConf = mrCluster.createJobConf();
    c.set("mapred.job.tracker", mrClusterJobConf.get("mapred.job.tracker"));

    conf.set("mapreduce.framework.name", "yarn");
    String rmAddress = mrClusterJobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String schedulerAddress =
        mrClusterJobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }

  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }

    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }

  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }

  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }

  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    decrementMinRegionServerCount(getConfiguration());

    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
          Math.max(currentCount - 1, 1));
    }
  }

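  /**
   * Expire a ZooKeeper session the way the ZooKeeper documentation
   * recommends: connect a second client with the same session id and
   * password, then close it so the server expires the original session.
   * @param nodeZK the watcher whose session should be expired
   * @param checkStatus if true, verify afterwards that .META. is still
   * reachable
   */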
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
      throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    int sessionTimeout = 500;
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    ZooKeeper monitor = new ZooKeeper(quorumServers,
        1000, new org.apache.zookeeper.Watcher() {
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event=" + watchedEvent);
      }
    }, sessionID, password);

    ZooKeeper newZK = new ZooKeeper(quorumServers,
        sessionTimeout, EmptyWatcher.instance, sessionID, password);
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), HConstants.META_TABLE_NAME).close();
    }
  }

  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

  public HBaseCluster getHBaseClusterInterface() {
    return hbaseCluster;
  }

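  /**
   * Returns a shared HBaseAdmin instance, lazily created against a copy of
   * the current configuration and closed by
   * {@link #shutdownMiniHBaseCluster()}.
   */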
  public synchronized HBaseAdmin getHBaseAdmin()
      throws IOException {
    if (hbaseAdmin == null) {
      hbaseAdmin = new HBaseAdmin(new Configuration(getConfiguration()));
    }
    return hbaseAdmin;
  }
  private HBaseAdmin hbaseAdmin = null;

  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }

  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
    if (dfsCluster != null && dfsCluster.isClusterUp()) {
      throw new IOException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }

  public boolean cleanupTestDir() throws IOException {
    if (dataTestDir == null) {
      return false;
    } else {
      boolean ret = deleteDir(getDataTestDir());
      dataTestDir = null;
      return ret;
    }
  }

  public boolean cleanupTestDir(final String subdir) throws IOException {
    if (dataTestDir == null) {
      return false;
    }
    return deleteDir(getDataTestDir(subdir));
  }

  public boolean deleteDir(final Path dir) throws IOException {
    FileSystem fs = getTestFileSystem();
    if (fs.exists(dir)) {
      return fs.delete(dir, true);
    }
    return false;
  }

  public void waitTableAvailable(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!getHBaseAdmin().isTableAvailable(table)) {
      assertTrue("Timed out waiting for table to become available " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

  public void waitTableEnabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!getHBaseAdmin().isTableAvailable(table) ||
           !getHBaseAdmin().isTableEnabled(table)) {
      assertTrue("Timed out waiting for table to become available and enabled " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }

    return startedServer;
  }

  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
         getMiniHBaseCluster().getRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:" + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }

  public static User getDifferentUser(final Configuration c,
      final String differentiatingSuffix)
      throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }

    String username = User.getCurrent().getName() +
        differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
        new String[] { "supergroup" });
    return user;
  }

  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?>[] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
                stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }

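  /**
   * Wait until all regions of the given table have a server assigned in
   * .META., timing out after one minute.
   * @param tableName the table to wait on
   */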
  public void waitUntilAllRegionsAssigned(final byte[] tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }

  public void waitUntilAllRegionsAssigned(final byte[] tableName, final long timeout)
      throws IOException {
    long deadline = System.currentTimeMillis() + timeout;
    HTable meta = new HTable(getConfiguration(), HConstants.META_TABLE_NAME);
    try {
      while (true) {
        boolean allRegionsAssigned = true;
        Scan scan = new Scan();
        scan.addFamily(HConstants.CATALOG_FAMILY);
        ResultScanner s = meta.getScanner(scan);
        try {
          Result r;
          while ((r = s.next()) != null) {
            byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
            HRegionInfo info = Writables.getHRegionInfoOrNull(b);
            if (info != null && Bytes.equals(info.getTableName(), tableName)) {
              b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
              allRegionsAssigned &= (b != null);
            }
          }
        } finally {
          s.close();
        }
        if (allRegionsAssigned) {
          return;
        }
        long now = System.currentTimeMillis();
        if (now > deadline) {
          throw new IOException("Timeout waiting for all regions of " +
              Bytes.toStringBinary(tableName) + " to be assigned");
        }
        try {
          Thread.sleep(deadline - now < 200 ? deadline - now : 200);
        } catch (InterruptedException e) {
          throw new IOException(e);
        }
      }
    } finally {
      meta.close();
    }
  }

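  /**
   * Do a small get/scan against one store directly; Store exposes no query
   * methods of its own, so this goes through the store's scanner.
   */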
  public static List<KeyValue> getFromStoreFile(Store store,
      Get get) throws IOException {
    MultiVersionConsistencyControl.resetThreadReadPoint();
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()));

    List<KeyValue> result = new ArrayList<KeyValue>();
    scanner.next(result);
    if (!result.isEmpty()) {
      KeyValue kv = result.get(0);
      if (!Bytes.equals(kv.getRow(), get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }

  public static List<KeyValue> getFromStoreFile(Store store,
      byte[] row,
      NavigableSet<byte[]> columns
      ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }

  public static ZooKeeperWatcher getZooKeeperWatcher(
      HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
      IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
        "unittest", new Abortable() {
          boolean aborted = false;

          @Override
          public void abort(String why, Throwable e) {
            aborted = true;
            throw new RuntimeException("Fatal ZK error, why=" + why, e);
          }

          @Override
          public boolean isAborted() {
            return aborted;
          }
        });
    return zkw;
  }

  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw, region
        .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
        version);
    return zkw;
  }

  public static void assertKVListsEqual(String additionalMsg,
      final List<KeyValue> expected,
      final List<KeyValue> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i;
    for (i = 0; i < minLen
        && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
        ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
          "Expected and actual KV arrays differ at position " + i + ": " +
          safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
          safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }

  private static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }

  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
        + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }

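  /**
   * Creates a pre-split table and fills it with randomly generated puts and
   * deletes, flushing between rounds so each region accumulates several
   * store files.
   */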
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
        " regions, " + numFlushes + " storefiles per region, " +
        numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
        "\n");

    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    final byte[] tableNameBytes = Bytes.toBytes(tableName);

    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableNameBytes, cfBytes,
        maxVersions,
        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
        numRegions);
    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(HConstants.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(tableNameBytes);
      }
    }

    return table;
  }

  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;

  public static int randomPort() {
    return MIN_RANDOM_PORT
        + new Random().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
  }

  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        port = 0;
      }
    } while (port == 0);
    return port;
  }

  public static void waitForHostPort(String host, int port)
      throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }

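  /**
   * Creates a pre-split table for load testing, allocating
   * DEFAULT_REGIONS_PER_SERVER regions per live region server.
   * @return the number of regions the table was pre-split into
   */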
  public static int createPreSplitLoadTestTable(Configuration conf,
      byte[] tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd);
  }

  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
          "pre-splitting table into " + totalNumberOfRegions + " regions " +
          "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
          totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + Bytes.toStringBinary(desc.getName()) +
          " already exists, continuing");
    } finally {
      admin.close();
    }
    return totalNumberOfRegions;
  }

  public static int getMetaRSPort(Configuration conf) throws IOException {
    HTable table = new HTable(conf, HConstants.META_TABLE_NAME);
    HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
    table.close();
    return hloc.getPort();
  }

  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(hcd);
    HRegionInfo info =
        new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
    HRegion region =
        HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue(numRegions > 3);
    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] result = new byte[tmpSplitKeys.length + 1][];
    for (int i = 0; i < tmpSplitKeys.length; i++) {
      result[i + 1] = tmpSplitKeys[i];
    }
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }

  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
    List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
    long familyId = 0;
    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
        for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          HColumnDescriptor htd = new HColumnDescriptor(name);
          htd.setCompressionType(compressionType);
          htd.setDataBlockEncoding(encodingType);
          htd.setBloomFilterType(bloomType);
          htds.add(htd);
          familyId++;
        }
      }
    }
    return htds;
  }

  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // this algorithm is not available in this environment; skip it
      }
    }
    return supportedAlgos.toArray(new Compression.Algorithm[0]);
  }
}