/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

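
/**
 * Facility for testing HBase. Spins up mini DFS, ZooKeeper, HBase, and
 * (optionally) MapReduce clusters, and provides helpers for creating tables,
 * loading data, and manipulating regions. Test data is written below the
 * directory named by the {@value #BASE_TEST_DIRECTORY_KEY} system property
 * (default {@value #DEFAULT_BASE_TEST_DIRECTORY}).
 *
 * <p>A minimal usage sketch (the table and family names here are
 * illustrative, not part of this API):
 * <pre>
 *   HBaseTestingUtility util = new HBaseTestingUtility();
 *   util.startMiniCluster();
 *   try {
 *     HTable table = util.createTable(Bytes.toBytes("t"), Bytes.toBytes("f"));
 *     util.loadTable(table, Bytes.toBytes("f"));
 *   } finally {
 *     util.shutdownMiniCluster();
 *   }
 * </pre>
 */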
public class HBaseTestingUtility {
  private static final Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
  private Configuration conf;
  private MiniZooKeeperCluster zkCluster = null;

  private static final int DEFAULT_REGIONS_PER_SERVER = 5;

  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  private File dataTestDir = null;

  private File clusterTestDir = null;

  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  public static final String BASE_TEST_DIRECTORY_KEY =
    "test.build.data.basedirectory";

  public static final String DEFAULT_BASE_TEST_DIRECTORY = "target/test-data";

  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Boolean.FALSE },
      { Boolean.TRUE }
    });

  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
    Compression.Algorithm.NONE, Compression.Algorithm.GZ
  };

  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
    bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    this.conf = conf;

    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
    setHDFSClientRetryProperty();
  }

  private void setHDFSClientRetryProperty() {
    this.conf.setInt("hdfs.client.retries.number", 1);
    HBaseFileSystem.setRetryCounts(conf);
  }

  public Configuration getConfiguration() {
    return this.conf;
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

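
  /**
   * @return the base directory for all test data, taken from the
   * {@value #BASE_TEST_DIRECTORY_KEY} system property
   * (default {@value #DEFAULT_BASE_TEST_DIRECTORY}).
   */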
  private Path getBaseTestDir() {
    String pathName = System.getProperty(
      BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);

    return new Path(pathName);
  }

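
  /**
   * @return a unique, per-instance directory for test data, created lazily
   * by {@link #setupDataTestDir()} on first use.
   */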
  public Path getDataTestDir() {
    if (dataTestDir == null) {
      setupDataTestDir();
    }
    return new Path(dataTestDir.getAbsolutePath());
  }

  public Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  public Path getDataTestDir(final String subdirName) {
    return new Path(getDataTestDir(), subdirName);
  }

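
  /**
   * Creates a randomly named directory under the base test directory and
   * points the Hadoop and HBase "local" directory properties
   * (hadoop.log.dir, hadoop.tmp.dir, mapred.local.dir, mapred.working.dir,
   * hbase.local.dir) at subdirectories of it.
   */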
  private void setupDataTestDir() {
    if (dataTestDir != null) {
      LOG.warn("Data test dir already setup in " +
        dataTestDir.getAbsolutePath());
      return;
    }

    String randomStr = UUID.randomUUID().toString();
    Path testPath = new Path(getBaseTestDir(), randomStr);

    dataTestDir = new File(testPath.toString()).getAbsoluteFile();
    dataTestDir.deleteOnExit();

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    createSubDir(
      "mapred.local.dir",
      testPath, "mapred-local-dir");

    createSubDirAndSystemProperty(
      "mapred.working.dir",
      testPath, "mapred-working-dir");

    createSubDir(
      "hbase.local.dir",
      testPath, "hbase-local-dir");
  }

  private void createSubDir(String propertyName, Path parent, String subDirName) {
    Path newPath = new Path(parent, subDirName);
    File newDir = new File(newPath.toString()).getAbsoluteFile();
    newDir.deleteOnExit();
    conf.set(propertyName, newDir.getAbsolutePath());
  }

  private void createSubDirAndSystemProperty(
    String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + " so I do NOT create it in " + dataTestDir.getAbsolutePath());
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          ". Overriding the configuration value with the system value.");
      }
      conf.set(propertyName, sysValue);
    } else {
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      LOG.warn("Cluster test dir already setup in " +
        clusterTestDir.getAbsolutePath());
      return;
    }

    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();

    clusterTestDir.deleteOnExit();
  }

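
  /**
   * Guard against starting a second cluster from the same utility instance.
   * @throws IOException if a mini DFS cluster is already running.
   */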
  public void isRunningCluster() throws IOException {
    if (dfsCluster == null) return;
    throw new IOException("Cluster already running at " +
      this.clusterTestDir);
  }

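
  /**
   * Starts a MiniDFSCluster with the given number of datanodes.
   * @param servers number of datanodes to start.
   * @return the started cluster.
   */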
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
  throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
  throws Exception {
    isRunningCluster();

    if (clusterTestDir == null) {
      setupClusterTestDir();
    }

    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.toString());

    System.setProperty("test.cache.data", this.clusterTestDir.toString());

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());

    this.conf.set("fs.default.name", fs.getUri().toString());

    this.dfsCluster.waitClusterUp();

    return this.dfsCluster;
  }


  public void shutdownMiniDFSCluster() throws Exception {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
    }
  }

  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
  throws Exception {
    File zkClusterFile = new File(getClusterTestDir().toString());
    return startMiniZKCluster(zkClusterFile, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
  throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
    int zooKeeperServerNum)
  throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }

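
  /**
   * Starts a mini cluster with one master and one regionserver (plus the
   * backing DFS and ZooKeeper mini clusters).
   * @see #startMiniCluster(int, int)
   */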
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  public MiniHBaseCluster startMiniCluster(final int numSlaves)
  throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves)
  throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }

  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, final String[] dataNodeHosts)
  throws Exception {
    int numDataNodes = numSlaves;
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
      numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    isRunningCluster();

    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    return startMiniHBaseCluster(numMasters, numSlaves);
  }

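
  /**
   * Starts just the HBase portion of the mini cluster; DFS and ZooKeeper
   * must already be running. Scans META before returning so the cluster is
   * known to be serving.
   */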
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
    final int numSlaves)
  throws IOException, InterruptedException {
    createRootDir();

    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster = new MiniHBaseCluster(c, numMasters, numSlaves);

    HTable t = new HTable(c, HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin();
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster) this.hbaseCluster;
  }

  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);

    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster) this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
      MiniHBaseCluster.class.getName());
  }

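
  /**
   * Stops the mini HBase, ZooKeeper (unless it was passed in via
   * {@link #setZkCluster(MiniZooKeeperCluster)}), and DFS clusters, then
   * deletes the cluster test directory.
   */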
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    if (this.clusterTestDir != null && this.clusterTestDir.exists()) {
      if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
        new Path(this.clusterTestDir.toString()))) {
        LOG.warn("Failed delete of " + this.clusterTestDir.toString());
      }
      this.clusterTestDir = null;
    }
    LOG.info("Minicluster is down");
  }

  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close();
      hbaseAdmin = null;
    }

    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }
  }

  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }

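
  /**
   * Creates the HBase root directory on the test filesystem, stamps it with
   * the current file system version, and points hbase.rootdir at it.
   * @return the root directory path.
   */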
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  public void flush(byte [] tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  public void compact(byte [] tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

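
  /**
   * Creates a table with a single column family. The overloads below allow
   * multiple families, version counts, block sizes, and pre-split key ranges.
   * @return an HTable handle on the new table.
   */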
  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException {
    return createTable(tableName, new byte[][] { family });
  }

  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
      new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions, byte[] startKey, byte[] endKey, int numRegions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    return new HTable(getConfiguration(), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    final Configuration c)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      desc.addFamily(new HColumnDescriptor(family));
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    final Configuration c, int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions)
        .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    int[] numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  public void deleteTable(byte[] tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + Bytes.toString(tableName) +
        " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }

  public HTable truncateTable(byte [] tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan = table.getScanner(scan);
    resScan.close();
    return table;
  }

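
  /**
   * Loads every three-letter row key from 'aaa' to 'zzz' (17,576 rows) into
   * the given family, with the row key as the value.
   * @return the number of rows written.
   */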
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

  public int loadTable(final HTable t, final byte[][] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          for (int i = 0; i < f.length; i++) {
            put.add(f[i], null, k);
          }
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

  public int loadRegion(final HRegion r, final byte[] f)
  throws IOException {
    return loadRegion(r, f, false);
  }

  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
  throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          if (r.getLog() == null) put.setWriteToWAL(false);
          r.put(put);
          rowCount++;
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }

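
  /**
   * @return the number of rows in the given table, counted with a full scan.
   */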
  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  public int countRows(final HTable table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

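
  /**
   * @return a string rendering of the MD5 digest of all the row keys in the
   * given table, useful for cheaply comparing table contents across tests.
   */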
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    return Bytes.toStringBinary(digest.digest());
  }

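
  /**
   * Splits the given table into multiple regions by writing region rows
   * directly into META (see {@link #KEYS} for the default split points).
   * @return the number of regions created.
   */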
  public int createMultiRegions(HTable table, byte[] columnFamily)
  throws IOException {
    return createMultiRegions(table, columnFamily, true);
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };

  public int createMultiRegions(HTable table, byte[] columnFamily, boolean cleanupFS)
  throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily, KEYS, cleanupFS);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
    final byte [] family, int numRegions)
  throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte [] startKey = Bytes.toBytes("aaaaa");
    byte [] endKey = Bytes.toBytes("zzzzz");
    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] regionStartKeys = new byte[splitKeys.length + 1][];
    for (int i = 0; i < splitKeys.length; i++) {
      regionStartKeys[i + 1] = splitKeys[i];
    }
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
    final byte[] columnFamily, byte [][] startKeys) throws IOException {
    return createMultiRegions(c, table, columnFamily, startKeys, true);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
    final byte[] columnFamily, byte [][] startKeys, boolean cleanupFS)
  throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }

    List<byte[]> rows = getMetaTableRows(htd.getName());
    String regionToDeleteInFS = table
      .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
      .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getTableName(),
        startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegions: inserted " + hri.toString());
      newRegions.add(hri);
      count++;
    }

    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    if (cleanupFS) {
      Path tableDir = new Path(getDefaultRootDirPath(),
        new Path(htd.getNameAsString(), regionToDeleteInFS));
      getDFSCluster().getFileSystem().delete(tableDir, true);
    }

    HConnection conn = table.getConnection();
    conn.clearRegionCache();

    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for (HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }

  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
    final HTableDescriptor htd, byte [][] startKeys)
  throws IOException {
    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
        startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegionsInMeta: inserted " + hri.toString());
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

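
  /**
   * @return all row keys currently in the META table.
   */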
  public List<byte[]> getMetaTableRows() throws IOException {
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

  public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      byte[] val = result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
      if (val == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        continue;
      }
      HRegionInfo info = Writables.getHRegionInfo(val);
      if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
  throws IOException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + metaRows.size() + " rows for table " +
      Bytes.toString(tableName));
    byte [] firstrow = metaRows.get(0);
    LOG.debug("FirstRow=" + Bytes.toString(firstrow));
    int index = getMiniHBaseCluster().getServerWith(firstrow);
    return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
  }

  public void startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
  }

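
  /**
   * Starts a MiniMRCluster with the given number of servers and wires its
   * job tracker (and, where present, YARN addresses) into the test
   * configuration.
   */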
  public void startMiniMapReduceCluster(final int servers) throws IOException {
    LOG.info("Starting mini mapreduce cluster...");

    Configuration c = getConfiguration();
    String logDir = c.get("hadoop.log.dir");
    String tmpDir = c.get("hadoop.tmp.dir");
    if (logDir == null) {
      logDir = tmpDir;
    }
    System.setProperty("hadoop.log.dir", logDir);
    c.set("mapred.output.dir", tmpDir);
    mrCluster = new MiniMRCluster(servers,
      FileSystem.get(conf).getUri().toString(), 1);
    LOG.info("Mini mapreduce cluster started");
    JobConf mrClusterJobConf = mrCluster.createJobConf();
    c.set("mapred.job.tracker", mrClusterJobConf.get("mapred.job.tracker"));

    conf.set("mapreduce.framework.name", "yarn");
    String rmAddress = mrClusterJobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String schedulerAddress =
      mrClusterJobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }

  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }

    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }

  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }

  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }

  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    decrementMinRegionServerCount(getConfiguration());

    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
      ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
        Math.max(currentCount - 1, 1));
    }
  }

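
  /**
   * Expires the ZooKeeper session of the given watcher. Two extra
   * connections are opened on the node's session id: a short-lived one whose
   * close() terminates the session server-side, and a "monitor" connection
   * that just logs the resulting session events. If checkStatus is set, META
   * is probed afterwards to verify the cluster still responds.
   */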
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
  throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    int sessionTimeout = 500;
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher() {
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event=" + watchedEvent);
      }
    }, sessionID, password);

    ZooKeeper newZK = new ZooKeeper(quorumServers,
      sessionTimeout, EmptyWatcher.instance, sessionID, password);
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), HConstants.META_TABLE_NAME).close();
    }
  }

  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

  public HBaseCluster getHBaseClusterInterface() {
    return hbaseCluster;
  }

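
  /**
   * @return a shared HBaseAdmin instance, created lazily and closed by
   * {@link #shutdownMiniHBaseCluster()}.
   */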
  public synchronized HBaseAdmin getHBaseAdmin()
  throws IOException {
    if (hbaseAdmin == null) {
      hbaseAdmin = new HBaseAdmin(new Configuration(getConfiguration()));
    }
    return hbaseAdmin;
  }
  private HBaseAdmin hbaseAdmin = null;

  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }

  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
    if (dfsCluster != null && dfsCluster.isClusterUp()) {
      throw new IOException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }

  public boolean cleanupTestDir() throws IOException {
    if (dataTestDir == null) {
      return false;
    } else {
      boolean ret = deleteDir(getDataTestDir());
      dataTestDir = null;
      return ret;
    }
  }

  public boolean cleanupTestDir(final String subdir) throws IOException {
    if (dataTestDir == null) {
      return false;
    }
    return deleteDir(getDataTestDir(subdir));
  }

  public boolean deleteDir(final Path dir) throws IOException {
    FileSystem fs = getTestFileSystem();
    if (fs.exists(dir)) {
      return fs.delete(dir, true);
    }
    return false;
  }

  public void waitTableAvailable(byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!getHBaseAdmin().isTableAvailable(table)) {
      assertTrue("Timed out waiting for table to become available " +
        Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

  public void waitTableEnabled(byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!getHBaseAdmin().isTableAvailable(table) ||
           !getHBaseAdmin().isTableEnabled(table)) {
      assertTrue("Timed out waiting for table to become available and enabled " +
        Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }


  public boolean ensureSomeRegionServersAvailable(final int num)
  throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }

    return startedServer;
  }

  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
  throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
         getMiniHBaseCluster().getRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:" + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }

  public static User getDifferentUser(final Configuration c,
    final String differentiatingSuffix)
  throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }

    String username = User.getCurrent().getName() +
      differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
      new String[]{"supergroup"});
    return user;
  }

  public static void setMaxRecoveryErrorCount(final OutputStream stream,
    final int max) {
    try {
      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }

  public void waitUntilAllRegionsAssigned(final byte[] tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }

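
  /**
   * Polls META until every region of the given table has a server assigned,
   * or throws if the timeout (in milliseconds) elapses first.
   */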
  public void waitUntilAllRegionsAssigned(final byte[] tableName, final long timeout)
  throws IOException {
    long deadline = System.currentTimeMillis() + timeout;
    HTable meta = new HTable(getConfiguration(), HConstants.META_TABLE_NAME);
    try {
      while (true) {
        boolean allRegionsAssigned = true;
        Scan scan = new Scan();
        scan.addFamily(HConstants.CATALOG_FAMILY);
        ResultScanner s = meta.getScanner(scan);
        try {
          Result r;
          while ((r = s.next()) != null) {
            byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
            HRegionInfo info = Writables.getHRegionInfoOrNull(b);
            if (info != null && Bytes.equals(info.getTableName(), tableName)) {
              b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
              allRegionsAssigned &= (b != null);
            }
          }
        } finally {
          s.close();
        }
        if (allRegionsAssigned) {
          return;
        }
        long now = System.currentTimeMillis();
        if (now > deadline) {
          throw new IOException("Timeout waiting for all regions of " +
            Bytes.toStringBinary(tableName) + " to be assigned");
        }
        try {
          Thread.sleep(deadline - now < 200 ? deadline - now : 200);
        } catch (InterruptedException e) {
          throw new IOException(e);
        }
      }
    } finally {
      meta.close();
    }
  }

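
  /**
   * Reads the cells for the given Get directly from a Store, bypassing the
   * region-level scanner; returns an empty list if the first row found does
   * not match the requested row.
   */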
  public static List<KeyValue> getFromStoreFile(Store store,
    Get get) throws IOException {
    MultiVersionConsistencyControl.resetThreadReadPoint();
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
      scan.getFamilyMap().get(store.getFamily().getName()));

    List<KeyValue> result = new ArrayList<KeyValue>();
    scanner.next(result);
    if (!result.isEmpty()) {
      KeyValue kv = result.get(0);
      if (!Bytes.equals(kv.getRow(), get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }

  public static List<KeyValue> getFromStoreFile(Store store,
    byte [] row,
    NavigableSet<byte[]> columns
  ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }

  public static ZooKeeperWatcher getZooKeeperWatcher(
    HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
    IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
      "unittest", new Abortable() {
      boolean aborted = false;

      @Override
      public void abort(String why, Throwable e) {
        aborted = true;
        throw new RuntimeException("Fatal ZK error, why=" + why, e);
      }

      @Override
      public boolean isAborted() {
        return aborted;
      }
    });
    return zkw;
  }

  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
    HBaseTestingUtility TEST_UTIL, HRegion region,
    ServerName serverName) throws ZooKeeperConnectionException,
    IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw, region
      .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
      version);
    return zkw;
  }

  public static void assertKVListsEqual(String additionalMsg,
    final List<KeyValue> expected,
    final List<KeyValue> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i;
    for (i = 0; i < minLen
        && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
        ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
        "Expected and actual KV arrays differ at position " + i + ": " +
        safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
        safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }

  private static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }

  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
      + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
      + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
        HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }

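
  /**
   * Creates a pre-split table and fills it with randomly generated puts and
   * deletes (seeded from the table name, so runs are reproducible), flushing
   * between write batches when a mini cluster is running.
   */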
  public HTable createRandomTable(String tableName,
    final Collection<String> families,
    final int maxVersions,
    final int numColsPerRow,
    final int numFlushes,
    final int numRegions,
    final int numRowsPerFlush)
  throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
      " regions, " + numFlushes + " storefiles per region, " +
      numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
      "\n");

    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    final byte[] tableNameBytes = Bytes.toBytes(tableName);

    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableNameBytes, cfBytes,
      maxVersions,
      Bytes.toBytes(String.format(keyFormat, splitStartKey)),
      Bytes.toBytes(String.format(keyFormat, splitEndKey)),
      numRegions);
    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(HConstants.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
          actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
              "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
              ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(tableNameBytes);
      }
    }

    return table;
  }

  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;

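
  /**
   * Returns a random port in the range [0xc000, 0xfffe). Use
   * {@link #randomFreePort()} to get one that is verifiably bindable.
   */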
  public static int randomPort() {
    return MIN_RANDOM_PORT
      + new Random().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
  }

  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        port = 0;
      }
    } while (port == 0);
    return port;
  }

  public static void waitForHostPort(String host, int port)
  throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }

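
  /**
   * Creates a table for load testing, pre-split into
   * {@link #DEFAULT_REGIONS_PER_SERVER} regions per live regionserver.
   * @return the number of regions computed for the pre-split (also returned
   * when the table already exists).
   */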
  public static int createPreSplitLoadTestTable(Configuration conf,
    byte[] tableName, byte[] columnFamily, Algorithm compression,
    DataBlockEncoding dataBlockEncoding) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd);
  }

  public static int createPreSplitLoadTestTable(Configuration conf,
    HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
        "pre-splitting table into " + totalNumberOfRegions + " regions " +
        "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
        totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + Bytes.toStringBinary(desc.getName()) +
        " already exists, continuing");
    } finally {
      admin.close();
    }
    return totalNumberOfRegions;
  }

  public static int getMetaRSPort(Configuration conf) throws IOException {
    HTable table = new HTable(conf, HConstants.META_TABLE_NAME);
    HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
    table.close();
    return hloc.getPort();
  }

  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
  throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(hcd);
    HRegionInfo info =
      new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
    HRegion region =
      HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue(numRegions > 3);
    byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] result = new byte[tmpSplitKeys.length + 1][];
    for (int i = 0; i < tmpSplitKeys.length; i++) {
      result[i + 1] = tmpSplitKeys[i];
    }
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }

  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
    List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
    long familyId = 0;
    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
        for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          HColumnDescriptor htd = new HColumnDescriptor(name);
          htd.setCompressionType(compressionType);
          htd.setDataBlockEncoding(encodingType);
          htd.setBloomFilterType(bloomType);
          htds.add(htd);
          familyId++;
        }
      }
    }
    return htds;
  }

  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
      }
    }
    return supportedAlgos.toArray(new Compression.Algorithm[0]);
  }
}