/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
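/**
 * Facility for testing HBase. Spins up, and tears down, miniclusters
 * (ZooKeeper, HDFS, MapReduce and HBase) and offers helpers for creating
 * tables, loading data and waiting on cluster state. Manages one cluster at a
 * time. A minimal sketch of typical usage:
 *
 * <pre>
 *   HBaseTestingUtility util = new HBaseTestingUtility();
 *   util.startMiniCluster();
 *   HTable table = util.createTable(Bytes.toBytes("t"), Bytes.toBytes("f"));
 *   // ... exercise the table ...
 *   util.shutdownMiniCluster();
 * </pre>
 */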
public class HBaseTestingUtility {
  private static final Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
  private Configuration conf;
  private MiniZooKeeperCluster zkCluster = null;

  /** Default number of regions per regionserver when pre-splitting a table for load tests. */
  private static int DEFAULT_REGIONS_PER_SERVER = 5;

  /** Set if we were passed a zkCluster. If so, we won't shut it down on cluster teardown. */
  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** Directory where this utility keeps all of its test data, logs and working dirs. */
  private File dataTestDir = null;

  /** Directory (a subdirectory of dataTestDir) used by the dfs cluster. */
  private File clusterTestDir = null;

  /** System property key to get test directory value. */
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  /** System property key to set the base test directory. */
  public static final String BASE_TEST_DIRECTORY_KEY =
    "test.build.data.basedirectory";

  /** Default base directory for test output if {@link #BASE_TEST_DIRECTORY_KEY} is not set. */
  public static final String DEFAULT_BASE_TEST_DIRECTORY = "target/test-data";

  /** Compression algorithms to use in parameterized JUnit 4 tests. */
  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  /** Boolean values to use in parameterized JUnit 4 tests. */
  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Boolean.FALSE },
      { Boolean.TRUE }
    });

  /** Compression algorithms to use in testing. */
  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
    Compression.Algorithm.NONE, Compression.Algorithm.GZ
  };

  /**
   * Creates all combinations of Bloom filter type and the compression
   * algorithms above, for use in parameterized tests.
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
    bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    this.conf = conf;

    // Make HBase checksum verification failures throw an exception during
    // tests instead of being handled silently.
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
    setHDFSClientRetryProperty();
  }

  private void setHDFSClientRetryProperty() {
    // Keep HDFS client retries low so tests fail fast instead of hanging.
    this.conf.setInt("hdfs.client.retries.number", 1);
    HBaseFileSystem.setRetryCounts(conf);
  }

  /**
   * Returns this utility's live Configuration instance. Mutating it affects
   * clusters started afterwards.
   */
  public Configuration getConfiguration() {
    return this.conf;
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  /**
   * @return The calculated base test directory, read from
   * {@link #BASE_TEST_DIRECTORY_KEY} or defaulting to
   * {@link #DEFAULT_BASE_TEST_DIRECTORY}.
   */
  private Path getBaseTestDir() {
    String pathName = System.getProperty(
      BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);

    return new Path(pathName);
  }

  /**
   * @return Where to write test data on local filesystem; creates the
   * directory if it does not exist already.
   */
  public Path getDataTestDir() {
    if (dataTestDir == null) {
      setupDataTestDir();
    }
    return new Path(dataTestDir.getAbsolutePath());
  }

  /**
   * @return Where the cluster (DFS, ZooKeeper) writes test data on the local
   * filesystem; creates the directory if it does not exist already.
   */
  public Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  /**
   * @param subdirName the desired subdirectory name
   * @return Path to a subdirectory named {@code subdirName} under
   * {@link #getDataTestDir()}.
   */
  public Path getDataTestDir(final String subdirName) {
    return new Path(getDataTestDir(), subdirName);
  }
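
  /**
   * Sets up a directory for the test to write its data to. Creates a unique,
   * randomly-named subdirectory under the base test directory and points the
   * various Hadoop and HBase "local dir" properties at subdirectories of it,
   * so that concurrent test runs do not stomp on each other.
   */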
  private void setupDataTestDir() {
    if (dataTestDir != null) {
      LOG.warn("Data test dir already setup in " +
        dataTestDir.getAbsolutePath());
      return;
    }

    String randomStr = UUID.randomUUID().toString();
    Path testPath = new Path(getBaseTestDir(), randomStr);

    dataTestDir = new File(testPath.toString()).getAbsoluteFile();
    dataTestDir.deleteOnExit();

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    // hadoop.tmp.dir defaults to /tmp/hadoop-${user.name}; point it at our
    // own subdirectory to ensure uniqueness on the same machine.
    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    // Read and modified by MiniMRCluster.
    createSubDir(
      "mapred.local.dir",
      testPath, "mapred-local-dir");

    createSubDirAndSystemProperty(
      "mapred.working.dir",
      testPath, "mapred-working-dir");

    createSubDir(
      "hbase.local.dir",
      testPath, "hbase-local-dir");
  }

  private void createSubDir(String propertyName, Path parent, String subDirName) {
    Path newPath = new Path(parent, subDirName);
    File newDir = new File(newPath.toString()).getAbsoluteFile();
    newDir.deleteOnExit();
    conf.set(propertyName, newDir.getAbsolutePath());
  }

  private void createSubDirAndSystemProperty(
    String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // There is already a value set (e.g. by the build); we have no choice
      // but to use it, so copy it into the conf and warn on disagreement.
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + " so I do NOT create it in " + dataTestDir.getAbsolutePath());
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          " Erasing configuration value by system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      // Not set, so create it as a subdirectory and publish it as a system
      // property as well.
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  /**
   * Creates a directory for the DFS cluster, under the test data directory.
   */
  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      LOG.warn("Cluster test dir already setup in " +
        clusterTestDir.getAbsolutePath());
      return;
    }

    // Using a random UUID ensures that multiple clusters can be launched by
    // a single test if it is stopping and starting them.
    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();

    // Have it cleaned up on exit.
    clusterTestDir.deleteOnExit();
  }

  /**
   * Throws an exception if a cluster is already running; does nothing
   * otherwise. Call this before starting a new minicluster.
   * @throws IOException if a cluster is already started
   */
  public void isRunningCluster() throws IOException {
    if (dfsCluster == null) return;
    throw new IOException("Cluster already running at " +
      this.clusterTestDir);
  }
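
  /**
   * Start a mini dfs cluster.
   * @param servers How many DNs to start.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */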
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  /**
   * Start a mini dfs cluster. Can only create one.
   * @param hosts hostnames DNs should run on; if present, one DN is started
   * per hostname, otherwise a single DN is started.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
  throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  /**
   * Start a mini dfs cluster. Can only create one.
   * @param servers How many DNs to start.
   * @param hosts hostnames DNs should run on; if present, its length
   * overrides {@code servers}.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
  throws Exception {

    // Check that there is not already a cluster running.
    isRunningCluster();

    // Initialize the local directory used by the MiniDFS.
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }

    // MiniDFSCluster homes its data under this system property.
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.toString());

    // Point the (deprecated) test.cache.data at the same place so cached
    // test data also lands under the cluster test dir.
    System.setProperty("test.cache.data", this.clusterTestDir.toString());

    // Ok, now we can start.
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());
    // Do old style too just to be safe.
    this.conf.set("fs.default.name", fs.getUri().toString());

    // Wait for the cluster to be totally up.
    this.dfsCluster.waitClusterUp();

    return this.dfsCluster;
  }

  /**
   * Shuts down the instance created by a call to
   * {@link #startMiniDFSCluster(int)}, or does nothing if none is running.
   */
  public void shutdownMiniDFSCluster() throws Exception {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
    }
  }
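
  /**
   * Call this if you only want a ZooKeeper cluster.
   * @see #startMiniCluster() if you want ZooKeeper + dfs + hbase miniclusters.
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */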
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  /**
   * Call this if you only want a ZooKeeper cluster.
   * @param zooKeeperServerNum number of ZooKeeper servers to start
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
  throws Exception {
    File zkClusterFile = new File(getClusterTestDir().toString());
    return startMiniZKCluster(zkClusterFile, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
  throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
    int zooKeeperServerNum)
  throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  /**
   * Shuts down the zk cluster created by a call to
   * {@link #startMiniZKCluster(File)}, or does nothing if none is running.
   * @see #startMiniZKCluster()
   */
  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }
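
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper with one master and
   * one regionserver.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */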
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper with a single master
   * and the given number of slaves.
   * @param numSlaves Number of slaves to start up; we'll start this many
   * datanodes and regionservers.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
  throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @param numMasters Number of masters to start up.
   * @param numSlaves Number of slaves (regionservers and datanodes) to start.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves)
  throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }
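
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper. Modifies the passed
   * Configuration. Homes the cluster data directory under a random
   * subdirectory of the base test directory; the directory is cleaned up on
   * exit.
   * @param numMasters Number of masters to start up. If numMasters > 1, the
   * active/primary master can be found with {@link MiniHBaseCluster#getMaster()}.
   * @param numSlaves Number of regionservers to start; also the number of
   * datanodes unless dataNodeHosts is given.
   * @param dataNodeHosts hostnames DNs should run on. If specified, its
   * length overrides numSlaves for the number of datanodes started.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */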
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, final String[] dataNodeHosts)
  throws Exception {
    int numDataNodes = numSlaves;
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
      numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // If we already put up a cluster, fail.
    isRunningCluster();

    // Bring up the mini dfs cluster; it formats its data the first time through.
    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    // Start up a zk cluster if none was passed in.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Start the MiniHBaseCluster.
    return startMiniHBaseCluster(numMasters, numSlaves);
  }
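
  /**
   * Starts up the mini hbase cluster only. Usually used after a call to
   * {@link #startMiniCluster(int, int)} when doing stepped startup of
   * clusters; most tests want {@link #startMiniCluster()} instead.
   * @param numMasters number of masters to start
   * @param numSlaves number of regionservers to start
   * @return Reference to the mini hbase cluster.
   * @see #startMiniCluster()
   */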
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
    final int numSlaves)
  throws IOException, InterruptedException {
    // Now do the mini hbase cluster. Set the hbase.rootdir in config.
    createRootDir();

    // These settings make the master wait until this exact number of region
    // servers is connected, unless the test already configured them.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster = new MiniHBaseCluster(c, numMasters, numSlaves);
    // Don't leave here till we've done a successful scan of the .META.
    HTable t = new HTable(c, HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin(); // create the hbaseAdmin immediately
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster)this.hbaseCluster;
  }

  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test. Use this if you want to keep dfs/zk up and just stop/start hbase.
   * @param servers number of region servers
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the .META.
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  /**
   * @return Current mini hbase cluster. Only has something in it after a call
   * to {@link #startMiniCluster()}.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster)this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
      MiniHBaseCluster.class.getName());
  }
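
  /**
   * Stops the mini hbase, zk, and hdfs clusters, then removes the cluster
   * test directory.
   * @see #startMiniCluster(int)
   */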
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    // Clean up our directory.
    if (this.clusterTestDir != null && this.clusterTestDir.exists()) {
      // Need to use deleteDirectory because File.delete requires the dir be empty.
      if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
        new Path(this.clusterTestDir.toString()))) {
        LOG.warn("Failed delete of " + this.clusterTestDir.toString());
      }
      this.clusterTestDir = null;
    }
    LOG.info("Minicluster is down");
  }

  /**
   * Shuts down the HBase mini cluster. Does not shut down zk or dfs if
   * running.
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close();
      hbaseAdmin = null;
    }
    // Unset the configuration for MIN and MAX RS to start.
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shutdown zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }
  }

  /**
   * Returns the path to the default root dir the minicluster uses.
   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }
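
  /**
   * Creates an hbase rootdir in the user's home directory and writes the
   * hbase version file. Normally you won't need this; the root dir is
   * created for you as part of mini cluster startup. Use it only when doing
   * manual, stepped setup.
   * @return Fully qualified path to the hbase root dir
   * @throws IOException
   */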
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  /**
   * Flushes all caches in the mini hbase cluster.
   * @throws IOException
   */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /**
   * Flushes all caches of the given table in the mini hbase cluster.
   * @throws IOException
   */
  public void flush(byte [] tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /**
   * Compacts all regions in the mini hbase cluster.
   * @throws IOException
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /**
   * Compacts all of a table's regions in the mini hbase cluster.
   * @throws IOException
   */
  public void compact(byte [] tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

  /**
   * Create a table.
   * @param tableName table name
   * @param family column family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException {
    return createTable(tableName, new byte[][]{family});
  }

  /**
   * Create a table.
   * @param tableName table name
   * @param families column families
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
      new Configuration(getConfiguration()));
  }

  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions, byte[] startKey, byte[] endKey, int numRegions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    return new HTable(getConfiguration(), tableName);
  }

  /**
   * Create a table.
   * @param tableName table name
   * @param families column families
   * @param c Configuration to use
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
    final Configuration c)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      desc.addFamily(new HColumnDescriptor(family));
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  /**
   * Create a table.
   * @param tableName table name
   * @param families column families
   * @param c Configuration to use
   * @param numVersions max versions to keep per column family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
    final Configuration c, int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  /**
   * Create a table.
   * @param tableName table name
   * @param family column family
   * @param numVersions max versions to keep
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /**
   * Create a table.
   * @param tableName table name
   * @param families column families
   * @param numVersions max versions to keep per column family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a table.
   * @param tableName table name
   * @param families column families
   * @param numVersions max versions to keep per column family
   * @param blockSize block size to use for the column families
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions)
        .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a table.
   * @param tableName table name
   * @param families column families
   * @param numVersions max versions to keep, one entry per column family
   * @return An HTable instance for the created table.
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
    int[] numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Drop an existing table.
   * @param tableName existing table
   */
  public void deleteTable(byte[] tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + Bytes.toString(tableName) + " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }

  /**
   * Truncate a table by deleting every row it contains.
   * @param tableName table to truncate
   * @return An HTable instance for the truncated table.
   * @throws IOException
   */
  public HTable truncateTable(byte [] tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    // Close the scanner we iterated; don't leak it.
    resScan.close();
    return table;
  }
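
  /**
   * Load a table with rows keyed 'aaa' through 'zzz', writing the row key
   * into one column of the given family per row.
   * @param t Table to load
   * @param f Family to write into
   * @return Count of rows loaded.
   * @throws IOException
   */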
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

  /**
   * Load a table with rows keyed 'aaa' through 'zzz', writing the row key
   * into one column of each given family per row.
   * @param t Table to load
   * @param f Array of families to write into
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final HTable t, final byte[][] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          for (int i = 0; i < f.length; i++) {
            put.add(f[i], null, k);
          }
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

  /**
   * Load a region with rows keyed 'aaa' through 'zzz'.
   * @param r Region to load
   * @param f Family to write into
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadRegion(final HRegion r, final byte[] f)
  throws IOException {
    return loadRegion(r, f, false);
  }

  /**
   * Load a region with rows keyed 'aaa' through 'zzz'.
   * @param r Region to load
   * @param f Family to write into
   * @param flush whether to flush the cache after each key-prefix batch
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
  throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          // Skip the WAL if the region has none.
          if (r.getLog() == null) put.setWriteToWAL(false);
          r.put(put);
          rowCount++;
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }

  /**
   * Return the number of rows in the given table.
   */
  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  public int countRows(final HTable table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  /**
   * Return an md5 digest of the entire contents of a table.
   */
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // Render the digest bytes; MessageDigest.toString() does not do that.
    return Bytes.toStringBinary(digest.digest());
  }

  /**
   * Creates many regions, named "aaa" through "zzz".
   * @param table The table to use for the data.
   * @param columnFamily The family to insert the data into.
   * @return count of regions created.
   * @throws IOException When creating the regions fails.
   */
  public int createMultiRegions(HTable table, byte[] columnFamily)
  throws IOException {
    return createMultiRegions(table, columnFamily, true);
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };

  /**
   * Creates many regions, named "aaa" through "zzz".
   * @param table The table to use for the data.
   * @param columnFamily The family to insert the data into.
   * @param cleanupFS True if the previous region's directory should be
   * removed from the filesystem.
   * @return count of regions created.
   * @throws IOException When creating the regions fails.
   */
  public int createMultiRegions(HTable table, byte[] columnFamily, boolean cleanupFS)
  throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily, KEYS, cleanupFS);
  }

  /**
   * Creates the specified number of regions in the specified table.
   * @param c configuration to use
   * @param table The table to use for the data.
   * @param family The family to insert the data into.
   * @param numRegions number of regions to create; must be at least 3.
   * @return count of regions created.
   * @throws IOException When creating the regions fails.
   */
  public int createMultiRegions(final Configuration c, final HTable table,
    final byte [] family, int numRegions)
  throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte [] startKey = Bytes.toBytes("aaaaa");
    byte [] endKey = Bytes.toBytes("zzzzz");
    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] regionStartKeys = new byte[splitKeys.length + 1][];
    for (int i = 0; i < splitKeys.length; i++) {
      regionStartKeys[i + 1] = splitKeys[i];
    }
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
    final byte[] columnFamily, byte [][] startKeys) throws IOException {
    return createMultiRegions(c, table, columnFamily, startKeys, true);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
    final byte[] columnFamily, byte [][] startKeys, boolean cleanupFS)
  throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }
    // Note the existing meta rows for the table (deleted further down) and
    // the encoded name of the current region so its directory can be removed
    // from the filesystem if cleanupFS is set.
    List<byte[]> rows = getMetaTableRows(htd.getName());
    String regionToDeleteInFS = table
      .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
      .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // Add custom ones.
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getTableName(),
        startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegions: inserted " + hri.toString());
      newRegions.add(hri);
      count++;
    }
    // Now remove the old (or previous) region rows noted above.
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    if (cleanupFS) {
      // Remove the old region's directory so its store files don't turn up
      // under the new regions.
      Path tableDir = new Path(getDefaultRootDirPath().toString()
        + System.getProperty("file.separator") + htd.getNameAsString()
        + System.getProperty("file.separator") + regionToDeleteInFS);
      getDFSCluster().getFileSystem().delete(tableDir, true);
    }
    // Flush the region cache on the connection.
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    // Assign all the new regions IF the table is enabled.
    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for (HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }

  /**
   * Create rows in .META. for regions of the specified table with the
   * specified start keys. The first startKey should be a 0-length byte array
   * if you want to form a proper range of regions.
   * @param conf configuration to use
   * @param htd table descriptor
   * @param startKeys region start keys
   * @return list of region info for the regions added to meta
   * @throws IOException
   */
  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
    final HTableDescriptor htd, byte [][] startKeys)
  throws IOException {
    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // Add custom ones.
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
        startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegionsInMeta: inserted " + hri.toString());
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

  /**
   * Returns all rows from the .META. table.
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows() throws IOException {
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

  /**
   * Returns all rows from the .META. table for a given user table.
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      byte[] val = result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
      if (val == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        // Skip rows without region info.
        continue;
      }
      HRegionInfo info = Writables.getHRegionInfo(val);
      if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

  /**
   * Tool to get a reference to the region server object holding the first
   * region of the specified user table. It searches .META. for the rows of
   * the table, then asks the mini cluster which server carries the first one.
   * @param tableName user table to look up in .META.
   * @return region server that holds it, null if the table has no rows
   * @throws IOException
   */
  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
  throws IOException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + metaRows.size() + " rows for table " +
      Bytes.toString(tableName));
    byte [] firstrow = metaRows.get(0);
    LOG.debug("FirstRow=" + Bytes.toString(firstrow));
    int index = getMiniHBaseCluster().getServerWith(firstrow);
    return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
  }
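
  /**
   * Starts a MiniMRCluster with a default number of TaskTrackers (2).
   * @throws IOException When starting the cluster fails.
   */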
  public void startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
  }

  /**
   * Starts a MiniMRCluster.
   * @param servers The number of TaskTrackers to start.
   * @throws IOException When starting the cluster fails.
   */
  public void startMiniMapReduceCluster(final int servers) throws IOException {
    LOG.info("Starting mini mapreduce cluster...");
    // These are needed for the Map/Reduce framework to find its directories.
    Configuration c = getConfiguration();
    String logDir = c.get("hadoop.log.dir");
    String tmpDir = c.get("hadoop.tmp.dir");
    if (logDir == null) {
      logDir = tmpDir;
    }
    System.setProperty("hadoop.log.dir", logDir);
    c.set("mapred.output.dir", tmpDir);

    // Raise the allowed virtual-to-physical memory ratio so the NodeManager's
    // memory monitor does not kill test processes.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    mrCluster = new MiniMRCluster(servers,
      FileSystem.get(conf).getUri().toString(), 1);
    LOG.info("Mini mapreduce cluster started");
    JobConf mrClusterJobConf = mrCluster.createJobConf();
    c.set("mapred.job.tracker", mrClusterJobConf.get("mapred.job.tracker"));
    // mrv2 (YARN) support: propagate resource manager addresses if present.
    conf.set("mapreduce.framework.name", "yarn");
    String rmAddress = mrClusterJobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String schedulerAddress =
      mrClusterJobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }

  /**
   * Stops the previously started MiniMRCluster.
   */
  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }
    // Restore configuration to point to local jobtracker.
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }

  /**
   * Switches the logger for the given class to DEBUG level.
   * @param clazz The class for which to switch to debug logging.
   */
  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }

  /**
   * Expire the Master's ZooKeeper session.
   * @throws Exception
   */
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }

  /**
   * Expire a region server's ZooKeeper session.
   * @param index which region server
   * @throws Exception
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    // Decrement the count for this.conf, for newly spawned masters, as well
    // as for the masters that are already started.
    decrementMinRegionServerCount(getConfiguration());

    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
      ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
        Math.max(currentCount - 1, 1));
    }
  }
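
  /**
   * Expire a ZooKeeper session, using the trick recommended by the ZooKeeper
   * documentation: connect a second client with the same session id and
   * password and close it, which invalidates the original session.
   * @param nodeZK the watcher whose session is to be expired
   * @param checkStatus if true, verify the master is still reachable after
   * expiration by opening .META.
   * @throws Exception
   */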
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
  throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    int sessionTimeout = 500;
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Expiry is asynchronous, so open a monitor connection on the same
    // session first; it logs whatever events the session receives while the
    // expiration propagates.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher() {
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event=" + watchedEvent);
      }
    }, sessionID, password);

    // Make the session expire: connect a second client with the same session
    // id/password and close it.
    ZooKeeper newZK = new ZooKeeper(quorumServers,
      sessionTimeout, EmptyWatcher.instance, sessionID, password);
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // Close the monitor now that the session has been expired.
    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), HConstants.META_TABLE_NAME).close();
    }
  }

  /**
   * Get the mini HBase cluster.
   * @return hbase cluster
   * @see #getHBaseClusterInterface()
   */
  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

  /**
   * Returns the HBaseCluster instance.
   * <p>
   * The returned object can be any of the subclasses of HBaseCluster, and
   * tests referring to it should not assume the cluster is a mini cluster or
   * a distributed one. If the test only works on a mini cluster, use
   * {@link #getMiniHBaseCluster()} instead to avoid type-checking the
   * obtained object.
   */
  public HBaseCluster getHBaseClusterInterface() {
    return hbaseCluster;
  }

  /**
   * Returns an HBaseAdmin instance, lazily initialized. The instance is
   * shared; don't close it — it is closed automatically when the cluster
   * shuts down.
   * @return The HBaseAdmin instance.
   * @throws IOException
   */
  public synchronized HBaseAdmin getHBaseAdmin()
  throws IOException {
    if (hbaseAdmin == null) {
      hbaseAdmin = new HBaseAdmin(new Configuration(getConfiguration()));
    }
    return hbaseAdmin;
  }
  private HBaseAdmin hbaseAdmin = null;

  /**
   * Unassign the named region.
   * @param regionName The region to unassign.
   */
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  /**
   * Unassign the named region.
   * @param regionName The region to unassign.
   */
  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }

  /**
   * Closes the region containing the given row.
   * @param row The row to find the containing region.
   * @param table The table to find the region in.
   */
  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  /**
   * Closes the region containing the given row.
   * @param row The row to find the containing region.
   * @param table The table to find the region in.
   */
  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
    if (dfsCluster != null && dfsCluster.isClusterUp()) {
      throw new IOException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }

  /**
   * @return True if we removed the test dir
   * @throws IOException
   */
  public boolean cleanupTestDir() throws IOException {
    if (dataTestDir == null) {
      return false;
    } else {
      boolean ret = deleteDir(getDataTestDir());
      dataTestDir = null;
      return ret;
    }
  }

  /**
   * @param subdir Test subdir name.
   * @return True if we removed the test dir
   * @throws IOException
   */
  public boolean cleanupTestDir(final String subdir) throws IOException {
    if (dataTestDir == null) {
      return false;
    }
    return deleteDir(getDataTestDir(subdir));
  }

  /**
   * @param dir Directory to delete
   * @return True if we deleted it.
   * @throws IOException
   */
  public boolean deleteDir(final Path dir) throws IOException {
    FileSystem fs = getTestFileSystem();
    if (fs.exists(dir)) {
      // Delete the given dir, not the whole data test dir.
      return fs.delete(dir, true);
    }
    return false;
  }

  public void waitTableAvailable(byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!getHBaseAdmin().isTableAvailable(table)) {
      assertTrue("Timed out waiting for table to become available " +
        Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

  public void waitTableEnabled(byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    // Keep waiting until the table is both available and enabled.
    while (!getHBaseAdmin().isTableAvailable(table) ||
           !getHBaseAdmin().isTableEnabled(table)) {
      assertTrue("Timed out waiting for table to become available and enabled " +
        Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

  /**
   * Make sure that at least the specified number of region servers are
   * running.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   * @throws IOException
   */
  public boolean ensureSomeRegionServersAvailable(final int num)
  throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }

    return startedServer;
  }

  /**
   * Make sure that at least the specified number of region servers are
   * running. We don't count the ones that are currently stopping or stopped.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   * @throws IOException
   */
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
  throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
      getMiniHBaseCluster().getRegionServerThreads()) {

      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:" + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }

  /**
   * Returns a User for testing that differs from the current user; useful
   * where tests need distinct users (e.g. per daemon). Only differentiates
   * when running against a DistributedFileSystem; otherwise the current user
   * is returned.
   * @param c Initial configuration
   * @param differentiatingSuffix Suffix to differentiate this user from others.
   * @return A user distinguished by the given suffix.
   * @throws IOException
   */
  public static User getDifferentUser(final Configuration c,
    final String differentiatingSuffix)
  throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    // Else distributed filesystem: make a test user per caller.
    String username = User.getCurrent().getName() +
      differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
      new String[]{"supergroup"});
    return user;
  }

  /**
   * Set maxRecoveryErrorCount in DFSClient. The field lives on the
   * non-public inner class DFSClient.DFSOutputStream, so it has to be set
   * via reflection.
   * @param stream A DFSClient.DFSOutputStream instance
   * @param max the new max recovery error count
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
    final int max) {
    try {
      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }
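
  /**
   * Wait until all regions for a table are assigned (i.e. have a server
   * listed against them in .META.), using a default timeout of 60 seconds.
   * @param tableName the table to wait on.
   * @throws IOException if the wait times out
   */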
  public void waitUntilAllRegionsAssigned(final byte[] tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }

  /**
   * Wait until all regions for a table are assigned. Repeatedly scans .META.
   * and checks that every region of the table has a server entry.
   * @param tableName the table to wait on.
   * @param timeout how long to wait, in milliseconds
   * @throws IOException if the wait times out
   */
  public void waitUntilAllRegionsAssigned(final byte[] tableName, final long timeout)
  throws IOException {
    long deadline = System.currentTimeMillis() + timeout;
    HTable meta = new HTable(getConfiguration(), HConstants.META_TABLE_NAME);
    try {
      while (true) {
        boolean allRegionsAssigned = true;
        Scan scan = new Scan();
        scan.addFamily(HConstants.CATALOG_FAMILY);
        ResultScanner s = meta.getScanner(scan);
        try {
          Result r;
          while ((r = s.next()) != null) {
            byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
            HRegionInfo info = Writables.getHRegionInfoOrNull(b);
            if (info != null && Bytes.equals(info.getTableName(), tableName)) {
              b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
              allRegionsAssigned &= (b != null);
            }
          }
        } finally {
          s.close();
        }
        if (allRegionsAssigned) {
          return;
        }
        long now = System.currentTimeMillis();
        if (now > deadline) {
          throw new IOException("Timeout waiting for all regions of " +
            Bytes.toStringBinary(tableName) + " to be assigned");
        }
        try {
          Thread.sleep(deadline - now < 200 ? deadline - now : 200);
        } catch (InterruptedException e) {
          throw new IOException(e);
        }
      }
    } finally {
      meta.close();
    }
  }

  /**
   * Do a small get/scan against one store. Useful because Store has no
   * methods for querying itself directly and relies on StoreScanner.
   */
  public static List<KeyValue> getFromStoreFile(Store store,
    Get get) throws IOException {
    MultiVersionConsistencyControl.resetThreadReadPoint();
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
      scan.getFamilyMap().get(store.getFamily().getName()));

    List<KeyValue> result = new ArrayList<KeyValue>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // Verify that we are on the row we want.
      KeyValue kv = result.get(0);
      if (!Bytes.equals(kv.getRow(), get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }

  /**
   * Do a small get/scan of the given row and columns against one store.
   */
  public static List<KeyValue> getFromStoreFile(Store store,
    byte [] row,
    NavigableSet<byte[]> columns
    ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }

  /**
   * Gets a ZooKeeperWatcher backed by an Abortable that turns fatal ZK errors
   * into RuntimeExceptions.
   * @param TEST_UTIL the testing utility whose configuration to use
   */
  public static ZooKeeperWatcher getZooKeeperWatcher(
    HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
    IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
      "unittest", new Abortable() {
      boolean aborted = false;

      @Override
      public void abort(String why, Throwable e) {
        aborted = true;
        throw new RuntimeException("Fatal ZK error, why=" + why, e);
      }

      @Override
      public boolean isAborted() {
        return aborted;
      }
    });
    return zkw;
  }

  /**
   * Creates a znode for the region and transitions it OFFLINE -> OPENING ->
   * OPENED, as if the given server had opened it.
   * @param TEST_UTIL the testing utility to use
   * @param region the region to create the znode for
   * @param serverName the server the region is purportedly open on
   * @return the ZooKeeperWatcher used
   * @throws ZooKeeperConnectionException
   * @throws IOException
   * @throws KeeperException
   * @throws NodeExistsException
   */
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
    HBaseTestingUtility TEST_UTIL, HRegion region,
    ServerName serverName) throws ZooKeeperConnectionException,
    IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw, region
      .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
      version);
    return zkw;
  }
  public static void assertKVListsEqual(String additionalMsg,
    final List<KeyValue> expected,
    final List<KeyValue> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i;
    for (i = 0; i < minLen
        && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
        ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
        "Expected and actual KV arrays differ at position " + i + ": " +
        safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
        safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }

  private static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }

  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
      + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
      + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
        HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }

  /** Creates a pre-split table and loads it with randomized puts and deletes. */
  public HTable createRandomTable(String tableName,
    final Collection<String> families,
    final int maxVersions,
    final int numColsPerRow,
    final int numFlushes,
    final int numRegions,
    final int numRowsPerFlush)
  throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
      " regions, " + numFlushes + " storefiles per region, " +
      numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
      "\n");

    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    final byte[] tableNameBytes = Bytes.toBytes(tableName);

    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableNameBytes, cfBytes,
      maxVersions,
      Bytes.toBytes(String.format(keyFormat, splitStartKey)),
      Bytes.toBytes(String.format(keyFormat, splitEndKey)),
      numRegions);
    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(HConstants.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
          actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
              "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
              ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(tableNameBytes);
      }
    }

    return table;
  }

  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;

  /**
   * Returns a random port in the dynamic/ephemeral range (49152-65534);
   * these ports cannot be registered with IANA.
   */
  public static int randomPort() {
    return MIN_RANDOM_PORT
      + new Random().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
  }
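
  /**
   * Returns a random free port, found by briefly binding a ServerSocket to
   * candidate ports until one succeeds.
   */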
  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        port = 0;
      }
    } while (port == 0);
    return port;
  }

  public static void waitForHostPort(String host, int port)
  throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }
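
  /**
   * Creates a pre-split table for load testing, with the given compression
   * and data block encoding. If the table already exists, logs a warning and
   * continues.
   * @return the number of regions the table was split into
   */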
  public static int createPreSplitLoadTestTable(Configuration conf,
    byte[] tableName, byte[] columnFamily, Algorithm compression,
    DataBlockEncoding dataBlockEncoding) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd);
  }

  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
    HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Create a table with pre-split regions. The number of splits is:
      //   live region servers * regions per region server.
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
        "pre-splitting table into " + totalNumberOfRegions + " regions " +
        "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
        totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + Bytes.toStringBinary(desc.getName()) +
        " already exists, continuing");
    } finally {
      // The finally block closes the admin in every case; no separate close
      // after createTable is needed.
      admin.close();
    }
    return totalNumberOfRegions;
  }

  public static int getMetaRSPort(Configuration conf) throws IOException {
    HTable table = new HTable(conf, HConstants.META_TABLE_NAME);
    HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
    table.close();
    return hloc.getPort();
  }

  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
  throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(hcd);
    HRegionInfo info =
      new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
    HRegion region =
      HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

  /**
   * Create region split start keys between startKey and endKey.
   * @param startKey first key of the range
   * @param endKey last key of the range
   * @param numRegions the number of regions to be created; must be greater
   * than 3.
   * @return region split start keys
   */
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue(numRegions > 3);
    byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] result = new byte[tmpSplitKeys.length + 1][];
    for (int i = 0; i < tmpSplitKeys.length; i++) {
      result[i + 1] = tmpSplitKeys[i];
    }
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }

  /**
   * Create all combinations of supported compression algorithms, data block
   * encodings and bloom filter types as column family descriptors.
   * @return the list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

  /**
   * Create all combinations of supported compression algorithms, data block
   * encodings and bloom filter types as column family descriptors.
   * @param prefix family names prefix
   * @return the list of column descriptors
   */
  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
    List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
    long familyId = 0;
    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
        for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          HColumnDescriptor htd = new HColumnDescriptor(name);
          htd.setCompressionType(compressionType);
          htd.setDataBlockEncoding(encodingType);
          htd.setBloomFilterType(bloomType);
          htds.add(htd);
          familyId++;
        }
      }
    }
    return htds;
  }

  /**
   * Get the compression algorithms actually supported on this machine, by
   * trying to obtain a compressor for each known algorithm.
   * @return supported compression algorithms.
   */
  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // This algorithm is not available.
      }
    }
    return supportedAlgos.toArray(new Compression.Algorithm[0]);
  }
}