1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20 package org.apache.hadoop.hbase;
21
22 import static org.junit.Assert.assertTrue;
23
24 import java.io.File;
25 import java.io.IOException;
26 import java.io.OutputStream;
27 import java.lang.reflect.Field;
28 import java.net.InetAddress;
29 import java.net.ServerSocket;
30 import java.net.Socket;
31 import java.net.UnknownHostException;
32 import java.security.MessageDigest;
33 import java.util.ArrayList;
34 import java.util.Arrays;
35 import java.util.Collection;
36 import java.util.Collections;
37 import java.util.List;
38 import java.util.Map;
39 import java.util.NavigableSet;
40 import java.util.Random;
41 import java.util.UUID;
42
43 import org.apache.commons.logging.Log;
44 import org.apache.commons.logging.LogFactory;
45 import org.apache.commons.logging.impl.Jdk14Logger;
46 import org.apache.commons.logging.impl.Log4JLogger;
47 import org.apache.hadoop.conf.Configuration;
48 import org.apache.hadoop.fs.FileSystem;
49 import org.apache.hadoop.fs.Path;
50 import org.apache.hadoop.hbase.client.Delete;
51 import org.apache.hadoop.hbase.client.Get;
52 import org.apache.hadoop.hbase.client.HBaseAdmin;
53 import org.apache.hadoop.hbase.client.HConnection;
54 import org.apache.hadoop.hbase.client.HTable;
55 import org.apache.hadoop.hbase.client.Put;
56 import org.apache.hadoop.hbase.client.Result;
57 import org.apache.hadoop.hbase.client.ResultScanner;
58 import org.apache.hadoop.hbase.client.Scan;
59 import org.apache.hadoop.hbase.fs.HFileSystem;
60 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
61 import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
62 import org.apache.hadoop.hbase.io.hfile.Compression;
63 import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
64 import org.apache.hadoop.hbase.io.hfile.HFile;
65 import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
66 import org.apache.hadoop.hbase.master.HMaster;
67 import org.apache.hadoop.hbase.master.ServerManager;
68 import org.apache.hadoop.hbase.regionserver.HRegion;
69 import org.apache.hadoop.hbase.regionserver.HRegionServer;
70 import org.apache.hadoop.hbase.regionserver.InternalScanner;
71 import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
72 import org.apache.hadoop.hbase.regionserver.Store;
73 import org.apache.hadoop.hbase.regionserver.StoreFile;
74 import org.apache.hadoop.hbase.security.User;
75 import org.apache.hadoop.hbase.util.Bytes;
76 import org.apache.hadoop.hbase.util.FSUtils;
77 import org.apache.hadoop.hbase.util.JVMClusterUtil;
78 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
79 import org.apache.hadoop.hbase.util.RegionSplitter;
80 import org.apache.hadoop.hbase.util.Threads;
81 import org.apache.hadoop.hbase.util.Writables;
82 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
83 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
84 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
85 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
86 import org.apache.hadoop.hdfs.DFSClient;
87 import org.apache.hadoop.hdfs.DistributedFileSystem;
88 import org.apache.hadoop.hdfs.MiniDFSCluster;
89 import org.apache.hadoop.mapred.JobConf;
90 import org.apache.hadoop.mapred.MiniMRCluster;
91 import org.apache.zookeeper.KeeperException;
92 import org.apache.zookeeper.KeeperException.NodeExistsException;
93 import org.apache.zookeeper.WatchedEvent;
94 import org.apache.zookeeper.ZooKeeper;
95
96
97
98
99
100
101
102
103
104
105
106
107
108 public class HBaseTestingUtility {
109 private static final Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
110 private Configuration conf;
111 private MiniZooKeeperCluster zkCluster = null;
112
113
114
115
116
117 private static int DEFAULT_REGIONS_PER_SERVER = 5;
118
119
120
121
122
123 private boolean passedZkCluster = false;
124 private MiniDFSCluster dfsCluster = null;
125
126 private HBaseCluster hbaseCluster = null;
127 private MiniMRCluster mrCluster = null;
128
129
130 private File dataTestDir = null;
131
132
133
134 private File clusterTestDir = null;
135
136
137
138
139
140
141
142
143 private static final String TEST_DIRECTORY_KEY = "test.build.data";
144
145
146
147
148 public static final String BASE_TEST_DIRECTORY_KEY =
149 "test.build.data.basedirectory";
150
151
152
153
154 public static final String DEFAULT_BASE_TEST_DIRECTORY = "target/test-data";
155
156
157 public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
158 Arrays.asList(new Object[][] {
159 { Compression.Algorithm.NONE },
160 { Compression.Algorithm.GZ }
161 });
162
163
164 public static final List<Object[]> BOOLEAN_PARAMETERIZED =
165 Arrays.asList(new Object[][] {
166 { new Boolean(false) },
167 { new Boolean(true) }
168 });
169
170
171 public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS ={
172 Compression.Algorithm.NONE, Compression.Algorithm.GZ
173 };
174
175
176
177
178
179 private static List<Object[]> bloomAndCompressionCombinations() {
180 List<Object[]> configurations = new ArrayList<Object[]>();
181 for (Compression.Algorithm comprAlgo :
182 HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
183 for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
184 configurations.add(new Object[] { comprAlgo, bloomType });
185 }
186 }
187 return Collections.unmodifiableList(configurations);
188 }
189
190 public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
191 bloomAndCompressionCombinations();
192
193 public HBaseTestingUtility() {
194 this(HBaseConfiguration.create());
195 }
196
197 public HBaseTestingUtility(Configuration conf) {
198 this.conf = conf;
199
200
201 ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
202 setHDFSClientRetryProperty();
203 }
204
205 private void setHDFSClientRetryProperty() {
206 this.conf.setInt("hdfs.client.retries.number", 1);
207 HBaseFileSystem.setRetryCounts(conf);
208 }
209
210
211
212
213
214
215
216
217
218
219
220
221 public Configuration getConfiguration() {
222 return this.conf;
223 }
224
225 public void setHBaseCluster(HBaseCluster hbaseCluster) {
226 this.hbaseCluster = hbaseCluster;
227 }
228
229
230
231
232
233
234
235
236
237 private Path getBaseTestDir() {
238 String PathName = System.getProperty(
239 BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);
240
241 return new Path(PathName);
242 }
243
244
245
246
247
248
249
250 public Path getDataTestDir() {
251 if (dataTestDir == null){
252 setupDataTestDir();
253 }
254 return new Path(dataTestDir.getAbsolutePath());
255 }
256
257
258
259
260
261
262 public Path getClusterTestDir() {
263 if (clusterTestDir == null){
264 setupClusterTestDir();
265 }
266 return new Path(clusterTestDir.getAbsolutePath());
267 }
268
269
270
271
272
273
274
275 public Path getDataTestDir(final String subdirName) {
276 return new Path(getDataTestDir(), subdirName);
277 }
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295 private void setupDataTestDir() {
296 if (dataTestDir != null) {
297 LOG.warn("Data test dir already setup in " +
298 dataTestDir.getAbsolutePath());
299 return;
300 }
301
302 String randomStr = UUID.randomUUID().toString();
303 Path testPath= new Path(getBaseTestDir(), randomStr);
304
305 dataTestDir = new File(testPath.toString()).getAbsoluteFile();
306 dataTestDir.deleteOnExit();
307
308 createSubDirAndSystemProperty(
309 "hadoop.log.dir",
310 testPath, "hadoop-log-dir");
311
312
313
314 createSubDirAndSystemProperty(
315 "hadoop.tmp.dir",
316 testPath, "hadoop-tmp-dir");
317
318
319 createSubDir(
320 "mapred.local.dir",
321 testPath, "mapred-local-dir");
322
323 createSubDirAndSystemProperty(
324 "mapred.working.dir",
325 testPath, "mapred-working-dir");
326
327 createSubDir(
328 "hbase.local.dir",
329 testPath, "hbase-local-dir");
330 }
331
332 private void createSubDir(String propertyName, Path parent, String subDirName){
333 Path newPath= new Path(parent, subDirName);
334 File newDir = new File(newPath.toString()).getAbsoluteFile();
335 newDir.deleteOnExit();
336 conf.set(propertyName, newDir.getAbsolutePath());
337 }
338
339 private void createSubDirAndSystemProperty(
340 String propertyName, Path parent, String subDirName){
341
342 String sysValue = System.getProperty(propertyName);
343
344 if (sysValue != null) {
345
346
347 LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
348 sysValue + " so I do NOT create it in "+dataTestDir.getAbsolutePath());
349 String confValue = conf.get(propertyName);
350 if (confValue != null && !confValue.endsWith(sysValue)){
351 LOG.warn(
352 propertyName + " property value differs in configuration and system: "+
353 "Configuration="+confValue+" while System="+sysValue+
354 " Erasing configuration value by system value."
355 );
356 }
357 conf.set(propertyName, sysValue);
358 } else {
359
360 createSubDir(propertyName, parent, subDirName);
361 System.setProperty(propertyName, conf.get(propertyName));
362 }
363 }
364
365
366
367
368 private void setupClusterTestDir() {
369 if (clusterTestDir != null) {
370 LOG.warn("Cluster test dir already setup in " +
371 clusterTestDir.getAbsolutePath());
372 return;
373 }
374
375
376
377 Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
378 clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
379
380 clusterTestDir.deleteOnExit();
381 }
382
383
384
385
386 public void isRunningCluster() throws IOException {
387 if (dfsCluster == null) return;
388 throw new IOException("Cluster already running at " +
389 this.clusterTestDir);
390 }
391
392
393
394
395
396
397
398
399 public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
400 return startMiniDFSCluster(servers, null);
401 }
402
403
404
405
406
407
408
409
410
411
412
413
414 public MiniDFSCluster startMiniDFSCluster(final String hosts[])
415 throws Exception {
416 if ( hosts != null && hosts.length != 0) {
417 return startMiniDFSCluster(hosts.length, hosts);
418 } else {
419 return startMiniDFSCluster(1, null);
420 }
421 }
422
423
424
425
426
427
428
429
430
431
432 public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
433 throws Exception {
434
435
436 isRunningCluster();
437
438
439 if (clusterTestDir == null) {
440 setupClusterTestDir();
441 }
442
443
444 System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.toString());
445
446
447
448
449 System.setProperty("test.cache.data", this.clusterTestDir.toString());
450
451
452 this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
453 true, null, null, hosts, null);
454
455
456 FileSystem fs = this.dfsCluster.getFileSystem();
457 this.conf.set("fs.defaultFS", fs.getUri().toString());
458
459 this.conf.set("fs.default.name", fs.getUri().toString());
460
461
462 this.dfsCluster.waitClusterUp();
463
464 return this.dfsCluster;
465 }
466
467
468
469
470
471
472 public void shutdownMiniDFSCluster() throws Exception {
473 if (this.dfsCluster != null) {
474
475 this.dfsCluster.shutdown();
476 dfsCluster = null;
477 }
478
479 }
480
481
482
483
484
485
486
487
488 public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
489 return startMiniZKCluster(1);
490 }
491
492
493
494
495
496
497
498
499
500 public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
501 throws Exception {
502 File zkClusterFile = new File(getClusterTestDir().toString());
503 return startMiniZKCluster(zkClusterFile, zooKeeperServerNum);
504 }
505
506 private MiniZooKeeperCluster startMiniZKCluster(final File dir)
507 throws Exception {
508 return startMiniZKCluster(dir,1);
509 }
510
511 private MiniZooKeeperCluster startMiniZKCluster(final File dir,
512 int zooKeeperServerNum)
513 throws Exception {
514 if (this.zkCluster != null) {
515 throw new IOException("Cluster already running at " + dir);
516 }
517 this.passedZkCluster = false;
518 this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
519 int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum);
520 this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
521 Integer.toString(clientPort));
522 return this.zkCluster;
523 }
524
525
526
527
528
529
530
531 public void shutdownMiniZKCluster() throws IOException {
532 if (this.zkCluster != null) {
533 this.zkCluster.shutdown();
534 this.zkCluster = null;
535 }
536 }
537
538
539
540
541
542
543
544 public MiniHBaseCluster startMiniCluster() throws Exception {
545 return startMiniCluster(1, 1);
546 }
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561 public MiniHBaseCluster startMiniCluster(final int numSlaves)
562 throws Exception {
563 return startMiniCluster(1, numSlaves);
564 }
565
566
567
568
569
570
571
572
573 public MiniHBaseCluster startMiniCluster(final int numMasters,
574 final int numSlaves)
575 throws Exception {
576 return startMiniCluster(numMasters, numSlaves, null);
577 }
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604 public MiniHBaseCluster startMiniCluster(final int numMasters,
605 final int numSlaves, final String[] dataNodeHosts)
606 throws Exception {
607 int numDataNodes = numSlaves;
608 if ( dataNodeHosts != null && dataNodeHosts.length != 0) {
609 numDataNodes = dataNodeHosts.length;
610 }
611
612 LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
613 numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");
614
615
616 isRunningCluster();
617
618
619
620 startMiniDFSCluster(numDataNodes, dataNodeHosts);
621
622
623 if (this.zkCluster == null) {
624 startMiniZKCluster(clusterTestDir);
625 }
626
627
628 return startMiniHBaseCluster(numMasters, numSlaves);
629 }
630
631
632
633
634
635
636
637
638
639
640
641
642 public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
643 final int numSlaves)
644 throws IOException, InterruptedException {
645
646 createRootDir();
647
648
649
650 if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
651 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
652 }
653 if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
654 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
655 }
656
657 Configuration c = new Configuration(this.conf);
658 this.hbaseCluster = new MiniHBaseCluster(c, numMasters, numSlaves);
659
660 HTable t = new HTable(c, HConstants.META_TABLE_NAME);
661 ResultScanner s = t.getScanner(new Scan());
662 while (s.next() != null) {
663 continue;
664 }
665 s.close();
666 t.close();
667
668 getHBaseAdmin();
669 LOG.info("Minicluster is up");
670 return (MiniHBaseCluster)this.hbaseCluster;
671 }
672
673
674
675
676
677
678
679 public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
680 this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
681
682 HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
683 ResultScanner s = t.getScanner(new Scan());
684 while (s.next() != null) {
685
686 }
687 LOG.info("HBase has been restarted");
688 s.close();
689 t.close();
690 }
691
692
693
694
695
696
697 public MiniHBaseCluster getMiniHBaseCluster() {
698 if (this.hbaseCluster instanceof MiniHBaseCluster) {
699 return (MiniHBaseCluster)this.hbaseCluster;
700 }
701 throw new RuntimeException(hbaseCluster + " not an instance of " +
702 MiniHBaseCluster.class.getName());
703 }
704
705
706
707
708
709
710 public void shutdownMiniCluster() throws Exception {
711 LOG.info("Shutting down minicluster");
712 shutdownMiniHBaseCluster();
713 if (!this.passedZkCluster){
714 shutdownMiniZKCluster();
715 }
716 shutdownMiniDFSCluster();
717
718
719 if (this.clusterTestDir != null && this.clusterTestDir.exists()) {
720
721 if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
722 new Path(this.clusterTestDir.toString()))) {
723 LOG.warn("Failed delete of " + this.clusterTestDir.toString());
724 }
725 this.clusterTestDir = null;
726 }
727 LOG.info("Minicluster is down");
728 }
729
730
731
732
733
734 public void shutdownMiniHBaseCluster() throws IOException {
735 if (hbaseAdmin != null) {
736 hbaseAdmin.close();
737 hbaseAdmin = null;
738 }
739
740 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
741 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
742 if (this.hbaseCluster != null) {
743 this.hbaseCluster.shutdown();
744
745 this.hbaseCluster.waitUntilShutDown();
746 this.hbaseCluster = null;
747 }
748 }
749
750
751
752
753
754
755
756 public Path getDefaultRootDirPath() throws IOException {
757 FileSystem fs = FileSystem.get(this.conf);
758 return new Path(fs.makeQualified(fs.getHomeDirectory()),"hbase");
759 }
760
761
762
763
764
765
766
767
768
769 public Path createRootDir() throws IOException {
770 FileSystem fs = FileSystem.get(this.conf);
771 Path hbaseRootdir = getDefaultRootDirPath();
772 this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
773 fs.mkdirs(hbaseRootdir);
774 FSUtils.setVersion(fs, hbaseRootdir);
775 return hbaseRootdir;
776 }
777
778
779
780
781
782 public void flush() throws IOException {
783 getMiniHBaseCluster().flushcache();
784 }
785
786
787
788
789
790 public void flush(byte [] tableName) throws IOException {
791 getMiniHBaseCluster().flushcache(tableName);
792 }
793
794
795
796
797
798 public void compact(boolean major) throws IOException {
799 getMiniHBaseCluster().compact(major);
800 }
801
802
803
804
805
806 public void compact(byte [] tableName, boolean major) throws IOException {
807 getMiniHBaseCluster().compact(tableName, major);
808 }
809
810
811
812
813
814
815
816
817
818 public HTable createTable(byte[] tableName, byte[] family)
819 throws IOException{
820 return createTable(tableName, new byte[][]{family});
821 }
822
823
824
825
826
827
828
829
830 public HTable createTable(byte[] tableName, byte[][] families)
831 throws IOException {
832 return createTable(tableName, families,
833 new Configuration(getConfiguration()));
834 }
835
836 public HTable createTable(byte[] tableName, byte[][] families,
837 int numVersions, byte[] startKey, byte[] endKey, int numRegions)
838 throws IOException{
839 HTableDescriptor desc = new HTableDescriptor(tableName);
840 for (byte[] family : families) {
841 HColumnDescriptor hcd = new HColumnDescriptor(family)
842 .setMaxVersions(numVersions);
843 desc.addFamily(hcd);
844 }
845 getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
846
847 waitUntilAllRegionsAssigned(tableName);
848 return new HTable(getConfiguration(), tableName);
849 }
850
851
852
853
854
855
856
857
858
859 public HTable createTable(byte[] tableName, byte[][] families,
860 final Configuration c)
861 throws IOException {
862 HTableDescriptor desc = new HTableDescriptor(tableName);
863 for(byte[] family : families) {
864 desc.addFamily(new HColumnDescriptor(family));
865 }
866 getHBaseAdmin().createTable(desc);
867
868 waitUntilAllRegionsAssigned(tableName);
869 return new HTable(c, tableName);
870 }
871
872
873
874
875
876
877
878
879
880
881 public HTable createTable(byte[] tableName, byte[][] families,
882 final Configuration c, int numVersions)
883 throws IOException {
884 HTableDescriptor desc = new HTableDescriptor(tableName);
885 for(byte[] family : families) {
886 HColumnDescriptor hcd = new HColumnDescriptor(family)
887 .setMaxVersions(numVersions);
888 desc.addFamily(hcd);
889 }
890 getHBaseAdmin().createTable(desc);
891
892 waitUntilAllRegionsAssigned(tableName);
893 return new HTable(c, tableName);
894 }
895
896
897
898
899
900
901
902
903
904 public HTable createTable(byte[] tableName, byte[] family, int numVersions)
905 throws IOException {
906 return createTable(tableName, new byte[][]{family}, numVersions);
907 }
908
909
910
911
912
913
914
915
916
917 public HTable createTable(byte[] tableName, byte[][] families,
918 int numVersions)
919 throws IOException {
920 HTableDescriptor desc = new HTableDescriptor(tableName);
921 for (byte[] family : families) {
922 HColumnDescriptor hcd = new HColumnDescriptor(family)
923 .setMaxVersions(numVersions);
924 desc.addFamily(hcd);
925 }
926 getHBaseAdmin().createTable(desc);
927
928 waitUntilAllRegionsAssigned(tableName);
929 return new HTable(new Configuration(getConfiguration()), tableName);
930 }
931
932
933
934
935
936
937
938
939
940 public HTable createTable(byte[] tableName, byte[][] families,
941 int numVersions, int blockSize) throws IOException {
942 HTableDescriptor desc = new HTableDescriptor(tableName);
943 for (byte[] family : families) {
944 HColumnDescriptor hcd = new HColumnDescriptor(family)
945 .setMaxVersions(numVersions)
946 .setBlocksize(blockSize);
947 desc.addFamily(hcd);
948 }
949 getHBaseAdmin().createTable(desc);
950
951 waitUntilAllRegionsAssigned(tableName);
952 return new HTable(new Configuration(getConfiguration()), tableName);
953 }
954
955
956
957
958
959
960
961
962
963 public HTable createTable(byte[] tableName, byte[][] families,
964 int[] numVersions)
965 throws IOException {
966 HTableDescriptor desc = new HTableDescriptor(tableName);
967 int i = 0;
968 for (byte[] family : families) {
969 HColumnDescriptor hcd = new HColumnDescriptor(family)
970 .setMaxVersions(numVersions[i]);
971 desc.addFamily(hcd);
972 i++;
973 }
974 getHBaseAdmin().createTable(desc);
975
976 waitUntilAllRegionsAssigned(tableName);
977 return new HTable(new Configuration(getConfiguration()), tableName);
978 }
979
980
981
982
983
984 public void deleteTable(byte[] tableName) throws IOException {
985 try {
986 getHBaseAdmin().disableTable(tableName);
987 } catch (TableNotEnabledException e) {
988 LOG.debug("Table: " + Bytes.toString(tableName) + " already disabled, so just deleting it.");
989 }
990 getHBaseAdmin().deleteTable(tableName);
991 }
992
993
994
995
996
997
998
999 public HTable truncateTable(byte [] tableName) throws IOException {
1000 HTable table = new HTable(getConfiguration(), tableName);
1001 Scan scan = new Scan();
1002 ResultScanner resScan = table.getScanner(scan);
1003 for(Result res : resScan) {
1004 Delete del = new Delete(res.getRow());
1005 table.delete(del);
1006 }
1007 resScan = table.getScanner(scan);
1008 resScan.close();
1009 return table;
1010 }
1011
1012
1013
1014
1015
1016
1017
1018
1019 public int loadTable(final HTable t, final byte[] f) throws IOException {
1020 t.setAutoFlush(false);
1021 byte[] k = new byte[3];
1022 int rowCount = 0;
1023 for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1024 for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1025 for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1026 k[0] = b1;
1027 k[1] = b2;
1028 k[2] = b3;
1029 Put put = new Put(k);
1030 put.add(f, null, k);
1031 t.put(put);
1032 rowCount++;
1033 }
1034 }
1035 }
1036 t.flushCommits();
1037 return rowCount;
1038 }
1039
1040
1041
1042
1043
1044
1045
1046
1047 public int loadTable(final HTable t, final byte[][] f) throws IOException {
1048 t.setAutoFlush(false);
1049 byte[] k = new byte[3];
1050 int rowCount = 0;
1051 for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1052 for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1053 for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1054 k[0] = b1;
1055 k[1] = b2;
1056 k[2] = b3;
1057 Put put = new Put(k);
1058 for (int i = 0; i < f.length; i++) {
1059 put.add(f[i], null, k);
1060 }
1061 t.put(put);
1062 rowCount++;
1063 }
1064 }
1065 }
1066 t.flushCommits();
1067 return rowCount;
1068 }
1069
1070
1071
1072
1073
1074
1075
1076
1077 public int loadRegion(final HRegion r, final byte[] f)
1078 throws IOException {
1079 return loadRegion(r, f, false);
1080 }
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090 public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
1091 throws IOException {
1092 byte[] k = new byte[3];
1093 int rowCount = 0;
1094 for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1095 for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1096 for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1097 k[0] = b1;
1098 k[1] = b2;
1099 k[2] = b3;
1100 Put put = new Put(k);
1101 put.add(f, null, k);
1102 if (r.getLog() == null) put.setWriteToWAL(false);
1103 r.put(put);
1104 rowCount++;
1105 }
1106 }
1107 if (flush) {
1108 r.flushcache();
1109 }
1110 }
1111 return rowCount;
1112 }
1113
1114
1115
1116
1117 public int countRows(final HTable table) throws IOException {
1118 Scan scan = new Scan();
1119 ResultScanner results = table.getScanner(scan);
1120 int count = 0;
1121 for (@SuppressWarnings("unused") Result res : results) {
1122 count++;
1123 }
1124 results.close();
1125 return count;
1126 }
1127
1128 public int countRows(final HTable table, final byte[]... families) throws IOException {
1129 Scan scan = new Scan();
1130 for (byte[] family: families) {
1131 scan.addFamily(family);
1132 }
1133 ResultScanner results = table.getScanner(scan);
1134 int count = 0;
1135 for (@SuppressWarnings("unused") Result res : results) {
1136 count++;
1137 }
1138 results.close();
1139 return count;
1140 }
1141
1142
1143
1144
1145 public String checksumRows(final HTable table) throws Exception {
1146 Scan scan = new Scan();
1147 ResultScanner results = table.getScanner(scan);
1148 MessageDigest digest = MessageDigest.getInstance("MD5");
1149 for (Result res : results) {
1150 digest.update(res.getRow());
1151 }
1152 results.close();
1153 return digest.toString();
1154 }
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164 public int createMultiRegions(HTable table, byte[] columnFamily)
1165 throws IOException {
1166 return createMultiRegions(table, columnFamily, true);
1167 }
1168
1169 public static final byte[][] KEYS = {
1170 HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
1171 Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
1172 Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
1173 Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
1174 Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
1175 Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
1176 Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
1177 Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
1178 Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
1179 };
1180
1181 public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
1182 Bytes.toBytes("bbb"),
1183 Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
1184 Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
1185 Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
1186 Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
1187 Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
1188 Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
1189 Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
1190 Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
1191 };
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203 public int createMultiRegions(HTable table, byte[] columnFamily, boolean cleanupFS)
1204 throws IOException {
1205 return createMultiRegions(getConfiguration(), table, columnFamily, KEYS, cleanupFS);
1206 }
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217 public int createMultiRegions(final Configuration c, final HTable table,
1218 final byte [] family, int numRegions)
1219 throws IOException {
1220 if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1221 byte [] startKey = Bytes.toBytes("aaaaa");
1222 byte [] endKey = Bytes.toBytes("zzzzz");
1223 byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1224 byte [][] regionStartKeys = new byte[splitKeys.length+1][];
1225 for (int i=0;i<splitKeys.length;i++) {
1226 regionStartKeys[i+1] = splitKeys[i];
1227 }
1228 regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
1229 return createMultiRegions(c, table, family, regionStartKeys);
1230 }
1231
1232 public int createMultiRegions(final Configuration c, final HTable table,
1233 final byte[] columnFamily, byte [][] startKeys) throws IOException {
1234 return createMultiRegions(c, table, columnFamily, startKeys, true);
1235 }
1236
1237 public int createMultiRegions(final Configuration c, final HTable table,
1238 final byte[] columnFamily, byte [][] startKeys, boolean cleanupFS)
1239 throws IOException {
1240 Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
1241 HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
1242 HTableDescriptor htd = table.getTableDescriptor();
1243 if(!htd.hasFamily(columnFamily)) {
1244 HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
1245 htd.addFamily(hcd);
1246 }
1247
1248
1249
1250
1251 List<byte[]> rows = getMetaTableRows(htd.getName());
1252 String regionToDeleteInFS = table
1253 .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
1254 .getRegionInfo().getEncodedName();
1255 List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
1256
1257 int count = 0;
1258 for (int i = 0; i < startKeys.length; i++) {
1259 int j = (i + 1) % startKeys.length;
1260 HRegionInfo hri = new HRegionInfo(table.getTableName(),
1261 startKeys[i], startKeys[j]);
1262 Put put = new Put(hri.getRegionName());
1263 put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
1264 Writables.getBytes(hri));
1265 meta.put(put);
1266 LOG.info("createMultiRegions: inserted " + hri.toString());
1267 newRegions.add(hri);
1268 count++;
1269 }
1270
1271 for (byte[] row : rows) {
1272 LOG.info("createMultiRegions: deleting meta row -> " +
1273 Bytes.toStringBinary(row));
1274 meta.delete(new Delete(row));
1275 }
1276 if (cleanupFS) {
1277
1278
1279 Path tableDir = new Path(getDefaultRootDirPath().toString()
1280 + System.getProperty("file.separator") + htd.getNameAsString()
1281 + System.getProperty("file.separator") + regionToDeleteInFS);
1282 FileSystem.get(c).delete(tableDir);
1283 }
1284
1285 HConnection conn = table.getConnection();
1286 conn.clearRegionCache();
1287
1288 HBaseAdmin admin = getHBaseAdmin();
1289 if (admin.isTableEnabled(table.getTableName())) {
1290 for(HRegionInfo hri : newRegions) {
1291 admin.assign(hri.getRegionName());
1292 }
1293 }
1294
1295 meta.close();
1296
1297 return count;
1298 }
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310 public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
1311 final HTableDescriptor htd, byte [][] startKeys)
1312 throws IOException {
1313 HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
1314 Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
1315 List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
1316
1317 for (int i = 0; i < startKeys.length; i++) {
1318 int j = (i + 1) % startKeys.length;
1319 HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
1320 startKeys[j]);
1321 Put put = new Put(hri.getRegionName());
1322 put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
1323 Writables.getBytes(hri));
1324 meta.put(put);
1325 LOG.info("createMultiRegionsInMeta: inserted " + hri.toString());
1326 newRegions.add(hri);
1327 }
1328
1329 meta.close();
1330 return newRegions;
1331 }
1332
1333
1334
1335
1336
1337
1338 public List<byte[]> getMetaTableRows() throws IOException {
1339
1340 HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
1341 List<byte[]> rows = new ArrayList<byte[]>();
1342 ResultScanner s = t.getScanner(new Scan());
1343 for (Result result : s) {
1344 LOG.info("getMetaTableRows: row -> " +
1345 Bytes.toStringBinary(result.getRow()));
1346 rows.add(result.getRow());
1347 }
1348 s.close();
1349 t.close();
1350 return rows;
1351 }
1352
1353
1354
1355
1356
1357
1358 public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
1359
1360 HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
1361 List<byte[]> rows = new ArrayList<byte[]>();
1362 ResultScanner s = t.getScanner(new Scan());
1363 for (Result result : s) {
1364 byte[] val = result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
1365 if (val == null) {
1366 LOG.error("No region info for row " + Bytes.toString(result.getRow()));
1367
1368 continue;
1369 }
1370 HRegionInfo info = Writables.getHRegionInfo(val);
1371 if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
1372 LOG.info("getMetaTableRows: row -> " +
1373 Bytes.toStringBinary(result.getRow()) + info);
1374 rows.add(result.getRow());
1375 }
1376 }
1377 s.close();
1378 t.close();
1379 return rows;
1380 }
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392 public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
1393 throws IOException {
1394 List<byte[]> metaRows = getMetaTableRows(tableName);
1395 if (metaRows == null || metaRows.isEmpty()) {
1396 return null;
1397 }
1398 LOG.debug("Found " + metaRows.size() + " rows for table " +
1399 Bytes.toString(tableName));
1400 byte [] firstrow = metaRows.get(0);
1401 LOG.debug("FirstRow=" + Bytes.toString(firstrow));
1402 int index = getMiniHBaseCluster().getServerWith(firstrow);
1403 return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
1404 }
1405
1406
1407
1408
1409
1410
1411
1412 public void startMiniMapReduceCluster() throws IOException {
1413 startMiniMapReduceCluster(2);
1414 }
1415
1416
1417
1418
1419
1420
1421
1422 public void startMiniMapReduceCluster(final int servers) throws IOException {
1423 LOG.info("Starting mini mapreduce cluster...");
1424 if (dataTestDir == null) {
1425 setupDataTestDir();
1426 }
1427
1428 Configuration c = getConfiguration();
1429 String logDir = c.get("hadoop.log.dir");
1430 String tmpDir = c.get("hadoop.tmp.dir");
1431 if (logDir == null) {
1432 logDir = tmpDir;
1433 }
1434 System.setProperty("hadoop.log.dir", logDir);
1435 c.set("mapred.output.dir", tmpDir);
1436
1437
1438
1439 conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
1440
1441 mrCluster = new MiniMRCluster(0, 0, servers,
1442 FileSystem.get(conf).getUri().toString(), 1, null, null, null, new JobConf(conf));
1443
1444 JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
1445 if (jobConf == null) {
1446 jobConf = mrCluster.createJobConf();
1447 }
1448 jobConf.set("mapred.local.dir",
1449 conf.get("mapred.local.dir"));
1450
1451 LOG.info("Mini mapreduce cluster started");
1452 JobConf mrClusterJobConf = mrCluster.createJobConf();
1453 c.set("mapred.job.tracker", mrClusterJobConf.get("mapred.job.tracker"));
1454
1455 conf.set("mapreduce.framework.name", "yarn");
1456 conf.setBoolean("yarn.is.minicluster", true);
1457 String rmAdress = mrClusterJobConf.get("yarn.resourcemanager.address");
1458 if (rmAdress != null) {
1459 conf.set("yarn.resourcemanager.address", rmAdress);
1460 }
1461 String schedulerAdress =
1462 mrClusterJobConf.get("yarn.resourcemanager.scheduler.address");
1463 if (schedulerAdress != null) {
1464 conf.set("yarn.resourcemanager.scheduler.address", schedulerAdress);
1465 }
1466 }
1467
1468
1469
1470
1471 public void shutdownMiniMapReduceCluster() {
1472 LOG.info("Stopping mini mapreduce cluster...");
1473 if (mrCluster != null) {
1474 mrCluster.shutdown();
1475 mrCluster = null;
1476 }
1477
1478 conf.set("mapred.job.tracker", "local");
1479 LOG.info("Mini mapreduce cluster stopped");
1480 }
1481
1482
1483
1484
1485
1486
1487 public void enableDebug(Class<?> clazz) {
1488 Log l = LogFactory.getLog(clazz);
1489 if (l instanceof Log4JLogger) {
1490 ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
1491 } else if (l instanceof Jdk14Logger) {
1492 ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
1493 }
1494 }
1495
1496
1497
1498
1499
1500 public void expireMasterSession() throws Exception {
1501 HMaster master = getMiniHBaseCluster().getMaster();
1502 expireSession(master.getZooKeeper(), false);
1503 }
1504
1505
1506
1507
1508
1509
1510 public void expireRegionServerSession(int index) throws Exception {
1511 HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
1512 expireSession(rs.getZooKeeper(), false);
1513 decrementMinRegionServerCount();
1514 }
1515
1516 private void decrementMinRegionServerCount() {
1517
1518
1519 decrementMinRegionServerCount(getConfiguration());
1520
1521
1522 for (MasterThread master : getHBaseCluster().getMasterThreads()) {
1523 decrementMinRegionServerCount(master.getMaster().getConfiguration());
1524 }
1525 }
1526
1527 private void decrementMinRegionServerCount(Configuration conf) {
1528 int currentCount = conf.getInt(
1529 ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
1530 if (currentCount != -1) {
1531 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
1532 Math.max(currentCount - 1, 1));
1533 }
1534 }
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547 public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
1548 throws Exception {
1549 Configuration c = new Configuration(this.conf);
1550 String quorumServers = ZKConfig.getZKQuorumServersString(c);
1551 int sessionTimeout = 500;
1552 ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
1553 byte[] password = zk.getSessionPasswd();
1554 long sessionID = zk.getSessionId();
1555
1556
1557
1558
1559
1560
1561
1562
1563 ZooKeeper monitor = new ZooKeeper(quorumServers,
1564 1000, new org.apache.zookeeper.Watcher(){
1565 @Override
1566 public void process(WatchedEvent watchedEvent) {
1567 LOG.info("Monitor ZKW received event="+watchedEvent);
1568 }
1569 } , sessionID, password);
1570
1571
1572 ZooKeeper newZK = new ZooKeeper(quorumServers,
1573 sessionTimeout, EmptyWatcher.instance, sessionID, password);
1574 newZK.close();
1575 LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
1576
1577
1578 monitor.close();
1579
1580 if (checkStatus) {
1581 new HTable(new Configuration(conf), HConstants.META_TABLE_NAME).close();
1582 }
1583 }
1584
1585
1586
1587
1588
1589
1590
1591 public MiniHBaseCluster getHBaseCluster() {
1592 return getMiniHBaseCluster();
1593 }
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603 public HBaseCluster getHBaseClusterInterface() {
1604
1605
1606 return hbaseCluster;
1607 }
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618 public synchronized HBaseAdmin getHBaseAdmin()
1619 throws IOException {
1620 if (hbaseAdmin == null){
1621 hbaseAdmin = new HBaseAdmin(new Configuration(getConfiguration()));
1622 }
1623 return hbaseAdmin;
1624 }
1625 private HBaseAdmin hbaseAdmin = null;
1626
1627
1628
1629
1630
1631
1632
1633 public void closeRegion(String regionName) throws IOException {
1634 closeRegion(Bytes.toBytes(regionName));
1635 }
1636
1637
1638
1639
1640
1641
1642
1643 public void closeRegion(byte[] regionName) throws IOException {
1644 getHBaseAdmin().closeRegion(regionName, null);
1645 }
1646
1647
1648
1649
1650
1651
1652
1653
1654 public void closeRegionByRow(String row, HTable table) throws IOException {
1655 closeRegionByRow(Bytes.toBytes(row), table);
1656 }
1657
1658
1659
1660
1661
1662
1663
1664
1665 public void closeRegionByRow(byte[] row, HTable table) throws IOException {
1666 HRegionLocation hrl = table.getRegionLocation(row);
1667 closeRegion(hrl.getRegionInfo().getRegionName());
1668 }
1669
1670 public MiniZooKeeperCluster getZkCluster() {
1671 return zkCluster;
1672 }
1673
1674 public void setZkCluster(MiniZooKeeperCluster zkCluster) {
1675 this.passedZkCluster = true;
1676 this.zkCluster = zkCluster;
1677 conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
1678 }
1679
1680 public MiniDFSCluster getDFSCluster() {
1681 return dfsCluster;
1682 }
1683
1684 public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
1685 if (dfsCluster != null && dfsCluster.isClusterUp()) {
1686 throw new IOException("DFSCluster is already running! Shut it down first.");
1687 }
1688 this.dfsCluster = cluster;
1689 }
1690
1691 public FileSystem getTestFileSystem() throws IOException {
1692 return HFileSystem.get(conf);
1693 }
1694
1695
1696
1697
1698
1699 public boolean cleanupTestDir() throws IOException {
1700 if (dataTestDir == null ){
1701 return false;
1702 } else {
1703 boolean ret = deleteDir(getDataTestDir());
1704 dataTestDir = null;
1705 return ret;
1706 }
1707 }
1708
1709
1710
1711
1712
1713
1714 public boolean cleanupTestDir(final String subdir) throws IOException {
1715 if (dataTestDir == null){
1716 return false;
1717 }
1718 return deleteDir(getDataTestDir(subdir));
1719 }
1720
1721
1722
1723
1724
1725
1726 public boolean deleteDir(final Path dir) throws IOException {
1727 FileSystem fs = getTestFileSystem();
1728 if (fs.exists(dir)) {
1729 return fs.delete(getDataTestDir(), true);
1730 }
1731 return false;
1732 }
1733
1734 public void waitTableAvailable(byte[] table, long timeoutMillis)
1735 throws InterruptedException, IOException {
1736 long startWait = System.currentTimeMillis();
1737 while (!getHBaseAdmin().isTableAvailable(table)) {
1738 assertTrue("Timed out waiting for table to become available " +
1739 Bytes.toStringBinary(table),
1740 System.currentTimeMillis() - startWait < timeoutMillis);
1741 Thread.sleep(200);
1742 }
1743 }
1744
1745 public void waitTableEnabled(byte[] table, long timeoutMillis)
1746 throws InterruptedException, IOException {
1747 long startWait = System.currentTimeMillis();
1748 while (!getHBaseAdmin().isTableAvailable(table) &&
1749 !getHBaseAdmin().isTableEnabled(table)) {
1750 assertTrue("Timed out waiting for table to become available and enabled " +
1751 Bytes.toStringBinary(table),
1752 System.currentTimeMillis() - startWait < timeoutMillis);
1753 Thread.sleep(200);
1754 }
1755 }
1756
1757
1758
1759
1760
1761
1762
1763
1764 public boolean ensureSomeRegionServersAvailable(final int num)
1765 throws IOException {
1766 boolean startedServer = false;
1767 MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
1768 for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
1769 LOG.info("Started new server=" + hbaseCluster.startRegionServer());
1770 startedServer = true;
1771 }
1772
1773 return startedServer;
1774 }
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784 public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
1785 throws IOException {
1786 boolean startedServer = ensureSomeRegionServersAvailable(num);
1787
1788 int nonStoppedServers = 0;
1789 for (JVMClusterUtil.RegionServerThread rst :
1790 getMiniHBaseCluster().getRegionServerThreads()) {
1791
1792 HRegionServer hrs = rst.getRegionServer();
1793 if (hrs.isStopping() || hrs.isStopped()) {
1794 LOG.info("A region server is stopped or stopping:"+hrs);
1795 } else {
1796 nonStoppedServers++;
1797 }
1798 }
1799 for (int i=nonStoppedServers; i<num; ++i) {
1800 LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
1801 startedServer = true;
1802 }
1803 return startedServer;
1804 }
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816 public static User getDifferentUser(final Configuration c,
1817 final String differentiatingSuffix)
1818 throws IOException {
1819 FileSystem currentfs = FileSystem.get(c);
1820 if (!(currentfs instanceof DistributedFileSystem)) {
1821 return User.getCurrent();
1822 }
1823
1824
1825 String username = User.getCurrent().getName() +
1826 differentiatingSuffix;
1827 User user = User.createUserForTesting(c, username,
1828 new String[]{"supergroup"});
1829 return user;
1830 }
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845 public static void setMaxRecoveryErrorCount(final OutputStream stream,
1846 final int max) {
1847 try {
1848 Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
1849 for (Class<?> clazz: clazzes) {
1850 String className = clazz.getSimpleName();
1851 if (className.equals("DFSOutputStream")) {
1852 if (clazz.isInstance(stream)) {
1853 Field maxRecoveryErrorCountField =
1854 stream.getClass().getDeclaredField("maxRecoveryErrorCount");
1855 maxRecoveryErrorCountField.setAccessible(true);
1856 maxRecoveryErrorCountField.setInt(stream, max);
1857 break;
1858 }
1859 }
1860 }
1861 } catch (Exception e) {
1862 LOG.info("Could not set max recovery field", e);
1863 }
1864 }
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874 public void waitUntilAllRegionsAssigned(final byte[] tableName) throws IOException {
1875 waitUntilAllRegionsAssigned(tableName, 60000);
1876 }
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887 public void waitUntilAllRegionsAssigned(final byte[] tableName, final long timeout)
1888 throws IOException {
1889 long deadline = System.currentTimeMillis() + timeout;
1890 HTable meta = new HTable(getConfiguration(), HConstants.META_TABLE_NAME);
1891 try {
1892 while (true) {
1893 boolean allRegionsAssigned = true;
1894 Scan scan = new Scan();
1895 scan.addFamily(HConstants.CATALOG_FAMILY);
1896 ResultScanner s = meta.getScanner(scan);
1897 try {
1898 Result r;
1899 while ((r = s.next()) != null) {
1900 byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
1901 HRegionInfo info = Writables.getHRegionInfoOrNull(b);
1902 if (info != null && Bytes.equals(info.getTableName(), tableName)) {
1903 b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
1904 allRegionsAssigned &= (b != null);
1905 }
1906 }
1907 } finally {
1908 s.close();
1909 }
1910 if (allRegionsAssigned) {
1911 return;
1912 }
1913 long now = System.currentTimeMillis();
1914 if (now > deadline) {
1915 throw new IOException("Timeout waiting for all regions of " +
1916 Bytes.toStringBinary(tableName) + " to be assigned");
1917 }
1918 try {
1919 Thread.sleep(deadline - now < 200 ? deadline - now : 200);
1920 } catch (InterruptedException e) {
1921 throw new IOException(e);
1922 }
1923 }
1924 } finally {
1925 meta.close();
1926 }
1927 }
1928
1929
1930
1931
1932
1933 public static List<KeyValue> getFromStoreFile(Store store,
1934 Get get) throws IOException {
1935 MultiVersionConsistencyControl.resetThreadReadPoint();
1936 Scan scan = new Scan(get);
1937 InternalScanner scanner = (InternalScanner) store.getScanner(scan,
1938 scan.getFamilyMap().get(store.getFamily().getName()));
1939
1940 List<KeyValue> result = new ArrayList<KeyValue>();
1941 scanner.next(result);
1942 if (!result.isEmpty()) {
1943
1944 KeyValue kv = result.get(0);
1945 if (!Bytes.equals(kv.getRow(), get.getRow())) {
1946 result.clear();
1947 }
1948 }
1949 scanner.close();
1950 return result;
1951 }
1952
1953
1954
1955
1956
1957 public static List<KeyValue> getFromStoreFile(Store store,
1958 byte [] row,
1959 NavigableSet<byte[]> columns
1960 ) throws IOException {
1961 Get get = new Get(row);
1962 Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
1963 s.put(store.getFamily().getName(), columns);
1964
1965 return getFromStoreFile(store,get);
1966 }
1967
1968
1969
1970
1971
1972 public static ZooKeeperWatcher getZooKeeperWatcher(
1973 HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
1974 IOException {
1975 ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
1976 "unittest", new Abortable() {
1977 boolean aborted = false;
1978
1979 @Override
1980 public void abort(String why, Throwable e) {
1981 aborted = true;
1982 throw new RuntimeException("Fatal ZK error, why=" + why, e);
1983 }
1984
1985 @Override
1986 public boolean isAborted() {
1987 return aborted;
1988 }
1989 });
1990 return zkw;
1991 }
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004 public static ZooKeeperWatcher createAndForceNodeToOpenedState(
2005 HBaseTestingUtility TEST_UTIL, HRegion region,
2006 ServerName serverName) throws ZooKeeperConnectionException,
2007 IOException, KeeperException, NodeExistsException {
2008 ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
2009 ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
2010 int version = ZKAssign.transitionNodeOpening(zkw, region
2011 .getRegionInfo(), serverName);
2012 ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
2013 version);
2014 return zkw;
2015 }
2016
2017 public static void assertKVListsEqual(String additionalMsg,
2018 final List<KeyValue> expected,
2019 final List<KeyValue> actual) {
2020 final int eLen = expected.size();
2021 final int aLen = actual.size();
2022 final int minLen = Math.min(eLen, aLen);
2023
2024 int i;
2025 for (i = 0; i < minLen
2026 && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
2027 ++i) {}
2028
2029 if (additionalMsg == null) {
2030 additionalMsg = "";
2031 }
2032 if (!additionalMsg.isEmpty()) {
2033 additionalMsg = ". " + additionalMsg;
2034 }
2035
2036 if (eLen != aLen || i != minLen) {
2037 throw new AssertionError(
2038 "Expected and actual KV arrays differ at position " + i + ": " +
2039 safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
2040 safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
2041 }
2042 }
2043
2044 private static <T> String safeGetAsStr(List<T> lst, int i) {
2045 if (0 <= i && i < lst.size()) {
2046 return lst.get(i).toString();
2047 } else {
2048 return "<out_of_range>";
2049 }
2050 }
2051
2052 public String getClusterKey() {
2053 return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
2054 + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
2055 + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
2056 HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
2057 }
2058
2059
2060 public HTable createRandomTable(String tableName,
2061 final Collection<String> families,
2062 final int maxVersions,
2063 final int numColsPerRow,
2064 final int numFlushes,
2065 final int numRegions,
2066 final int numRowsPerFlush)
2067 throws IOException, InterruptedException {
2068
2069 LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
2070 " regions, " + numFlushes + " storefiles per region, " +
2071 numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
2072 "\n");
2073
2074 final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
2075 final int numCF = families.size();
2076 final byte[][] cfBytes = new byte[numCF][];
2077 final byte[] tableNameBytes = Bytes.toBytes(tableName);
2078
2079 {
2080 int cfIndex = 0;
2081 for (String cf : families) {
2082 cfBytes[cfIndex++] = Bytes.toBytes(cf);
2083 }
2084 }
2085
2086 final int actualStartKey = 0;
2087 final int actualEndKey = Integer.MAX_VALUE;
2088 final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
2089 final int splitStartKey = actualStartKey + keysPerRegion;
2090 final int splitEndKey = actualEndKey - keysPerRegion;
2091 final String keyFormat = "%08x";
2092 final HTable table = createTable(tableNameBytes, cfBytes,
2093 maxVersions,
2094 Bytes.toBytes(String.format(keyFormat, splitStartKey)),
2095 Bytes.toBytes(String.format(keyFormat, splitEndKey)),
2096 numRegions);
2097 if (hbaseCluster != null) {
2098 getMiniHBaseCluster().flushcache(HConstants.META_TABLE_NAME);
2099 }
2100
2101 for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
2102 for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
2103 final byte[] row = Bytes.toBytes(String.format(keyFormat,
2104 actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
2105
2106 Put put = new Put(row);
2107 Delete del = new Delete(row);
2108 for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
2109 final byte[] cf = cfBytes[rand.nextInt(numCF)];
2110 final long ts = rand.nextInt();
2111 final byte[] qual = Bytes.toBytes("col" + iCol);
2112 if (rand.nextBoolean()) {
2113 final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
2114 "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
2115 ts + "_random_" + rand.nextLong());
2116 put.add(cf, qual, ts, value);
2117 } else if (rand.nextDouble() < 0.8) {
2118 del.deleteColumn(cf, qual, ts);
2119 } else {
2120 del.deleteColumns(cf, qual, ts);
2121 }
2122 }
2123
2124 if (!put.isEmpty()) {
2125 table.put(put);
2126 }
2127
2128 if (!del.isEmpty()) {
2129 table.delete(del);
2130 }
2131 }
2132 LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
2133 table.flushCommits();
2134 if (hbaseCluster != null) {
2135 getMiniHBaseCluster().flushcache(tableNameBytes);
2136 }
2137 }
2138
2139 return table;
2140 }
2141
2142 private static final int MIN_RANDOM_PORT = 0xc000;
2143 private static final int MAX_RANDOM_PORT = 0xfffe;
2144
2145
2146
2147
2148
2149 public static int randomPort() {
2150 return MIN_RANDOM_PORT
2151 + new Random().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
2152 }
2153
2154 public static int randomFreePort() {
2155 int port = 0;
2156 do {
2157 port = randomPort();
2158 try {
2159 ServerSocket sock = new ServerSocket(port);
2160 sock.close();
2161 } catch (IOException ex) {
2162 port = 0;
2163 }
2164 } while (port == 0);
2165 return port;
2166 }
2167
2168 public static void waitForHostPort(String host, int port)
2169 throws IOException {
2170 final int maxTimeMs = 10000;
2171 final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
2172 IOException savedException = null;
2173 LOG.info("Waiting for server at " + host + ":" + port);
2174 for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
2175 try {
2176 Socket sock = new Socket(InetAddress.getByName(host), port);
2177 sock.close();
2178 savedException = null;
2179 LOG.info("Server at " + host + ":" + port + " is available");
2180 break;
2181 } catch (UnknownHostException e) {
2182 throw new IOException("Failed to look up " + host, e);
2183 } catch (IOException e) {
2184 savedException = e;
2185 }
2186 Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
2187 }
2188
2189 if (savedException != null) {
2190 throw savedException;
2191 }
2192 }
2193
2194
2195
2196
2197
2198
2199 public static int createPreSplitLoadTestTable(Configuration conf,
2200 byte[] tableName, byte[] columnFamily, Algorithm compression,
2201 DataBlockEncoding dataBlockEncoding) throws IOException {
2202 HTableDescriptor desc = new HTableDescriptor(tableName);
2203 HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
2204 hcd.setDataBlockEncoding(dataBlockEncoding);
2205 hcd.setCompressionType(compression);
2206 return createPreSplitLoadTestTable(conf, desc, hcd);
2207 }
2208
2209
2210
2211
2212
2213
2214 public static int createPreSplitLoadTestTable(Configuration conf,
2215 HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
2216 if (!desc.hasFamily(hcd.getName())) {
2217 desc.addFamily(hcd);
2218 }
2219
2220 int totalNumberOfRegions = 0;
2221 HBaseAdmin admin = new HBaseAdmin(conf);
2222 try {
2223
2224
2225
2226 int numberOfServers = admin.getClusterStatus().getServers().size();
2227 if (numberOfServers == 0) {
2228 throw new IllegalStateException("No live regionservers");
2229 }
2230
2231 totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
2232 LOG.info("Number of live regionservers: " + numberOfServers + ", " +
2233 "pre-splitting table into " + totalNumberOfRegions + " regions " +
2234 "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");
2235
2236 byte[][] splits = new RegionSplitter.HexStringSplit().split(
2237 totalNumberOfRegions);
2238
2239 admin.createTable(desc, splits);
2240 admin.close();
2241 } catch (MasterNotRunningException e) {
2242 LOG.error("Master not running", e);
2243 throw new IOException(e);
2244 } catch (TableExistsException e) {
2245 LOG.warn("Table " + Bytes.toStringBinary(desc.getName()) +
2246 " already exists, continuing");
2247 } finally {
2248 admin.close();
2249 }
2250 return totalNumberOfRegions;
2251 }
2252
2253 public static int getMetaRSPort(Configuration conf) throws IOException {
2254 HTable table = new HTable(conf, HConstants.META_TABLE_NAME);
2255 HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
2256 table.close();
2257 return hloc.getPort();
2258 }
2259
2260 public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
2261 throws IOException {
2262 HTableDescriptor htd = new HTableDescriptor(tableName);
2263 htd.addFamily(hcd);
2264 HRegionInfo info =
2265 new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
2266 HRegion region =
2267 HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
2268 return region;
2269 }
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279 public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
2280 assertTrue(numRegions>3);
2281 byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
2282 byte [][] result = new byte[tmpSplitKeys.length+1][];
2283 for (int i=0;i<tmpSplitKeys.length;i++) {
2284 result[i+1] = tmpSplitKeys[i];
2285 }
2286 result[0] = HConstants.EMPTY_BYTE_ARRAY;
2287 return result;
2288 }
2289
2290
2291
2292
2293
2294
2295 public static List<HColumnDescriptor> generateColumnDescriptors() {
2296 return generateColumnDescriptors("");
2297 }
2298
2299
2300
2301
2302
2303
2304
2305 public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
2306 List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
2307 long familyId = 0;
2308 for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
2309 for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
2310 for (StoreFile.BloomType bloomType: StoreFile.BloomType.values()) {
2311 String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
2312 HColumnDescriptor htd = new HColumnDescriptor(name);
2313 htd.setCompressionType(compressionType);
2314 htd.setDataBlockEncoding(encodingType);
2315 htd.setBloomFilterType(bloomType);
2316 htds.add(htd);
2317 familyId++;
2318 }
2319 }
2320 }
2321 return htds;
2322 }
2323
2324
2325
2326
2327
2328 public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
2329 String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
2330 List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
2331 for (String algoName : allAlgos) {
2332 try {
2333 Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
2334 algo.getCompressor();
2335 supportedAlgos.add(algo);
2336 } catch (Throwable t) {
2337
2338 }
2339 }
2340 return supportedAlgos.toArray(new Compression.Algorithm[0]);
2341 }
2342 }