/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

/**
 * Facility for testing HBase. Spins up (and tears down) mini DFS, ZooKeeper,
 * MapReduce and HBase clusters, manages the local directories those clusters
 * write into, and provides helpers for creating, loading and inspecting
 * tables. Typical use is one instance per test class: start a cluster before
 * the tests run and shut it down afterwards.
 */
public class HBaseTestingUtility {
  private static final Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
  private Configuration conf;
  private MiniZooKeeperCluster zkCluster = null;

  /** Regions per regionserver when pre-splitting a load test table. */
  private static final int DEFAULT_REGIONS_PER_SERVER = 5;

  /**
   * Set if we were passed a zkCluster.  If so, we won't shut down zk as
   * part of the general shutdown.
   */
  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  private HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** Directory where everything this test instance writes goes; deleted on exit. */
  private File dataTestDir = null;

  /** Directory on the local filesystem used by the mini DFS cluster. */
  private File clusterTestDir = null;

  /** System property key to get test directory value. */
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  /** System property key to get base test directory value. */
  public static final String BASE_TEST_DIRECTORY_KEY =
    "test.build.data.basedirectory";

  /** Default base directory for test output. */
  public static final String DEFAULT_BASE_TEST_DIRECTORY = "target/test-data";

  /** Compression algorithms to use in parameterized JUnit 4 tests. */
  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Compression.Algorithm.NONE },
      { Compression.Algorithm.GZ }
    });

  /** Parameters for unit tests parameterized over a single boolean. */
  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
    Arrays.asList(new Object[][] {
      { Boolean.FALSE },
      { Boolean.TRUE }
    });

  /** Compression algorithms to use in testing. */
  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
    Compression.Algorithm.NONE, Compression.Algorithm.GZ
  };

  /**
   * Creates all combinations of Bloom filter type and compression algorithm
   * for use in parameterized tests.
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<Object[]>();
    for (Compression.Algorithm comprAlgo :
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
      for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    this.conf = conf;

    // Make an HBase checksum verification failure throw rather than be
    // silently swallowed, so unit tests surface checksum problems.
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }
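
  // Illustrative sketch (not part of the original class): tests typically
  // tweak the Configuration before starting any mini clusters.  The property
  // below is just an example choice.
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   conf.setInt("hbase.regionserver.msginterval", 100);
  //   HBaseTestingUtility util = new HBaseTestingUtility(conf);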

  /**
   * Returns this utility's Configuration instance.  Mutations made to it
   * before the mini clusters start are picked up by those clusters.
   */
  public Configuration getConfiguration() {
    return this.conf;
  }

  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  /**
   * @return Where to write test data on the local filesystem; usually
   * {@link #DEFAULT_BASE_TEST_DIRECTORY}, overridable via the
   * {@link #BASE_TEST_DIRECTORY_KEY} system property.
   */
  private Path getBaseTestDir() {
    String pathName = System.getProperty(
        BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);

    return new Path(pathName);
  }

  /**
   * @return Where this test instance writes data on the local filesystem.
   * Created lazily, once per instance.
   */
  public Path getDataTestDir() {
    if (dataTestDir == null) {
      setupDataTestDir();
    }
    return new Path(dataTestDir.getAbsolutePath());
  }

  /**
   * @return Where the mini DFS cluster writes data on the local filesystem.
   * Created lazily, once per instance.
   */
  public Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }

  /**
   * @param subdirName the desired subdirectory name
   * @return Path to a subdirectory named <code>subdirName</code> under
   * {@link #getDataTestDir()}.  The subdirectory is not created.
   */
  public Path getDataTestDir(final String subdirName) {
    return new Path(getDataTestDir(), subdirName);
  }

  /**
   * Sets up a directory, unique per test instance, for everything this test
   * writes, and points the various Hadoop/HBase scratch-directory properties
   * at subdirectories of it so concurrent tests do not pollute each other.
   */
  private void setupDataTestDir() {
    if (dataTestDir != null) {
      LOG.warn("Data test dir already setup in " +
          dataTestDir.getAbsolutePath());
      return;
    }

    String randomStr = UUID.randomUUID().toString();
    Path testPath = new Path(getBaseTestDir(), randomStr);

    dataTestDir = new File(testPath.toString()).getAbsoluteFile();
    dataTestDir.deleteOnExit();

    createSubDirAndSystemProperty(
        "hadoop.log.dir",
        testPath, "hadoop-log-dir");

    createSubDirAndSystemProperty(
        "hadoop.tmp.dir",
        testPath, "hadoop-tmp-dir");

    createSubDir(
        "mapred.local.dir",
        testPath, "mapred-local-dir");

    createSubDirAndSystemProperty(
        "mapred.working.dir",
        testPath, "mapred-working-dir");

    createSubDir(
        "hbase.local.dir",
        testPath, "hbase-local-dir");
  }

  private void createSubDir(String propertyName, Path parent, String subDirName) {
    Path newPath = new Path(parent, subDirName);
    File newDir = new File(newPath.toString()).getAbsoluteFile();
    newDir.deleteOnExit();
    conf.set(propertyName, newDir.getAbsolutePath());
  }

  private void createSubDirAndSystemProperty(
      String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // A value is already set, possibly shared by tests running in parallel:
      // respect it instead of creating a new subdirectory.
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
          sysValue + " so I do NOT create it in " + dataTestDir.getAbsolutePath());
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
            propertyName + " property value differs in configuration and system: " +
            "Configuration=" + confValue + " while System=" + sysValue +
            ". Overriding the configuration value with the system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      // Not set, so create it as a subdirectory of the test path.
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }
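
  // Illustrative sketch (assumed layout, not asserted anywhere): with the
  // defaults above, a test instance ends up with a tree such as
  //
  //   target/test-data/<random-uuid>/hadoop-log-dir
  //   target/test-data/<random-uuid>/hadoop-tmp-dir
  //   target/test-data/<random-uuid>/mapred-local-dir
  //
  // and a whole run can be redirected with
  // -Dtest.build.data.basedirectory=/some/other/dir on the JVM command line.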

  /**
   * Creates a directory for the mini DFS cluster, under the test data
   * directory.
   */
  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      LOG.warn("Cluster test dir already setup in " +
          clusterTestDir.getAbsolutePath());
      return;
    }

    // Using randomUUID ensures that multiple clusters launched by the same
    // test instance do not collide.
    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
    // Have it cleaned up on exit.
    clusterTestDir.deleteOnExit();
  }

  /**
   * Guard used before starting a new cluster.
   * @throws IOException if a cluster is already running.
   */
  public void isRunningCluster() throws IOException {
    if (dfsCluster == null) return;
    throw new IOException("Cluster already running at " +
        this.clusterTestDir);
  }

  /**
   * Start a mini dfs cluster.
   * @param servers How many DNs to start.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  /**
   * Start a mini dfs cluster.  Can only create one.
   * @param hosts hostnames for the DNs; if null or empty, one DN is started.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
      throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  /**
   * Start a mini dfs cluster.  Can only create one.
   * @param servers How many DNs to start.
   * @param hosts hostnames for the DNs; overrides <code>servers</code> when
   * non-empty.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
      throws Exception {

    // Check that there is not already a cluster running.
    isRunningCluster();

    // Initialize the local directory used by the MiniDFS.
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }

    // We have to set this property as it is used by MiniCluster.
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.toString());

    // MiniDFSCluster also reads test.cache.data; point it at the same
    // directory so nothing lands outside the test tree.
    System.setProperty("test.cache.data", this.clusterTestDir.toString());

    // Ok, now we can start.
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, null, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());
    // Do old style too just to be safe.
    this.conf.set("fs.default.name", fs.getUri().toString());

    // Wait for the cluster to be totally up.
    this.dfsCluster.waitClusterUp();

    return this.dfsCluster;
  }

  /**
   * Shuts down the mini dfs cluster, if one was started by this instance.
   * @see #startMiniDFSCluster(int)
   */
  public void shutdownMiniDFSCluster() throws Exception {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
    }
  }

  /**
   * Call this if you only want a zk cluster.
   * @see #startMiniCluster() if you want zk + dfs + hbase mini clusters.
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  /**
   * Call this if you only want a zk cluster.
   * @param zooKeeperServerNum number of ZK servers to run.
   * @see #shutdownMiniZKCluster()
   * @return zk cluster started.
   */
  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    File zkClusterFile = new File(getClusterTestDir().toString());
    return startMiniZKCluster(zkClusterFile, zooKeeperServerNum);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
      throws Exception {
    return startMiniZKCluster(dir, 1);
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
      throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
        Integer.toString(clientPort));
    return this.zkCluster;
  }

  /**
   * Shuts down the zk cluster created by a call to
   * {@link #startMiniZKCluster(File)}, or does nothing.
   * @see #startMiniZKCluster()
   */
  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) {
      this.zkCluster.shutdown();
      this.zkCluster = null;
    }
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }
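
  // Illustrative sketch (assumed test-class shape, not part of this file):
  // the usual JUnit 4 pattern around the mini cluster looks like this.
  //
  //   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  //
  //   @BeforeClass public static void beforeClass() throws Exception {
  //     TEST_UTIL.startMiniCluster();   // dfs + zk + hbase
  //   }
  //
  //   @AfterClass public static void afterClass() throws Exception {
  //     TEST_UTIL.shutdownMiniCluster();
  //   }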

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper with one master and
   * <code>numSlaves</code> regionservers; each slave also hosts a datanode.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
      throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  /**
   * Start a minicluster with the given numbers of masters and slaves.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }

  /**
   * Start a minicluster.  If <code>dataNodeHosts</code> is non-empty, the
   * number of datanodes follows its length rather than
   * <code>numSlaves</code>; the regionserver count is still
   * <code>numSlaves</code>.
   * @see #shutdownMiniCluster()
   * @return Mini hbase cluster instance created.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts)
      throws Exception {
    int numDataNodes = numSlaves;
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // If we already brought up a cluster, fail.
    isRunningCluster();

    // Bring up the mini dfs cluster first.
    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    // Start up a zk cluster, unless one was passed in.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Start the MiniHBaseCluster.
    return startMiniHBaseCluster(numMasters, numSlaves);
  }

  /**
   * Starts up the hbase cluster alone; dfs and zk must already be running.
   * Usually you won't want this; use {@link #startMiniCluster()} instead.
   * @param numMasters number of masters to start.
   * @param numSlaves number of regionservers to start.
   * @return Reference to the mini hbase cluster.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
      final int numSlaves)
      throws IOException, InterruptedException {
    // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
    createRootDir();

    // These settings make the master wait until this exact number of
    // regionservers has connected.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster = new MiniHBaseCluster(c, numMasters, numSlaves);
    // Don't leave here till we've done a successful scan of the .META.
    HTable t = new HTable(c, HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getHBaseAdmin(); // create the hbaseAdmin immediately
    LOG.info("Minicluster is up");
    return (MiniHBaseCluster)this.hbaseCluster;
  }

  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
   * @param servers number of region servers
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the .META.
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing; just drain the scanner
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
  }

  /**
   * @return Current mini hbase cluster.  Only has something in it after a
   * call to {@link #startMiniCluster()}.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster)this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
        MiniHBaseCluster.class.getName());
  }

  /**
   * Stops the mini hbase, zk, and hdfs clusters.
   * @see #startMiniCluster(int)
   */
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    // Clean up our directory.
    if (this.clusterTestDir != null && this.clusterTestDir.exists()) {
      // Need to use deleteDirectory because File.delete requires the dir
      // to be empty.
      if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
          new Path(this.clusterTestDir.toString()))) {
        LOG.warn("Failed delete of " + this.clusterTestDir.toString());
      }
      this.clusterTestDir = null;
    }
    LOG.info("Minicluster is down");
  }

  /**
   * Shuts down the HBase mini cluster.  Does not shut down zk or dfs if
   * running.
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close();
      hbaseAdmin = null;
    }
    // Unset the configuration for MIN and MAX RS to start.
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shut down zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }
  }

  /**
   * Returns the path to the default root dir the minicluster uses.  Note:
   * this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   */
  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }

  /**
   * Creates an hbase rootdir in the user home directory and writes the hbase
   * version file.  Normally you won't need this; the root dir is created for
   * you as part of mini cluster startup.
   * @return Fully qualified path to the hbase root dir
   */
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  /**
   * Flushes all caches in the mini hbase cluster.
   */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /**
   * Flushes all caches in the mini hbase cluster for the given table.
   */
  public void flush(byte [] tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /**
   * Compacts all regions in the mini hbase cluster.
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /**
   * Compacts all of a table's regions in the mini hbase cluster.
   */
  public void compact(byte [] tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

  /**
   * Create a table with a single column family.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[] family)
      throws IOException {
    return createTable(tableName, new byte[][]{family});
  }

  /**
   * Create a table with the given column families.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  /**
   * Create a pre-split table spanning <code>numRegions</code> regions between
   * <code>startKey</code> and <code>endKey</code>.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
    return new HTable(getConfiguration(), tableName);
  }
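
  // Illustrative sketch (hypothetical names): creating a table pre-split into
  // ten regions with three versions per cell.
  //
  //   HTable t = util.createTable(Bytes.toBytes("myTable"),
  //       new byte[][] { Bytes.toBytes("cf") },
  //       3, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 10);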

  /**
   * Create a table, using the passed Configuration for the HTable handle.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      desc.addFamily(new HColumnDescriptor(family));
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  /**
   * Create a table with the given number of versions, using the passed
   * Configuration for the HTable handle.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c, int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }

  /**
   * Create a single-family table with the given number of versions.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /**
   * Create a table with the given number of versions.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a table with the given number of versions and block size.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Create a table where family <code>families[i]</code> keeps
   * <code>numVersions[i]</code> versions.
   * @return An HTable instance for the created table.
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
      throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

  /**
   * Drop an existing table, disabling it first if necessary.
   */
  public void deleteTable(byte[] tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + Bytes.toString(tableName) + " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }

  /**
   * Deletes every row of the given table.
   * @return An HTable instance for the (now empty) table.
   */
  public HTable truncateTable(byte [] tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    // Close the scanner we iterated; the original code leaked it and then
    // opened and immediately closed a second, useless scanner.
    resScan.close();
    return table;
  }

  /**
   * Load a table with rows keyed 'aaa' to 'zzz'.
   * @param t Table
   * @param f Family
   * @return Count of rows loaded.
   */
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }
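
  // Illustrative sketch: loadTable writes one row per three-letter key, i.e.
  // 26^3 = 17,576 rows, so a typical load-then-verify step looks like:
  //
  //   HTable t = util.createTable(Bytes.toBytes("t"), Bytes.toBytes("f"));
  //   int loaded = util.loadTable(t, Bytes.toBytes("f"));
  //   assertEquals(loaded, util.countRows(t));   // both 17576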

  /**
   * Load a table of multiple column families with rows keyed 'aaa' to 'zzz'.
   * @param t Table
   * @param f Array of families to load
   * @return Count of rows loaded.
   */
  public int loadTable(final HTable t, final byte[][] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          for (int i = 0; i < f.length; i++) {
            put.add(f[i], null, k);
          }
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

  /**
   * Load a region with rows keyed 'aaa' to 'zzz'.
   * @param r Region
   * @param f Family
   * @return Count of rows loaded.
   */
  public int loadRegion(final HRegion r, final byte[] f)
      throws IOException {
    return loadRegion(r, f, false);
  }

  /**
   * Load a region with rows keyed 'aaa' to 'zzz'.
   * @param r Region
   * @param f Family
   * @param flush flush the cache after each first-letter batch if true
   * @return Count of rows loaded.
   */
  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
      throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          if (r.getLog() == null) put.setWriteToWAL(false);
          r.put(put);
          rowCount++;
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }

  /**
   * Return the number of rows in the given table.
   */
  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  /**
   * Return the number of rows in the given table, scanning only the given
   * column families.
   */
  public int countRows(final HTable table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  /**
   * Return an MD5 digest over all the row keys in the given table, for
   * comparing table contents cheaply.
   */
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // Return the digest bytes, not MessageDigest#toString(): the latter does
    // not reflect the digested content at all.
    return Bytes.toStringBinary(digest.digest());
  }
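
  // Illustrative sketch: checksumRows gives a cheap way to assert a table's
  // row set survived an operation under test.
  //
  //   String before = util.checksumRows(table);
  //   util.flush();  // or whatever operation is being exercised
  //   assertEquals(before, util.checksumRows(table));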

  /**
   * Creates many regions, keyed "bbb" through "yyy", for the given table.
   * @param table The table to split.
   * @param columnFamily The family to insert the data into.
   * @return count of regions created.
   */
  public int createMultiRegions(HTable table, byte[] columnFamily)
      throws IOException {
    return createMultiRegions(table, columnFamily, true);
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };

  /**
   * Creates many regions, keyed "bbb" through "yyy", for the given table.
   * @param table The table to split.
   * @param columnFamily The family to insert the data into.
   * @param cleanupFS True if the original (single) region's files should be
   * removed from the filesystem afterwards.
   * @return count of regions created.
   */
  public int createMultiRegions(HTable table, byte[] columnFamily, boolean cleanupFS)
      throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily, KEYS, cleanupFS);
  }

  /**
   * Creates the specified number of regions in the specified table.
   * @param c configuration
   * @param table The table to split.
   * @param family The family to insert the data into.
   * @param numRegions number of regions; must be at least 3.
   * @return count of regions created.
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte [] family, int numRegions)
      throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte [] startKey = Bytes.toBytes("aaaaa");
    byte [] endKey = Bytes.toBytes("zzzzz");
    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] regionStartKeys = new byte[splitKeys.length + 1][];
    for (int i = 0; i < splitKeys.length; i++) {
      regionStartKeys[i + 1] = splitKeys[i];
    }
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }
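
  // Illustrative sketch: carve an existing single-region table into the
  // default 25 regions (empty start key plus "bbb" ... "yyy") and wait for
  // the assignments to settle.
  //
  //   HTable t = util.createTable(Bytes.toBytes("t"), Bytes.toBytes("f"));
  //   int regionCount = util.createMultiRegions(t, Bytes.toBytes("f"));  // 25
  //   util.waitUntilAllRegionsAssigned(regionCount);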

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte [][] startKeys) throws IOException {
    return createMultiRegions(c, table, columnFamily, startKeys, true);
  }

  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte [][] startKeys, boolean cleanupFS)
      throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }
    // The table was created with a single region; remember its rows in .META.
    // and its directory name so both can be removed once the replacement
    // regions described by startKeys have been inserted.
    List<byte[]> rows = getMetaTableRows(htd.getName());
    String regionToDeleteInFS = table
        .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
        .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // Add the custom regions.
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getTableName(),
          startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
          Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegions: inserted " + hri.toString());
      newRegions.add(hri);
      count++;
    }
    // Delete the rows belonging to the original region.
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
          Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    if (cleanupFS) {
      // Remove the original region's directory so a master scan of the
      // filesystem does not resurrect it.
      Path tableDir = new Path(getDefaultRootDirPath().toString()
          + System.getProperty("file.separator") + htd.getNameAsString()
          + System.getProperty("file.separator") + regionToDeleteInFS);
      getDFSCluster().getFileSystem().delete(tableDir, true);
    }
    // Flush the cache of region locations.
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    // Assign the new regions if the table is enabled.
    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for (HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }

  /**
   * Creates rows in .META. for the regions defined by startKeys, without
   * creating the regions themselves or assigning them.
   * @return list of region info for the regions added to .META.
   */
  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final HTableDescriptor htd, byte [][] startKeys)
      throws IOException {
    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);

    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
          startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
          Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegionsInMeta: inserted " + hri.toString());
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

  /**
   * Returns all rows from the .META. table.
   */
  public List<byte[]> getMetaTableRows() throws IOException {
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

  /**
   * Returns all rows from the .META. table for a given user table.
   */
  public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      byte[] val = result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
      if (val == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        // Skip rows without region info rather than failing the scan.
        continue;
      }
      HRegionInfo info = Writables.getHRegionInfo(val);
      if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
        LOG.info("getMetaTableRows: row -> " +
            Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

  /**
   * Tool to get a reference to the region server object holding the first
   * region of the given user table.  It searches .META. for rows belonging
   * to the table, finds which server carries the first one, and returns that
   * server's reference.
   * @param tableName user table to look up in .META.
   * @return region server that holds it, null if no row exists
   */
  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
      throws IOException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + metaRows.size() + " rows for table " +
        Bytes.toString(tableName));
    byte [] firstrow = metaRows.get(0);
    LOG.debug("FirstRow=" + Bytes.toString(firstrow));
    int index = getMiniHBaseCluster().getServerWith(firstrow);
    return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
  }

  /**
   * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>'s.
   */
  public void startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
  }

  /**
   * Starts a <code>MiniMRCluster</code>.
   * @param servers The number of <code>TaskTracker</code>'s to start.
   */
  public void startMiniMapReduceCluster(final int servers) throws IOException {
    LOG.info("Starting mini mapreduce cluster...");
    // The MR cluster needs these directories set up before it starts.
    Configuration c = getConfiguration();
    String logDir = c.get("hadoop.log.dir");
    String tmpDir = c.get("hadoop.tmp.dir");
    if (logDir == null) {
      logDir = tmpDir;
    }
    System.setProperty("hadoop.log.dir", logDir);
    c.set("mapred.output.dir", tmpDir);
    mrCluster = new MiniMRCluster(servers,
        FileSystem.get(conf).getUri().toString(), 1);
    LOG.info("Mini mapreduce cluster started");
    JobConf mrClusterJobConf = mrCluster.createJobConf();
    c.set("mapred.job.tracker", mrClusterJobConf.get("mapred.job.tracker"));
    // Below settings are for mrv2 support; mr1 ignores them.
    conf.set("mapreduce.framework.name", "yarn");
    String rmAddress = mrClusterJobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String schedulerAddress =
        mrClusterJobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }

  /**
   * Stops the previously started <code>MiniMRCluster</code>.
   */
  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }
    // Restore configuration to point to the local jobtracker.
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }

  /**
   * Switches the logger for the given class to DEBUG level.
   * @param clazz The class for which to switch to debug logging.
   */
  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }
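
  // Illustrative sketch: turn on DEBUG for a chatty component while chasing a
  // test failure (HRegionServer is just an example target).
  //
  //   util.enableDebug(org.apache.hadoop.hbase.regionserver.HRegionServer.class);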

  /**
   * Expire the Master's ZooKeeper session.
   * @throws Exception
   */
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }

  /**
   * Expire a region server's ZooKeeper session.
   * @param index which region server
   * @throws Exception
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    // The expired server will not rejoin, so the minimum regionserver count
    // must be lowered in this configuration and in each master's copy.
    decrementMinRegionServerCount(getConfiguration());

    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
          Math.max(currentCount - 1, 1));
    }
  }

  /**
   * Expire a ZooKeeper session the way the ZooKeeper documentation suggests:
   * connect a second client with the same session id and password, then close
   * it, which causes the server to expire the original session.
   * @param nodeZK the ZK watcher whose session should be expired
   * @param checkStatus true to verify afterwards that an HTable can still be
   *                    created with the current configuration.
   */
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
      throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    int sessionTimeout = 500;
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // An extra monitoring connection, created on the same session, lets us
    // log the events triggered while the session dies.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
        1000, new org.apache.zookeeper.Watcher() {
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event=" + watchedEvent);
      }
    }, sessionID, password);

    // Make the session expire.
    ZooKeeper newZK = new ZooKeeper(quorumServers,
        sessionTimeout, EmptyWatcher.instance, sessionID, password);
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // Now close the monitor.
    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), HConstants.META_TABLE_NAME).close();
    }
  }
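
  // Illustrative sketch: kicking the active master's ZK session to exercise
  // master failover in a test that starts two masters.
  //
  //   util.startMiniCluster(2, 3);
  //   util.expireMasterSession();
  //   // ...assert that the backup master takes over...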

  /**
   * Get the mini HBase cluster.
   * @return hbase cluster
   * @see #getHBaseClusterInterface()
   */
  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

  /**
   * Returns the HBaseCluster instance.
   * <p>The returned object can be any subclass of HBaseCluster; tests using
   * it should not assume a mini cluster or a distributed one.  If the test
   * only works against a mini cluster, use {@link #getMiniHBaseCluster()}
   * instead.
   */
  public HBaseCluster getHBaseClusterInterface() {
    return hbaseCluster;
  }

  /**
   * Returns an HBaseAdmin instance, lazily created and shared between callers
   * of this utility instance.  Do not close it; it is closed automatically
   * when the cluster shuts down.
   * @return The HBaseAdmin instance.
   */
  public synchronized HBaseAdmin getHBaseAdmin()
      throws IOException {
    if (hbaseAdmin == null) {
      hbaseAdmin = new HBaseAdmin(new Configuration(getConfiguration()));
    }
    return hbaseAdmin;
  }
  private HBaseAdmin hbaseAdmin = null;

  /**
   * Unassign the named region.
   * @param regionName The region to unassign.
   */
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  /**
   * Unassign the named region.
   * @param regionName The region to unassign.
   */
  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }

  /**
   * Closes the region containing the given row.
   * @param row The row to find the containing region.
   * @param table The table to find the region in.
   */
  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  /**
   * Closes the region containing the given row.
   * @param row The row to find the containing region.
   * @param table The table to find the region in.
   */
  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
    if (dfsCluster != null && dfsCluster.isClusterUp()) {
      throw new IOException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }

  /**
   * Cleans up the whole data test directory.
   * @return True if the (now non-existent) data test dir was deleted.
   */
  public boolean cleanupTestDir() throws IOException {
    if (dataTestDir == null) {
      return false;
    } else {
      boolean ret = deleteDir(getDataTestDir());
      dataTestDir = null;
      return ret;
    }
  }

  /**
   * Cleans a subdirectory of the data test directory.
   * @param subdir Test subdirectory name.
   * @return True if we removed the subdirectory.
   */
  public boolean cleanupTestDir(final String subdir) throws IOException {
    if (dataTestDir == null) {
      return false;
    }
    return deleteDir(getDataTestDir(subdir));
  }

  /**
   * Recursively deletes a directory on the test filesystem.
   * @param dir Directory to delete.
   * @return True if we deleted it.
   */
  public boolean deleteDir(final Path dir) throws IOException {
    FileSystem fs = getTestFileSystem();
    if (fs.exists(dir)) {
      // Delete the passed directory, not the whole data test dir.
      return fs.delete(dir, true);
    }
    return false;
  }

  /**
   * Waits, up to timeoutMillis, for the given table to become available;
   * fails the calling test if the timeout elapses first.
   */
  public void waitTableAvailable(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!getHBaseAdmin().isTableAvailable(table)) {
      assertTrue("Timed out waiting for table to become available " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

  /**
   * Waits, up to timeoutMillis, for the given table to be both available and
   * enabled; fails the calling test if the timeout elapses first.
   */
  public void waitTableEnabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    // Keep waiting while either condition is still unmet.
    while (!getHBaseAdmin().isTableAvailable(table) ||
           !getHBaseAdmin().isTableEnabled(table)) {
      assertTrue("Timed out waiting for table to become available and enabled " +
          Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }
  }

  /**
   * Make sure that at least the specified number of region servers are
   * running.
   * @param num minimum number of region servers that should be running
   * @return True if we started some servers
   */
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }

    return startedServer;
  }

  /**
   * Make sure that at least the specified number of region servers are
   * running.  We don't count the ones that are currently stopping or are
   * stopped.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   */
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
         getMiniHBaseCluster().getRegionServerThreads()) {

      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:" + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }

  /**
   * Returns a User whose name is the current user's plus
   * <code>differentiatingSuffix</code>.  Use it to get new FileSystem
   * instances: a distinct user defeats the FileSystem cache.  Only matters
   * for DistributedFileSystem; otherwise the current user is returned.
   * @param c Initial configuration
   * @param differentiatingSuffix Suffix to differentiate this user from others.
   * @return A user for testing, distinct from the current one.
   */
  public static User getDifferentUser(final Configuration c,
      final String differentiatingSuffix)
      throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    // Else distributed filesystem: make a new user name so the FileSystem
    // cache hands back a fresh instance rather than the shared one.
    String username = User.getCurrent().getName() +
        differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
        new String[]{"supergroup"});
    return user;
  }
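
  // Illustrative sketch: obtaining a second HDFS client identity so the
  // FileSystem cache hands back a distinct instance (the suffix is an
  // arbitrary example).
  //
  //   User other = HBaseTestingUtility.getDifferentUser(conf, ".secondClient");
  //   // then perform filesystem operations as 'other' via User#runAs(...)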

  /**
   * Sets, via reflection, the private <code>maxRecoveryErrorCount</code>
   * field on a <code>DFSClient.DFSOutputStream</code>, so block-recovery
   * retries give up sooner during tests instead of making them linger.
   * @param stream A DFSClient.DFSOutputStream.
   * @param max Maximum number of retries.
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
                stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }

  /**
   * Waits until <code>countOfRegions</code> rows in .META. carry a non-empty
   * info:server cell, i.e. all regions have been assigned.
   * @param countOfRegions How many regions in .META.
   */
  public void waitUntilAllRegionsAssigned(final int countOfRegions)
      throws IOException {
    HTable meta = new HTable(getConfiguration(), HConstants.META_TABLE_NAME);
    while (true) {
      int rows = 0;
      Scan scan = new Scan();
      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
      ResultScanner s = meta.getScanner(scan);
      for (Result r = null; (r = s.next()) != null;) {
        byte [] b =
            r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
        if (b == null || b.length <= 0) {
          break;
        }
        rows++;
      }
      s.close();
      // If every row had a server cell, all regions have been assigned.
      if (rows == countOfRegions) {
        break;
      }
      LOG.info("Found=" + rows);
      Threads.sleep(200);
    }
    meta.close();
  }

  /**
   * Do a small get/scan against one store.  This is required because a Store
   * has no methods of querying itself directly and relies on StoreScanner.
   */
  public static List<KeyValue> getFromStoreFile(Store store,
      Get get) throws IOException {
    MultiVersionConsistencyControl.resetThreadReadPoint();
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()));

    List<KeyValue> result = new ArrayList<KeyValue>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // Verify that we are on the row we want:
      KeyValue kv = result.get(0);
      if (!Bytes.equals(kv.getRow(), get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }

  /**
   * Do a small get/scan against one store.  This is required because a Store
   * has no methods of querying itself directly and relies on StoreScanner.
   */
  public static List<KeyValue> getFromStoreFile(Store store,
      byte [] row,
      NavigableSet<byte[]> columns
      ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }

  /**
   * Gets a ZooKeeperWatcher backed by the given utility's configuration.
   */
  public static ZooKeeperWatcher getZooKeeperWatcher(
      HBaseTestingUtility testUtil) throws ZooKeeperConnectionException,
      IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(testUtil.getConfiguration(),
        "unittest", new Abortable() {
          boolean aborted = false;

          @Override
          public void abort(String why, Throwable e) {
            aborted = true;
            throw new RuntimeException("Fatal ZK error, why=" + why, e);
          }

          @Override
          public boolean isAborted() {
            return aborted;
          }
        });
    return zkw;
  }

  /**
   * Creates an unassigned-region znode for the given region and forces it
   * through the OFFLINE -> OPENING -> OPENED transitions.
   */
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility testUtil, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(testUtil);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw, region
        .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
        version);
    return zkw;
  }

  /**
   * Asserts that two lists of KeyValues are equal under
   * {@link KeyValue#COMPARATOR}, reporting the first differing position.
   */
  public static void assertKVListsEqual(String additionalMsg,
      final List<KeyValue> expected,
      final List<KeyValue> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i;
    for (i = 0; i < minLen
        && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
        ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
          "Expected and actual KV arrays differ at position " + i + ": " +
          safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
          safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }

  private static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }

  /**
   * Returns the ZooKeeper cluster key for this cluster's configuration, in
   * the form <code>quorum:clientPort:znodeParent</code>.
   */
  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
        + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }
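
  // Illustrative sketch (assumed default znode parent): for a local mini
  // cluster the key typically looks like
  //
  //   localhost:<clientPort>:/hbase
  //
  // which is the form replication and other cross-cluster tests feed to a
  // peer cluster configuration.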

  /**
   * Creates a pre-split table and fills it with deterministically random
   * puts and deletes, flushing between batches so each region accumulates
   * multiple storefiles.
   */
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
        " regions, " + numFlushes + " storefiles per region, " +
        numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
        "\n");

    // Seed from the table name so the same name always produces the same data.
    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    final byte[] tableNameBytes = Bytes.toBytes(tableName);

    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableNameBytes, cfBytes,
        maxVersions,
        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
        numRegions);
    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(HConstants.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(tableNameBytes);
      }
    }

    return table;
  }

  private static final int MIN_RANDOM_PORT = 0xc000;
  private static final int MAX_RANDOM_PORT = 0xfffe;

  /**
   * Returns a random port in the dynamic/private range (49152-65534); such
   * ports cannot be registered with IANA and are intended for dynamic
   * allocation.
   */
  public static int randomPort() {
    return MIN_RANDOM_PORT
        + new Random().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
  }

  /**
   * Returns a random port that is currently free, verified by briefly binding
   * a ServerSocket to it.
   */
  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        port = 0;
      }
    } while (port == 0);
    return port;
  }
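
  // Illustrative sketch: picking a free port for an auxiliary server in a
  // test.  Note the inherent race: the port is only known to be free at probe
  // time, so bind the server as soon as possible after this call.
  //
  //   int infoPort = HBaseTestingUtility.randomFreePort();
  //   conf.setInt("hbase.master.info.port", infoPort);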

  /**
   * Waits, up to ten seconds, for a server to come up at the given host and
   * port; rethrows the last connect failure on timeout.
   */
  public static void waitForHostPort(String host, int port)
      throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }

  /**
   * Creates a pre-split table for load testing: one column family with the
   * given compression and data block encoding, split into
   * {@link #DEFAULT_REGIONS_PER_SERVER} regions per live regionserver.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      byte[] tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    desc.addFamily(hcd);

    int totalNumberOfRegions = 0;
    try {
      HBaseAdmin admin = new HBaseAdmin(conf);

      // Create a table with regions covering the whole keyspace, so the load
      // spreads over all regionservers instead of hotspotting one of them.
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
          "pre-splitting table into " + totalNumberOfRegions + " regions " +
          "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
          totalNumberOfRegions);

      admin.createTable(desc, splits);
      admin.close();
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + Bytes.toStringBinary(tableName) +
          " already exists, continuing");
    }
    return totalNumberOfRegions;
  }
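
  // Illustrative sketch: a load test setting up a GZ-compressed, unencoded,
  // pre-split table before hammering it with writers.
  //
  //   int regions = HBaseTestingUtility.createPreSplitLoadTestTable(conf,
  //       Bytes.toBytes("loadtest"), Bytes.toBytes("cf"),
  //       Compression.Algorithm.GZ, DataBlockEncoding.NONE);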

  /**
   * Returns the port of the regionserver currently carrying the first region
   * of .META.
   */
  public static int getMetaRSPort(Configuration conf) throws IOException {
    HTable table = new HTable(conf, HConstants.META_TABLE_NAME);
    HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
    table.close();
    return hloc.getPort();
  }

  /**
   * Creates a standalone HRegion spanning the whole key range, under the
   * data test directory, for tests that do not need a full cluster.
   */
  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(hcd);
    HRegionInfo info =
        new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
    HRegion region =
        HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

}