1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18 package org.apache.hadoop.hbase;
19
20 import static org.junit.Assert.assertTrue;
21 import static org.junit.Assert.fail;
22
23 import java.io.File;
24 import java.io.IOException;
25 import java.io.OutputStream;
26 import java.lang.reflect.Field;
27 import java.lang.reflect.Modifier;
28 import java.net.InetAddress;
29 import java.net.ServerSocket;
30 import java.net.Socket;
31 import java.net.UnknownHostException;
32 import java.security.MessageDigest;
33 import java.util.ArrayList;
34 import java.util.Arrays;
35 import java.util.Collection;
36 import java.util.Collections;
37 import java.util.HashSet;
38 import java.util.List;
39 import java.util.Map;
40 import java.util.NavigableSet;
41 import java.util.Random;
42 import java.util.Set;
43 import java.util.UUID;
44 import java.util.concurrent.TimeUnit;
45
46 import org.apache.commons.logging.Log;
47 import org.apache.commons.logging.LogFactory;
48 import org.apache.commons.logging.impl.Jdk14Logger;
49 import org.apache.commons.logging.impl.Log4JLogger;
50 import org.apache.hadoop.classification.InterfaceAudience;
51 import org.apache.hadoop.classification.InterfaceStability;
52 import org.apache.hadoop.conf.Configuration;
53 import org.apache.hadoop.fs.FileSystem;
54 import org.apache.hadoop.fs.Path;
55 import org.apache.hadoop.hbase.Waiter.Predicate;
56 import org.apache.hadoop.hbase.catalog.MetaEditor;
57 import org.apache.hadoop.hbase.client.Delete;
58 import org.apache.hadoop.hbase.client.Durability;
59 import org.apache.hadoop.hbase.client.Get;
60 import org.apache.hadoop.hbase.client.HBaseAdmin;
61 import org.apache.hadoop.hbase.client.HConnection;
62 import org.apache.hadoop.hbase.client.HTable;
63 import org.apache.hadoop.hbase.client.Put;
64 import org.apache.hadoop.hbase.client.Result;
65 import org.apache.hadoop.hbase.client.ResultScanner;
66 import org.apache.hadoop.hbase.client.Scan;
67 import org.apache.hadoop.hbase.fs.HFileSystem;
68 import org.apache.hadoop.hbase.io.compress.Compression;
69 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
70 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
71 import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
72 import org.apache.hadoop.hbase.io.hfile.HFile;
73 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
74 import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
75 import org.apache.hadoop.hbase.master.HMaster;
76 import org.apache.hadoop.hbase.master.RegionStates;
77 import org.apache.hadoop.hbase.master.ServerManager;
78 import org.apache.hadoop.hbase.regionserver.BloomType;
79 import org.apache.hadoop.hbase.regionserver.HRegion;
80 import org.apache.hadoop.hbase.regionserver.HRegionServer;
81 import org.apache.hadoop.hbase.regionserver.HStore;
82 import org.apache.hadoop.hbase.regionserver.InternalScanner;
83 import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
84 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
85 import org.apache.hadoop.hbase.regionserver.wal.HLog;
86 import org.apache.hadoop.hbase.security.User;
87 import org.apache.hadoop.hbase.tool.Canary;
88 import org.apache.hadoop.hbase.util.Bytes;
89 import org.apache.hadoop.hbase.util.FSUtils;
90 import org.apache.hadoop.hbase.util.JVMClusterUtil;
91 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
92 import org.apache.hadoop.hbase.util.RegionSplitter;
93 import org.apache.hadoop.hbase.util.RetryCounter;
94 import org.apache.hadoop.hbase.util.Threads;
95 import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
96 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
97 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
98 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
99 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
100 import org.apache.hadoop.hdfs.DFSClient;
101 import org.apache.hadoop.hdfs.DistributedFileSystem;
102 import org.apache.hadoop.hdfs.MiniDFSCluster;
103 import org.apache.hadoop.mapred.JobConf;
104 import org.apache.hadoop.mapred.MiniMRCluster;
105 import org.apache.hadoop.mapred.TaskLog;
106 import org.apache.zookeeper.KeeperException;
107 import org.apache.zookeeper.KeeperException.NodeExistsException;
108 import org.apache.zookeeper.WatchedEvent;
109 import org.apache.zookeeper.ZooKeeper;
110 import org.apache.zookeeper.ZooKeeper.States;
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126 @InterfaceAudience.Public
127 @InterfaceStability.Evolving
128 public class HBaseTestingUtility extends HBaseCommonTestingUtility {
// Mini ZK quorum backing this utility; null until started or injected via setZkCluster.
private MiniZooKeeperCluster zkCluster = null;

// Default region count per server; presumably used by pre-split helpers
// defined later in this file — TODO confirm against full source.
private static int DEFAULT_REGIONS_PER_SERVER = 5;

// True when the ZK cluster was handed to us from outside; shutdownMiniCluster
// then leaves it running (see the !passedZkCluster check there).
private boolean passedZkCluster = false;
private MiniDFSCluster dfsCluster = null;

// The HBase cluster under test (mini or otherwise).
private HBaseCluster hbaseCluster = null;
private MiniMRCluster mrCluster = null;

// Guard so only one mini cluster runs per utility instance.
private boolean miniClusterRunning;

// Where hadoop log output was directed (set by createDirsAndSetProperties).
private String hadoopLogDir;

// Directory on the local FS used by the mini dfs cluster for its data.
private File clusterTestDir = null;

// Directory on the test FileSystem where test data is written; lazily
// initialized and reset whenever the underlying filesystem changes.
private Path dataTestDirOnTestFS = null;

// System property read by hadoop mini clusters to locate their data dirs.
private static final String TEST_DIRECTORY_KEY = "test.build.data";

// Filesystem URI used for the mini clusters — NOTE(review): not assigned in
// the visible portion of this file; verify usage before relying on it.
private static String FS_URI;

// Ports already handed out; presumably consulted when choosing random ports
// elsewhere in this class — TODO confirm against full source.
private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();

// Compression algorithms exercised by parameterized tests.
public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
  Arrays.asList(new Object[][] {
    { Compression.Algorithm.NONE },
    { Compression.Algorithm.GZ }
  });
180
181
182 public static final List<Object[]> BOOLEAN_PARAMETERIZED =
183 Arrays.asList(new Object[][] {
184 { new Boolean(false) },
185 { new Boolean(true) }
186 });
187
188
// Compression algorithms exercised by compression-related tests.
public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS ={
  Compression.Algorithm.NONE, Compression.Algorithm.GZ
};

/**
 * Builds every (compression algorithm, bloom type) pair from
 * {@link #COMPRESSION_ALGORITHMS} crossed with {@link BloomType#values()}.
 * @return an unmodifiable list of {compression, bloomType} parameter pairs
 */
private static List<Object[]> bloomAndCompressionCombinations() {
  List<Object[]> configurations = new ArrayList<Object[]>();
  for (Compression.Algorithm comprAlgo :
       HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    for (BloomType bloomType : BloomType.values()) {
      configurations.add(new Object[] { comprAlgo, bloomType });
    }
  }
  return Collections.unmodifiableList(configurations);
}

// All bloom/compression combinations, for parameterized tests.
public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
    bloomAndCompressionCombinations();
210
/** Creates a utility backed by a fresh default HBase configuration. */
public HBaseTestingUtility() {
  this(HBaseConfiguration.create());
}

/**
 * Creates a utility backed by the given configuration.
 * @param conf configuration the mini clusters and helpers will use
 */
public HBaseTestingUtility(Configuration conf) {
  super(conf);

  // Make checksum failures throw during tests rather than pass silently.
  ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
}
221
222
223
224
225
226
227
/**
 * Creates a utility for local-mode use, with a fresh default configuration
 * whose hbase root dir points at the data test dir.
 * @return a new local-mode testing utility
 */
public static HBaseTestingUtility createLocalHTU() {
  Configuration c = HBaseConfiguration.create();
  return createLocalHTU(c);
}

/**
 * Creates a utility for local-mode use with the given configuration,
 * pointing {@code hbase.rootdir} at the utility's data test dir.
 * @param c configuration to wrap
 * @return a new local-mode testing utility
 */
public static HBaseTestingUtility createLocalHTU(Configuration c) {
  HBaseTestingUtility htu = new HBaseTestingUtility(c);
  String dataTestDir = htu.getDataTestDir().toString();
  htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
  LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
  return htu;
}
247
248
249
250
251
252
253
254
255
256
257
258
/**
 * Returns this utility's live configuration instance. Changes made to it
 * are seen by anything later built from this utility.
 */
@Override
public Configuration getConfiguration() {
  return super.getConfiguration();
}

/** Injects an externally managed HBase cluster for this utility to use. */
public void setHBaseCluster(HBaseCluster hbaseCluster) {
  this.hbaseCluster = hbaseCluster;
}
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284 @Override
285 protected Path setupDataTestDir() {
286 Path testPath = super.setupDataTestDir();
287 if (null == testPath) {
288 return null;
289 }
290
291 createSubDirAndSystemProperty(
292 "hadoop.log.dir",
293 testPath, "hadoop-log-dir");
294
295
296
297 createSubDirAndSystemProperty(
298 "hadoop.tmp.dir",
299 testPath, "hadoop-tmp-dir");
300
301
302 createSubDir(
303 "mapred.local.dir",
304 testPath, "mapred-local-dir");
305
306 return testPath;
307 }
308
309 private void createSubDirAndSystemProperty(
310 String propertyName, Path parent, String subDirName){
311
312 String sysValue = System.getProperty(propertyName);
313
314 if (sysValue != null) {
315
316
317 LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
318 sysValue + " so I do NOT create it in " + parent);
319 String confValue = conf.get(propertyName);
320 if (confValue != null && !confValue.endsWith(sysValue)){
321 LOG.warn(
322 propertyName + " property value differs in configuration and system: "+
323 "Configuration="+confValue+" while System="+sysValue+
324 " Erasing configuration value by system value."
325 );
326 }
327 conf.set(propertyName, sysValue);
328 } else {
329
330 createSubDir(propertyName, parent, subDirName);
331 System.setProperty(propertyName, conf.get(propertyName));
332 }
333 }
334
335
336
337
338
339
340
/**
 * @return the base "test-data" dir on the test FileSystem, under the
 *         filesystem's working directory
 * @throws IOException if the test filesystem cannot be obtained
 */
private Path getBaseTestDirOnTestFS() throws IOException {
  FileSystem fs = getTestFileSystem();
  return new Path(fs.getWorkingDirectory(), "test-data");
}

/**
 * @return the local-FS cluster test dir, creating it on first access
 */
Path getClusterTestDir() {
  if (clusterTestDir == null){
    setupClusterTestDir();
  }
  return new Path(clusterTestDir.getAbsolutePath());
}
357
358
359
360
361 private void setupClusterTestDir() {
362 if (clusterTestDir != null) {
363 return;
364 }
365
366
367
368 Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
369 clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
370
371 boolean b = deleteOnExit();
372 if (b) clusterTestDir.deleteOnExit();
373 conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
374 LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
375 }
376
377
378
379
380
381
382
/**
 * @return the data test dir on the test FileSystem, creating it lazily
 * @throws IOException if the test filesystem cannot be obtained
 */
public Path getDataTestDirOnTestFS() throws IOException {
  if (dataTestDirOnTestFS == null) {
    setupDataTestDirOnTestFS();
  }

  return dataTestDirOnTestFS;
}

/**
 * @param subdirName name of a child under the data test dir
 * @return a path for {@code subdirName} under the data test dir on test FS
 * @throws IOException if the test filesystem cannot be obtained
 */
public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
  return new Path(getDataTestDirOnTestFS(), subdirName);
}
401
402
403
404
405 private void setupDataTestDirOnTestFS() throws IOException {
406 if (dataTestDirOnTestFS != null) {
407 LOG.warn("Data test on test fs dir already setup in "
408 + dataTestDirOnTestFS.toString());
409 return;
410 }
411
412
413
414
415
416 FileSystem fs = getTestFileSystem();
417 if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
418 File dataTestDir = new File(getDataTestDir().toString());
419 if (deleteOnExit()) dataTestDir.deleteOnExit();
420 dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
421 } else {
422 Path base = getBaseTestDirOnTestFS();
423 String randomStr = UUID.randomUUID().toString();
424 dataTestDirOnTestFS = new Path(base, randomStr);
425 if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
426 }
427 }
428
429
430
431
432
433
434 public boolean cleanupDataTestDirOnTestFS() throws IOException {
435 boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
436 if (ret)
437 dataTestDirOnTestFS = null;
438 return ret;
439 }
440
441
442
443
444
445
/**
 * Deletes one subdirectory of the data test dir on the test FileSystem.
 * @param subdirName child dir to delete
 * @return true if deletion succeeded
 * @throws IOException on filesystem error
 */
public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
  Path cpath = getDataTestDirOnTestFS(subdirName);
  return getTestFileSystem().delete(cpath, true);
}
450
451
452
453
454
455
456
457
/**
 * Starts a mini dfs cluster with the given number of datanodes.
 * @param servers datanode count
 * @return the started cluster
 */
public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
  return startMiniDFSCluster(servers, null);
}

/**
 * Starts a mini dfs cluster with one datanode per supplied hostname,
 * or a single datanode when no hostnames are given.
 * @param hosts datanode hostnames, may be null or empty
 * @return the started cluster
 */
public MiniDFSCluster startMiniDFSCluster(final String hosts[])
throws Exception {
  if ( hosts != null && hosts.length != 0) {
    return startMiniDFSCluster(hosts.length, hosts);
  } else {
    return startMiniDFSCluster(1, null);
  }
}
481
482
483
484
485
486
487
488
489
490
/**
 * Starts a mini dfs cluster, makes it this utility's default filesystem,
 * and waits for it to come fully up.
 * @param servers datanode count
 * @param hosts datanode hostnames, or null
 * @return the started cluster
 */
public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
throws Exception {
  createDirsAndSetProperties();

  // Raise these metrics loggers to ERROR to suppress their lower-level
  // output while the minicluster runs.
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
      setLevel(org.apache.log4j.Level.ERROR);
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
      setLevel(org.apache.log4j.Level.ERROR);

  this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

  // Point the default filesystem at the new mini dfs.
  FileSystem fs = this.dfsCluster.getFileSystem();
  FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

  // Block until all datanodes have reported in.
  this.dfsCluster.waitClusterUp();

  // The test dir on the (old) test FS is now stale; recompute lazily.
  dataTestDirOnTestFS = null;

  return this.dfsCluster;
}
517
518
/**
 * Starts a mini dfs cluster with explicit rack and host assignments,
 * makes it the default filesystem, and waits for it to come up.
 * @param servers datanode count
 * @param racks rack name per datanode, or null
 * @param hosts hostname per datanode, or null
 * @return the started cluster
 */
public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
throws Exception {
  createDirsAndSetProperties();
  this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, racks, hosts, null);

  // Point the default filesystem at the new mini dfs.
  FileSystem fs = this.dfsCluster.getFileSystem();
  FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

  // Block until all datanodes have reported in.
  this.dfsCluster.waitClusterUp();

  // The test dir on the (old) test FS is now stale; recompute lazily.
  dataTestDirOnTestFS = null;

  return this.dfsCluster;
}

/**
 * Starts a 5-datanode mini dfs cluster on a fixed namenode port, without
 * formatting — used by HLog tests. Does not wait for cluster-up and does
 * not change the default filesystem.
 * @param namenodePort port for the namenode to listen on
 */
public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
  createDirsAndSetProperties();
  dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
      null, null, null);
  return dfsCluster;
}
544
545
/**
 * Prepares the working directories used by the HDFS / map-reduce mini
 * clusters and publishes them via system properties and the configuration,
 * then enables short-circuit reads if requested.
 */
private void createDirsAndSetProperties() throws IOException {
  setupClusterTestDir();
  System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
  createDirAndSetProperty("cache_data", "test.cache.data");
  createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
  hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
  createDirAndSetProperty("mapred_local", "mapred.local.dir");
  createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
  enableShortCircuit();

  // Mapred dirs live on the test filesystem (config only; no system props).
  Path root = getDataTestDirOnTestFS("hadoop");
  conf.set(MapreduceTestingShim.getMROutputDirProp(),
    new Path(root, "mapred-output-dir").toString());
  conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
  conf.set("mapreduce.jobtracker.staging.root.dir",
    new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
  conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
}
564
565
566
567
568
569
570
571 public boolean isReadShortCircuitOn(){
572 final String propName = "hbase.tests.use.shortcircuit.reads";
573 String readOnProp = System.getProperty(propName);
574 if (readOnProp != null){
575 return Boolean.parseBoolean(readOnProp);
576 } else {
577 return conf.getBoolean(propName, false);
578 }
579 }
580
581
582
583
584 private void enableShortCircuit() {
585 if (isReadShortCircuitOn()) {
586 String curUser = System.getProperty("user.name");
587 LOG.info("read short circuit is ON for user " + curUser);
588
589 conf.set("dfs.block.local-path-access.user", curUser);
590
591 conf.setBoolean("dfs.client.read.shortcircuit", true);
592
593 conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
594 } else {
595 LOG.info("read short circuit is OFF");
596 }
597 }
598
599 private String createDirAndSetProperty(final String relPath, String property) {
600 String path = getDataTestDir(relPath).toString();
601 System.setProperty(property, path);
602 conf.set(property, path);
603 new File(path).mkdirs();
604 LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
605 return path;
606 }
607
608
609
610
611
612
/**
 * Shuts down the mini dfs cluster if one is running, clears the cached
 * test-FS dir, and resets the default filesystem to local.
 */
public void shutdownMiniDFSCluster() throws IOException {
  if (this.dfsCluster != null) {
    this.dfsCluster.shutdown();
    dfsCluster = null;
    // The dir on the now-dead filesystem is stale.
    dataTestDirOnTestFS = null;
    FSUtils.setFsDefault(this.conf, new Path("file:///"));
  }
}
622
623
624
625
626
627
628
629
/**
 * Starts a single-server mini ZK cluster in the cluster test dir.
 * @return the running mini ZK cluster
 */
public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
  return startMiniZKCluster(1);
}

/**
 * Starts a mini ZK cluster with the given quorum size in the cluster
 * test dir.
 * @param zooKeeperServerNum quorum size
 * @return the running mini ZK cluster
 */
public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
  throws Exception {
  setupClusterTestDir();
  return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
}

// Single-server variant rooted at an explicit directory.
private MiniZooKeeperCluster startMiniZKCluster(final File dir)
  throws Exception {
  return startMiniZKCluster(dir,1);
}
652
653
654
655
656
657 private MiniZooKeeperCluster startMiniZKCluster(final File dir,
658 int zooKeeperServerNum)
659 throws Exception {
660 if (this.zkCluster != null) {
661 throw new IOException("Cluster already running at " + dir);
662 }
663 this.passedZkCluster = false;
664 this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
665 final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
666 if (defPort > 0){
667
668 this.zkCluster.setDefaultClientPort(defPort);
669 }
670 int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum);
671 this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
672 Integer.toString(clientPort));
673 return this.zkCluster;
674 }
675
676
677
678
679
680
681
682 public void shutdownMiniZKCluster() throws IOException {
683 if (this.zkCluster != null) {
684 this.zkCluster.shutdown();
685 this.zkCluster = null;
686 }
687 }
688
689
690
691
692
693
694
/**
 * Starts a minicluster with one master and one region server.
 * @return the running MiniHBaseCluster
 */
public MiniHBaseCluster startMiniCluster() throws Exception {
  return startMiniCluster(1, 1);
}

/**
 * Starts a minicluster with one master and {@code numSlaves} region servers.
 * @param numSlaves region server (and datanode) count
 * @return the running MiniHBaseCluster
 */
public MiniHBaseCluster startMiniCluster(final int numSlaves)
throws Exception {
  return startMiniCluster(1, numSlaves);
}

/**
 * Starts a minicluster with the given master and slave counts.
 * @param numMasters master count
 * @param numSlaves region server (and datanode) count
 * @return the running MiniHBaseCluster
 */
public MiniHBaseCluster startMiniCluster(final int numMasters,
  final int numSlaves)
throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
}
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
/**
 * Starts a minicluster; datanode count follows {@code numSlaves} unless
 * explicit datanode hostnames are supplied.
 * @param numMasters master count
 * @param numSlaves region server count
 * @param dataNodeHosts datanode hostnames, or null
 * @return the running MiniHBaseCluster
 */
public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, final String[] dataNodeHosts) throws Exception {
  return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
}

/**
 * Starts a minicluster with an independent datanode count.
 * @param numMasters master count
 * @param numSlaves region server count
 * @param numDataNodes datanode count
 * @return the running MiniHBaseCluster
 */
public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, final int numDataNodes) throws Exception {
  return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
}
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
/**
 * Starts a minicluster with custom master/regionserver implementations;
 * datanode count follows {@code numSlaves}.
 * @param masterClass master implementation, or null for the default
 * @param regionserverClass region server implementation, or null for default
 * @return the running MiniHBaseCluster
 */
public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
        throws Exception {
  return startMiniCluster(
      numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
}
802
803
804
805
806
807
/**
 * Starts the full minicluster: dfs first, then zookeeper (unless one was
 * injected), then hbase. Only one minicluster may run per utility instance.
 * @param numMasters master count
 * @param numSlaves region server count
 * @param numDataNodes datanode count; overridden by dataNodeHosts length
 * @param dataNodeHosts datanode hostnames, or null
 * @param masterClass master implementation, or null for the default
 * @param regionserverClass region server implementation, or null for default
 * @return the running MiniHBaseCluster
 */
public MiniHBaseCluster startMiniCluster(final int numMasters,
  final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
  Class<? extends HMaster> masterClass,
  Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
throws Exception {
  // Explicit hostnames win over the requested datanode count.
  if (dataNodeHosts != null && dataNodeHosts.length != 0) {
    numDataNodes = dataNodeHosts.length;
  }

  LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
      numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

  // Refuse to stack a second cluster on top of a running one.
  if (miniClusterRunning) {
    throw new IllegalStateException("A mini-cluster is already running");
  }
  miniClusterRunning = true;

  setupClusterTestDir();
  System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

  // Bring up the filesystem first; hbase needs it for its root dir.
  startMiniDFSCluster(numDataNodes, dataNodeHosts);

  // Start a ZK quorum unless the caller already provided one.
  if (this.zkCluster == null) {
    startMiniZKCluster(clusterTestDir);
  }

  // Finally start hbase itself.
  return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
}
841
/**
 * Starts the hbase portion of the minicluster with default master and
 * region server implementations.
 */
public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
throws IOException, InterruptedException{
  return startMiniHBaseCluster(numMasters, numSlaves, null, null);
}
846
847
848
849
850
851
852
853
854
855
856
857
858 public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
859 final int numSlaves, Class<? extends HMaster> masterClass,
860 Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
861 throws IOException, InterruptedException {
862
863 createRootDir();
864
865
866
867 if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
868 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
869 }
870 if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
871 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
872 }
873
874 Configuration c = new Configuration(this.conf);
875 this.hbaseCluster =
876 new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
877
878 HTable t = new HTable(c, TableName.META_TABLE_NAME);
879 ResultScanner s = t.getScanner(new Scan());
880 while (s.next() != null) {
881 continue;
882 }
883 s.close();
884 t.close();
885
886 getHBaseAdmin();
887 LOG.info("Minicluster is up");
888 return (MiniHBaseCluster)this.hbaseCluster;
889 }
890
891
892
893
894
895
896
897 public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
898 this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
899
900 HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
901 ResultScanner s = t.getScanner(new Scan());
902 while (s.next() != null) {
903
904 }
905 LOG.info("HBase has been restarted");
906 s.close();
907 t.close();
908 }
909
910
911
912
913
914
915 public MiniHBaseCluster getMiniHBaseCluster() {
916 if (this.hbaseCluster instanceof MiniHBaseCluster) {
917 return (MiniHBaseCluster)this.hbaseCluster;
918 }
919 throw new RuntimeException(hbaseCluster + " not an instance of " +
920 MiniHBaseCluster.class.getName());
921 }
922
923
924
925
926
927
/**
 * Shuts down the whole minicluster in reverse start order: hbase, then
 * zookeeper (only if we started it ourselves), then dfs, then cleans the
 * test dir and clears the running flag.
 */
public void shutdownMiniCluster() throws Exception {
  LOG.info("Shutting down minicluster");
  shutdownMiniHBaseCluster();
  // Leave an externally supplied ZK cluster running; it isn't ours to stop.
  if (!this.passedZkCluster){
    shutdownMiniZKCluster();
  }
  shutdownMiniDFSCluster();

  cleanupTestDir();
  miniClusterRunning = false;
  LOG.info("Minicluster is down");
}
940
941
942
943
944
/**
 * Shuts down the hbase portion of the minicluster: closes the cached
 * admin and watcher (fields declared elsewhere in this class), resets the
 * region-server start thresholds, then stops the cluster and waits for it
 * to be fully down.
 */
public void shutdownMiniHBaseCluster() throws IOException {
  if (hbaseAdmin != null) {
    // close0: presumably an internal close variant on the cached admin —
    // declared outside this view; confirm against full source.
    hbaseAdmin.close0();
    hbaseAdmin = null;
  }

  if (zooKeeperWatcher != null) {
    zooKeeperWatcher.close();
    zooKeeperWatcher = null;
  }

  // Undo the thresholds set by startMiniHBaseCluster so a later start
  // can pick its own values.
  conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
  conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
  if (this.hbaseCluster != null) {
    this.hbaseCluster.shutdown();
    // Wait till hbase is down before going on to shut down zk.
    this.hbaseCluster.waitUntilShutDown();
    this.hbaseCluster = null;
  }
}
966
967
968
969
970
971
972
/**
 * @return the default hbase root dir: "hbase" under the filesystem's
 *         home directory, fully qualified
 */
public Path getDefaultRootDirPath() throws IOException {
  FileSystem fs = FileSystem.get(this.conf);
  return new Path(fs.makeQualified(fs.getHomeDirectory()),"hbase");
}

/**
 * Creates the hbase root dir on the current default filesystem, records
 * it in the configuration, and writes the hbase version file into it.
 * @return the root dir path
 */
public Path createRootDir() throws IOException {
  FileSystem fs = FileSystem.get(this.conf);
  Path hbaseRootdir = getDefaultRootDirPath();
  FSUtils.setRootDir(this.conf, hbaseRootdir);
  fs.mkdirs(hbaseRootdir);
  FSUtils.setVersion(fs, hbaseRootdir);
  return hbaseRootdir;
}
994
995
996
997
998
/** Flushes all caches in the mini hbase cluster. */
public void flush() throws IOException {
  getMiniHBaseCluster().flushcache();
}

/** Flushes all caches of the given table in the mini hbase cluster. */
public void flush(TableName tableName) throws IOException {
  getMiniHBaseCluster().flushcache(tableName);
}

/**
 * Compacts all regions in the mini hbase cluster.
 * @param major true for a major compaction
 */
public void compact(boolean major) throws IOException {
  getMiniHBaseCluster().compact(major);
}

/**
 * Compacts all regions of the given table in the mini hbase cluster.
 * @param major true for a major compaction
 */
public void compact(TableName tableName, boolean major) throws IOException {
  getMiniHBaseCluster().compact(tableName, major);
}
1026
1027
1028
1029
1030
1031
1032
1033
/**
 * Creates a table with a single family.
 * @return an HTable against the new table
 */
public HTable createTable(String tableName, String family)
throws IOException{
  return createTable(TableName.valueOf(tableName), new String[]{family});
}

/**
 * Creates a table with a single family.
 * @return an HTable against the new table
 */
public HTable createTable(byte[] tableName, byte[] family)
throws IOException{
  return createTable(TableName.valueOf(tableName), new byte[][]{family});
}
1050
1051
1052
1053
1054
1055
1056
1057
1058 public HTable createTable(TableName tableName, String[] families)
1059 throws IOException {
1060 List<byte[]> fams = new ArrayList<byte[]>(families.length);
1061 for (String family : families) {
1062 fams.add(Bytes.toBytes(family));
1063 }
1064 return createTable(tableName, fams.toArray(new byte[0][]));
1065 }
1066
1067
1068
1069
1070
1071
1072
1073
/**
 * Creates a table with a single family.
 * @return an HTable against the new table
 */
public HTable createTable(TableName tableName, byte[] family)
throws IOException{
  return createTable(tableName, new byte[][]{family});
}

/**
 * Creates a table with the given families, using a copy of this
 * utility's configuration for the returned HTable.
 * @return an HTable against the new table
 */
public HTable createTable(byte[] tableName, byte[][] families)
throws IOException {
  return createTable(tableName, families,
      new Configuration(getConfiguration()));
}

/**
 * Creates a table with the given families, using a copy of this
 * utility's configuration for the returned HTable.
 * @return an HTable against the new table
 */
public HTable createTable(TableName tableName, byte[][] families)
throws IOException {
  return createTable(tableName, families,
      new Configuration(getConfiguration()));
}

/** Pre-split variant; delegates to the TableName overload. */
public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
  return createTable(TableName.valueOf(tableName), families, numVersions,
      startKey, endKey, numRegions);
}

/** Pre-split variant; delegates to the TableName overload. */
public HTable createTable(String tableName, byte[][] families,
    int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
  return createTable(TableName.valueOf(tableName), families, numVersions,
      startKey, endKey, numRegions);
}
1117
1118 public HTable createTable(TableName tableName, byte[][] families,
1119 int numVersions, byte[] startKey, byte[] endKey, int numRegions)
1120 throws IOException{
1121 HTableDescriptor desc = new HTableDescriptor(tableName);
1122 for (byte[] family : families) {
1123 HColumnDescriptor hcd = new HColumnDescriptor(family)
1124 .setMaxVersions(numVersions);
1125 desc.addFamily(hcd);
1126 }
1127 getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
1128
1129 waitUntilAllRegionsAssigned(tableName);
1130 return new HTable(getConfiguration(), tableName);
1131 }
1132
1133
1134
1135
1136
1137
1138
1139
1140
/**
 * Adds the given families (bloom filters disabled) to the supplied
 * descriptor, creates the table, and waits for all regions to be assigned.
 * @param htd descriptor to populate and create (mutated in place)
 * @param families column family names
 * @param c configuration for the returned HTable
 * @return an HTable against the new table
 */
public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
throws IOException {
  for(byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    // Bloom filters are explicitly disabled for these test tables.
    hcd.setBloomFilterType(BloomType.NONE);
    htd.addFamily(hcd);
  }
  getHBaseAdmin().createTable(htd);
  // Don't return until every region is online.
  waitUntilAllRegionsAssigned(htd.getTableName());
  return new HTable(c, htd.getTableName());
}
1156
1157
1158
1159
1160
1161
1162
1163
1164
/**
 * Creates a table with the given families using an explicit configuration
 * for the returned HTable.
 * @return an HTable against the new table
 */
public HTable createTable(TableName tableName, byte[][] families,
    final Configuration c)
throws IOException {
  return createTable(new HTableDescriptor(tableName), families, c);
}
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179 public HTable createTable(byte[] tableName, byte[][] families,
1180 final Configuration c)
1181 throws IOException {
1182 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1183 for(byte[] family : families) {
1184 HColumnDescriptor hcd = new HColumnDescriptor(family);
1185
1186
1187
1188 hcd.setBloomFilterType(BloomType.NONE);
1189 desc.addFamily(hcd);
1190 }
1191 getHBaseAdmin().createTable(desc);
1192 return new HTable(c, tableName);
1193 }
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
/**
 * Creates a table with the given families, each capped at
 * {@code numVersions} versions, and waits for all regions to be assigned.
 * @param c configuration for the returned HTable
 * @return an HTable against the new table
 */
public HTable createTable(TableName tableName, byte[][] families,
    final Configuration c, int numVersions)
throws IOException {
  HTableDescriptor desc = new HTableDescriptor(tableName);
  for(byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family)
        .setMaxVersions(numVersions);
    desc.addFamily(hcd);
  }
  getHBaseAdmin().createTable(desc);
  // Don't return until every region is online.
  waitUntilAllRegionsAssigned(tableName);
  return new HTable(c, tableName);
}
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228 public HTable createTable(byte[] tableName, byte[][] families,
1229 final Configuration c, int numVersions)
1230 throws IOException {
1231 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1232 for(byte[] family : families) {
1233 HColumnDescriptor hcd = new HColumnDescriptor(family)
1234 .setMaxVersions(numVersions);
1235 desc.addFamily(hcd);
1236 }
1237 getHBaseAdmin().createTable(desc);
1238 return new HTable(c, tableName);
1239 }
1240
1241
1242
1243
1244
1245
1246
1247
1248
/**
 * Creates a single-family table capped at {@code numVersions} versions.
 * @return an HTable against the new table
 */
public HTable createTable(byte[] tableName, byte[] family, int numVersions)
throws IOException {
  return createTable(tableName, new byte[][]{family}, numVersions);
}

/**
 * Creates a single-family table capped at {@code numVersions} versions.
 * @return an HTable against the new table
 */
public HTable createTable(TableName tableName, byte[] family, int numVersions)
throws IOException {
  return createTable(tableName, new byte[][]{family}, numVersions);
}

/**
 * Creates a multi-family table capped at {@code numVersions} versions.
 * @return an HTable against the new table
 */
public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions)
throws IOException {
  return createTable(TableName.valueOf(tableName), families, numVersions);
}
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289 public HTable createTable(TableName tableName, byte[][] families,
1290 int numVersions)
1291 throws IOException {
1292 HTableDescriptor desc = new HTableDescriptor(tableName);
1293 for (byte[] family : families) {
1294 HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1295 desc.addFamily(hcd);
1296 }
1297 getHBaseAdmin().createTable(desc);
1298
1299 waitUntilAllRegionsAssigned(tableName);
1300 return new HTable(new Configuration(getConfiguration()), tableName);
1301 }
1302
1303
1304
1305
1306
1307
1308
1309
1310
/**
 * Creates a table with the given versions cap and block size; delegates
 * to the TableName overload.
 * @return an HTable against the new table
 */
public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions, int blockSize) throws IOException {
  return createTable(TableName.valueOf(tableName),
      families, numVersions, blockSize);
}
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325 public HTable createTable(TableName tableName, byte[][] families,
1326 int numVersions, int blockSize) throws IOException {
1327 HTableDescriptor desc = new HTableDescriptor(tableName);
1328 for (byte[] family : families) {
1329 HColumnDescriptor hcd = new HColumnDescriptor(family)
1330 .setMaxVersions(numVersions)
1331 .setBlocksize(blockSize);
1332 desc.addFamily(hcd);
1333 }
1334 getHBaseAdmin().createTable(desc);
1335
1336 waitUntilAllRegionsAssigned(tableName);
1337 return new HTable(new Configuration(getConfiguration()), tableName);
1338 }
1339
1340
1341
1342
1343
1344
1345
1346
1347
/**
 * Creates a table where each family gets its own versions cap; delegates
 * to the TableName overload.
 * @return an HTable against the new table
 */
public HTable createTable(byte[] tableName, byte[][] families,
    int[] numVersions)
throws IOException {
  return createTable(TableName.valueOf(tableName), families, numVersions);
}
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362 public HTable createTable(TableName tableName, byte[][] families,
1363 int[] numVersions)
1364 throws IOException {
1365 HTableDescriptor desc = new HTableDescriptor(tableName);
1366 int i = 0;
1367 for (byte[] family : families) {
1368 HColumnDescriptor hcd = new HColumnDescriptor(family)
1369 .setMaxVersions(numVersions[i]);
1370 desc.addFamily(hcd);
1371 i++;
1372 }
1373 getHBaseAdmin().createTable(desc);
1374
1375 waitUntilAllRegionsAssigned(tableName);
1376 return new HTable(new Configuration(getConfiguration()), tableName);
1377 }
1378
1379
1380
1381
1382
1383
1384
1385
1386
  /**
   * Create a pre-split table with a single column family.
   * Delegates to the {@link TableName} overload.
   * @return An HTable instance for the created table.
   * @throws IOException if table creation fails
   */
  public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
      throws IOException{
    return createTable(TableName.valueOf(tableName), family, splitRows);
  }
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400 public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
1401 throws IOException {
1402 HTableDescriptor desc = new HTableDescriptor(tableName);
1403 HColumnDescriptor hcd = new HColumnDescriptor(family);
1404 desc.addFamily(hcd);
1405 getHBaseAdmin().createTable(desc, splitRows);
1406
1407 waitUntilAllRegionsAssigned(tableName);
1408 return new HTable(getConfiguration(), tableName);
1409 }
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419 public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
1420 throws IOException {
1421 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1422 for(byte[] family:families) {
1423 HColumnDescriptor hcd = new HColumnDescriptor(family);
1424 desc.addFamily(hcd);
1425 }
1426 getHBaseAdmin().createTable(desc, splitRows);
1427
1428 waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
1429 return new HTable(getConfiguration(), tableName);
1430 }
1431
1432
1433
1434
1435
  /**
   * Drop an existing table. Delegates to the {@link TableName} overload.
   * @param tableName existing table
   */
  public void deleteTable(String tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }
1439
1440
1441
1442
1443
  /**
   * Drop an existing table. Delegates to the {@link TableName} overload.
   * @param tableName existing table
   */
  public void deleteTable(byte[] tableName) throws IOException {
    deleteTable(TableName.valueOf(tableName));
  }
1447
1448
1449
1450
1451
  /**
   * Drop an existing table, first disabling it if it is still enabled.
   * @param tableName existing table
   * @throws IOException if disable or delete fails
   */
  public void deleteTable(TableName tableName) throws IOException {
    try {
      getHBaseAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      // Already disabled (e.g. by a prior test step); deletion can proceed.
      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
    }
    getHBaseAdmin().deleteTable(tableName);
  }
1460
1461
1462
1463
1464
  // Default column-family names shared by many tests.
  public final static byte [] fam1 = Bytes.toBytes("colfamily11");
  public final static byte [] fam2 = Bytes.toBytes("colfamily21");
  public final static byte [] fam3 = Bytes.toBytes("colfamily31");
  // Convenience array of the three default families above.
  public static final byte[][] COLUMNS = {fam1, fam2, fam3};
  // Max versions applied by createTableDescriptor(String).
  private static final int MAXVERSIONS = 3;

  // Row-key alphabet bounds used by the loadTable/loadRegion helpers.
  public static final char FIRST_CHAR = 'a';
  public static final char LAST_CHAR = 'z';
  public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1475
1476
1477
1478
1479
1480
1481
1482
1483 public HTableDescriptor createTableDescriptor(final String name,
1484 final int minVersions, final int versions, final int ttl, boolean keepDeleted) {
1485 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
1486 for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1487 htd.addFamily(new HColumnDescriptor(cfName)
1488 .setMinVersions(minVersions)
1489 .setMaxVersions(versions)
1490 .setKeepDeletedCells(keepDeleted)
1491 .setBlockCacheEnabled(false)
1492 .setTimeToLive(ttl)
1493 );
1494 }
1495 return htd;
1496 }
1497
1498
1499
1500
1501
1502
1503
  /**
   * Build a table descriptor with the default families and default
   * version/TTL settings (MAXVERSIONS, no TTL, default keep-deleted policy).
   * @see #createTableDescriptor(String, int, int, int, boolean)
   */
  public HTableDescriptor createTableDescriptor(final String name) {
    return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
  }
1508
1509
1510
1511
1512
1513
1514
1515
1516
  /**
   * Create an HRegion for the given key range on the local filesystem
   * (not deployed to a region server).
   * @throws IOException if region creation fails
   */
  public HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey,
      byte [] endKey)
  throws IOException {
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
    return createLocalHRegion(hri, desc);
  }
1523
1524
1525
1526
1527
1528
1529
1530
  /**
   * Create an HRegion rooted under the test data directory; the caller is
   * responsible for closing it.
   */
  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
  }
1534
1535
1536
1537
1538
1539
1540
1541
1542
  /**
   * Create an HRegion under the test data directory using the supplied WAL;
   * the caller is responsible for closing the region.
   * @param hlog WAL for this region; may be null
   */
  public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog) throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
  }
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560 public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
1561 String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
1562 HLog hlog, byte[]... families) throws IOException {
1563 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
1564 htd.setReadOnly(isReadOnly);
1565 for (byte[] family : families) {
1566 HColumnDescriptor hcd = new HColumnDescriptor(family);
1567
1568 hcd.setMaxVersions(Integer.MAX_VALUE);
1569 htd.addFamily(hcd);
1570 }
1571 htd.setDurability(durability);
1572 HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
1573 return createLocalHRegion(info, htd, hlog);
1574 }
1575
1576
1577
1578
1579
1580
1581
1582
1583
  /**
   * Remove all rows from a table. Delegates to the {@link TableName} overload.
   * @return an HTable for the (now empty) table
   */
  public HTable truncateTable(byte[] tableName) throws IOException {
    return truncateTable(TableName.valueOf(tableName));
  }
1587
1588
1589
1590
1591
1592
1593
1594 public HTable truncateTable(TableName tableName) throws IOException {
1595 HTable table = new HTable(getConfiguration(), tableName);
1596 Scan scan = new Scan();
1597 ResultScanner resScan = table.getScanner(scan);
1598 for(Result res : resScan) {
1599 Delete del = new Delete(res.getRow());
1600 table.delete(del);
1601 }
1602 resScan = table.getScanner(scan);
1603 resScan.close();
1604 return table;
1605 }
1606
1607
1608
1609
1610
1611
1612
1613
1614 public int loadTable(final HTable t, final byte[] f) throws IOException {
1615 t.setAutoFlush(false, true);
1616 byte[] k = new byte[3];
1617 int rowCount = 0;
1618 for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1619 for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1620 for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1621 k[0] = b1;
1622 k[1] = b2;
1623 k[2] = b3;
1624 Put put = new Put(k);
1625 put.add(f, null, k);
1626 t.put(put);
1627 rowCount++;
1628 }
1629 }
1630 }
1631 t.flushCommits();
1632 return rowCount;
1633 }
1634
1635
1636
1637
1638
1639
1640
1641
  /**
   * Load the table with rows keyed 'aaa' through 'zzz', writing one cell per
   * given family (null qualifier, value = row key) in every row. Auto-flush
   * is disabled for throughput and a flush is issued at the end.
   * @param t table to load
   * @param f families to write into
   * @return number of rows written (26^3 = 17576)
   * @throws IOException if a put fails
   */
  public int loadTable(final HTable t, final byte[][] f) throws IOException {
    t.setAutoFlush(false, true);
    // NOTE: the same 3-byte buffer is reused for every row; the KeyValues built
    // by put.add() copy the bytes, so the written cells are still correct.
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          for (int i = 0; i < f.length; i++) {
            put.add(f[i], null, k);
          }
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }
1664
  /**
   * Load the region with rows 'aaa' through 'zzz' without flushing.
   * @see #loadRegion(HRegion, byte[], boolean)
   */
  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
    return loadRegion(r, f, false);
  }
1668
1669
1670
1671
1672
1673
1674
1675
1676
  /**
   * Load the region with rows keyed 'aaa' through 'zzz' (family {@code f},
   * null qualifier, value = row key). Retries each put with exponential
   * backoff while the region reports itself too busy.
   * @param r region to load
   * @param f family to write into
   * @param flush if true, flush the memstore after each outermost-letter batch
   *        (i.e. every 676 rows)
   * @return number of rows written (26^3 = 17576)
   * @throws IOException if a put fails for a reason other than busy-ness
   */
  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
  throws IOException {
    // The same 3-byte buffer is reused per row; put.add() copies the bytes.
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          // Skip the WAL when the region has none (standalone test regions).
          if (r.getLog() == null) put.setDurability(Durability.SKIP_WAL);

          // Retry with doubling backoff (capped at maxPause) until this row
          // actually lands; rowCount only advances on a successful put.
          int preRowCount = rowCount;
          int pause = 10;
          int maxPause = 1000;
          while (rowCount == preRowCount) {
            try {
              r.put(put);
              rowCount++;
            } catch (RegionTooBusyException e) {
              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
              Threads.sleep(pause);
            }
          }
        }
      }
      if (flush) {
        r.flushcache();
      }
    }
    return rowCount;
  }
1711
1712 public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow) throws IOException {
1713 for (int i = startRow; i < endRow; i++) {
1714 byte[] data = Bytes.toBytes(String.valueOf(i));
1715 Put put = new Put(data);
1716 put.add(f, null, data);
1717 t.put(put);
1718 }
1719 }
1720
1721
1722
1723
1724 public int countRows(final HTable table) throws IOException {
1725 Scan scan = new Scan();
1726 ResultScanner results = table.getScanner(scan);
1727 int count = 0;
1728 for (@SuppressWarnings("unused") Result res : results) {
1729 count++;
1730 }
1731 results.close();
1732 return count;
1733 }
1734
1735 public int countRows(final HTable table, final byte[]... families) throws IOException {
1736 Scan scan = new Scan();
1737 for (byte[] family: families) {
1738 scan.addFamily(family);
1739 }
1740 ResultScanner results = table.getScanner(scan);
1741 int count = 0;
1742 for (@SuppressWarnings("unused") Result res : results) {
1743 count++;
1744 }
1745 results.close();
1746 return count;
1747 }
1748
1749
1750
1751
1752 public String checksumRows(final HTable table) throws Exception {
1753 Scan scan = new Scan();
1754 ResultScanner results = table.getScanner(scan);
1755 MessageDigest digest = MessageDigest.getInstance("MD5");
1756 for (Result res : results) {
1757 digest.update(res.getRow());
1758 }
1759 results.close();
1760 return digest.toString();
1761 }
1762
1763
1764
1765
1766
1767
1768
1769
1770
  /**
   * Create regions for the table using the default KEYS split points and the
   * utility's configuration.
   * @return number of regions created
   */
  public int createMultiRegions(HTable table, byte[] columnFamily)
      throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily);
  }
1775
  // Default region start keys: an empty first key followed by "bbb".."yyy",
  // giving 25 regions when used with createMultiRegions.
  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };
1787
  // Split points suitable for HBaseAdmin.createTable(desc, splitKeys):
  // "bbb".."zzz" with no leading empty key (the admin adds the boundary
  // regions itself).
  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
      Bytes.toBytes("bbb"),
      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
      Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
      Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
      Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
      Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
      Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };
1799
1800
1801
1802
1803
1804
1805
1806
1807
  /**
   * Create regions for the table using the default KEYS split points.
   * @return number of regions created
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily)
  throws IOException {
    return createMultiRegions(c, table, columnFamily, KEYS);
  }
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823 public int createMultiRegions(final Configuration c, final HTable table,
1824 final byte [] family, int numRegions)
1825 throws IOException {
1826 if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1827 byte [] startKey = Bytes.toBytes("aaaaa");
1828 byte [] endKey = Bytes.toBytes("zzzzz");
1829 byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1830 byte [][] regionStartKeys = new byte[splitKeys.length+1][];
1831 for (int i=0;i<splitKeys.length;i++) {
1832 regionStartKeys[i+1] = splitKeys[i];
1833 }
1834 regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
1835 return createMultiRegions(c, table, family, regionStartKeys);
1836 }
1837
  /**
   * Rewrite the table's layout in hbase:meta: insert one region row per start
   * key (wrapping so the last region ends at the first key), delete the
   * table's pre-existing meta rows and the original region's directory on
   * disk, then reassign the new regions if the table is enabled.
   * @param c configuration used to reach meta and the filesystem
   * @param table table whose regions are being replaced
   * @param columnFamily family added to the descriptor if missing
   * @param startKeys sorted-in-place region start keys
   * @return number of regions created
   * @throws IOException on meta or filesystem failures
   */
  @SuppressWarnings("deprecation")
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte [][] startKeys)
  throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, TableName.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if(!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }
    // Capture the table's existing meta rows and the encoded name of its
    // current single region BEFORE adding new rows, so we can remove the old
    // layout afterwards.
    List<byte[]> rows = getMetaTableRows(htd.getTableName());
    String regionToDeleteInFS = table
        .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
        .getRegionInfo().getEncodedName();
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    // Add one meta row per start key; (i + 1) % n makes the last region's end
    // key wrap to the first start key.
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getName(),
          startKeys[i], startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri);
      newRegions.add(hri);
      count++;
    }
    // Remove the table's original meta rows, leaving only the ones just added.
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    // Remove the original region's directory from the filesystem as well.
    Path tableDir = new Path(getDefaultRootDirPath().toString()
        + System.getProperty("file.separator") + htd.getTableName()
        + System.getProperty("file.separator") + regionToDeleteInFS);
    FileSystem.get(c).delete(tableDir);
    // Drop cached region locations; they all point at the removed region.
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    // Assign the new regions so they are actually deployed (only meaningful
    // while the table is enabled).
    HBaseAdmin admin = getHBaseAdmin();
    if (admin.isTableEnabled(table.getTableName())) {
      for(HRegionInfo hri : newRegions) {
        admin.assign(hri.getRegionName());
      }
    }

    meta.close();

    return count;
  }
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905 public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
1906 final HTableDescriptor htd, byte [][] startKeys)
1907 throws IOException {
1908 HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
1909 Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
1910 List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
1911
1912 for (int i = 0; i < startKeys.length; i++) {
1913 int j = (i + 1) % startKeys.length;
1914 HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
1915 startKeys[j]);
1916 MetaEditor.addRegionToMeta(meta, hri);
1917 newRegions.add(hri);
1918 }
1919
1920 meta.close();
1921 return newRegions;
1922 }
1923
1924
1925
1926
1927
1928
1929 public List<byte[]> getMetaTableRows() throws IOException {
1930
1931 HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
1932 List<byte[]> rows = new ArrayList<byte[]>();
1933 ResultScanner s = t.getScanner(new Scan());
1934 for (Result result : s) {
1935 LOG.info("getMetaTableRows: row -> " +
1936 Bytes.toStringBinary(result.getRow()));
1937 rows.add(result.getRow());
1938 }
1939 s.close();
1940 t.close();
1941 return rows;
1942 }
1943
1944
1945
1946
1947
1948
  /**
   * Return the hbase:meta row keys belonging to the given table, determined
   * by parsing each meta row's HRegionInfo and comparing its table name.
   * @param tableName table whose meta rows to collect
   * @return list of matching meta row keys, in scan order
   * @throws IOException if the scan fails
   */
  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
    // TODO: Redo using MetaReader class
    HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      HRegionInfo info = HRegionInfo.getHRegionInfo(result);
      if (info == null) {
        // Skip rows without a parseable region info (e.g. partial rows).
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        continue;
      }

      if (info.getTable().equals(tableName)) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
  /**
   * Find the region server hosting the first region of the given table.
   * Delegates to the {@link TableName} overload.
   */
  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
      throws IOException, InterruptedException {
    return getRSForFirstRegionInTable(TableName.valueOf(tableName));
  }
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998 public HRegionServer getRSForFirstRegionInTable(TableName tableName)
1999 throws IOException, InterruptedException {
2000 List<byte[]> metaRows = getMetaTableRows(tableName);
2001 if (metaRows == null || metaRows.isEmpty()) {
2002 return null;
2003 }
2004 LOG.debug("Found " + metaRows.size() + " rows for table " +
2005 tableName);
2006 byte [] firstrow = metaRows.get(0);
2007 LOG.debug("FirstRow=" + Bytes.toString(firstrow));
2008 long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2009 HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2010 int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2011 HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
2012 RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MICROSECONDS);
2013 while(retrier.shouldRetry()) {
2014 int index = getMiniHBaseCluster().getServerWith(firstrow);
2015 if (index != -1) {
2016 return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2017 }
2018
2019 retrier.sleepUntilNextRetry();
2020 }
2021 return null;
2022 }
2023
2024
2025
2026
2027
2028
2029
  /**
   * Start a mini MapReduce cluster with the default of two servers.
   * @return the running MiniMRCluster
   */
  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
    return mrCluster;
  }
2034
2035
2036
2037
2038
  /**
   * Redirect Hadoop's TaskLog LOG_DIR into our per-test log directory via
   * reflection. The field is private static final, so the final modifier must
   * be cleared on the Field object before the new value can be written.
   */
  private void forceChangeTaskLogDir() {
    Field logDirField;
    try {
      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
      logDirField.setAccessible(true);

      // Strip the 'final' modifier so the static field becomes writable.
      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);

      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    } catch (SecurityException e) {
      throw new RuntimeException(e);
    } catch (NoSuchFieldException e) {
      // The Hadoop version in use lacks the expected field; fail loudly.
      throw new RuntimeException(e);
    } catch (IllegalArgumentException e) {
      throw new RuntimeException(e);
    } catch (IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }
2061
2062
2063
2064
2065
2066
2067
  /**
   * Start a mini MapReduce cluster with the given number of servers, wiring
   * its job tracker / YARN endpoints back into this utility's configuration so
   * subsequently submitted jobs find it.
   * @param servers number of task tracker / node manager instances
   * @throws IllegalStateException if a mini MR cluster is already running
   * @throws IOException on startup failure
   */
  private void startMiniMapReduceCluster(final int servers) throws IOException {
    if (mrCluster != null) {
      throw new IllegalStateException("MiniMRCluster is already running");
    }
    LOG.info("Starting mini mapreduce cluster...");
    setupClusterTestDir();
    createDirsAndSetProperties();

    forceChangeTaskLogDir();

    // Allow the JVMs of MR tasks plenty of virtual memory headroom so the
    // node manager does not kill them on over-committed test machines.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    // Speculative execution just wastes resources in a single-node test rig
    // and can make test behavior nondeterministic.
    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);

    // Point the MR cluster at our DFS (or the externally supplied FS URI).
    mrCluster = new MiniMRCluster(servers,
      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
      null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
    }

    jobConf.set("mapred.local.dir",
      conf.get("mapred.local.dir")); //Hadoop MiniMR overwrites this while it should not
    LOG.info("Mini mapreduce cluster started");

    // Copy the MR cluster's endpoints into our conf so jobs built from it can
    // reach the cluster we just started.
    conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
    // this for mrv2 support; mr1 ignores these
    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = jobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String schedulerAddress =
      jobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }
2119
2120
2121
2122
  /**
   * Stop the mini MapReduce cluster (no-op if none is running) and restore the
   * job tracker setting to local mode.
   */
  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }
    // Point jobs back at the local in-process runner.
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }
2133
2134
2135
2136
  /**
   * Create a stubbed-out RegionServerServices for tests, with no server name.
   */
  public RegionServerServices createMockRegionServerService() throws IOException {
    return createMockRegionServerService((ServerName)null);
  }
2140
2141
2142
2143
2144
  /**
   * Create a stubbed-out RegionServerServices backed by the test filesystem
   * and the given RPC server.
   */
  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
    rss.setFileSystem(getTestFileSystem());
    rss.setRpcServer(rpc);
    return rss;
  }
2151
2152
2153
2154
2155
  /**
   * Create a stubbed-out RegionServerServices backed by the test filesystem,
   * reporting the given server name (may be null).
   */
  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
    rss.setFileSystem(getTestFileSystem());
    return rss;
  }
2161
2162
2163
2164
2165
2166
2167 public void enableDebug(Class<?> clazz) {
2168 Log l = LogFactory.getLog(clazz);
2169 if (l instanceof Log4JLogger) {
2170 ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2171 } else if (l instanceof Jdk14Logger) {
2172 ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2173 }
2174 }
2175
2176
2177
2178
2179
  /**
   * Force-expire the active master's ZooKeeper session.
   */
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }
2184
2185
2186
2187
2188
2189
  /**
   * Force-expire a region server's ZooKeeper session and lower the
   * min-region-server-count so the cluster does not wait for it to return.
   * @param index index of the region server in the mini cluster
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }
2195
  /**
   * Decrement the minimum region-server-count setting in the utility's own
   * configuration and in every live master's configuration, so master
   * startup/failover does not block waiting on an expired server.
   */
  private void decrementMinRegionServerCount() {
    // decrement the count for this.conf, for newly spwaned master
    // this.hbaseCluster shares this configuration too
    decrementMinRegionServerCount(getConfiguration());

    // each master thread keeps a copy of configuration
    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }
2206
2207 private void decrementMinRegionServerCount(Configuration conf) {
2208 int currentCount = conf.getInt(
2209 ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2210 if (currentCount != -1) {
2211 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
2212 Math.max(currentCount - 1, 1));
2213 }
2214 }
2215
  /**
   * Force-expire the given watcher's ZooKeeper session without checking
   * cluster status afterwards.
   */
  public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
  }
2219
  /**
   * @deprecated The {@code server} argument is unused; call
   *   {@link #expireSession(ZooKeeperWatcher)} instead.
   */
  @Deprecated
  public void expireSession(ZooKeeperWatcher nodeZK, Server server)
    throws Exception {
    expireSession(nodeZK, false);
  }
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
  /**
   * Force-expire the ZooKeeper session behind the given watcher by opening a
   * second connection with the same session id/password and then closing it:
   * the server treats the close as the session's end, expiring it for the
   * original client as well.
   * @param nodeZK watcher whose underlying session will be expired
   * @param checkStatus if true, verify afterwards that hbase:meta can still be
   *   reached with a fresh connection
   * @throws Exception on ZooKeeper or connection errors
   */
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
    throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Extra "monitor" connection on the same session: it keeps the session
    // alive until we are ready and lets us observe the expiration event.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher(){
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event="+watchedEvent);
      }
    } , sessionID, password);

    // This is the connection we deliberately close to kill the session.
    ZooKeeper newZK = new ZooKeeper(quorumServers,
        1000, EmptyWatcher.instance, sessionID, password);

    // Wait (up to ~1s) for the duplicate connection to actually connect
    // before closing it; closing an unconnected handle would not expire
    // the session.
    long start = System.currentTimeMillis();
    while (newZK.getState() != States.CONNECTED
         && System.currentTimeMillis() - start < 1000) {
       Thread.sleep(1);
    }
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // Now the session is gone; release the monitor connection too.
    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
    }
  }
2281
2282
2283
2284
2285
2286
2287
  /**
   * Get the running mini HBase cluster.
   * @see #getMiniHBaseCluster()
   */
  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }
2291
2292
2293
2294
2295
2296
2297
2298
2299
  /**
   * Return the cluster through the generic HBaseCluster interface, which may
   * be backed by a mini cluster or a distributed one.
   */
  public HBaseCluster getHBaseClusterInterface() {
    //implementation note: we should rename this method as #getHBaseCluster(),
    //but this would require refactoring 90+ calls.
    return hbaseCluster;
  }
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
  /**
   * Lazily create and return a shared HBaseAdmin for the cluster. The
   * returned instance's close() is a no-op so callers cannot break sharers;
   * the utility closes it itself at shutdown.
   * @throws IOException if the admin cannot be created
   */
  public synchronized HBaseAdmin getHBaseAdmin()
  throws IOException {
    if (hbaseAdmin == null){
      hbaseAdmin = new HBaseAdminForTests(getConfiguration());
    }
    return hbaseAdmin;
  }
2322
  // Lazily created shared admin; see getHBaseAdmin().
  private HBaseAdminForTests hbaseAdmin = null;

  /**
   * HBaseAdmin whose public close() is a deliberate no-op (just a warning),
   * so tests sharing the utility's admin cannot accidentally shut it down;
   * close0() performs the real close and is for the utility's own use.
   */
  private static class HBaseAdminForTests extends HBaseAdmin {
    public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
        ZooKeeperConnectionException, IOException {
      super(c);
    }

    @Override
    public synchronized void close() throws IOException {
      LOG.warn("close() called on HBaseAdmin instance returned from HBaseTestingUtility.getHBaseAdmin()");
    }

    private synchronized void close0() throws IOException {
      super.close();
    }
  }
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
  /**
   * Lazily create and return a shared ZooKeeperWatcher for the test cluster.
   * Its Abortable converts any abort into a RuntimeException so test failures
   * surface immediately.
   * @throws IOException if the watcher cannot be created
   */
  public synchronized ZooKeeperWatcher getZooKeeperWatcher()
    throws IOException {
    if (zooKeeperWatcher == null) {
      zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
        new Abortable() {
        @Override public void abort(String why, Throwable e) {
          throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
        }
        @Override public boolean isAborted() {return false;}
      });
    }
    return zooKeeperWatcher;
  }
  // Shared watcher created on first use by getZooKeeperWatcher().
  private ZooKeeperWatcher zooKeeperWatcher;
2363
2364
2365
2366
2367
2368
2369
2370
2371
  /**
   * Unassign the named region. Delegates to the byte[] overload.
   */
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }
2375
2376
2377
2378
2379
2380
2381
  /**
   * Unassign the named region via the admin (no destination server hint).
   */
  public void closeRegion(byte[] regionName) throws IOException {
    getHBaseAdmin().closeRegion(regionName, null);
  }
2385
2386
2387
2388
2389
2390
2391
2392
  /**
   * Close the region containing the given row. Delegates to the byte[] overload.
   */
  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }
2396
2397
2398
2399
2400
2401
2402
2403
  /**
   * Look up the region containing the given row in the table and close it.
   */
  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }
2408
2409
2410
2411
2412
2413
2414
2415
2416 public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
2417 List<HRegion> regions = getHBaseCluster().getRegions(tableName);
2418 int regCount = regions.size();
2419 Set<Integer> attempted = new HashSet<Integer>();
2420 int idx;
2421 int attempts = 0;
2422 do {
2423 regions = getHBaseCluster().getRegions(tableName);
2424 if (regCount != regions.size()) {
2425
2426 attempted.clear();
2427 }
2428 regCount = regions.size();
2429
2430
2431 if (regCount > 0) {
2432 idx = random.nextInt(regCount);
2433
2434 if (attempted.contains(idx))
2435 continue;
2436 try {
2437 regions.get(idx).checkSplit();
2438 return regions.get(idx);
2439 } catch (Exception ex) {
2440 LOG.warn("Caught exception", ex);
2441 attempted.add(idx);
2442 }
2443 }
2444 attempts++;
2445 } while (maxAttempts == -1 || attempts < maxAttempts);
2446 return null;
2447 }
2448
  /** @return the mini ZooKeeper cluster, or null if none was started or set. */
  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }
2452
  /**
   * Use an externally managed ZooKeeper cluster; marks it as passed-in so
   * this utility will not shut it down, and points the configuration at it.
   */
  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }
2458
  /** @return the mini DFS cluster, or null if none was started or set. */
  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }
2462
  /**
   * Use an externally managed DFS cluster.
   * @throws IOException if a DFS cluster is already up; it must be shut down first
   */
  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
    if (dfsCluster != null && dfsCluster.isClusterUp()) {
      throw new IOException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
  }
2469
  /** @return an HFileSystem view over the configured filesystem. */
  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }
2473
2474
2475
2476
2477
2478
2479
2480
  /**
   * Wait up to 30 seconds for the table to become available.
   * @see #waitTableAvailable(HBaseAdmin, byte[], long)
   */
  public void waitTableAvailable(byte[] table)
      throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, 30000);
  }
2485
  /**
   * Wait up to 30 seconds for the table to become available via the given admin.
   */
  public void waitTableAvailable(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableAvailable(admin, table, 30000);
  }
2490
2491
2492
2493
2494
2495
2496
2497
  /**
   * Wait up to the given timeout for the table to become available.
   */
  public void waitTableAvailable(byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
  }
2502
  /**
   * Wait (polling every 200ms) until the admin reports the table available,
   * failing the test assertion if the timeout elapses first; then run a
   * Canary sniff to confirm every region is actually readable.
   * @throws IOException if the availability check or the sniff fails
   */
  public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    long startWait = System.currentTimeMillis();
    while (!admin.isTableAvailable(table)) {
      assertTrue("Timed out waiting for table to become available " +
        Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(200);
    }

    // isTableAvailable only consults meta; sniff each region with a real read
    // so callers can immediately use the table.
    try {
      Canary.sniff(admin, TableName.valueOf(table));
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
  /**
   * Wait up to 30 seconds for the table to become available and enabled.
   * @see #waitTableEnabled(HBaseAdmin, byte[], long)
   */
  public void waitTableEnabled(byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, 30000);
  }
2537
  /**
   * Wait up to 30 seconds for the table to become available and enabled,
   * using the given admin.
   */
  public void waitTableEnabled(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(admin, table, 30000);
  }
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
  /**
   * Wait up to the given timeout for the table to become available and enabled.
   */
  public void waitTableEnabled(byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
  }
2556
2557 public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
2558 throws InterruptedException, IOException {
2559 long startWait = System.currentTimeMillis();
2560 waitTableAvailable(admin, table, timeoutMillis);
2561 long remainder = System.currentTimeMillis() - startWait;
2562 while (!admin.isTableEnabled(table)) {
2563 assertTrue("Timed out waiting for table to become available and enabled " +
2564 Bytes.toStringBinary(table),
2565 System.currentTimeMillis() - remainder < timeoutMillis);
2566 Thread.sleep(200);
2567 }
2568 LOG.debug("REMOVE AFTER table=" + Bytes.toString(table) + ", isTableAvailable=" +
2569 admin.isTableAvailable(table) +
2570 ", isTableEnabled=" + admin.isTableEnabled(table));
2571 }
2572
2573
2574
2575
2576
2577
2578
2579
  /**
   * Start region servers until at least {@code num} threads are live.
   * @param num desired minimum number of region server threads
   * @return true if at least one new server was started
   * @throws IOException if a server fails to start
   */
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }

    return startedServer;
  }
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
  /**
   * Like {@link #ensureSomeRegionServersAvailable(int)}, but only counts
   * servers that are neither stopped nor stopping, starting replacements for
   * any that are shutting down.
   * @param num desired minimum number of healthy region servers
   * @return true if at least one new server was started
   * @throws IOException if a server fails to start
   */
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
    throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
      getMiniHBaseCluster().getRegionServerThreads()) {

      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:"+hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i=nonStoppedServers; i<num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633 public static User getDifferentUser(final Configuration c,
2634 final String differentiatingSuffix)
2635 throws IOException {
2636 FileSystem currentfs = FileSystem.get(c);
2637 if (!(currentfs instanceof DistributedFileSystem)) {
2638 return User.getCurrent();
2639 }
2640
2641
2642 String username = User.getCurrent().getName() +
2643 differentiatingSuffix;
2644 User user = User.createUserForTesting(c, username,
2645 new String[]{"supergroup"});
2646 return user;
2647 }
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662 public static void setMaxRecoveryErrorCount(final OutputStream stream,
2663 final int max) {
2664 try {
2665 Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
2666 for (Class<?> clazz: clazzes) {
2667 String className = clazz.getSimpleName();
2668 if (className.equals("DFSOutputStream")) {
2669 if (clazz.isInstance(stream)) {
2670 Field maxRecoveryErrorCountField =
2671 stream.getClass().getDeclaredField("maxRecoveryErrorCount");
2672 maxRecoveryErrorCountField.setAccessible(true);
2673 maxRecoveryErrorCountField.setInt(stream, max);
2674 break;
2675 }
2676 }
2677 }
2678 } catch (Exception e) {
2679 LOG.info("Could not set max recovery field", e);
2680 }
2681 }
2682
2683
2684
2685
2686
2687
2688
2689
2690
  /**
   * Wait until all regions for the given table are assigned, using the
   * default timeout of 60 seconds.
   * @param tableName table whose regions to wait on
   * @throws IOException if checking region assignment fails or times out
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
  /**
   * Wait until every region of the given table has a server recorded in the
   * meta table, or the timeout expires.  Polls meta every 200 ms.
   * @param tableName table whose regions to wait on
   * @param timeout maximum time to wait, in milliseconds
   * @throws IOException if scanning meta fails or the wait times out
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
      throws IOException {
    final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
    try {
      waitFor(timeout, 200, true, new Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          boolean allRegionsAssigned = true;
          // Scan the catalog family for all region rows.
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          ResultScanner s = meta.getScanner(scan);
          try {
            Result r;
            while ((r = s.next()) != null) {
              byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
              HRegionInfo info = HRegionInfo.parseFromOrNull(b);
              if (info != null && info.getTable().equals(tableName)) {
                // A region counts as assigned only if its server column is set.
                b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                allRegionsAssigned &= (b != null);
              }
            }
          } finally {
            s.close();
          }
          return allRegionsAssigned;
        }
      });
    } finally {
      meta.close();
    }
  }
2735
2736
2737
2738
2739
  /**
   * Do a small get against a single store, returning the matching cells.
   * @param store the store to read from
   * @param get defines the row and columns to fetch
   * @return cells for the requested row, or an empty list if none match
   * @throws IOException if the store scanner fails
   */
  public static List<Cell> getFromStoreFile(HStore store,
                                                Get get) throws IOException {
    // Reset the MVCC read point so this thread sees current data.
    MultiVersionConsistencyControl.resetThreadReadPoint();
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()));

    List<Cell> result = new ArrayList<Cell>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // Verify the scanner did not run past the requested row; if it did,
      // the requested row has no data and the result is discarded.
      Cell kv = result.get(0);
      if (!CellUtil.matchingRow(kv, get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768 public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
2769 assertTrue(numRegions>3);
2770 byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
2771 byte [][] result = new byte[tmpSplitKeys.length+1][];
2772 for (int i=0;i<tmpSplitKeys.length;i++) {
2773 result[i+1] = tmpSplitKeys[i];
2774 }
2775 result[0] = HConstants.EMPTY_BYTE_ARRAY;
2776 return result;
2777 }
2778
2779
2780
2781
2782
2783 public static List<Cell> getFromStoreFile(HStore store,
2784 byte [] row,
2785 NavigableSet<byte[]> columns
2786 ) throws IOException {
2787 Get get = new Get(row);
2788 Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
2789 s.put(store.getFamily().getName(), columns);
2790
2791 return getFromStoreFile(store,get);
2792 }
2793
2794
2795
2796
2797
  /**
   * Gets a ZooKeeperWatcher connected to the given utility's cluster.  The
   * watcher's Abortable throws a RuntimeException on any fatal ZK error so
   * tests fail fast.
   * @param TEST_UTIL utility whose configuration supplies the ZK quorum
   * @return a new ZooKeeperWatcher named "unittest"
   * @throws ZooKeeperConnectionException if the ZK connection fails
   * @throws IOException on other setup failure
   */
  public static ZooKeeperWatcher getZooKeeperWatcher(
      HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
      IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
        "unittest", new Abortable() {
          boolean aborted = false;

          @Override
          public void abort(String why, Throwable e) {
            aborted = true;
            // Fail fast in tests: surface ZK errors immediately.
            throw new RuntimeException("Fatal ZK error, why=" + why, e);
          }

          @Override
          public boolean isAborted() {
            return aborted;
          }
        });
    return zkw;
  }
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
  /**
   * Creates the assignment znode for the given region and transitions it
   * OFFLINE -&gt; OPENING -&gt; OPENED as if the given server had opened it.
   * @param TEST_UTIL utility supplying the ZK connection
   * @param region region whose assignment znode to force
   * @param serverName server to record as having opened the region
   * @return the watcher used; caller is responsible for closing it
   * @throws ZooKeeperConnectionException if the ZK connection fails
   * @throws IOException on setup failure
   * @throws KeeperException on a ZK operation failure
   * @throws NodeExistsException if the region znode already exists
   */
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    // The OPENED transition requires the version from the OPENING step.
    int version = ZKAssign.transitionNodeOpening(zkw, region
        .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
        version);
    return zkw;
  }
2842
2843 public static void assertKVListsEqual(String additionalMsg,
2844 final List<? extends Cell> expected,
2845 final List<? extends Cell> actual) {
2846 final int eLen = expected.size();
2847 final int aLen = actual.size();
2848 final int minLen = Math.min(eLen, aLen);
2849
2850 int i;
2851 for (i = 0; i < minLen
2852 && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
2853 ++i) {}
2854
2855 if (additionalMsg == null) {
2856 additionalMsg = "";
2857 }
2858 if (!additionalMsg.isEmpty()) {
2859 additionalMsg = ". " + additionalMsg;
2860 }
2861
2862 if (eLen != aLen || i != minLen) {
2863 throw new AssertionError(
2864 "Expected and actual KV arrays differ at position " + i + ": " +
2865 safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
2866 safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
2867 }
2868 }
2869
2870 private static <T> String safeGetAsStr(List<T> lst, int i) {
2871 if (0 <= i && i < lst.size()) {
2872 return lst.get(i).toString();
2873 } else {
2874 return "<out_of_range>";
2875 }
2876 }
2877
2878 public String getClusterKey() {
2879 return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
2880 + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
2881 + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
2882 HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
2883 }
2884
2885
  /**
   * Creates a pre-split table and fills it with deterministic pseudo-random
   * data: {@code numFlushes} rounds of {@code numRowsPerFlush} rows each,
   * mixing puts and deletes, flushing after every round when a mini cluster
   * is running.  The RNG is seeded from the table name so repeated runs with
   * the same arguments produce the same data.
   * @param tableName name of the table to create
   * @param families column family names
   * @param maxVersions maximum versions kept per cell
   * @param numColsPerRow columns written/deleted per row
   * @param numFlushes number of write-then-flush rounds
   * @param numRegions number of regions to pre-split into
   * @param numRowsPerFlush rows touched per round
   * @return the created, loaded table
   * @throws IOException if table creation or writes fail
   * @throws InterruptedException if flushing is interrupted
   */
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
        " regions, " + numFlushes + " storefiles per region, " +
        numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
        "\n");

    // Deterministic seed derived from the table name for reproducible data.
    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    // Split points exclude the outermost keysPerRegion-sized bands so the
    // first and last regions also receive data.
    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableName, cfBytes,
        maxVersions,
        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
        numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          // ~50% puts, ~40% single-version deletes, ~10% all-version deletes.
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }

    return table;
  }
2966
  // Lower bound (inclusive) of the range used by randomPort(); starts at
  // 0xc000 to stay clear of well-known and registered ports.
  private static final int MIN_RANDOM_PORT = 0xc000;
  // Upper bound of the randomPort() range (exclusive, given how
  // Random.nextInt is applied in randomPort()).
  private static final int MAX_RANDOM_PORT = 0xfffe;
  // Shared source of randomness for port and multicast-address selection.
  private static Random random = new Random();
2970
2971
2972
2973
2974
2975 public static int randomPort() {
2976 return MIN_RANDOM_PORT
2977 + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
2978 }
2979
2980
2981
2982
2983
2984 public static int randomFreePort() {
2985 int port = 0;
2986 do {
2987 port = randomPort();
2988 if (takenRandomPorts.contains(port)) {
2989 continue;
2990 }
2991 takenRandomPorts.add(port);
2992
2993 try {
2994 ServerSocket sock = new ServerSocket(port);
2995 sock.close();
2996 } catch (IOException ex) {
2997 port = 0;
2998 }
2999 } while (port == 0);
3000 return port;
3001 }
3002
3003
3004 public static String randomMultiCastAddress() {
3005 return "226.1.1." + random.nextInt(254);
3006 }
3007
3008
3009
3010 public static void waitForHostPort(String host, int port)
3011 throws IOException {
3012 final int maxTimeMs = 10000;
3013 final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3014 IOException savedException = null;
3015 LOG.info("Waiting for server at " + host + ":" + port);
3016 for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3017 try {
3018 Socket sock = new Socket(InetAddress.getByName(host), port);
3019 sock.close();
3020 savedException = null;
3021 LOG.info("Server at " + host + ":" + port + " is available");
3022 break;
3023 } catch (UnknownHostException e) {
3024 throw new IOException("Failed to look up " + host, e);
3025 } catch (IOException e) {
3026 savedException = e;
3027 }
3028 Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3029 }
3030
3031 if (savedException != null) {
3032 throw savedException;
3033 }
3034 }
3035
3036
3037
3038
3039
3040
3041 public static int createPreSplitLoadTestTable(Configuration conf,
3042 TableName tableName, byte[] columnFamily, Algorithm compression,
3043 DataBlockEncoding dataBlockEncoding) throws IOException {
3044 HTableDescriptor desc = new HTableDescriptor(tableName);
3045 HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
3046 hcd.setDataBlockEncoding(dataBlockEncoding);
3047 hcd.setCompressionType(compression);
3048 return createPreSplitLoadTestTable(conf, desc, hcd);
3049 }
3050
3051
3052
3053
3054
3055
  /**
   * Creates a pre-split table for load testing, scaled to the cluster:
   * DEFAULT_REGIONS_PER_SERVER regions per live region server, using
   * hex-string split points.  An already-existing table is tolerated.
   * @param conf cluster configuration
   * @param desc descriptor of the table to create
   * @param hcd column family added to the descriptor if not already present
   * @return the number of regions requested, or 0 if the table already existed
   * @throws IOException if the master is not running or creation fails
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }

    int totalNumberOfRegions = 0;
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Scale region count to live servers so each hosts roughly
      // DEFAULT_REGIONS_PER_SERVER regions.
      int numberOfServers = admin.getClusterStatus().getServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
          "pre-splitting table into " + totalNumberOfRegions + " regions " +
          "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");

      byte[][] splits = new RegionSplitter.HexStringSplit().split(
          totalNumberOfRegions);

      admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      // Benign for load tests: reuse the existing table.
      LOG.warn("Table " + desc.getTableName() +
          " already exists, continuing");
    } finally {
      admin.close();
    }
    return totalNumberOfRegions;
  }
3093
3094 public static int getMetaRSPort(Configuration conf) throws IOException {
3095 HTable table = new HTable(conf, TableName.META_TABLE_NAME);
3096 HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
3097 table.close();
3098 return hloc.getPort();
3099 }
3100
3101
3102
3103
3104
3105
3106
3107 public void assertRegionOnServer(
3108 final HRegionInfo hri, final ServerName server,
3109 final long timeout) throws IOException, InterruptedException {
3110 long timeoutTime = System.currentTimeMillis() + timeout;
3111 while (true) {
3112 List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3113 if (regions.contains(hri)) return;
3114 long now = System.currentTimeMillis();
3115 if (now > timeoutTime) break;
3116 Thread.sleep(10);
3117 }
3118 fail("Could not find region " + hri.getRegionNameAsString()
3119 + " on server " + server);
3120 }
3121
3122
3123
3124
3125
  /**
   * Waits up to {@code timeout} ms for the region to be online on the given
   * server and, once found, asserts that no other live region server is
   * also hosting it (i.e. no double assignment).  Fails the test if the
   * region never appears on the expected server.
   * @param hri region expected only on the given server
   * @param server the server that should exclusively host the region
   * @param timeout maximum wait in milliseconds
   * @throws IOException if querying online regions fails
   * @throws InterruptedException if the polling sleep is interrupted
   */
  public void assertRegionOnlyOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) {
        // Region found on the expected server; now verify no other live
        // server is also hosting it.
        List<JVMClusterUtil.RegionServerThread> rsThreads =
            getHBaseCluster().getLiveRegionServerThreads();
        for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
          HRegionServer rs = rsThread.getRegionServer();
          if (server.equals(rs.getServerName())) {
            // Skip the expected host itself.
            continue;
          }
          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
          for (HRegion r: hrs) {
            assertTrue("Region should not be double assigned",
                r.getRegionId() != hri.getRegionId());
          }
        }
        return;
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
        + " on server " + server);
  }
3155
3156 public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
3157 throws IOException {
3158 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
3159 htd.addFamily(hcd);
3160 HRegionInfo info =
3161 new HRegionInfo(TableName.valueOf(tableName), null, null, false);
3162 HRegion region =
3163 HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
3164 return region;
3165 }
3166
  /**
   * Sets the filesystem URI this utility uses when bringing up clusters.
   * @param fsURI filesystem URI, e.g. hdfs://host:port
   */
  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }
3170
3171
3172
3173
  /**
   * Wrapper for {@link Waiter#waitFor(Configuration, long, Predicate)}
   * using this utility's configuration.
   * @return time waited, as reported by the underlying Waiter
   */
  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, predicate);
  }
3178
3179
3180
3181
  /**
   * Wrapper for {@link Waiter#waitFor(Configuration, long, long, Predicate)}
   * using this utility's configuration.
   * @return time waited, as reported by the underlying Waiter
   */
  public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, interval, predicate);
  }
3186
3187
3188
3189
  /**
   * Wrapper for
   * {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}
   * using this utility's configuration.
   * @return time waited, as reported by the underlying Waiter
   */
  public <E extends Exception> long waitFor(long timeout, long interval,
      boolean failIfTimeout, Predicate<E> predicate) throws E {
    return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
  }
3194
3195
3196
3197
  /**
   * Returns a {@link Waiter.Predicate} that evaluates to true when the
   * master reports no regions in transition; usable with the waitFor
   * helpers on this class.
   */
  public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
            .getAssignmentManager().getRegionStates();
        return !regionStates.isRegionsInTransition();
      }
    };
  }
3208
3209 }