/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ReadWriteConsistencyControl;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.zookeeper.ZooKeeper;

import com.google.common.base.Preconditions;
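
/**
 * Facility for testing HBase. Spins up mini DFS, ZooKeeper, MapReduce, and
 * HBase clusters against a random temporary directory on the local
 * filesystem, and bundles helpers for creating tables, loading and counting
 * rows, carving tables into many regions, and expiring ZooKeeper sessions.
 * Create an instance and keep it around; it is meant to be a one-stop shop
 * for cluster-backed tests.
 */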
public class HBaseTestingUtility {
  private final static Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
  private final Configuration conf;
  private MiniZooKeeperCluster zkCluster = null;
  private MiniDFSCluster dfsCluster = null;
  private MiniHBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  private File clusterTestBuildDir = null;
  private HBaseAdmin hbaseAdmin = null;

  /** System property key naming where to put test data on the local fs. */
  public static final String TEST_DIRECTORY_KEY = "test.build.data";

  /** Default test directory, used when the {@link #TEST_DIRECTORY_KEY}
   * system property is unset. */
  public static final String DEFAULT_TEST_DIRECTORY = "target/build/data";

  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  public HBaseTestingUtility(Configuration conf) {
    this.conf = conf;
  }

  /**
   * @return this utility's configuration instance
   */
  public Configuration getConfiguration() {
    return this.conf;
  }

  /**
   * @return where to write test data on the local filesystem; the value of
   * the {@link #TEST_DIRECTORY_KEY} system property, else
   * {@link #DEFAULT_TEST_DIRECTORY}
   * @see #getTestDir(String)
   */
  public static Path getTestDir() {
    return new Path(System.getProperty(TEST_DIRECTORY_KEY,
      DEFAULT_TEST_DIRECTORY));
  }

  /**
   * @param subdirName name of a subdirectory of the test dir
   * @return Path of <code>subdirName</code> under {@link #getTestDir()}
   */
  public static Path getTestDir(final String subdirName) {
    return new Path(getTestDir(), subdirName);
  }
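
  /**
   * Home a new, randomly-named cluster test directory under
   * {@link #getTestDir()}.  The directory itself is created later by the
   * mini cluster that is homed in it.
   * @return the File handle for the unique directory; it is registered for
   * deletion on JVM exit
   */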
  File setupClusterTestBuildDir() {
    String randomStr = UUID.randomUUID().toString();
    String dirStr = getTestDir(randomStr).toString();
    File dir = new File(dirStr).getAbsoluteFile();
    // Have it cleaned up on exit.
    dir.deleteOnExit();
    return dir;
  }

  /**
   * Fails if a cluster is already running out of a test dir.
   * @throws IOException if a cluster is up, reporting where it is homed
   */
  void isRunningCluster() throws IOException {
    if (this.clusterTestBuildDir == null) return;
    throw new IOException("Cluster already running at " +
      this.clusterTestBuildDir);
  }

  /**
   * Start a minidfscluster homed in a new random directory.
   * @param servers how many datanodes to start the cluster with
   * @see #shutdownMiniDFSCluster()
   * @return the mini dfs cluster created
   * @throws Exception
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }
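
  /**
   * Start a minidfscluster.
   * Can only create one.
   * @param servers how many datanodes to start the cluster with
   * @param dir where to home the cluster on the local filesystem; if null,
   * a new random directory is created under the test dir
   * @see #shutdownMiniDFSCluster()
   * @return the mini dfs cluster created
   * @throws Exception
   */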
  public MiniDFSCluster startMiniDFSCluster(int servers, final File dir)
  throws Exception {
    // MiniDFSCluster homes itself under the "test.build.data" system
    // property (and some tests read "test.cache.data"), so point both at
    // our cluster test build dir before construction.
    if (dir == null) this.clusterTestBuildDir = setupClusterTestBuildDir();
    else this.clusterTestBuildDir = dir;
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.toString());
    System.setProperty("test.cache.data", this.clusterTestBuildDir.toString());
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, null, null);
    return this.dfsCluster;
  }

  /**
   * Shuts down an instance created by a call to
   * {@link #startMiniDFSCluster(int)}, or does nothing if none is running.
   * @throws Exception
   */
  public void shutdownMiniDFSCluster() throws Exception {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
    }
  }
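
  /**
   * Start a mini ZooKeeper cluster homed in a new random directory under
   * the test dir.  The client port the cluster comes up on is written into
   * this utility's configuration as
   * <code>hbase.zookeeper.property.clientPort</code>.
   * @return the mini zk cluster created
   * @throws Exception
   */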
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(setupClusterTestBuildDir());
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
  throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.zkCluster = new MiniZooKeeperCluster();
    int clientPort = this.zkCluster.startup(dir);
    this.conf.set("hbase.zookeeper.property.clientPort",
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  /**
   * Shuts down the zk cluster created by a call to
   * {@link #startMiniZKCluster()}, or does nothing if none is running.
   * @throws IOException
   */
  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) this.zkCluster.shutdown();
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper with a single
   * region server.
   * @see #shutdownMiniCluster()
   * @return the mini hbase cluster created
   * @throws Exception
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1);
  }
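
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper, all homed in a new
   * random directory under the test dir.  A zk cluster started beforehand
   * via {@link #startMiniZKCluster()} is reused.  Does not return until the
   * .META. table can be scanned successfully.
   * @param servers number of datanodes and region servers to start
   * @see #shutdownMiniCluster()
   * @return the mini hbase cluster created
   * @throws Exception
   */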
  public MiniHBaseCluster startMiniCluster(final int servers)
  throws Exception {
    LOG.info("Starting up minicluster");
    // Fail if a cluster is already up.
    isRunningCluster();
    // Make a new random dir to home everything in.  Set it as a system
    // property so the mini dfs cluster homes there too.
    this.clusterTestBuildDir = setupClusterTestBuildDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.getPath());
    // Bring up the mini dfs cluster.
    startMiniDFSCluster(servers, this.clusterTestBuildDir);

    // Mangle conf so the fs parameter points at the minidfs we just started.
    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());
    // Do old style too just to be safe.
    this.conf.set("fs.default.name", fs.getUri().toString());
    this.dfsCluster.waitClusterUp();

    // Start up a zk cluster unless one was started beforehand.
    if (this.zkCluster == null) {
      startMiniZKCluster(this.clusterTestBuildDir);
    }

    // Now do the mini hbase cluster.  Set the hbase.rootdir in conf.
    Path hbaseRootdir = fs.makeQualified(fs.getHomeDirectory());
    this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the .META.
    HTable t = new HTable(this.conf, HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) continue;
    s.close();
    LOG.info("Minicluster is up");
    return this.hbaseCluster;
  }

  /**
   * @return the current mini hbase cluster; only non-null after a call to
   * {@link #startMiniCluster(int)}
   * @see #startMiniCluster(int)
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    return this.hbaseCluster;
  }
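
  /**
   * Stop the mini hbase, zk, and dfs clusters started by
   * {@link #startMiniCluster(int)} and delete the directory that homed
   * them.
   * @throws IOException
   */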
  public void shutdownMiniCluster() throws IOException {
    LOG.info("Shutting down minicluster");
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shutdown zk.
      this.hbaseCluster.join();
    }
    shutdownMiniZKCluster();
    if (this.dfsCluster != null) {
      // Shutdown throws if there is no cluster, so only call when non-null.
      this.dfsCluster.shutdown();
    }
    // Clean up our test build dir.
    if (this.clusterTestBuildDir != null && this.clusterTestBuildDir.exists()) {
      // Use deleteDirectory because File#delete requires the dir be empty.
      if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
          new Path(this.clusterTestBuildDir.toString()))) {
        LOG.warn("Failed delete of " + this.clusterTestBuildDir.toString());
      }
    }
    LOG.info("Minicluster is down");
  }

  /**
   * Flush all memstore caches in the mini hbase cluster.
   * @throws IOException
   */
  public void flush() throws IOException {
    this.hbaseCluster.flushcache();
  }

  /**
   * Flush the memstore caches of the given table in the mini hbase cluster.
   * @param tableName table to flush
   * @throws IOException
   */
  public void flush(byte [] tableName) throws IOException {
    this.hbaseCluster.flushcache(tableName);
  }

  /**
   * Create a table with a single column family.
   * @param tableName
   * @param family
   * @return an HTable instance for the created table
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException {
    return createTable(tableName, new byte[][]{family});
  }

  /**
   * Create a table.
   * @param tableName
   * @param families column families to create
   * @return an HTable instance for the created table
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      desc.addFamily(new HColumnDescriptor(family));
    }
    (new HBaseAdmin(getConfiguration())).createTable(desc);
    return new HTable(getConfiguration(), tableName);
  }

  /**
   * Create a table with a single column family keeping
   * <code>numVersions</code> versions.
   * @param tableName
   * @param family
   * @param numVersions
   * @return an HTable instance for the created table
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /**
   * Create a table whose families all keep <code>numVersions</code>
   * versions.
   * @param tableName
   * @param families
   * @param numVersions
   * @return an HTable instance for the created table
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions,
        HColumnDescriptor.DEFAULT_COMPRESSION,
        HColumnDescriptor.DEFAULT_IN_MEMORY,
        HColumnDescriptor.DEFAULT_BLOCKCACHE,
        Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL,
        HColumnDescriptor.DEFAULT_BLOOMFILTER,
        HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
      desc.addFamily(hcd);
    }
    (new HBaseAdmin(getConfiguration())).createTable(desc);
    return new HTable(getConfiguration(), tableName);
  }

  /**
   * Create a table with a per-family version count.
   * @param tableName
   * @param families
   * @param numVersions one entry per family, in the same order as
   * <code>families</code>
   * @return an HTable instance for the created table
   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int[] numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions[i],
        HColumnDescriptor.DEFAULT_COMPRESSION,
        HColumnDescriptor.DEFAULT_IN_MEMORY,
        HColumnDescriptor.DEFAULT_BLOCKCACHE,
        Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL,
        HColumnDescriptor.DEFAULT_BLOOMFILTER,
        HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
      desc.addFamily(hcd);
      i++;
    }
    (new HBaseAdmin(getConfiguration())).createTable(desc);
    return new HTable(getConfiguration(), tableName);
  }

  /**
   * Empty an existing table of all its rows by deleting them one by one;
   * the table and its schema are left in place.
   * @param tableName existing table
   * @return an HTable instance for the (now empty) table
   * @throws IOException
   */
  public HTable truncateTable(byte [] tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan.close();
    return table;
  }
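
  /**
   * Load a table with rows keyed 'aaa' through 'zzz'; each row's key is
   * also written as its only value, under a null qualifier in the given
   * family.  Puts are batched up and flushed once at the end.
   * @param t table to load
   * @param f column family to write to
   * @return count of rows loaded (26^3 = 17576)
   * @throws IOException
   */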
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }

  /**
   * Load a region with rows keyed 'aaa' through 'zzz', the row key doubling
   * as the value, as in {@link #loadTable(HTable, byte[])}.
   * @param r region to load
   * @param f column family to write to
   * @return count of rows loaded
   * @throws IOException
   */
  public int loadRegion(final HRegion r, final byte[] f)
  throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          // Skip the WAL when the region does not have one.
          if (r.getLog() == null) put.setWriteToWAL(false);
          r.put(put);
          rowCount++;
        }
      }
    }
    return rowCount;
  }

  /**
   * @return the number of rows in the given table
   */
  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }
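
  /**
   * Return an MD5 digest over all the row keys of the given table, rendered
   * as a hex string; useful for cheaply comparing the contents of two
   * tables.
   */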
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // MessageDigest#toString does not render the digest bytes; hex-encode
    // the completed digest instead.
    StringBuilder sb = new StringBuilder();
    for (byte b : digest.digest()) {
      sb.append(String.format("%02x", b));
    }
    return sb.toString();
  }

  /**
   * Carve an existing table into multiple regions using the default split
   * keys, "bbb" through "yyy".
   * @param table the table to split
   * @param columnFamily family to ensure is present in the table descriptor
   * @return count of regions created
   * @throws IOException when writing .META. fails
   */
  public int createMultiRegions(HTable table, byte[] columnFamily)
  throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily);
  }
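
  /**
   * Carve an existing table into twenty-five regions by writing region rows
   * directly into .META., using split keys "bbb" through "yyy" plus a first
   * region with an empty start key.
   * @param c configuration to read the catalog table with
   * @param table the table to split
   * @param columnFamily family to ensure is present in the table descriptor
   * @return count of regions created
   * @throws IOException when writing .META. fails
   */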
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily)
  throws IOException {
    byte[][] KEYS = {
      HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
      Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
      Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
      Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
      Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
      Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
    };
    return createMultiRegions(c, table, columnFamily, KEYS);
  }

  /**
   * Like {@link #createMultiRegions(Configuration, HTable, byte[])} but
   * with explicit start keys: writes one region row into .META. per start
   * key (the end key wrapping around to the first start key), then deletes
   * the table's original region rows.
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte [][] startKeys)
  throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if (!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }
    // Note down the table's existing .META. rows now; they belong to the
    // original single region and are deleted below, once the replacement
    // regions are in place.
    List<byte[]> rows = getMetaTableRows(htd.getName());
    // Add the custom regions.
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getTableDescriptor(),
        startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegions: inserted " + hri.toString());
      count++;
    }
    // Remove the rows of the original region.
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    // Flush the region cache so clients pick up the new regions.
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    return count;
  }

  /**
   * Return all rows of the .META. table.
   * @throws IOException when reading the rows fails
   */
  public List<byte[]> getMetaTableRows() throws IOException {
    HTable t = new HTable(this.conf, HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    return rows;
  }

  /**
   * Return the .META. rows that belong to the given user table.
   * @throws IOException when reading the rows fails
   */
  public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
    HTable t = new HTable(this.conf, HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      HRegionInfo info = Writables.getHRegionInfo(
        result.getValue(HConstants.CATALOG_FAMILY,
          HConstants.REGIONINFO_QUALIFIER));
      HTableDescriptor desc = info.getTableDesc();
      if (Bytes.compareTo(desc.getName(), tableName) == 0) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()));
        rows.add(result.getRow());
      }
    }
    s.close();
    return rows;
  }
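
  /**
   * Find the region server carrying the first region of the given user
   * table: look the table's rows up in .META., ask the cluster which server
   * holds the first one, and return that server's reference.
   * @param tableName user table to look up
   * @return region server carrying the table's first region, or null if the
   * table has no rows in .META.
   * @throws IOException
   */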
  public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
  throws IOException {
    List<byte[]> metaRows = getMetaTableRows(tableName);
    if (metaRows == null || metaRows.size() == 0) {
      return null;
    }
    int index = hbaseCluster.getServerWith(metaRows.get(0));
    return hbaseCluster.getRegionServerThreads().get(index).getRegionServer();
  }

  /**
   * Start a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>s.
   * @throws IOException when starting the cluster fails
   */
  public void startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
  }

  /**
   * Start a <code>MiniMRCluster</code>.
   * @param servers number of <code>TaskTracker</code>s to start
   * @throws IOException when starting the cluster fails
   */
  public void startMiniMapReduceCluster(final int servers) throws IOException {
    LOG.info("Starting mini mapreduce cluster...");
    // These are needed for the new and improved Map/Reduce framework.
    Configuration c = getConfiguration();
    System.setProperty("hadoop.log.dir", c.get("hadoop.log.dir"));
    c.set("mapred.output.dir", c.get("hadoop.tmp.dir"));
    mrCluster = new MiniMRCluster(servers,
      FileSystem.get(c).getUri().toString(), 1);
    LOG.info("Mini mapreduce cluster started");
    c.set("mapred.job.tracker",
      mrCluster.createJobConf().get("mapred.job.tracker"));
  }

  /**
   * Stop the previously started <code>MiniMRCluster</code>, if any.
   */
  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
    // Restore configuration to point to local jobtracker.
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }

  /**
   * Switch the logger for the given class to DEBUG level.
   * @param clazz the class to enable debug logging for
   */
  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }

  /**
   * Expire the master's ZooKeeper session.
   * @throws Exception
   */
  public void expireMasterSession() throws Exception {
    HMaster master = hbaseCluster.getMaster();
    expireSession(master.getZooKeeperWrapper());
  }

  /**
   * Expire a region server's ZooKeeper session.
   * @param index which region server
   * @throws Exception
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = hbaseCluster.getRegionServer(index);
    expireSession(rs.getZooKeeperWrapper());
  }
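
  /**
   * Expire the ZooKeeper session of the given node: open a second
   * connection using the node's session id and password, then close it,
   * which expires the original session server-side.  Sleeps long enough
   * afterwards for the expiration to be noticed, then checks the cluster
   * still answers by opening the .META. table.
   * @param nodeZK wrapper holding the session to expire
   * @throws Exception
   */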
  public void expireSession(ZooKeeperWrapper nodeZK) throws Exception {
    ZooKeeperWrapper zkw =
      ZooKeeperWrapper.createInstance(conf,
        ZooKeeperWrapper.class.getName());
    zkw.registerListener(EmptyWatcher.instance);
    String quorumServers = zkw.getQuorumServers();
    int sessionTimeout = 5 * 1000; // 5 seconds
    byte[] password = nodeZK.getSessionPassword();
    long sessionID = nodeZK.getSessionID();
    // Connect on the node's session and close; the close kills the session.
    ZooKeeper zk = new ZooKeeper(quorumServers,
      sessionTimeout, EmptyWatcher.instance, sessionID, password);
    zk.close();
    // Give the expiration time to propagate.
    final long sleep = sessionTimeout * 5L;
    LOG.info("ZK Closed; sleeping=" + sleep);
    Thread.sleep(sleep);
    // Make sure the cluster is still usable after the expiration.
    new HTable(conf, HConstants.META_TABLE_NAME);
  }

  /**
   * Get the HBase cluster.
   * @return hbase cluster
   */
  public MiniHBaseCluster getHBaseCluster() {
    return hbaseCluster;
  }

  /**
   * Return an HBaseAdmin instance, lazily created on first call.
   * @return an HBaseAdmin instance
   * @throws MasterNotRunningException when the master is not running
   */
  public HBaseAdmin getHBaseAdmin() throws MasterNotRunningException {
    if (hbaseAdmin == null) {
      hbaseAdmin = new HBaseAdmin(getConfiguration());
    }
    return hbaseAdmin;
  }

  /**
   * Close the named region.
   * @param regionName the region to close
   * @throws IOException
   */
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  /**
   * Close the named region.
   * @param regionName the region to close
   * @throws IOException
   */
  public void closeRegion(byte[] regionName) throws IOException {
    HBaseAdmin admin = getHBaseAdmin();
    admin.closeRegion(regionName, (Object[]) null);
  }

  /**
   * Close the region containing the given row.
   * @param row the row to find the containing region of
   * @param table the table the row lives in
   * @throws IOException
   */
  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  /**
   * Close the region containing the given row.
   * @param row the row to find the containing region of
   * @param table the table the row lives in
   * @throws IOException
   */
  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.zkCluster = zkCluster;
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return FileSystem.get(conf);
  }

  public void cleanupTestDir() throws IOException {
    getTestDir().getFileSystem(conf).delete(getTestDir(), true);
  }

  /**
   * Wait until the given table is available, failing the calling test if
   * <code>timeoutMillis</code> elapses first.
   * @param table table to wait on
   * @param timeoutMillis how long to wait, in milliseconds
   */
  public void waitTableAvailable(byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    HBaseAdmin admin = new HBaseAdmin(conf);
    long startWait = System.currentTimeMillis();
    while (!admin.isTableAvailable(table)) {
      assertTrue("Timed out waiting for table " + Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(500);
    }
  }

  /**
   * Make sure at least <code>num</code> region servers are running,
   * starting one new region server if the live count is below it.
   * @param num minimum count of region servers that should be running
   * @throws IOException
   */
  public void ensureSomeRegionServersAvailable(final int num)
  throws IOException {
    if (this.getHBaseCluster().getLiveRegionServerThreads().size() < num) {
      LOG.info("Started new server=" +
        this.getHBaseCluster().startRegionServer());
    }
  }
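
  /**
   * Clone the passed configuration <code>c</code>, setting into the clone a
   * username derived from the current user plus the passed suffix.  Use it
   * to obtain distinct FileSystem instances, since the FileSystem cache
   * otherwise hands back the same instance for the same conf.  Only works
   * against a DistributedFileSystem.
   * @param c initial configuration
   * @param differentiatingSuffix suffix to append to the current username
   * @return a new configuration instance with a different user set into it
   * @throws IOException
   */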
  public static Configuration setDifferentUser(final Configuration c,
      final String differentiatingSuffix)
  throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    Preconditions.checkArgument(currentfs instanceof DistributedFileSystem);
    // Clone the conf and set a new user into it; a different user yields a
    // distinct FileSystem instance out of the cache.
    Configuration c2 = new Configuration(c);
    String username = UserGroupInformation.getCurrentUGI().getUserName() +
      differentiatingSuffix;
    UnixUserGroupInformation.saveToConf(c2,
      UnixUserGroupInformation.UGI_PROPERTY_NAME,
      new UnixUserGroupInformation(username, new String[]{"supergroup"}));
    return c2;
  }
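
  /**
   * Set the namenode's soft and hard lease recovery periods.  The namenode
   * is not exposed by MiniDFSCluster, so it is dug out via reflection; you
   * will get an NPE if you call this before starting a mini dfs cluster.
   * @param soft soft lease limit, in milliseconds
   * @param hard hard lease limit, in milliseconds
   */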
  public void setNameNodeNameSystemLeasePeriod(final int soft, final int hard)
  throws SecurityException, NoSuchFieldException, IllegalArgumentException,
      IllegalAccessException {
    // Dig the namenode out of MiniDFSCluster's private "nameNode" field.
    Field field = this.dfsCluster.getClass().getDeclaredField("nameNode");
    field.setAccessible(true);
    NameNode nn = (NameNode)field.get(this.dfsCluster);
    nn.namesystem.leaseManager.setLeasePeriod(soft, hard);
  }
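
  /**
   * Set the <code>maxRecoveryErrorCount</code> field on the passed stream,
   * which must be a <code>DFSClient.DFSOutputStream</code>.  The inner
   * class is package-private, so the field is reached via reflection;
   * failures are logged and swallowed.
   * @param stream a DFSClient.DFSOutputStream
   * @param max the new max recovery error count
   */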
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz: clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }
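
  /**
   * Block until exactly <code>countOfRegions</code> rows in .META. carry a
   * non-empty server column, i.e. until that many regions have been
   * assigned to region servers.  Polls once a second.
   * @param countOfRegions number of assigned regions to wait for
   * @throws IOException
   */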
  public void waitUntilAllRegionsAssigned(final int countOfRegions)
  throws IOException {
    HTable meta = new HTable(getConfiguration(), HConstants.META_TABLE_NAME);
    while (true) {
      int rows = 0;
      Scan scan = new Scan();
      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
      ResultScanner s = meta.getScanner(scan);
      for (Result r = null; (r = s.next()) != null;) {
        byte [] b =
          r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
        if (b == null || b.length <= 0) break;
        rows++;
      }
      s.close();
      // If we get here and all rows have a server column, then all regions
      // have been assigned.
      if (rows == countOfRegions) break;
      LOG.info("Found=" + rows);
      Threads.sleep(1000);
    }
  }
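
  /**
   * Do a small get/scan against a single store.  Needed because Store has
   * no query methods of its own and is read through a store scanner.
   * Returns an empty list if the first row the scanner lands on is not the
   * row asked for.
   */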
  public static List<KeyValue> getFromStoreFile(Store store,
      Get get) throws IOException {
    ReadWriteConsistencyControl.resetThreadReadPoint();
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
      scan.getFamilyMap().get(store.getFamily().getName()));

    List<KeyValue> result = new ArrayList<KeyValue>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // Verify that we are on the row we asked for; the store scanner may
      // have moved past it.
      KeyValue kv = result.get(0);
      if (!Bytes.equals(kv.getRow(), get.getRow())) {
        result.clear();
      }
    }
    return result;
  }

  /**
   * Do a small get/scan against a single store, as in
   * {@link #getFromStoreFile(Store, Get)}, building the Get from a row and
   * a set of columns.
   */
  public static List<KeyValue> getFromStoreFile(Store store,
      byte [] row,
      NavigableSet<byte[]> columns
      ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);
    return getFromStoreFile(store, get);
  }
}