/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ReadWriteConsistencyControl;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.zookeeper.ZooKeeper;

import com.google.common.base.Preconditions;

/**
 * Facility for testing HBase. Added as a tool to abet junit4 testing.
 * Create an instance and keep it around doing HBase testing; it manages
 * mini DFS, ZooKeeper, HBase, and MapReduce clusters (one cluster of each
 * kind at a time) and carries helpers for creating tables, loading them,
 * and poking at cluster internals.
 */
public class HBaseTestingUtility {
  private final static Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
  private final Configuration conf;
  private MiniZooKeeperCluster zkCluster = null;
  private MiniDFSCluster dfsCluster = null;
  private MiniHBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  private File clusterTestBuildDir = null;
  private HBaseAdmin hbaseAdmin = null;

  /** System property key whose value is the directory test data is written
   * to on the local filesystem. */
  public static final String TEST_DIRECTORY_KEY = "test.build.data";

  /** Default test data directory, used when {@link #TEST_DIRECTORY_KEY} is
   * not set. */
  public static final String DEFAULT_TEST_DIRECTORY = "target/build/data";

  /** Default constructor: wraps a fresh {@link HBaseConfiguration}. */
  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  /** @param conf Configuration to wrap; it is also handed to any clusters
   * this utility starts. */
  public HBaseTestingUtility(Configuration conf) {
    this.conf = conf;
  }

  /**
   * @return The configuration this utility wraps. Edits made to it are seen
   * by mini clusters started afterward.
   */
  public Configuration getConfiguration() {
    return this.conf;
  }

  /**
   * @return Where to write test data on the local filesystem: the
   * {@value #TEST_DIRECTORY_KEY} system property if set, else
   * {@value #DEFAULT_TEST_DIRECTORY}.
   */
  public static Path getTestDir() {
    return new Path(System.getProperty(TEST_DIRECTORY_KEY,
      DEFAULT_TEST_DIRECTORY));
  }

  /**
   * @param subdirName the subdirectory wanted
   * @return Path of <code>subdirName</code> under {@link #getTestDir()}
   */
  public static Path getTestDir(final String subdirName) {
    return new Path(getTestDir(), subdirName);
  }

  /**
   * Picks a random, previously unused subdirectory of the test dir for a
   * cluster to home itself in, and registers it for deletion on JVM exit.
   * The directory itself is created later by the mini clusters.
   * @return the chosen build directory
   */
  File setupClusterTestBuildDir() {
    String randomStr = UUID.randomUUID().toString();
    String dirStr = getTestDir(randomStr).toString();
    File dir = new File(dirStr).getAbsoluteFile();
    // Have it cleaned up on exit.
    dir.deleteOnExit();
    return dir;
  }

  /**
   * @throws IOException if a cluster is already running, as indicated by a
   * non-null {@link #clusterTestBuildDir}
   */
  void isRunningCluster() throws IOException {
    if (this.clusterTestBuildDir == null) return;
    throw new IOException("Cluster already running at " +
      this.clusterTestBuildDir);
  }

  /**
   * Starts a MiniDFSCluster homed in a fresh random directory.
   * @param servers how many datanodes to start
   * @return the started cluster
   * @throws Exception if startup fails
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  /**
   * Starts a MiniDFSCluster homed in <code>dir</code>, or in a fresh random
   * directory if <code>dir</code> is null.
   * @param servers how many datanodes to start
   * @param dir directory to home the cluster in, or null
   * @return the started cluster
   * @throws Exception if startup fails
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final File dir)
  throws Exception {
    // Point the test directory properties at our build dir; MiniDFSCluster
    // consults "test.build.data" to decide where to write.
    if (dir == null) this.clusterTestBuildDir = setupClusterTestBuildDir();
    else this.clusterTestBuildDir = dir;
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.toString());
    System.setProperty("test.cache.data", this.clusterTestBuildDir.toString());
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, null, null);
    return this.dfsCluster;
  }

  /**
   * Shuts down the DFS cluster, if one is running.
   * @throws Exception on shutdown failure
   */
  public void shutdownMiniDFSCluster() throws Exception {
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
    }
  }

  /**
   * Call this if you only want a ZooKeeper cluster. It homes itself in a
   * fresh random directory and records its client port in the wrapped
   * configuration.
   * @return the started cluster
   * @throws Exception if startup fails
   * @see #shutdownMiniZKCluster()
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(setupClusterTestBuildDir());
  }

  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
  throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.zkCluster = new MiniZooKeeperCluster();
    int clientPort = this.zkCluster.startup(dir);
    this.conf.set("hbase.zookeeper.property.clientPort",
      Integer.toString(clientPort));
    return this.zkCluster;
  }

  /**
   * Shuts down the ZooKeeper cluster, if one is running.
   * @throws IOException on shutdown failure
   * @see #startMiniZKCluster()
   */
  public void shutdownMiniZKCluster() throws IOException {
    if (this.zkCluster != null) this.zkCluster.shutdown();
  }

  /**
   * Starts up a minicluster of DFS, ZooKeeper, and HBase with one server of
   * each kind.
   * @return the started MiniHBaseCluster
   * @throws Exception if startup fails
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1);
  }

  /**
   * Starts up a minicluster: <code>servers</code> datanodes and the same
   * number of region servers, on top of fresh DFS and ZooKeeper clusters.
   * Does not return until a scan of .META. succeeds.
   * @param servers how many datanodes and region servers to run
   * @return the started MiniHBaseCluster
   * @throws Exception if startup fails
   * @see #shutdownMiniCluster()
   */
  public MiniHBaseCluster startMiniCluster(final int servers)
  throws Exception {
    LOG.info("Starting up minicluster");
    // Fail fast if a cluster is already up.
    isRunningCluster();
    // Make a new random dir to home everything in. Set it as a system
    // property; minidfs reads its home from the system property.
    this.clusterTestBuildDir = setupClusterTestBuildDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.getPath());
    // Bring up the mini dfs cluster first.
    startMiniDFSCluster(servers, this.clusterTestBuildDir);

    // Mangle conf so the fs parameter points at the minidfs we just started.
    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());
    // Do old style too just to be safe.
    this.conf.set("fs.default.name", fs.getUri().toString());
    this.dfsCluster.waitClusterUp();

    // Start up a zk cluster, unless the caller installed one already.
    if (this.zkCluster == null) {
      startMiniZKCluster(this.clusterTestBuildDir);
    }

    // Now do the mini hbase cluster. Set the hbase.rootdir in config.
    Path hbaseRootdir = fs.makeQualified(fs.getHomeDirectory());
    this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the .META.
    HTable t = new HTable(this.conf, HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) continue;
    s.close();
    LOG.info("Minicluster is up");
    return this.hbaseCluster;
  }
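
  /*
   * Typical JUnit4 use of this utility (a sketch; the test class shape and
   * names are illustrative, not part of this API):
   *
   *   private static final HBaseTestingUtility TEST_UTIL =
   *     new HBaseTestingUtility();
   *
   *   @BeforeClass public static void setUpBeforeClass() throws Exception {
   *     TEST_UTIL.startMiniCluster(3);
   *   }
   *
   *   @AfterClass public static void tearDownAfterClass() throws IOException {
   *     TEST_UTIL.shutdownMiniCluster();
   *   }
   */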

  /** @return the current MiniHBaseCluster, or null if none has been started */
  public MiniHBaseCluster getMiniHBaseCluster() {
    return this.hbaseCluster;
  }

  /**
   * Stops the mini HBase, ZooKeeper, and DFS clusters, then deletes the test
   * build directory.
   * @throws IOException on failure deleting the build directory
   * @see #startMiniCluster(int)
   */
  public void shutdownMiniCluster() throws IOException {
    LOG.info("Shutting down minicluster");
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shut down zk.
      this.hbaseCluster.join();
    }
    shutdownMiniZKCluster();
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
    }
    // Clean up our local test build dir.
    if (this.clusterTestBuildDir != null && this.clusterTestBuildDir.exists()) {
      if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
          new Path(this.clusterTestBuildDir.toString()))) {
        LOG.warn("Failed delete of " + this.clusterTestBuildDir.toString());
      }
    }
    // Null the build dir so isRunningCluster does not block a later restart.
    this.clusterTestBuildDir = null;
    LOG.info("Minicluster is down");
  }

  /**
   * Flushes all caches in the mini hbase cluster.
   * @throws IOException on flush failure
   */
  public void flush() throws IOException {
    this.hbaseCluster.flushcache();
  }

  /**
   * Flushes all caches of the given table in the mini hbase cluster.
   * @param tableName table whose regions to flush
   * @throws IOException on flush failure
   */
  public void flush(byte [] tableName) throws IOException {
    this.hbaseCluster.flushcache(tableName);
  }

  /**
   * Creates a table with a single column family.
   * @param tableName name of the table
   * @param family the column family
   * @return an HTable against the new table
   * @throws IOException if creation fails
   */
  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException {
    return createTable(tableName, new byte[][]{family});
  }

  /**
   * Creates a table with multiple column families.
   * @param tableName name of the table
   * @param families the column families
   * @return an HTable against the new table
   * @throws IOException if creation fails
   */
  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for(byte[] family : families) {
      desc.addFamily(new HColumnDescriptor(family));
    }
    (new HBaseAdmin(getConfiguration())).createTable(desc);
    return new HTable(getConfiguration(), tableName);
  }
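
  /*
   * Given an HBaseTestingUtility TEST_UTIL (see the usage sketch above), a
   * test might exercise a fresh table like so; the table, family, and row
   * names here are illustrative only:
   *
   *   HTable t = TEST_UTIL.createTable(Bytes.toBytes("t"), Bytes.toBytes("f"));
   *   Put p = new Put(Bytes.toBytes("row"));
   *   p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"));
   *   t.put(p);
   *   Result r = t.get(new Get(Bytes.toBytes("row")));
   */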

  /**
   * Creates a table with a single column family that keeps
   * <code>numVersions</code> versions.
   * @param tableName name of the table
   * @param family the column family
   * @param numVersions maximum versions to keep
   * @return an HTable against the new table
   * @throws IOException if creation fails
   */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /**
   * Creates a table where every column family keeps
   * <code>numVersions</code> versions.
   * @param tableName name of the table
   * @param families the column families
   * @param numVersions maximum versions to keep
   * @return an HTable against the new table
   * @throws IOException if creation fails
   */
  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions,
          HColumnDescriptor.DEFAULT_COMPRESSION,
          HColumnDescriptor.DEFAULT_IN_MEMORY,
          HColumnDescriptor.DEFAULT_BLOCKCACHE,
          Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL,
          HColumnDescriptor.DEFAULT_BLOOMFILTER,
          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
      desc.addFamily(hcd);
    }
    (new HBaseAdmin(getConfiguration())).createTable(desc);
    return new HTable(getConfiguration(), tableName);
  }

  /**
   * Creates a table where family <code>families[i]</code> keeps
   * <code>numVersions[i]</code> versions; the two arrays must be parallel.
   * @param tableName name of the table
   * @param families the column families
   * @param numVersions maximum versions to keep, per family
   * @return an HTable against the new table
   * @throws IOException if creation fails
   */
  public HTable createTable(byte[] tableName, byte[][] families,
    int[] numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions[i],
          HColumnDescriptor.DEFAULT_COMPRESSION,
          HColumnDescriptor.DEFAULT_IN_MEMORY,
          HColumnDescriptor.DEFAULT_BLOCKCACHE,
          Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL,
          HColumnDescriptor.DEFAULT_BLOOMFILTER,
          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
      desc.addFamily(hcd);
      i++;
    }
    (new HBaseAdmin(getConfiguration())).createTable(desc);
    return new HTable(getConfiguration(), tableName);
  }

  /**
   * Deletes every row in the table but keeps the table and its regions.
   * @param tableName the table to truncate
   * @return an HTable against the now-empty table
   * @throws IOException on any scan or delete failure
   */
  public HTable truncateTable(byte [] tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for(Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan.close();
    return table;
  }

  /**
   * Loads the table with rows keyed 'aaa' through 'zzz', writing each row
   * key as the value of a null qualifier in family <code>f</code>.
   * @param t the table to load
   * @param f the family to write to
   * @return count of rows loaded
   * @throws IOException on any put failure
   */
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          t.put(put);
          rowCount++;
        }
      }
    }
    t.flushCommits();
    return rowCount;
  }
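
  // The three nested loops above cover every three-letter key over 'a'..'z',
  // so loadTable always writes 26 * 26 * 26 = 17,576 rows.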

  /**
   * @param table the table to scan
   * @return count of rows in the table
   * @throws IOException on scan failure
   */
  public int countRows(final HTable table) throws IOException {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (@SuppressWarnings("unused") Result res : results) {
      count++;
    }
    results.close();
    return count;
  }

  /**
   * Returns an MD5 digest over all the row keys in the table, rendered as a
   * hex string. Handy for checking that two tables hold the same rows.
   * @param table the table to checksum
   * @return hex string of the MD5 over the row keys, in scan order
   * @throws Exception if the scan fails or MD5 is unavailable
   */
  public String checksumRows(final HTable table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    // Render the computed digest; MessageDigest#toString() only describes
    // the algorithm, it does not return the checksum.
    return new java.math.BigInteger(1, digest.digest()).toString(16);
  }

  /**
   * Creates many regions for the passed table using a fixed set of split
   * keys, "bbb" through "yyy".
   * @param table the table to split into regions
   * @param columnFamily family to add to the table descriptor if missing
   * @return count of regions created
   * @throws IOException on any meta update failure
   */
  public int createMultiRegions(HTable table, byte[] columnFamily)
  throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily);
  }

  /**
   * As {@link #createMultiRegions(HTable, byte[])}, but against the passed
   * configuration.
   */
  public int createMultiRegions(final Configuration c, final HTable table,
    final byte[] columnFamily)
  throws IOException {
    byte[][] KEYS = {
      HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
      Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
      Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
      Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
      Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
      Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
    };
    return createMultiRegions(c, table, columnFamily, KEYS);
  }

  /**
   * Inserts a region row into .META. for every start key passed, then deletes
   * the table's pre-existing meta rows and clears the client's region cache.
   * @return count of regions inserted
   * @throws IOException on any meta update failure
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte [][] startKeys)
  throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if(!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }
    // Collect the table's existing meta rows now; they get removed after the
    // new regions go in, so there is no window with no entries at all.
    List<byte[]> rows = getMetaTableRows(htd.getName());
    // Add custom regions.
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getTableDescriptor(),
        startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegions: inserted " + hri.toString());
      count++;
    }
    // Now delete the table's original regions from meta.
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    // Flush the client's cache of region locations.
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    return count;
  }
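
  /*
   * Note on the region chain built above: with startKeys sorted, region i
   * spans [startKeys[i], startKeys[(i + 1) % N]). The modulo makes the last
   * region's end key equal startKeys[0], which with the default KEYS is
   * HConstants.EMPTY_BYTE_ARRAY, i.e. an open-ended final region.
   */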

  /**
   * Returns all rows currently in the .META. table.
   * @return list of .META. row keys
   * @throws IOException on scan failure
   */
  public List<byte[]> getMetaTableRows() throws IOException {
    HTable t = new HTable(this.conf, HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    return rows;
  }

  /**
   * Returns the .META. rows that belong to the passed table.
   * @param tableName table whose region rows are wanted
   * @return list of matching .META. row keys
   * @throws IOException on scan failure
   */
  public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
    HTable t = new HTable(this.conf, HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      HRegionInfo info = Writables.getHRegionInfo(
        result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
      HTableDescriptor desc = info.getTableDesc();
      if (Bytes.compareTo(desc.getName(), tableName) == 0) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()));
        rows.add(result.getRow());
      }
    }
    s.close();
    return rows;
  }

  /**
   * Starts a mini mapreduce cluster with two servers.
   * @throws IOException if startup fails
   */
  public void startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
  }

  /**
   * Starts a mini mapreduce cluster and points the wrapped configuration's
   * job tracker at it.
   * @param servers number of tasktrackers to start
   * @throws IOException if startup fails
   */
  public void startMiniMapReduceCluster(final int servers) throws IOException {
    LOG.info("Starting mini mapreduce cluster...");
    // These are needed for the new and improved Map/Reduce framework.
    Configuration c = getConfiguration();
    System.setProperty("hadoop.log.dir", c.get("hadoop.log.dir"));
    c.set("mapred.output.dir", c.get("hadoop.tmp.dir"));
    mrCluster = new MiniMRCluster(servers,
      FileSystem.get(c).getUri().toString(), 1);
    LOG.info("Mini mapreduce cluster started");
    c.set("mapred.job.tracker",
        mrCluster.createJobConf().get("mapred.job.tracker"));
  }

  /**
   * Stops the mini mapreduce cluster, if one is running, and points the
   * configuration back at the local job runner.
   */
  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
    // Restore configuration to point at the local jobtracker.
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }

  /**
   * Switches the passed class's logger to DEBUG level, for whichever of
   * log4j or JDK logging backs commons-logging here.
   * @param clazz class whose logger to adjust
   */
  public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }
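
  /*
   * For example, a test chasing region server behavior might do (sketch):
   *
   *   new HBaseTestingUtility().enableDebug(HRegionServer.class);
   */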

  /**
   * Expires the master's ZooKeeper session.
   * @throws Exception on failure forcing the expiration
   */
  public void expireMasterSession() throws Exception {
    HMaster master = hbaseCluster.getMaster();
    expireSession(master.getZooKeeperWrapper());
  }

  /**
   * Expires a region server's ZooKeeper session.
   * @param index which region server
   * @throws Exception on failure forcing the expiration
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = hbaseCluster.getRegionServer(index);
    expireSession(rs.getZooKeeperWrapper());
  }

  /**
   * Expires the passed node's ZooKeeper session, then sleeps long enough for
   * the expiration to register before touching .META. again.
   * @param nodeZK wrapper holding the session to expire
   * @throws Exception on failure forcing the expiration
   */
  public void expireSession(ZooKeeperWrapper nodeZK) throws Exception {
    ZooKeeperWrapper zkw =
      ZooKeeperWrapper.createInstance(conf,
          ZooKeeperWrapper.class.getName());
    zkw.registerListener(EmptyWatcher.instance);
    String quorumServers = zkw.getQuorumServers();
    int sessionTimeout = 5 * 1000; // 5 seconds
    byte[] password = nodeZK.getSessionPassword();
    long sessionID = nodeZK.getSessionID();

    ZooKeeper zk = new ZooKeeper(quorumServers,
        sessionTimeout, EmptyWatcher.instance, sessionID, password);
    zk.close();
    final long sleep = sessionTimeout * 5L;
    LOG.info("ZK Closed; sleeping=" + sleep);

    Thread.sleep(sleep);
    // Check the cluster came back by opening .META. afresh.
    new HTable(conf, HConstants.META_TABLE_NAME);
  }
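
  /*
   * How expireSession works: ZooKeeper lets a second client attach to an
   * existing session if it presents the same session id and password.
   * Closing that second handle closes the session on the server, so the
   * original client's session is expired out from under it. This is the
   * standard ZooKeeper trick for testing session-expiration handling.
   */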

  /**
   * @return the current mini hbase cluster, or null if none running
   * @see #startMiniCluster(int)
   */
  public MiniHBaseCluster getHBaseCluster() {
    return hbaseCluster;
  }

  /**
   * Returns an HBaseAdmin instance, created lazily and cached thereafter.
   * @return an admin against the wrapped configuration
   * @throws MasterNotRunningException if the master is not running
   */
  public HBaseAdmin getHBaseAdmin() throws MasterNotRunningException {
    if (hbaseAdmin == null) {
      hbaseAdmin = new HBaseAdmin(getConfiguration());
    }
    return hbaseAdmin;
  }

  /**
   * Closes the named region.
   * @param regionName name of the region to close
   * @throws IOException if the close fails
   */
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  /**
   * Closes the named region.
   * @param regionName name of the region to close
   * @throws IOException if the close fails
   */
  public void closeRegion(byte[] regionName) throws IOException {
    HBaseAdmin admin = getHBaseAdmin();
    admin.closeRegion(regionName, (Object[]) null);
  }

  /**
   * Closes the region containing the passed row.
   * @param row row to find the hosting region of
   * @param table table the row lives in
   * @throws IOException if the close fails
   */
  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  /**
   * Closes the region containing the passed row.
   * @param row row to find the hosting region of
   * @param table table the row lives in
   * @throws IOException if the close fails
   */
  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }

  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  /** Installs an externally managed ZooKeeper cluster for this utility to
   * use instead of starting its own. */
  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.zkCluster = zkCluster;
  }

  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public FileSystem getTestFileSystem() throws IOException {
    return FileSystem.get(conf);
  }

  /** Recursively deletes the test directory. */
  public void cleanupTestDir() throws IOException {
    getTestDir().getFileSystem(conf).delete(getTestDir(), true);
  }

  /**
   * Waits until the named table is available, failing the calling test (via
   * junit assert) if <code>timeoutMillis</code> elapses first.
   * @param table table to wait on
   * @param timeoutMillis how long to wait, in milliseconds
   * @throws InterruptedException if interrupted while sleeping
   * @throws IOException if the availability check fails
   */
  public void waitTableAvailable(byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    HBaseAdmin admin = new HBaseAdmin(conf);
    long startWait = System.currentTimeMillis();
    while (!admin.isTableAvailable(table)) {
      assertTrue("Timed out waiting for table " + Bytes.toStringBinary(table),
        System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(500);
    }
  }
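
  /*
   * A common setup pattern (sketch; the table and family names are
   * illustrative):
   *
   *   byte [] table = Bytes.toBytes("t");
   *   TEST_UTIL.createTable(table, Bytes.toBytes("f"));
   *   TEST_UTIL.waitTableAvailable(table, 30 * 1000);
   */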

  /**
   * Makes sure at least the requested number of region servers are running;
   * if not, starts one new region server (call repeatedly if more than one
   * extra is needed).
   * @param num minimum count of region servers that should be running
   * @throws IOException if a new server cannot be started
   */
  public void ensureSomeRegionServersAvailable(final int num)
  throws IOException {
    if (this.getHBaseCluster().getLiveRegionServerThreads().size() < num) {
      // Need at least one more server; start it up.
      LOG.info("Started new server=" +
        this.getHBaseCluster().startRegionServer());
    }
  }

  /**
   * Clones the passed configuration, setting into the clone a username made
   * of the current user plus <code>differentiatingSuffix</code>. Use the
   * returned configuration to get a new FileSystem instance; FileSystem
   * caches instances per user. Only works against DistributedFileSystem.
   * @param c configuration to clone
   * @param differentiatingSuffix suffix to append to the current username
   * @return a clone of <code>c</code> carrying the amended user
   * @throws IOException if the current filesystem or user cannot be fetched
   */
  public static Configuration setDifferentUser(final Configuration c,
    final String differentiatingSuffix)
  throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    Preconditions.checkArgument(currentfs instanceof DistributedFileSystem);
    // Mangle a copy of the passed configuration so a new DFSClient instance
    // gets made rather than a cached one handed back.
    Configuration c2 = new Configuration(c);
    String username = UserGroupInformation.getCurrentUGI().getUserName() +
      differentiatingSuffix;
    UnixUserGroupInformation.saveToConf(c2,
      UnixUserGroupInformation.UGI_PROPERTY_NAME,
      new UnixUserGroupInformation(username, new String[]{"supergroup"}));
    return c2;
  }
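
  /*
   * Rationale: FileSystem.get caches instances keyed in part by the user, so
   * a test that needs a second, independent DFS client against the same mini
   * cluster can do (sketch):
   *
   *   Configuration c2 = HBaseTestingUtility.setDifferentUser(conf, ".hbase");
   *   FileSystem fs2 = FileSystem.get(c2); // distinct client, same cluster
   */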

  /**
   * Sets the NameNode's lease soft and hard limits, getting at the
   * MiniDFSCluster's private <code>nameNode</code> field by reflection
   * (it is not otherwise reachable in 0.20 hdfs).
   * @param soft soft lease limit, in milliseconds
   * @param hard hard lease limit, in milliseconds
   */
  public void setNameNodeNameSystemLeasePeriod(final int soft, final int hard)
  throws SecurityException, NoSuchFieldException, IllegalArgumentException,
      IllegalAccessException {
    // MiniDFSCluster holds its NameNode in a private field:
    //   private NameNode nameNode;
    Field field = this.dfsCluster.getClass().getDeclaredField("nameNode");
    field.setAccessible(true);
    NameNode nn = (NameNode)field.get(this.dfsCluster);
    // Apply the caller's limits rather than hardcoded values.
    nn.namesystem.leaseManager.setLeasePeriod(soft, hard);
  }

  /**
   * Sets <code>maxRecoveryErrorCount</code> on the passed DFS output stream.
   * The field lives on DFSClient's package-private DFSOutputStream inner
   * class, so reflection is used to find and set it; in pre-append 0.20 hdfs
   * it is hard-coded, which can make recovery-heavy tests linger.
   * @param stream a DFSOutputStream instance (other stream types are ignored)
   * @param max value to set the recovery error count to
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz: clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }
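
  /*
   * Failures in the reflection above are logged and swallowed rather than
   * rethrown: on a Hadoop version where the class or field is absent, the
   * stream simply keeps its default recovery count, which only makes the
   * affected test slower.
   */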

  /**
   * Waits until <code>countOfRegions</code> rows in .META. carry a server
   * column, i.e. until that many regions are deployed on region servers.
   * @param countOfRegions how many regions to wait on
   * @throws IOException on meta scan failure
   */
  public void waitUntilAllRegionsAssigned(final int countOfRegions)
  throws IOException {
    HTable meta = new HTable(getConfiguration(), HConstants.META_TABLE_NAME);
    while (true) {
      int rows = 0;
      Scan scan = new Scan();
      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
      ResultScanner s = meta.getScanner(scan);
      for (Result r = null; (r = s.next()) != null;) {
        byte [] b =
          r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
        if (b == null || b.length <= 0) break;
        rows++;
      }
      s.close();
      // If I get to here and all rows have a server, then all have been
      // assigned.
      if (rows == countOfRegions) break;
      LOG.info("Found=" + rows);
      Threads.sleep(1000);
    }
  }

  /**
   * Does a get against the passed store directly, bypassing the region layer.
   * @param store the store to read
   * @param get the get to run
   * @return the KeyValues found, or an empty list
   * @throws IOException on scanner failure
   */
  public static List<KeyValue> getFromStoreFile(Store store,
                                Get get) throws IOException {
    ReadWriteConsistencyControl.resetThreadReadPoint();
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()));

    List<KeyValue> result = new ArrayList<KeyValue>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // Verify that we are on the row we want.
      KeyValue kv = result.get(0);
      if (!Bytes.equals(kv.getRow(), get.getRow())) {
        result.clear();
      }
    }
    return result;
  }
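
  /*
   * The resetThreadReadPoint() call above matters: store scanners consult a
   * thread-local read point under read/write consistency control, and a bare
   * test thread may not have one set; resetting it first gives this helper a
   * sane view of the store.
   */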

  /**
   * Does a get of the passed row and columns against the passed store
   * directly, bypassing the region layer.
   * @param store the store to read
   * @param row the row wanted
   * @param columns the columns wanted
   * @return the KeyValues found, or an empty list
   * @throws IOException on scanner failure
   */
  public static List<KeyValue> getFromStoreFile(Store store,
                                byte [] row,
                                NavigableSet<byte[]> columns
                                ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store, get);
  }
}