package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.junit.Test;
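
/**
 * Tests region merging on a table via {@link HMerge}.
 */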
public class TestMergeTable {
  private static final Log LOG = LogFactory.getLog(TestMergeTable.class);
  private final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private static final byte [] COLUMN_NAME = Bytes.toBytes("contents");
  private static final byte [] VALUE;
  static {
    // Build a value of at least 1KB to store in every cell.
    String partialValue = String.valueOf(System.currentTimeMillis());
    StringBuilder val = new StringBuilder();
    while (val.length() < 1024) {
      val.append(partialValue);
    }
    VALUE = Bytes.toBytes(val.toString());
  }
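
  /**
   * Builds three data regions offline, brings up a mini cluster, disables the
   * table and runs {@link HMerge}, then verifies the region count went down.
   */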
  @Test (timeout=300000) public void testMergeTable() throws Exception {
    // Describe the table under test; it has a single column family.
    HTableDescriptor desc = new HTableDescriptor(Bytes.toBytes("test"));
    desc.addFamily(new HColumnDescriptor(COLUMN_NAME));

    // Lower the maximum region size.
    UTIL.getConfiguration().setLong("hbase.hregion.max.filesize", 64L * 1024L * 1024L);
    // Set the region split limit to zero so regions are never split.
    UTIL.getConfiguration().setInt("hbase.regionserver.regionSplitLimit", 0);
    // Start HDFS only; the data regions are created directly in the
    // filesystem before HBase comes up.
    UTIL.startMiniDFSCluster(1);
    // Create our hbase rootdir in HDFS and make sure it is clean of old data.
    Path rootdir = UTIL.createRootDir();
    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
    if (fs.exists(rootdir)) {
      if (fs.delete(rootdir, true)) {
        LOG.info("Cleaned up existing " + rootdir);
      }
    }
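
    // Create three data regions offline: rows row_00001 through row_70000,
    // row_70001 through row_80000, and row_80001 through row_91000.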
    byte [] row_70001 = Bytes.toBytes("row_70001");
    byte [] row_80001 = Bytes.toBytes("row_80001");

    HRegion [] regions = {
      createRegion(desc, null, row_70001, 1, 70000, rootdir),
      createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),
      createRegion(desc, row_80001, null, 80001, 11000, rootdir)
    };
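
    // Create the root and meta regions and insert the data regions into them.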
    setupROOTAndMeta(rootdir, regions);
    try {
      LOG.info("Starting mini zk cluster");
      UTIL.startMiniZKCluster();
      LOG.info("Starting mini hbase cluster");
      UTIL.startMiniHBaseCluster(1, 1);
      Configuration c = new Configuration(UTIL.getConfiguration());
      HConnection connection = HConnectionManager.getConnection(c);
      CatalogTracker ct = new CatalogTracker(connection);
      ct.start();
      List<HRegionInfo> originalTableRegions =
        MetaReader.getTableRegions(ct, desc.getName());
      LOG.info("originalTableRegions size=" + originalTableRegions.size() +
        "; " + originalTableRegions);
      HBaseAdmin admin = new HBaseAdmin(new Configuration(c));
      admin.disableTable(desc.getName());
      HMerge.merge(c, FileSystem.get(c), desc.getName());
      List<HRegionInfo> postMergeTableRegions =
        MetaReader.getTableRegions(ct, desc.getName());
      LOG.info("postMergeTableRegions size=" + postMergeTableRegions.size() +
        "; " + postMergeTableRegions);
      assertTrue("originalTableRegions=" + originalTableRegions.size() +
        ", postMergeTableRegions=" + postMergeTableRegions.size(),
        postMergeTableRegions.size() < originalTableRegions.size());
    } finally {
      UTIL.shutdownMiniCluster();
    }
  }
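
  /**
   * Create a region offline under rootdir and fill it with nrows rows of
   * ~1KB values, flushing to disk every 10,000 writes.
   */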
  private HRegion createRegion(final HTableDescriptor desc,
      byte [] startKey, byte [] endKey, int firstRow, int nrows, Path rootdir)
  throws IOException {
    HRegionInfo hri = new HRegionInfo(desc, startKey, endKey);
    HRegion region = HRegion.createHRegion(hri, rootdir, UTIL.getConfiguration());
    LOG.info("Created region " + region.getRegionNameAsString());
    for (int i = firstRow; i < firstRow + nrows; i++) {
      Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i)));
      put.add(COLUMN_NAME, null, VALUE);
      region.put(put);
      if (i % 10000 == 0) {
        LOG.info("Flushing write #" + i);
        region.flushcache();
      }
    }
    region.close();
    region.getLog().closeAndDelete();
    return region;
  }
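
  /**
   * Create -ROOT- and .META. offline, add .META. to -ROOT- and each of the
   * passed data regions to .META., then close them and clean up their logs.
   */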
  protected void setupROOTAndMeta(Path rootdir, final HRegion [] regions)
  throws IOException {
    HRegion root =
      HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, rootdir, UTIL.getConfiguration());
    HRegion meta =
      HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, rootdir,
        UTIL.getConfiguration());
    HRegion.addRegionToMETA(root, meta);
    for (HRegion r: regions) {
      HRegion.addRegionToMETA(meta, r);
    }
    meta.close();
    meta.getLog().closeAndDelete();
    root.close();
    root.getLog().closeAndDelete();
  }
}