/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapred;

import static org.mockito.Mockito.mock;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatTestBase;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.lib.NullOutputFormat;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import java.io.IOException;
import java.util.Iterator;

@Category(LargeTests.class)
public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBase {

  private static final byte[] aaa = Bytes.toBytes("aaa");
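  // '{' is the byte immediately after 'z', so "zz{" works as an exclusive stop row that
  // still covers every row up to and including "zzz"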
  private static final byte[] after_zzz = Bytes.toBytes("zz{");
  private static final String COLUMNS =
    Bytes.toString(FAMILIES[0]) + " " + Bytes.toString(FAMILIES[1]);

  @Override
  protected byte[] getStartRow() {
    return aaa;
  }

  @Override
  protected byte[] getEndRow() {
    return after_zzz;
  }

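  // Pass-through mapper: verifies each row against the expected test data and emits the
  // row key with a NullWritable value.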
  static class TestTableSnapshotMapper extends MapReduceBase
      implements TableMap<ImmutableBytesWritable, NullWritable> {
    @Override
    public void map(ImmutableBytesWritable key, Result value,
        OutputCollector<ImmutableBytesWritable, NullWritable> collector, Reporter reporter)
        throws IOException {
      verifyRowFromMap(key, value);
      collector.collect(key, NullWritable.get());
    }
  }

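  // Reducer that records every row key it receives and validates on close() that the whole
  // expected key range was seen.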
  public static class TestTableSnapshotReducer extends MapReduceBase
      implements Reducer<ImmutableBytesWritable, NullWritable, NullWritable, NullWritable> {
    HBaseTestingUtility.SeenRowTracker rowTracker =
        new HBaseTestingUtility.SeenRowTracker(aaa, after_zzz);

    @Override
    public void reduce(ImmutableBytesWritable key, Iterator<NullWritable> values,
        OutputCollector<NullWritable, NullWritable> collector, Reporter reporter)
        throws IOException {
      rowTracker.addRow(key.get());
    }

    @Override
    public void close() {
      rowTracker.validate();
    }
  }

  @Test
  public void testInitTableSnapshotMapperJobConfig() throws Exception {
    setupCluster();
    TableName tableName = TableName.valueOf("testInitTableSnapshotMapperJobConfig");
    String snapshotName = "foo";

    try {
      createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1);
      JobConf job = new JobConf(UTIL.getConfiguration());
      Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName);

      TableMapReduceUtil.initTableSnapshotMapJob(snapshotName,
        COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
        NullWritable.class, job, false, tmpTableDir);
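      // the snapshot job should keep the default LruBlockCache sizing and leave
      // SlabCache and BucketCache disabled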
      Assert.assertEquals(
        "Snapshot job should be configured for default LruBlockCache.",
        HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT,
        job.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01);
      Assert.assertEquals(
        "Snapshot job should not use SlabCache.",
        0, job.getFloat("hbase.offheapcache.percentage", -1), 0.01);
      Assert.assertEquals(
        "Snapshot job should not use BucketCache.",
        0, job.getFloat("hbase.bucketcache.size", -1), 0.01);
    } finally {
      UTIL.getHBaseAdmin().deleteSnapshot(snapshotName);
      UTIL.deleteTable(tableName);
      tearDownCluster();
    }
  }
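  // Multi-region variants of the base-class tests, run here against the mapred
  // (org.apache.hadoop.mapred) TableSnapshotInputFormat.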
  @Test
  @Override
  public void testWithMockedMapReduceMultiRegion() throws Exception {
    testWithMockedMapReduce(UTIL, "testWithMockedMapReduceMultiRegion", 10, 10);
  }

  @Test
  @Override
  public void testWithMapReduceMultiRegion() throws Exception {
    testWithMapReduce(UTIL, "testWithMapReduceMultiRegion", 10, 10, false);
  }

  @Test
  @Override
  public void testWithMapReduceAndOfflineHBaseMultiRegion() throws Exception {
    testWithMapReduce(UTIL, "testWithMapReduceAndOfflineHBaseMultiRegion", 10, 10, true);
  }

  @Override
  public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName,
      String snapshotName, Path tmpTableDir) throws Exception {
    JobConf job = new JobConf(UTIL.getConfiguration());
    TableMapReduceUtil.initTableSnapshotMapJob(snapshotName,
      COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
      NullWritable.class, job, false, tmpTableDir);
  }

  @Override
  protected void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
      int numRegions, int expectedNumSplits) throws Exception {
    setupCluster();
    TableName tableName = TableName.valueOf("testWithMockedMapReduce");
    try {
      createTableAndSnapshot(
        util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions);

      JobConf job = new JobConf(util.getConfiguration());
      Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName);

      TableMapReduceUtil.initTableSnapshotMapJob(snapshotName,
        COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
        NullWritable.class, job, false, tmpTableDir);

      verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, getStartRow(), getEndRow());
    } finally {
      util.getHBaseAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
      tearDownCluster();
    }
  }

  private void verifyWithMockedMapReduce(JobConf job, int numRegions, int expectedNumSplits,
      byte[] startRow, byte[] stopRow) throws IOException, InterruptedException {
    TableSnapshotInputFormat tsif = new TableSnapshotInputFormat();
    InputSplit[] splits = tsif.getSplits(job, 0);

    Assert.assertEquals(expectedNumSplits, splits.length);

    HBaseTestingUtility.SeenRowTracker rowTracker =
        new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);

    for (int i = 0; i < splits.length; i++) {
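      // each split should be the mapred wrapper around a snapshot region split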
      InputSplit split = splits[i];
      Assert.assertTrue(split instanceof TableSnapshotInputFormat.TableSnapshotRegionSplit);

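      // mock the MapReduce plumbing and read the split back directly through the record reader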
      OutputCollector collector = mock(OutputCollector.class);
      Reporter reporter = mock(Reporter.class);
      RecordReader<ImmutableBytesWritable, Result> rr = tsif.getRecordReader(split, job, reporter);

      ImmutableBytesWritable key = rr.createKey();
      Result value = rr.createValue();
      while (rr.next(key, value)) {
        verifyRowFromMap(key, value);
        rowTracker.addRow(key.copyBytes());
      }

      rr.close();
    }

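    // validate that all expected rows were seen across the splits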
    rowTracker.validate();
  }

  @Override
  protected void testWithMapReduceImpl(HBaseTestingUtility util, TableName tableName,
      String snapshotName, Path tableDir, int numRegions, int expectedNumSplits,
      boolean shutdownCluster) throws Exception {
    doTestWithMapReduce(util, tableName, snapshotName, getStartRow(), getEndRow(), tableDir,
      numRegions, expectedNumSplits, shutdownCluster);
  }

  public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
      String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
      int expectedNumSplits, boolean shutdownCluster) throws Exception {
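    // create the table, load it with test rows and take the snapshot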
    createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);

    if (shutdownCluster) {
      util.shutdownMiniHBaseCluster();
    }

    try {
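      // configure a real MapReduce job that scans the restored snapshot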
      JobConf jobConf = new JobConf(util.getConfiguration());

      jobConf.setJarByClass(util.getClass());
      org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(jobConf,
        TestTableSnapshotInputFormat.class);

      TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS,
        TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
        NullWritable.class, jobConf, true, tableDir);

      jobConf.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
      jobConf.setNumReduceTasks(1);
      jobConf.setOutputFormat(NullOutputFormat.class);

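      // TestTableSnapshotReducer.close() performs the row-coverage validation, so a
      // successful job means every expected row was read back from the snapshot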
      RunningJob job = JobClient.runJob(jobConf);
      Assert.assertTrue(job.isSuccessful());
    } finally {
      if (!shutdownCluster) {
        util.getHBaseAdmin().deleteSnapshot(snapshotName);
        util.deleteTable(tableName);
      }
    }
  }
}