/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat.TableSnapshotRegionSplit;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import com.google.common.collect.Lists;

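/**
 * Tests for the MapReduce-facing {@link TableSnapshotInputFormat}: location ranking,
 * split generation, record reading against a restored snapshot, job configuration, and
 * full MapReduce runs over the row range bbb..yyy returned to the base class helpers.
 */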
@Category(LargeTests.class)
public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBase {

  public static byte[] bbb = Bytes.toBytes("bbb");
  public static byte[] yyy = Bytes.toBytes("yyy");

  @Override
  protected byte[] getStartRow() {
    return bbb;
  }

  @Override
  protected byte[] getEndRow() {
    return yyy;
  }

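  /**
   * getBestLocations() is expected to rank hosts by descending cumulative block weight and,
   * as the assertions below exercise, to drop hosts whose weight falls well below the
   * heaviest host's (a host at roughly 70% of the top weight is omitted, one at 90% is kept).
   */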
  @Test
  public void testGetBestLocations() throws IOException {
    TableSnapshotInputFormatImpl tsif = new TableSnapshotInputFormatImpl();
    Configuration conf = UTIL.getConfiguration();

    HDFSBlocksDistribution blockDistribution = new HDFSBlocksDistribution();
    Assert.assertEquals(Lists.newArrayList(), tsif.getBestLocations(conf, blockDistribution));

    blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 1);
    Assert.assertEquals(Lists.newArrayList("h1"), tsif.getBestLocations(conf, blockDistribution));

    blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 1);
    Assert.assertEquals(Lists.newArrayList("h1"), tsif.getBestLocations(conf, blockDistribution));

    blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 1);
    Assert.assertEquals(Lists.newArrayList("h1"), tsif.getBestLocations(conf, blockDistribution));

    blockDistribution = new HDFSBlocksDistribution();
    blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 10);
    blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 7);
    blockDistribution.addHostsAndBlockWeight(new String[] {"h3"}, 5);
    blockDistribution.addHostsAndBlockWeight(new String[] {"h4"}, 1);
    Assert.assertEquals(Lists.newArrayList("h1"), tsif.getBestLocations(conf, blockDistribution));

    blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 2);
    Assert.assertEquals(Lists.newArrayList("h1", "h2"),
      tsif.getBestLocations(conf, blockDistribution));

    blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 3);
    Assert.assertEquals(Lists.newArrayList("h2", "h1"),
      tsif.getBestLocations(conf, blockDistribution));

    blockDistribution.addHostsAndBlockWeight(new String[] {"h3"}, 6);
    blockDistribution.addHostsAndBlockWeight(new String[] {"h4"}, 9);

    Assert.assertEquals(Lists.newArrayList("h2", "h3", "h4", "h1"),
      tsif.getBestLocations(conf, blockDistribution));
  }

  public static enum TestTableSnapshotCounters {
    VALIDATION_ERROR
  }

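  /** Mapper that re-verifies every row handed to it and emits the row key for the reducer. */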
  public static class TestTableSnapshotMapper
      extends TableMapper<ImmutableBytesWritable, NullWritable> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value,
        Context context) throws IOException, InterruptedException {
      verifyRowFromMap(key, value);
      context.write(key, NullWritable.get());
    }
  }

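  /**
   * Reducer that records every row key it receives and, in cleanup, validates that the
   * whole expected bbb..yyy range was seen.
   */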
  public static class TestTableSnapshotReducer
      extends Reducer<ImmutableBytesWritable, NullWritable, NullWritable, NullWritable> {
    HBaseTestingUtility.SeenRowTracker rowTracker =
        new HBaseTestingUtility.SeenRowTracker(bbb, yyy);

    @Override
    protected void reduce(ImmutableBytesWritable key, Iterable<NullWritable> values,
        Context context) throws IOException, InterruptedException {
      rowTracker.addRow(key.get());
    }

    @Override
    protected void cleanup(Context context) throws IOException,
        InterruptedException {
      rowTracker.validate();
    }
  }

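  /**
   * initTableSnapshotMapperJob should leave the job with the default LruBlockCache size and
   * with SlabCache and BucketCache disabled, presumably so snapshot-reading mapper tasks do
   * not reserve large block-cache memory.
   */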
  @Test
  public void testInitTableSnapshotMapperJobConfig() throws Exception {
    setupCluster();
    TableName tableName = TableName.valueOf("testInitTableSnapshotMapperJobConfig");
    String snapshotName = "foo";

    try {
      createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1);
      Job job = new Job(UTIL.getConfiguration());
      Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName);

      TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
        new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
        NullWritable.class, job, false, tmpTableDir);

      // Verify the block cache settings left on the job configuration.
      Assert.assertEquals(
        "Snapshot job should be configured for default LruBlockCache.",
        HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT,
        job.getConfiguration().getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01);
      Assert.assertEquals(
        "Snapshot job should not use SlabCache.",
        0, job.getConfiguration().getFloat("hbase.offheapcache.percentage", -1), 0.01);
      Assert.assertEquals(
        "Snapshot job should not use BucketCache.",
        0, job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01);
    } finally {
      UTIL.getHBaseAdmin().deleteSnapshot(snapshotName);
      UTIL.deleteTable(tableName);
      tearDownCluster();
    }
  }

  @Override
  public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName,
      String snapshotName, Path tmpTableDir) throws Exception {
    Job job = new Job(UTIL.getConfiguration());
    TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
      new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
      NullWritable.class, job, false, tmpTableDir);
  }

  @Override
  public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
      int numRegions, int expectedNumSplits) throws Exception {
    setupCluster();
    TableName tableName = TableName.valueOf("testWithMockedMapReduce");
    try {
      createTableAndSnapshot(
        util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions);

      Job job = new Job(util.getConfiguration());
      Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName);
      Scan scan = new Scan(getStartRow(), getEndRow());

      TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
        scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
        NullWritable.class, job, false, tmpTableDir);

      verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, getStartRow(), getEndRow());
    } finally {
      util.getHBaseAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
      tearDownCluster();
    }
  }

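  /**
   * Drives the input format directly with a mocked TaskAttemptContext: checks the split
   * count, reads every split through its record reader, and verifies the splits together
   * cover exactly the rows between startRow and stopRow.
   */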
  private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumSplits,
      byte[] startRow, byte[] stopRow)
      throws IOException, InterruptedException {
    TableSnapshotInputFormat tsif = new TableSnapshotInputFormat();
    List<InputSplit> splits = tsif.getSplits(job);

    Assert.assertEquals(expectedNumSplits, splits.size());

    HBaseTestingUtility.SeenRowTracker rowTracker =
        new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);

    for (int i = 0; i < splits.size(); i++) {
      // validate the input split type
      InputSplit split = splits.get(i);
      Assert.assertTrue(split instanceof TableSnapshotRegionSplit);

      // create and initialize the record reader with a mocked task context
      TaskAttemptContext taskAttemptContext = mock(TaskAttemptContext.class);
      when(taskAttemptContext.getConfiguration()).thenReturn(job.getConfiguration());
      RecordReader<ImmutableBytesWritable, Result> rr =
          tsif.createRecordReader(split, taskAttemptContext);
      rr.initialize(split, taskAttemptContext);

      // read all the data back, verifying each row as we go
      while (rr.nextKeyValue()) {
        byte[] row = rr.getCurrentKey().get();
        verifyRowFromMap(rr.getCurrentKey(), rr.getCurrentValue());
        rowTracker.addRow(row);
      }

      rr.close();
    }

    // validate that all expected rows were seen across the splits
    rowTracker.validate();
  }

  @Override
  protected void testWithMapReduceImpl(HBaseTestingUtility util, TableName tableName,
      String snapshotName, Path tableDir, int numRegions, int expectedNumSplits,
      boolean shutdownCluster) throws Exception {
    doTestWithMapReduce(util, tableName, snapshotName, getStartRow(), getEndRow(), tableDir,
      numRegions, expectedNumSplits, shutdownCluster);
  }

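  /**
   * Creates a table and snapshot, optionally shuts the mini cluster down (the job should
   * still read from the restored files under tableDir), and runs a real MapReduce job whose
   * mapper verifies each row and whose single reducer validates full row coverage.
   */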
  public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
      String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
      int expectedNumSplits, boolean shutdownCluster) throws Exception {

    // create the table, load data and take the snapshot that the job will read
    createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);

    if (shutdownCluster) {
      util.shutdownMiniHBaseCluster();
    }

    try {
      // run the MR job: the mapper verifies each row, the reducer checks full row coverage
      Job job = new Job(util.getConfiguration());
      Scan scan = new Scan(startRow, endRow);

      job.setJarByClass(util.getClass());
      TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
        TestTableSnapshotInputFormat.class);

      TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
        scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
        NullWritable.class, job, true, tableDir);

      job.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
      job.setNumReduceTasks(1);
      job.setOutputFormatClass(NullOutputFormat.class);

      Assert.assertTrue(job.waitForCompletion(true));
    } finally {
      if (!shutdownCluster) {
        util.getHBaseAdmin().deleteSnapshot(snapshotName);
        util.deleteTable(tableName);
      }
    }
  }
}