1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.test;
20
21 import com.google.common.base.Joiner;
22
23 import org.apache.commons.cli.CommandLine;
24 import org.apache.commons.logging.Log;
25 import org.apache.commons.logging.LogFactory;
26 import org.apache.hadoop.conf.Configuration;
27 import org.apache.hadoop.conf.Configured;
28 import org.apache.hadoop.fs.Path;
29 import org.apache.hadoop.hbase.HBaseConfiguration;
30 import org.apache.hadoop.hbase.HRegionLocation;
31 import org.apache.hadoop.hbase.IntegrationTestingUtility;
32 import org.apache.hadoop.hbase.ServerName;
33 import org.apache.hadoop.hbase.TableName;
34 import org.apache.hadoop.hbase.HConstants;
35 import org.apache.hadoop.hbase.client.HBaseAdmin;
36 import org.apache.hadoop.hbase.client.HConnection;
37 import org.apache.hadoop.hbase.client.HConnectionManager;
38 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
39 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
40 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
41 import org.apache.hadoop.util.Tool;
42 import org.apache.hadoop.util.ToolRunner;
43
44 import java.util.ArrayList;
45 import java.util.HashMap;
46 import java.util.Set;
47 import java.util.TreeSet;
48 import java.util.UUID;
49
50
51
52
53
54
55
56
/**
 * Integration test that drives the {@link IntegrationTestBigLinkedList} generate step
 * against a source cluster and the verify step against a sink cluster, with a
 * replication peer shipping the generated edits from source to sink in between.
 */
public class IntegrationTestReplication extends IntegrationTestBigLinkedList {
  /** Cluster key ("zkQuorum:zkClientPort:znodeParent") of the cluster data is generated on. */
  protected String sourceClusterIdString;
  /** Cluster key of the cluster the verify step runs against (replication target). */
  protected String sinkClusterIdString;
  /** Number of generate/wait/verify iterations to run. */
  protected int numIterations;
  /** Number of mappers for the generator job. */
  protected int numMappers;
  /** Number of linked-list nodes each mapper generates per iteration. */
  protected long numNodes;
  /** Temporary directory where the MR jobs write keys for the test. */
  protected String outputDir;
  /** Number of reducers for the verify job. */
  protected int numReducers;
  /** Seconds to wait between generate and verify, allowing replication to catch up. */
  protected int generateVerifyGap;
  /** Width of the linked-list chain (see IntegrationTestBigLinkedList). */
  protected Integer width;
  /** How many times the linked list wraps around. */
  protected Integer wrapMultiplier;

  // Command-line option names.
  private final String SOURCE_CLUSTER_OPT = "sourceCluster";
  private final String DEST_CLUSTER_OPT = "destCluster";
  private final String ITERATIONS_OPT = "iterations";
  private final String NUM_MAPPERS_OPT = "numMappers";
  private final String OUTPUT_DIR_OPT = "outputDir";
  private final String NUM_REDUCERS_OPT = "numReducers";

  /**
   * Option controlling the gap (in seconds) between the generate and verify steps.
   */
  private final String GENERATE_VERIFY_GAP_OPT = "generateVerifyGap";

  /**
   * Option controlling the width of the linked-list chain.
   */
  private final String WIDTH_OPT = "width";

  /**
   * Option controlling how many times the linked list wraps around.
   */
  private final String WRAP_MULTIPLIER_OPT = "wrapMultiplier";

  /**
   * Option controlling the total number of nodes each mapper generates per iteration.
   * Must be a multiple of width * wrapMultiplier (checked in processOptions).
   */
  private final String NUM_NODES_OPT = "numNodes";

  // Defaults used when the corresponding option is not supplied.
  private final int DEFAULT_NUM_MAPPERS = 1;
  private final int DEFAULT_NUM_REDUCERS = 1;
  private final int DEFAULT_NUM_ITERATIONS = 1;
  private final int DEFAULT_GENERATE_VERIFY_GAP = 60;
  private final int DEFAULT_WIDTH = 1000000;
  private final int DEFAULT_WRAP_MULTIPLIER = 25;
  // Default is exactly one full wrap: width * wrapMultiplier.
  private final int DEFAULT_NUM_NODES = DEFAULT_WIDTH * DEFAULT_WRAP_MULTIPLIER;
108
109
110
111
112
113 protected class ClusterID {
114 private final Configuration configuration;
115 private HConnection connection = null;
116
117
118
119
120
121
122
123
124 public ClusterID(Configuration base,
125 String key) {
126 configuration = new Configuration(base);
127 String[] parts = key.split(":");
128 configuration.set(HConstants.ZOOKEEPER_QUORUM, parts[0]);
129 configuration.set(HConstants.ZOOKEEPER_CLIENT_PORT, parts[1]);
130 configuration.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parts[2]);
131 }
132
133 @Override
134 public String toString() {
135 return Joiner.on(":").join(configuration.get(HConstants.ZOOKEEPER_QUORUM),
136 configuration.get(HConstants.ZOOKEEPER_CLIENT_PORT),
137 configuration.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
138 }
139
140 public Configuration getConfiguration() {
141 return this.configuration;
142 }
143
144 public HConnection getConnection() throws Exception {
145 if (this.connection == null) {
146 this.connection = HConnectionManager.createConnection(this.configuration);
147 }
148 return this.connection;
149 }
150
151 public void closeConnection() throws Exception {
152 this.connection.close();
153 this.connection = null;
154 }
155
156 public boolean equals(ClusterID other) {
157 return this.toString().equalsIgnoreCase(other.toString());
158 }
159 }
160
161
162
163
164
165
166
  /**
   * The driver loop for the test: sets up tables and replication, then runs
   * generate/wait/verify cycles. Implemented as a {@link Tool} so it can be
   * launched through ToolRunner with the test's configuration.
   */
  protected class VerifyReplicationLoop extends Configured implements Tool {
    private final Log LOG = LogFactory.getLog(VerifyReplicationLoop.class);
    // Cluster the generator writes to.
    protected ClusterID source;
    // Cluster the verifier reads from (replication target).
    protected ClusterID sink;

    // Back-reference to the enclosing test; set by runTestFromCommandLine().
    IntegrationTestBigLinkedList integrationTestBigLinkedList;
181
    /**
     * Prepares both clusters for a fresh run: drops any pre-existing test
     * table on source and sink (rolling the WALs of the hosting region
     * servers first), recreates the schema on the source, and — when source
     * and sink differ — replaces all replication peers with a single
     * "TestPeer" pointing at the sink and enables replication for the table.
     *
     * @throws Exception on any HBase or replication admin failure
     */
    protected void setupTablesAndReplication() throws Exception {
      TableName tableName = getTableName(source.getConfiguration());

      ClusterID[] clusters = {source, sink};

      // Delete any old table left over from a previous run on both clusters.
      for (ClusterID cluster : clusters) {
        HBaseAdmin admin = new HBaseAdmin(cluster.getConnection());
        try {
          if (admin.tableExists(tableName)) {
            if (admin.isTableEnabled(tableName)) {
              admin.disableTable(tableName);
            }

            /*
             * Roll the WAL writer on every region server hosting the table
             * before deleting it — presumably so edits for the old table that
             * are still in active WALs are not shipped by replication after
             * the table is recreated (TODO confirm).
             *
             * NOTE(review): the roll request always goes through
             * source.getConnection(), even on the iteration that handles the
             * sink cluster — verify this is intended and not a copy-paste of
             * the source connection.
             */
            Set<ServerName> regionServers = new TreeSet<ServerName>();
            for (HRegionLocation location: admin.getConnection().locateRegions(tableName)) {
              regionServers.add(location.getServerName());
            }
            for (ServerName server : regionServers) {
              source.getConnection().getAdmin(server).rollWALWriter(null,
                  RollWALWriterRequest.newBuilder().build());
            }

            admin.deleteTable(tableName);
          }
        } finally {
          admin.close();
        }
      }

      // Recreate the test schema on the source cluster.
      Generator generator = new Generator();
      generator.setConf(source.getConfiguration());
      generator.createSchema();

      // Set up replication only when source and sink are distinct clusters.
      if (!source.equals(sink)) {
        ReplicationAdmin replicationAdmin = new ReplicationAdmin(source.getConfiguration());
        // Remove any stale peers so only the one created below remains.
        for (String oldPeer : replicationAdmin.listPeerConfigs().keySet()) {
          replicationAdmin.removePeer(oldPeer);
        }

        // Point a new peer at the sink's cluster key.
        ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
        peerConfig.setClusterKey(sink.toString());

        // Replicate the test table with an empty column-family list
        // (interpreted as all families of the table).
        HashMap<TableName, ArrayList<String>> toReplicate =
            new HashMap<TableName, ArrayList<String>>();
        toReplicate.put(tableName, new ArrayList<String>(0));

        replicationAdmin.addPeer("TestPeer", peerConfig, toReplicate);

        replicationAdmin.enableTableRep(tableName);
        replicationAdmin.close();
      }

      // Release the cached connections; they are re-opened lazily if needed.
      for (ClusterID cluster : clusters) {
        cluster.closeConnection();
      }
    }
252
253 protected void waitForReplication() throws Exception {
254
255
256 Thread.sleep(generateVerifyGap * 1000);
257 }
258
259
260
261
262
263
264
265 protected void runGenerator() throws Exception {
266 Path outputPath = new Path(outputDir);
267 UUID uuid = UUID.randomUUID();
268 Path generatorOutput = new Path(outputPath, uuid.toString());
269
270 Generator generator = new Generator();
271 generator.setConf(source.getConfiguration());
272
273 int retCode = generator.run(numMappers, numNodes, generatorOutput, width, wrapMultiplier);
274 if (retCode > 0) {
275 throw new RuntimeException("Generator failed with return code: " + retCode);
276 }
277 }
278
279
280
281
282
283
284
285
286
287
288 protected void runVerify(long expectedNumNodes) throws Exception {
289 Path outputPath = new Path(outputDir);
290 UUID uuid = UUID.randomUUID();
291 Path iterationOutput = new Path(outputPath, uuid.toString());
292
293 Verify verify = new Verify();
294 verify.setConf(sink.getConfiguration());
295
296 int retCode = verify.run(iterationOutput, numReducers);
297 if (retCode > 0) {
298 throw new RuntimeException("Verify.run failed with return code: " + retCode);
299 }
300
301 if (!verify.verify(expectedNumNodes)) {
302 throw new RuntimeException("Verify.verify failed");
303 }
304
305 LOG.info("Verify finished with success. Total nodes=" + expectedNumNodes);
306 }
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321 @Override
322 public int run(String[] args) throws Exception {
323 source = new ClusterID(getConf(), sourceClusterIdString);
324 sink = new ClusterID(getConf(), sinkClusterIdString);
325
326 setupTablesAndReplication();
327 int expectedNumNodes = 0;
328 for (int i = 0; i < numIterations; i++) {
329 LOG.info("Starting iteration = " + i);
330
331 expectedNumNodes += numMappers * numNodes;
332
333 runGenerator();
334 waitForReplication();
335 runVerify(expectedNumNodes);
336 }
337
338
339
340
341
342 return 0;
343 }
344 }
345
346 @Override
347 protected void addOptions() {
348 super.addOptions();
349 addRequiredOptWithArg("s", SOURCE_CLUSTER_OPT,
350 "Cluster ID of the source cluster (e.g. localhost:2181:/hbase)");
351 addRequiredOptWithArg("r", DEST_CLUSTER_OPT,
352 "Cluster ID of the sink cluster (e.g. localhost:2182:/hbase)");
353 addRequiredOptWithArg("d", OUTPUT_DIR_OPT,
354 "Temporary directory where to write keys for the test");
355
356
357 addOptWithArg("nm", NUM_MAPPERS_OPT,
358 "Number of mappers (default: " + DEFAULT_NUM_MAPPERS + ")");
359 addOptWithArg("nr", NUM_REDUCERS_OPT,
360 "Number of reducers (default: " + DEFAULT_NUM_MAPPERS + ")");
361 addOptWithArg("n", NUM_NODES_OPT,
362 "Number of nodes. This should be a multiple of width * wrapMultiplier." +
363 " (default: " + DEFAULT_NUM_NODES + ")");
364 addOptWithArg("i", ITERATIONS_OPT, "Number of iterations to run (default: " +
365 DEFAULT_NUM_ITERATIONS + ")");
366 addOptWithArg("t", GENERATE_VERIFY_GAP_OPT,
367 "Gap between generate and verify steps in seconds (default: " +
368 DEFAULT_GENERATE_VERIFY_GAP + ")");
369 addOptWithArg("w", WIDTH_OPT,
370 "Width of the linked list chain (default: " + DEFAULT_WIDTH + ")");
371 addOptWithArg("wm", WRAP_MULTIPLIER_OPT, "How many times to wrap around (default: " +
372 DEFAULT_WRAP_MULTIPLIER + ")");
373 }
374
375 @Override
376 protected void processOptions(CommandLine cmd) {
377 processBaseOptions(cmd);
378
379 sourceClusterIdString = cmd.getOptionValue(SOURCE_CLUSTER_OPT);
380 sinkClusterIdString = cmd.getOptionValue(DEST_CLUSTER_OPT);
381 outputDir = cmd.getOptionValue(OUTPUT_DIR_OPT);
382
383
384 numMappers = parseInt(cmd.getOptionValue(NUM_MAPPERS_OPT,
385 Integer.toString(DEFAULT_NUM_MAPPERS)),
386 1, Integer.MAX_VALUE);
387 numReducers = parseInt(cmd.getOptionValue(NUM_REDUCERS_OPT,
388 Integer.toString(DEFAULT_NUM_REDUCERS)),
389 1, Integer.MAX_VALUE);
390 numNodes = parseInt(cmd.getOptionValue(NUM_NODES_OPT, Integer.toString(DEFAULT_NUM_NODES)),
391 1, Integer.MAX_VALUE);
392 generateVerifyGap = parseInt(cmd.getOptionValue(GENERATE_VERIFY_GAP_OPT,
393 Integer.toString(DEFAULT_GENERATE_VERIFY_GAP)),
394 1, Integer.MAX_VALUE);
395 numIterations = parseInt(cmd.getOptionValue(ITERATIONS_OPT,
396 Integer.toString(DEFAULT_NUM_ITERATIONS)),
397 1, Integer.MAX_VALUE);
398 width = parseInt(cmd.getOptionValue(WIDTH_OPT, Integer.toString(DEFAULT_WIDTH)),
399 1, Integer.MAX_VALUE);
400 wrapMultiplier = parseInt(cmd.getOptionValue(WRAP_MULTIPLIER_OPT,
401 Integer.toString(DEFAULT_WRAP_MULTIPLIER)),
402 1, Integer.MAX_VALUE);
403
404 if (numNodes % (width * wrapMultiplier) != 0) {
405 throw new RuntimeException("numNodes must be a multiple of width and wrap multiplier");
406 }
407 }
408
409 @Override
410 public int runTestFromCommandLine() throws Exception {
411 VerifyReplicationLoop tool = new VerifyReplicationLoop();
412 tool.integrationTestBigLinkedList = this;
413 return ToolRunner.run(getConf(), tool, null);
414 }
415
416 public static void main(String[] args) throws Exception {
417 Configuration conf = HBaseConfiguration.create();
418 IntegrationTestingUtility.setUseDistributedCluster(conf);
419 int ret = ToolRunner.run(conf, new IntegrationTestReplication(), args);
420 System.exit(ret);
421 }
422 }