/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.replication;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
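
/**
 * Exercises master-master (cyclic) replication between two and three mini
 * clusters, verifying that edits travel around each configured topology and
 * are not replicated back to the cluster they originated from.
 */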
@Category(LargeTests.class)
public class TestMasterReplication {

  private static final Log LOG = LogFactory.getLog(TestMasterReplication.class);

  private Configuration baseConfiguration;

  private HBaseTestingUtility[] utilities;
  private Configuration[] configurations;
  private MiniZooKeeperCluster miniZK;

  private static final long SLEEP_TIME = 500;
  private static final int NB_RETRIES = 10;

  private static final TableName tableName = TableName.valueOf("test");
  private static final byte[] famName = Bytes.toBytes("f");
  private static final byte[] row = Bytes.toBytes("row");
  private static final byte[] row1 = Bytes.toBytes("row1");
  private static final byte[] row2 = Bytes.toBytes("row2");
  private static final byte[] row3 = Bytes.toBytes("row3");
  private static final byte[] row4 = Bytes.toBytes("row4");
  private static final byte[] noRepfamName = Bytes.toBytes("norep");

  private static final byte[] count = Bytes.toBytes("count");
  private static final byte[] put = Bytes.toBytes("put");
  private static final byte[] delete = Bytes.toBytes("delete");

  private HTableDescriptor table;
  @Before
  public void setUp() throws Exception {
    baseConfiguration = HBaseConfiguration.create();

    // smaller WAL blocks, replication batches, and retry sleeps so that
    // replication does more, smaller units of work during the test
    baseConfiguration.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20);
    baseConfiguration.setInt("replication.source.size.capacity", 1024);
    baseConfiguration.setLong("replication.source.sleepforretries", 100);
    baseConfiguration.setInt("hbase.regionserver.maxlogs", 10);
    baseConfiguration.setLong("hbase.master.logcleaner.ttl", 10);
    baseConfiguration.setBoolean(HConstants.REPLICATION_ENABLE_KEY,
        HConstants.REPLICATION_ENABLE_DEFAULT);
    baseConfiguration.setBoolean("dfs.support.append", true);
    baseConfiguration.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    baseConfiguration.setStrings(
        CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
        CoprocessorCounter.class.getName());

    table = new HTableDescriptor(tableName);
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    fam = new HColumnDescriptor(noRepfamName);
    table.addFamily(fam);
  }
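
  /**
   * Tests the cyclic replication setup 0 -> 1 -> 0: adds and deletes a row on
   * each cluster, verifies the mutation shows up on the other cluster, and
   * uses the counting coprocessor to check that edits are not replicated back
   * to the cluster they originated from.
   */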
  @Test(timeout = 300000)
  public void testCyclicReplication1() throws Exception {
    LOG.info("testCyclicReplication1");
    int numClusters = 2;
    Table[] htables = null;
    try {
      startMiniClusters(numClusters);
      createTableOnClusters(table);

      htables = getHTablesOnClusters(tableName);

      // set up the replication cycle 0 -> 1 -> 0
      addPeer("1", 0, 1);
      addPeer("1", 1, 0);

      int[] expectedCounts = new int[] { 2, 2 };

      // add a row on each cluster and make sure it is replicated to the other
      // one; each cluster should then have seen exactly two puts (one local,
      // one replicated) and nothing more
      putAndWait(row, famName, htables[0], htables[1]);
      putAndWait(row1, famName, htables[1], htables[0]);
      validateCounts(htables, put, expectedCounts);

      deleteAndWait(row, htables[0], htables[1]);
      deleteAndWait(row1, htables[1], htables[0]);
      validateCounts(htables, delete, expectedCounts);
    } finally {
      close(htables);
      shutDownMiniClusters();
    }
  }
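
  /**
   * Tests the cyclic replication chain 0 -> 1 -> 2 -> 0: adds and deletes a
   * row on each cluster and verifies it travels all the way around the ring.
   * Also covers the HBASE-9158 grouping scenario, where a cluster must ship
   * both its own edits and edits it received via replication from another
   * cluster.
   */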
  @Test(timeout = 300000)
  public void testCyclicReplication2() throws Exception {
    LOG.info("testCyclicReplication2");
    int numClusters = 3;
    Table[] htables = null;
    try {
      startMiniClusters(numClusters);
      createTableOnClusters(table);

      // set up the replication ring 0 -> 1 -> 2 -> 0
      addPeer("1", 0, 1);
      addPeer("1", 1, 2);
      addPeer("1", 2, 0);

      htables = getHTablesOnClusters(tableName);

      // add a row on each cluster and wait until it has gone all the way
      // around the ring to the upstream cluster
      putAndWait(row, famName, htables[0], htables[2]);
      putAndWait(row1, famName, htables[1], htables[0]);
      putAndWait(row2, famName, htables[2], htables[1]);

      deleteAndWait(row, htables[0], htables[2]);
      deleteAndWait(row1, htables[1], htables[0]);
      deleteAndWait(row2, htables[2], htables[1]);

      int[] expectedCounts = new int[] { 3, 3, 3 };
      validateCounts(htables, put, expectedCounts);
      validateCounts(htables, delete, expectedCounts);

      // test the HBASE-9158 scenario
      disablePeer("1", 2);

      // cluster 1 now holds an edit that originated on cluster 0
      putAndWait(row3, famName, htables[0], htables[1]);
      // add a local edit on cluster 1
      htables[1].put(new Put(row4).add(famName, row4, row4));
      // re-enable replication from cluster 2 to cluster 0
      enablePeer("1", 2);

      // the local edit for row4 must reach cluster 0 via 1 -> 2 -> 0, so wait
      // for the row to appear there; without HBASE-9158 the edit would have
      // been marked with cluster 0's id and never replicated to cluster 0
      wait(row4, htables[0], false);
    } finally {
      close(htables);
      shutDownMiniClusters();
    }
  }
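
  /**
   * Tests the cyclic replication setup 0 -> 1 <-> 2, where clusters 1 and 2
   * replicate to each other while cluster 0 only ships to cluster 1. Edits
   * from cluster 0 reach clusters 1 and 2, but nothing flows back to cluster
   * 0, which the expected counts of { 1, 3, 3 } reflect.
   */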
  @Test(timeout = 300000)
  public void testCyclicReplication3() throws Exception {
    LOG.info("testCyclicReplication3");
    int numClusters = 3;
    Table[] htables = null;
    try {
      startMiniClusters(numClusters);
      createTableOnClusters(table);

      // set up the replication topology 0 -> 1 <-> 2
      addPeer("1", 0, 1);
      addPeer("1", 1, 2);
      addPeer("1", 2, 1);

      htables = getHTablesOnClusters(tableName);

      // add a row on each cluster and wait for it to reach the farthest
      // cluster it should be replicated to
      putAndWait(row, famName, htables[0], htables[2]);
      putAndWait(row1, famName, htables[1], htables[2]);
      putAndWait(row2, famName, htables[2], htables[1]);

      deleteAndWait(row, htables[0], htables[2]);
      deleteAndWait(row1, htables[1], htables[2]);
      deleteAndWait(row2, htables[2], htables[1]);

      // cluster 0 receives nothing back; clusters 1 and 2 see all three rows
      int[] expectedCounts = new int[] { 1, 3, 3 };
      validateCounts(htables, put, expectedCounts);
      validateCounts(htables, delete, expectedCounts);
    } finally {
      close(htables);
      shutDownMiniClusters();
    }
  }

  @After
  public void tearDown() throws IOException {
    configurations = null;
    utilities = null;
  }

  @SuppressWarnings("resource")
  private void startMiniClusters(int numClusters) throws Exception {
    Random random = new Random();
    utilities = new HBaseTestingUtility[numClusters];
    configurations = new Configuration[numClusters];
    for (int i = 0; i < numClusters; i++) {
      Configuration conf = new Configuration(baseConfiguration);
      // all clusters share one mini ZK cluster but use distinct base znodes
      conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/" + i + random.nextInt());
      HBaseTestingUtility utility = new HBaseTestingUtility(conf);
      if (i == 0) {
        utility.startMiniZKCluster();
        miniZK = utility.getZkCluster();
      } else {
        utility.setZkCluster(miniZK);
      }
      utility.startMiniCluster();
      utilities[i] = utility;
      configurations[i] = conf;
      new ZooKeeperWatcher(conf, "cluster" + i, null, true);
    }
  }

  private void shutDownMiniClusters() throws Exception {
    int numClusters = utilities.length;
    for (int i = numClusters - 1; i >= 0; i--) {
      if (utilities[i] != null) {
        utilities[i].shutdownMiniCluster();
      }
    }
    miniZK.shutdown();
  }

  private void createTableOnClusters(HTableDescriptor table) throws Exception {
    int numClusters = configurations.length;
    for (int i = 0; i < numClusters; i++) {
      Admin hbaseAdmin = null;
      try {
        hbaseAdmin = new HBaseAdmin(configurations[i]);
        hbaseAdmin.createTable(table);
      } finally {
        close(hbaseAdmin);
      }
    }
  }

  private void addPeer(String id, int masterClusterNumber,
      int slaveClusterNumber) throws Exception {
    ReplicationAdmin replicationAdmin = null;
    try {
      replicationAdmin = new ReplicationAdmin(
          configurations[masterClusterNumber]);
      replicationAdmin.addPeer(id,
          utilities[slaveClusterNumber].getClusterKey());
    } finally {
      close(replicationAdmin);
    }
  }

  private void disablePeer(String id, int masterClusterNumber) throws Exception {
    ReplicationAdmin replicationAdmin = null;
    try {
      replicationAdmin = new ReplicationAdmin(
          configurations[masterClusterNumber]);
      replicationAdmin.disablePeer(id);
    } finally {
      close(replicationAdmin);
    }
  }

  private void enablePeer(String id, int masterClusterNumber) throws Exception {
    ReplicationAdmin replicationAdmin = null;
    try {
      replicationAdmin = new ReplicationAdmin(
          configurations[masterClusterNumber]);
      replicationAdmin.enablePeer(id);
    } finally {
      close(replicationAdmin);
    }
  }

  private void close(Closeable... closeables) {
    if (closeables == null) {
      return;
    }
    // close each one individually so a failure does not skip the rest
    for (Closeable closeable : closeables) {
      try {
        if (closeable != null) {
          closeable.close();
        }
      } catch (Exception e) {
        LOG.warn("Exception occurred while closing the object:", e);
      }
    }
  }

  @SuppressWarnings("resource")
  private Table[] getHTablesOnClusters(TableName tableName) throws Exception {
    int numClusters = utilities.length;
    Table[] htables = new Table[numClusters];
    for (int i = 0; i < numClusters; i++) {
      Table htable = new HTable(configurations[i], tableName);
      htable.setWriteBufferSize(1024);
      htables[i] = htable;
    }
    return htables;
  }

  private void validateCounts(Table[] htables, byte[] type,
      int[] expectedCounts) throws IOException {
    for (int i = 0; i < htables.length; i++) {
      assertEquals("Wrong " + Bytes.toString(type) + " count on cluster " + i,
          expectedCounts[i], getCount(htables[i], type));
    }
  }

  private int getCount(Table t, byte[] type) throws IOException {
    // the "count" attribute makes CoprocessorCounter short-circuit this Get
    // and return its put/delete counters instead of real table data
    Get test = new Get(row);
    test.setAttribute("count", new byte[] {});
    Result res = t.get(test);
    return Bytes.toInt(res.getValue(count, type));
  }

  private void deleteAndWait(byte[] row, Table source, Table target)
      throws Exception {
    Delete del = new Delete(row);
    source.delete(del);
    wait(row, target, true);
  }

  private void putAndWait(byte[] row, byte[] fam, Table source, Table target)
      throws Exception {
    Put put = new Put(row);
    put.add(fam, row, row);
    source.put(put);
    wait(row, target, false);
  }

  /**
   * Polls the target table until the row appears (isDeleted == false) or
   * disappears (isDeleted == true), failing after NB_RETRIES attempts.
   */
  private void wait(byte[] row, Table target, boolean isDeleted)
      throws Exception {
    Get get = new Get(row);
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too long for replication. Row:" + Bytes.toString(row)
            + ". IsDeleteReplication:" + isDeleted);
      }
      Result res = target.get(get);
      boolean sleep = isDeleted ? res.size() > 0 : res.size() == 0;
      if (sleep) {
        LOG.info("Still waiting for replication. Row:"
            + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted);
        Thread.sleep(SLEEP_TIME);
      } else {
        if (!isDeleted) {
          assertArrayEquals(row, res.value());
        }
        LOG.info("Obtained row:"
            + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted);
        break;
      }
    }
  }
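
  /**
   * Counts puts and deletes with a region coprocessor. Replicated KeyValues
   * arrive with their original timestamps, so they cannot be told apart by
   * reading the table; counting the operations as they are applied is what
   * lets the tests detect edits replicated back to their cluster of origin.
   */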
  public static class CoprocessorCounter extends BaseRegionObserver {
    private int nCount = 0;
    private int nDelete = 0;

    @Override
    public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c, final Put put,
        final WALEdit edit, final Durability durability) throws IOException {
      nCount++;
    }

    @Override
    public void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
        final Delete delete, final WALEdit edit, final Durability durability) throws IOException {
      nDelete++;
    }

    @Override
    public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
        final Get get, final List<Cell> result) throws IOException {
      if (get.getAttribute("count") != null) {
        result.clear();
        // cells must be added in KeyValue sort order: "delete" sorts before "put"
        result.add(new KeyValue(count, count, delete, Bytes.toBytes(nDelete)));
        result.add(new KeyValue(count, count, put, Bytes.toBytes(nCount)));
        c.bypass();
      }
    }
  }

}