/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.replication;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;

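/**
 * Tests replication across multiple clusters: a three-cluster replication
 * cycle (1 -> 2 -> 3 -> 1) and simple master-master put/delete replication
 * between two clusters. All clusters share a single mini ZooKeeper ensemble,
 * each under its own znode parent.
 */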
@Category(LargeTests.class)
public class TestMasterReplication {

  private static final Log LOG = LogFactory.getLog(TestMasterReplication.class);

  private Configuration conf1;
  private Configuration conf2;
  private Configuration conf3;

  private HBaseTestingUtility utility1;
  private HBaseTestingUtility utility2;
  private HBaseTestingUtility utility3;

  private MiniZooKeeperCluster miniZK;

  private static final long SLEEP_TIME = 500;
  private static final int NB_RETRIES = 100;

  private static final byte[] tableName = Bytes.toBytes("test");
  private static final byte[] famName = Bytes.toBytes("f");
  private static final byte[] row = Bytes.toBytes("row");
  private static final byte[] row1 = Bytes.toBytes("row1");
  private static final byte[] row2 = Bytes.toBytes("row2");
  private static final byte[] row3 = Bytes.toBytes("row3");
  private static final byte[] row4 = Bytes.toBytes("row4");
  private static final byte[] noRepfamName = Bytes.toBytes("norep");

  // Row/family and qualifiers of the synthetic counter cells served by
  // CoprocessorCounter.
  private static final byte[] count = Bytes.toBytes("count");
  private static final byte[] put = Bytes.toBytes("put");
  private static final byte[] delete = Bytes.toBytes("delete");

  private HTableDescriptor table;

  @Before
  public void setUp() throws Exception {
    conf1 = HBaseConfiguration.create();
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");

    // Small WAL block size and replication source capacity so shipping kicks
    // in on little data, plus aggressive log cleaning to exercise archiving.
    conf1.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20);
    conf1.setInt("replication.source.size.capacity", 1024);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
    conf1.setBoolean("dfs.support.append", true);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
        CoprocessorCounter.class.getName());

    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    miniZK = utility1.getZkCluster();

    // All three clusters share the same mini ZooKeeper ensemble, each under
    // its own znode parent set above.
    utility1.setZkCluster(miniZK);
    new ZooKeeperWatcher(conf1, "cluster1", null, true);

    conf2 = new Configuration(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");

    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    new ZooKeeperWatcher(conf2, "cluster2", null, true);

    conf3 = new Configuration(conf1);
    conf3.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");

    utility3 = new HBaseTestingUtility(conf3);
    utility3.setZkCluster(miniZK);
    new ZooKeeperWatcher(conf3, "cluster3", null, true);

    // Test table: one globally-scoped (replicated) family and one
    // non-replicated family.
    table = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    fam = new HColumnDescriptor(noRepfamName);
    table.addFamily(fam);
  }

  @After
  public void tearDown() throws IOException {
    miniZK.shutdown();
  }

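  /**
   * Sets up a replication cycle (cluster 1 -> 2 -> 3 -> 1), verifies that
   * puts and deletes travel the whole cycle without being replicated back to
   * their cluster of origin, and checks that edits queued while a peer is
   * disabled are shipped once the peer is re-enabled.
   */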
  @Test(timeout = 300000)
  public void testCyclicReplication() throws Exception {
    LOG.info("testCyclicReplication");
    utility1.startMiniCluster();
    utility2.startMiniCluster();
    utility3.startMiniCluster();
    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
    ReplicationAdmin admin2 = new ReplicationAdmin(conf2);
    ReplicationAdmin admin3 = new ReplicationAdmin(conf3);

    new HBaseAdmin(conf1).createTable(table);
    new HBaseAdmin(conf2).createTable(table);
    new HBaseAdmin(conf3).createTable(table);
    HTable htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    HTable htable2 = new HTable(conf2, tableName);
    htable2.setWriteBufferSize(1024);
    HTable htable3 = new HTable(conf3, tableName);
    htable3.setWriteBufferSize(1024);

    // Wire the clusters into a cycle: 1 -> 2 -> 3 -> 1.
    admin1.addPeer("1", utility2.getClusterKey());
    admin2.addPeer("1", utility3.getClusterKey());
    admin3.addPeer("1", utility1.getClusterKey());

    // Put on cluster 1 and wait for it to reach cluster 3 at the end of the
    // cycle; it should also have passed through cluster 2 on the way.
    putAndWait(row, famName, htable1, htable3);
    check(row, famName, htable2);

    // Same for edits originating on clusters 2 and 3.
    putAndWait(row1, famName, htable2, htable1);
    check(row1, famName, htable3);
    putAndWait(row2, famName, htable3, htable2);
    check(row2, famName, htable1);

    // Delete each row on its cluster of origin and wait for the delete to
    // reach the end of the cycle.
    deleteAndWait(row, htable1, htable3);
    deleteAndWait(row1, htable2, htable1);
    deleteAndWait(row2, htable3, htable2);

    // Each cluster should have seen exactly 3 puts and 3 deletes (one local
    // plus two replicated); anything more means edits looped back to their
    // originating cluster.
    assertEquals("Puts were replicated back ", 3, getCount(htable1, put));
    assertEquals("Puts were replicated back ", 3, getCount(htable2, put));
    assertEquals("Puts were replicated back ", 3, getCount(htable3, put));
    assertEquals("Deletes were replicated back ", 3, getCount(htable1, delete));
    assertEquals("Deletes were replicated back ", 3, getCount(htable2, delete));
    assertEquals("Deletes were replicated back ", 3, getCount(htable3, delete));

    // Disable cluster 2's peer (2 -> 3); a put on cluster 1 should still
    // reach cluster 2.
    admin2.disablePeer("1");

    putAndWait(row3, famName, htable1, htable2);

    // While the peer is disabled, write directly to cluster 2; the edit
    // should be queued rather than shipped.
    Put put = new Put(row4);
    put.add(famName, row4, row4);
    htable2.put(put);

    admin2.enablePeer("1");

    // Once the peer is re-enabled, the queued edit should travel
    // 2 -> 3 -> 1 and show up on cluster 1.
    wait(row4, htable1);

    utility3.shutdownMiniCluster();
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
  }

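  /**
   * Master-master replication between two clusters: verifies that puts and
   * deletes are replicated in both directions and that nothing is replicated
   * back to the cluster it came from.
   */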
  @Test(timeout = 300000)
  public void testSimplePutDelete() throws Exception {
    LOG.info("testSimplePutDelete");
    utility1.startMiniCluster();
    utility2.startMiniCluster();

    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
    ReplicationAdmin admin2 = new ReplicationAdmin(conf2);

    new HBaseAdmin(conf1).createTable(table);
    new HBaseAdmin(conf2).createTable(table);
    HTable htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    HTable htable2 = new HTable(conf2, tableName);
    htable2.setWriteBufferSize(1024);

    // Each cluster replicates to the other (master-master).
    admin1.addPeer("1", utility2.getClusterKey());
    admin2.addPeer("1", utility1.getClusterKey());

    // Put a row on each cluster and wait for it to show up on the other.
    putAndWait(row, famName, htable1, htable2);
    putAndWait(row1, famName, htable2, htable1);

    // Each cluster should have seen exactly 2 puts (one local, one
    // replicated); more would mean a put was replicated back to its origin.
    assertEquals("Puts were replicated back ", 2, getCount(htable1, put));

    deleteAndWait(row, htable1, htable2);

    assertEquals("Puts were replicated back ", 2, getCount(htable2, put));

    deleteAndWait(row1, htable2, htable1);

    assertEquals("Deletes were replicated back ", 2, getCount(htable1, delete));
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
  }

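  /**
   * Reads the put/delete counters maintained by {@link CoprocessorCounter}
   * by issuing a Get tagged with the "count" attribute.
   */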
  private int getCount(HTable t, byte[] type) throws IOException {
    Get test = new Get(row);
    test.setAttribute("count", new byte[] {});
    Result res = t.get(test);
    return Bytes.toInt(res.getValue(count, type));
  }

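  /**
   * Deletes a row on the source cluster and waits, with retries, until it is
   * gone from the target cluster.
   */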
  private void deleteAndWait(byte[] row, HTable source, HTable target)
      throws Exception {
    Delete del = new Delete(row);
    source.delete(del);

    Get get = new Get(row);
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for del replication");
      }
      Result res = target.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        Thread.sleep(SLEEP_TIME);
      } else {
        break;
      }
    }
  }

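  /**
   * Asserts that the given row is already present on the given cluster,
   * without waiting.
   */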
  private void check(byte[] row, byte[] fam, HTable t) throws IOException {
    Get get = new Get(row);
    Result res = t.get(get);
    if (res.size() == 0) {
      fail("Row is missing");
    }
  }

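  /**
   * Puts a row on the source cluster and waits until it has been replicated
   * to the target cluster.
   */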
  private void putAndWait(byte[] row, byte[] fam, HTable source, HTable target)
      throws Exception {
    Put put = new Put(row);
    put.add(fam, row, row);
    source.put(put);

    wait(row, target);
  }

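  /**
   * Polls the target cluster until the given row appears, failing the test
   * if it does not show up within NB_RETRIES attempts.
   */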
  private void wait(byte[] row, HTable target) throws Exception {
    Get get = new Get(row);
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for put replication");
      }
      Result res = target.get(get);
      if (res.size() == 0) {
        LOG.info("Row not available");
        Thread.sleep(SLEEP_TIME);
      } else {
        assertArrayEquals(row, res.value());
        break;
      }
    }
  }

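  /**
   * Per-region observer that counts puts and deletes and serves the counters
   * back through any Get tagged with the "count" attribute.
   */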
  public static class CoprocessorCounter extends BaseRegionObserver {
    private int nCount = 0;
    private int nDelete = 0;

    @Override
    public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e,
        final Put put, final WALEdit edit, final Durability durability)
        throws IOException {
      nCount++;
    }

    @Override
    public void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
        final Delete delete, final WALEdit edit, final Durability durability)
        throws IOException {
      nDelete++;
    }

    @Override
    public void preGet(final ObserverContext<RegionCoprocessorEnvironment> c,
        final Get get, final List<KeyValue> result) throws IOException {
      if (get.getAttribute("count") != null) {
        result.clear();
        // Serve the counters instead of real data; qualifiers are added in
        // sorted order ("delete" before "put") so Result lookups find them.
        result.add(new KeyValue(count, count, delete, Bytes.toBytes(nDelete)));
        result.add(new KeyValue(count, count, put, Bytes.toBytes(nCount)));
        // Skip the normal Get so only the counter cells are returned.
        c.bypass();
      }
    }
  }

}