/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.replication;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.util.HashMap;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.mapreduce.Job;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(LargeTests.class)
public class TestReplicationSmallTests extends TestReplicationBase {

  private static final Log LOG = LogFactory.getLog(TestReplicationSmallTests.class);

  /**
   * @throws java.lang.Exception
   */
  @Before
  public void setUp() throws Exception {
    htable1.setAutoFlush(true, true);
    // Starting and stopping replication can make us miss new logs,
    // rolling like this makes sure the most recent one gets added to the queue
    for (JVMClusterUtil.RegionServerThread r :
        utility1.getHBaseCluster().getRegionServerThreads()) {
      r.getRegionServer().getWAL().rollWriter();
    }
    utility1.truncateTable(tableName);
    // truncating the table will send one Delete per row to the slave cluster
    // in an async fashion, which is why we cannot just call truncateTable on
    // utility2 since late writes could make it to the slave in some way.
    // Instead, we truncate the first table and wait for all the Deletes to
    // make it to the slave.
    Scan scan = new Scan();
    int lastCount = 0;
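    // Poll the slave until the truncate's Deletes have all been applied;
    // lastCount lets us detect progress, so a slow but steady drain does
    // not exhaust the retry budget.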
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for truncate");
      }
      ResultScanner scanner = htable2.getScanner(scan);
      Result[] res = scanner.next(NB_ROWS_IN_BIG_BATCH);
      scanner.close();
      if (res.length != 0) {
        if (res.length < lastCount) {
          i--; // Don't increment timeout if we make progress
        }
        lastCount = res.length;
        LOG.info("Still got " + res.length + " rows");
        Thread.sleep(SLEEP_TIME);
      } else {
        break;
      }
    }
  }

  /**
   * Verify that version and column delete marker types are replicated
   * correctly.
   * @throws Exception
   */
  @Test(timeout=300000)
  public void testDeleteTypes() throws Exception {
    LOG.info("testDeleteTypes");
    final byte[] v1 = Bytes.toBytes("v1");
    final byte[] v2 = Bytes.toBytes("v2");
    final byte[] v3 = Bytes.toBytes("v3");
    htable1 = new HTable(conf1, tableName);

    long t = EnvironmentEdgeManager.currentTimeMillis();
    // create three versions for "row"
    Put put = new Put(row);
    put.add(famName, row, t, v1);
    htable1.put(put);

    put = new Put(row);
    put.add(famName, row, t + 1, v2);
    htable1.put(put);

    put = new Put(row);
    put.add(famName, row, t + 2, v3);
    htable1.put(put);

    Get get = new Get(row);
    get.setMaxVersions();
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for put replication");
      }
      Result res = htable2.get(get);
      if (res.size() < 3) {
        LOG.info("Rows not available");
        Thread.sleep(SLEEP_TIME);
      } else {
        assertArrayEquals(CellUtil.cloneValue(res.rawCells()[0]), v3);
        assertArrayEquals(CellUtil.cloneValue(res.rawCells()[1]), v2);
        assertArrayEquals(CellUtil.cloneValue(res.rawCells()[2]), v1);
        break;
      }
    }
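
    // Note the marker semantics below: deleteColumn (singular) masks only
    // the cell at that exact timestamp, while deleteColumns (plural) masks
    // every version at or older than the given timestamp.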
    // place a version delete marker (delete last version)
    Delete d = new Delete(row);
    d.deleteColumn(famName, row, t);
    htable1.delete(d);

    get = new Get(row);
    get.setMaxVersions();
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for put replication");
      }
      Result res = htable2.get(get);
      if (res.size() > 2) {
        LOG.info("Version not deleted");
        Thread.sleep(SLEEP_TIME);
      } else {
        assertArrayEquals(CellUtil.cloneValue(res.rawCells()[0]), v3);
        assertArrayEquals(CellUtil.cloneValue(res.rawCells()[1]), v2);
        break;
      }
    }

    // place a column delete marker
    d = new Delete(row);
    d.deleteColumns(famName, row, t + 2);
    htable1.delete(d);

    // now *both* of the remaining versions should be deleted
    // at the replica
    get = new Get(row);
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for del replication");
      }
      Result res = htable2.get(get);
      if (res.size() >= 1) {
        LOG.info("Rows not deleted");
        Thread.sleep(SLEEP_TIME);
      } else {
        break;
      }
    }
  }

  /**
   * Add a row, check it's replicated, delete it, check it's gone
   * @throws Exception
   */
  @Test(timeout=300000)
  public void testSimplePutDelete() throws Exception {
    LOG.info("testSimplePutDelete");
    Put put = new Put(row);
    put.add(famName, row, row);

    htable1 = new HTable(conf1, tableName);
    htable1.put(put);

    Get get = new Get(row);
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for put replication");
      }
      Result res = htable2.get(get);
      if (res.size() == 0) {
        LOG.info("Row not available");
        Thread.sleep(SLEEP_TIME);
      } else {
        assertArrayEquals(res.value(), row);
        break;
      }
    }

    Delete del = new Delete(row);
    htable1.delete(del);

    get = new Get(row);
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for del replication");
      }
      Result res = htable2.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        Thread.sleep(SLEEP_TIME);
      } else {
        break;
      }
    }
  }

  /**
   * Try a small batch upload using the write buffer, check it's replicated
   * @throws Exception
   */
  @Test(timeout=300000)
  public void testSmallBatch() throws Exception {
    LOG.info("testSmallBatch");
    Put put;
    // normal Batch tests
    htable1.setAutoFlush(false, true);
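    // With autoflush off, the puts below collect in the client-side write
    // buffer and are shipped to the region server as one batch when
    // flushCommits() is called.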
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
      put = new Put(Bytes.toBytes(i));
      put.add(famName, row, row);
      htable1.put(put);
    }
    htable1.flushCommits();

    Scan scan = new Scan();

    ResultScanner scanner1 = htable1.getScanner(scan);
    Result[] res1 = scanner1.next(NB_ROWS_IN_BATCH);
    scanner1.close();
    assertEquals(NB_ROWS_IN_BATCH, res1.length);

    for (int i = 0; i < NB_RETRIES; i++) {
      scan = new Scan();
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for normal batch replication");
      }
      ResultScanner scanner = htable2.getScanner(scan);
      Result[] res = scanner.next(NB_ROWS_IN_BATCH);
      scanner.close();
      if (res.length != NB_ROWS_IN_BATCH) {
        LOG.info("Only got " + res.length + " rows");
        Thread.sleep(SLEEP_TIME);
      } else {
        break;
      }
    }
  }

  /**
   * Test disabling and enabling replication: insert while replication is
   * disabled and make sure nothing gets replicated, then enable it and
   * check that the insert is replicated.
   * @throws Exception
   */
  @Test(timeout = 300000)
  public void testDisableEnable() throws Exception {
    // Test disabling replication
    admin.disablePeer("2");

    byte[] rowkey = Bytes.toBytes("disable enable");
    Put put = new Put(rowkey);
    put.add(famName, row, row);
    htable1.put(put);

    Get get = new Get(rowkey);
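    // Deliberately run through the whole retry budget: the row must never
    // appear on the slave while the peer is disabled.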
    for (int i = 0; i < NB_RETRIES; i++) {
      Result res = htable2.get(get);
      if (res.size() >= 1) {
        fail("Replication wasn't disabled");
      } else {
        LOG.info("Row not replicated, let's wait a bit more...");
        Thread.sleep(SLEEP_TIME);
      }
    }

    // Test enable replication
    admin.enablePeer("2");

    for (int i = 0; i < NB_RETRIES; i++) {
      Result res = htable2.get(get);
      if (res.size() == 0) {
        LOG.info("Row not available");
        Thread.sleep(SLEEP_TIME);
      } else {
        assertArrayEquals(res.value(), row);
        return;
      }
    }
    fail("Waited too much time for put replication");
  }

  /**
   * Integration test for TestReplicationAdmin: removes and re-adds a peer
   * cluster.
   * @throws Exception
   */
  @Test(timeout=300000)
  public void testAddAndRemoveClusters() throws Exception {
    LOG.info("testAddAndRemoveClusters");
    admin.removePeer("2");
    Thread.sleep(SLEEP_TIME);
    byte[] rowKey = Bytes.toBytes("Won't be replicated");
    Put put = new Put(rowKey);
    put.add(famName, row, row);
    htable1.put(put);

    Get get = new Get(rowKey);
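    // Here the last retry is the success case: the row staying absent for
    // the whole wait means the removed peer really stopped receiving edits.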
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        break;
      }
      Result res = htable2.get(get);
      if (res.size() >= 1) {
        fail("Not supposed to be replicated");
      } else {
        LOG.info("Row not replicated, let's wait a bit more...");
        Thread.sleep(SLEEP_TIME);
      }
    }

    admin.addPeer("2", utility2.getClusterKey());
    Thread.sleep(SLEEP_TIME);
    rowKey = Bytes.toBytes("do rep");
    put = new Put(rowKey);
    put.add(famName, row, row);
    LOG.info("Adding new row");
    htable1.put(put);

    get = new Get(rowKey);
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for put replication");
      }
      Result res = htable2.get(get);
      if (res.size() == 0) {
        LOG.info("Row not available");
        Thread.sleep(SLEEP_TIME * i);
      } else {
        assertArrayEquals(res.value(), row);
        break;
      }
    }
  }

  /**
   * Do a more intense version of testSmallBatch, one that will trigger
   * HLog rolling and other non-trivial code paths.
   * @throws Exception
   */
  @Test(timeout=300000)
  public void testLoading() throws Exception {
    LOG.info("Writing out rows to table1 in testLoading");
    htable1.setWriteBufferSize(1024);
    htable1.setAutoFlush(false, true);
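    // The tiny 1KB write buffer forces frequent client-side flushes, and
    // the large row count generates enough WAL traffic to exercise the log
    // rolling mentioned in the javadoc above.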
    for (int i = 0; i < NB_ROWS_IN_BIG_BATCH; i++) {
      Put put = new Put(Bytes.toBytes(i));
      put.add(famName, row, row);
      htable1.put(put);
    }
    htable1.flushCommits();

    Scan scan = new Scan();

    ResultScanner scanner = htable1.getScanner(scan);
    Result[] res = scanner.next(NB_ROWS_IN_BIG_BATCH);
    scanner.close();

    assertEquals(NB_ROWS_IN_BIG_BATCH, res.length);

    LOG.info("Looking in table2 for replicated rows in testLoading");
    long start = System.currentTimeMillis();
    // Retry more than NB_RETRIES.  As it was, retries were done in 5 seconds and we'd fail
    // sometimes.
    final long retries = NB_RETRIES * 10;
    for (int i = 0; i < retries; i++) {
      scan = new Scan();
      scanner = htable2.getScanner(scan);
      res = scanner.next(NB_ROWS_IN_BIG_BATCH);
      scanner.close();
      if (res.length != NB_ROWS_IN_BIG_BATCH) {
        if (i == retries - 1) {
          int lastRow = -1;
          for (Result result : res) {
            int currentRow = Bytes.toInt(result.getRow());
            for (int row = lastRow + 1; row < currentRow; row++) {
              LOG.error("Row missing: " + row);
            }
            lastRow = currentRow;
          }
          LOG.error("Last row: " + lastRow);
          fail("Waited too much time for normal batch replication, " +
            res.length + " instead of " + NB_ROWS_IN_BIG_BATCH + "; waited=" +
            (System.currentTimeMillis() - start) + "ms");
        } else {
          LOG.info("Only got " + res.length + " rows... retrying");
          Thread.sleep(SLEEP_TIME);
        }
      } else {
        break;
      }
    }
  }

  /**
   * Do a small loading into a table, make sure the data is really the same,
   * then run the VerifyReplication job to check the results. Do a second
   * comparison where all the cells are different.
   * @throws Exception
   */
  @Test(timeout=300000)
  public void testVerifyRepJob() throws Exception {
    // Populate the tables, at the same time it guarantees that the tables are
    // identical since it does the check
    testSmallBatch();

    String[] args = new String[] {"2", Bytes.toString(tableName)};
    Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
    if (job == null) {
      fail("Job wasn't created, see the log");
    }
    if (!job.waitForCompletion(true)) {
      fail("Job failed, see the log");
    }
    assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
        findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
    assertEquals(0, job.getCounters().
        findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());

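    // Second pass: make the slave diverge by rewriting every row with
    // different data and deleting the last row scanned outright, so every
    // row should now be reported as a bad row.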
    Scan scan = new Scan();
    ResultScanner rs = htable2.getScanner(scan);
    Put put = null;
    for (Result result : rs) {
      put = new Put(result.getRow());
      Cell firstVal = result.rawCells()[0];
      put.add(CellUtil.cloneFamily(firstVal),
          CellUtil.cloneQualifier(firstVal), Bytes.toBytes("diff data"));
      htable2.put(put);
    }
    Delete delete = new Delete(put.getRow());
    htable2.delete(delete);
    job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
    if (job == null) {
      fail("Job wasn't created, see the log");
    }
    if (!job.waitForCompletion(true)) {
      fail("Job failed, see the log");
    }
    assertEquals(0, job.getCounters().
        findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
    assertEquals(NB_ROWS_IN_BATCH, job.getCounters().
        findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
  }

  // VerifyReplication should honor versions option
  @Test(timeout=300000)
  public void testHBase14905() throws Exception {
    // normal Batch tests
    byte[] qualifierName = Bytes.toBytes("f1");
    Put put = new Put(Bytes.toBytes("r1"));
    put.add(famName, qualifierName, Bytes.toBytes("v1002"));
    htable1.put(put);
    put.add(famName, qualifierName, Bytes.toBytes("v1001"));
    htable1.put(put);
    put.add(famName, qualifierName, Bytes.toBytes("v1112"));
    htable1.put(put);

    Scan scan = new Scan();
    scan.setMaxVersions(100);
    ResultScanner scanner1 = htable1.getScanner(scan);
    Result[] res1 = scanner1.next(1);
    scanner1.close();

    assertEquals(1, res1.length);
    assertEquals(3, res1[0].getColumnCells(famName, qualifierName).size());

    for (int i = 0; i < NB_RETRIES; i++) {
      scan = new Scan();
      scan.setMaxVersions(100);
      scanner1 = htable2.getScanner(scan);
      res1 = scanner1.next(1);
      scanner1.close();
      if (res1.length != 1) {
        LOG.info("Only got " + res1.length + " rows");
        Thread.sleep(SLEEP_TIME);
      } else {
        int cellNumber = res1[0].getColumnCells(famName, Bytes.toBytes("f1")).size();
        if (cellNumber != 3) {
          LOG.info("Only got " + cellNumber + " cells");
          Thread.sleep(SLEEP_TIME);
        } else {
          break;
        }
      }
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for normal batch replication");
      }
    }

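    // Write two extra versions only to the slave; with --versions=100 the
    // verifier compares all versions, so this row must show up as a bad row.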
    put.add(famName, qualifierName, Bytes.toBytes("v1111"));
    htable2.put(put);
    put.add(famName, qualifierName, Bytes.toBytes("v1112"));
    htable2.put(put);

    scan = new Scan();
    scan.setMaxVersions(100);
    scanner1 = htable2.getScanner(scan);
    res1 = scanner1.next(NB_ROWS_IN_BATCH);
    scanner1.close();

    assertEquals(1, res1.length);
    assertEquals(5, res1[0].getColumnCells(famName, qualifierName).size());

    String[] args = new String[] {"--versions=100", "2", Bytes.toString(tableName)};
    Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
    if (job == null) {
      fail("Job wasn't created, see the log");
    }
    if (!job.waitForCompletion(true)) {
      fail("Job failed, see the log");
    }
    assertEquals(0, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
    assertEquals(1, job.getCounters().
      findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
  }

  // VerifyReplication should honor versions option
  @Test(timeout=300000)
  public void testVersionMismatchHBase14905() throws Exception {
    // normal Batch tests
    byte[] qualifierName = Bytes.toBytes("f1");
    Put put = new Put(Bytes.toBytes("r1"));
    long ts = System.currentTimeMillis();
    put.add(famName, qualifierName, ts + 1, Bytes.toBytes("v1"));
    htable1.put(put);
    put.add(famName, qualifierName, ts + 2, Bytes.toBytes("v2"));
    htable1.put(put);
    put.add(famName, qualifierName, ts + 3, Bytes.toBytes("v3"));
    htable1.put(put);

    Scan scan = new Scan();
    scan.setMaxVersions(100);
    ResultScanner scanner1 = htable1.getScanner(scan);
    Result[] res1 = scanner1.next(1);
    scanner1.close();

    assertEquals(1, res1.length);
    assertEquals(3, res1[0].getColumnCells(famName, qualifierName).size());

    for (int i = 0; i < NB_RETRIES; i++) {
      scan = new Scan();
      scan.setMaxVersions(100);
      scanner1 = htable2.getScanner(scan);
      res1 = scanner1.next(1);
      scanner1.close();
      if (res1.length != 1) {
        LOG.info("Only got " + res1.length + " rows");
        Thread.sleep(SLEEP_TIME);
      } else {
        int cellNumber = res1[0].getColumnCells(famName, Bytes.toBytes("f1")).size();
        if (cellNumber != 3) {
          LOG.info("Only got " + cellNumber + " cells");
          Thread.sleep(SLEEP_TIME);
        } else {
          break;
        }
      }
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for normal batch replication");
      }
    }

    try {
      // Disable replication and modify one particular version of the cell
      // on the slave to validate the feature.
      admin.disablePeer("2");
      Put put2 = new Put(Bytes.toBytes("r1"));
      put2.add(famName, qualifierName, ts + 2, Bytes.toBytes("v99"));
      htable2.put(put2);

      scan = new Scan();
      scan.setMaxVersions(100);
      scanner1 = htable2.getScanner(scan);
      res1 = scanner1.next(NB_ROWS_IN_BATCH);
      scanner1.close();
      assertEquals(1, res1.length);
      assertEquals(3, res1[0].getColumnCells(famName, qualifierName).size());

      String[] args = new String[] {"--versions=100", "2", Bytes.toString(tableName)};
      Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
      if (job == null) {
        fail("Job wasn't created, see the log");
      }
      if (!job.waitForCompletion(true)) {
        fail("Job failed, see the log");
      }
      assertEquals(0, job.getCounters().
        findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
      assertEquals(1, job.getCounters().
        findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
    } finally {
      admin.enablePeer("2");
    }
  }

  /**
   * Test for HBASE-9038: Replication.scopeWALEdits would NPE if it did not
   * filter out the compaction WALEdit.
   * @throws Exception
   */
  @Test(timeout=300000)
  public void testCompactionWALEdits() throws Exception {
    WALProtos.CompactionDescriptor compactionDescriptor =
        WALProtos.CompactionDescriptor.getDefaultInstance();
    HRegionInfo hri = new HRegionInfo(htable1.getName(),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    WALEdit edit = WALEdit.createCompaction(hri, compactionDescriptor);
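    // Must not throw an NPE: the compaction marker carries no replicable
    // cells and has to be filtered out while scoping.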
    Replication.scopeWALEdits(htable1.getTableDescriptor(), new HLogKey(), edit);
  }

  /**
   * Test for HBASE-8663.
   * Create three new tables with column families enabled for replication,
   * then run ReplicationAdmin.listReplicated(). Finally verify the
   * table:colfamily pairs. Note: TestReplicationAdmin is a better place for
   * this testing but it would need mocks.
   * @throws Exception
   */
  @Test(timeout = 300000)
  public void testVerifyListReplicatedTable() throws Exception {
    LOG.info("testVerifyListReplicatedTable");

    final String tName = "VerifyListReplicated_";
    final String colFam = "cf1";
    final int numOfTables = 3;

    HBaseAdmin hadmin = new HBaseAdmin(conf1);

    // Create Tables
    for (int i = 0; i < numOfTables; i++) {
      HTableDescriptor ht = new HTableDescriptor(TableName.valueOf(tName + i));
      HColumnDescriptor cfd = new HColumnDescriptor(colFam);
      cfd.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
      ht.addFamily(cfd);
      hadmin.createTable(ht);
    }

    // verify the result
    List<HashMap<String, String>> replicationColFams = admin.listReplicated();
    int[] match = new int[numOfTables]; // array of 3 with init value of zero

    for (int i = 0; i < replicationColFams.size(); i++) {
      HashMap<String, String> replicationEntry = replicationColFams.get(i);
      String tn = replicationEntry.get(ReplicationAdmin.TNAME);
      if ((tn.startsWith(tName)) && replicationEntry.get(ReplicationAdmin.CFNAME).equals(colFam)) {
        int m = Integer.parseInt(tn.substring(tn.length() - 1)); // get the last digit
        match[m]++; // should only increase once
      }
    }

    // check the matching result
    for (int i = 0; i < match.length; i++) {
      assertTrue("listReplicated() does not match table " + i, (match[i] == 1));
    }

    // drop tables
    for (int i = 0; i < numOfTables; i++) {
      String ht = tName + i;
      hadmin.disableTable(ht);
      hadmin.deleteTable(ht);
    }

    hadmin.close();
  }

  /**
   * Test for HBASE-9531.
   * Put a few rows into htable1, which should be replicated to htable2.
   * Create a ClusterStatus instance 'status' from HBaseAdmin, then test
   * status.getLoad(server).getReplicationLoadSourceList() and
   * status.getLoad(server).getReplicationLoadSink().
   * @throws Exception
   */
  @Test(timeout = 300000)
  public void testReplicationStatus() throws Exception {
    LOG.info("testReplicationStatus");

    HBaseAdmin admin = utility1.getHBaseAdmin();
    try {
      final byte[] qualName = Bytes.toBytes("q");
      Put p;

      for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
        p = new Put(Bytes.toBytes("row" + i));
        p.add(famName, qualName, Bytes.toBytes("val" + i));
        htable1.put(p);
      }

      ClusterStatus status = admin.getClusterStatus();

      for (ServerName server : status.getServers()) {
        ServerLoad sl = status.getLoad(server);
        List<ReplicationLoadSource> rLoadSourceList = sl.getReplicationLoadSourceList();
        ReplicationLoadSink rLoadSink = sl.getReplicationLoadSink();

        // check SourceList has at least one entry
        assertTrue("failed to get ReplicationLoadSourceList", (rLoadSourceList.size() > 0));

        // check only that the sink metrics exist, since their values are
        // hard to verify on the fly
        assertTrue("failed to get ReplicationLoadSink.AgeOfLastAppliedOp",
          (rLoadSink.getAgeOfLastAppliedOp() >= 0));
        assertTrue("failed to get ReplicationLoadSink.TimeStampsOfLastAppliedOp",
          (rLoadSink.getTimeStampsOfLastAppliedOp() >= 0));
      }
    } finally {
      admin.close();
    }
  }
}