/*
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.replication.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.ResourceCheckerJUnitRule;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Feeds WAL entries directly to a {@link ReplicationSink} and verifies the
 * resulting contents of the target tables.
 */
@Category(MediumTests.class)
public class TestReplicationSink {
  private static final Log LOG = LogFactory.getLog(TestReplicationSink.class);
  private static final int BATCH_SIZE = 10;

  private static final HBaseTestingUtility TEST_UTIL =
      new HBaseTestingUtility();

  private static ReplicationSink SINK;

  private static final byte[] TABLE_NAME1 =
      Bytes.toBytes("table1");
  private static final byte[] TABLE_NAME2 =
      Bytes.toBytes("table2");

  private static final byte[] FAM_NAME1 = Bytes.toBytes("info1");
  private static final byte[] FAM_NAME2 = Bytes.toBytes("info2");

  private static HTable table1;
  private static HTable table2;

  // Minimal Stoppable so tearDownAfterClass can stop the sink cleanly.
  private static final Stoppable STOPPABLE = new Stoppable() {
    final AtomicBoolean stop = new AtomicBoolean(false);

    @Override
    public boolean isStopped() {
      return this.stop.get();
    }

    @Override
    public void stop(String why) {
      LOG.info("STOPPING BECAUSE: " + why);
      this.stop.set(true);
    }
  };

  /**
   * Starts a mini cluster with replication enabled, creates the sink under
   * test, and creates the two target tables.
   * @throws Exception on cluster startup or table creation failure
   */
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
    TEST_UTIL.getConfiguration().setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
    TEST_UTIL.startMiniCluster(3);
    SINK =
      new ReplicationSink(new Configuration(TEST_UTIL.getConfiguration()), STOPPABLE);
    table1 = TEST_UTIL.createTable(TABLE_NAME1, FAM_NAME1);
    table2 = TEST_UTIL.createTable(TABLE_NAME2, FAM_NAME2);
  }

  /**
   * Stops the sink and shuts down the mini cluster.
   * @throws Exception on shutdown failure
   */
  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    STOPPABLE.stop("Shutting down");
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Truncates both tables so every test starts from an empty state.
   * @throws Exception on truncation failure
   */
  @Before
  public void setUp() throws Exception {
    table1 = TEST_UTIL.truncateTable(TABLE_NAME1);
    table2 = TEST_UTIL.truncateTable(TABLE_NAME2);
  }

  /**
   * Insert a whole batch of entries.
   * @throws Exception
   */
  @Test
  public void testBatchSink() throws Exception {
    HLog.Entry[] entries = new HLog.Entry[BATCH_SIZE];
    for(int i = 0; i < BATCH_SIZE; i++) {
      entries[i] = createEntry(TABLE_NAME1, i, KeyValue.Type.Put);
    }
    SINK.replicateEntries(entries);
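    // Every entry targeted table1, so a full scan should return BATCH_SIZE rows.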
    Scan scan = new Scan();
    ResultScanner scanRes = table1.getScanner(scan);
    assertEquals(BATCH_SIZE, scanRes.next(BATCH_SIZE).length);
  }

  /**
   * Insert a mix of puts and deletes.
   * @throws Exception
   */
  @Test
  public void testMixedPutDelete() throws Exception {
    HLog.Entry[] entries = new HLog.Entry[BATCH_SIZE/2];
    for(int i = 0; i < BATCH_SIZE/2; i++) {
      entries[i] = createEntry(TABLE_NAME1, i, KeyValue.Type.Put);
    }
    SINK.replicateEntries(entries);

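    // Second batch: put the odd rows, delete the even ones, so only half survive.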
    entries = new HLog.Entry[BATCH_SIZE];
    for(int i = 0; i < BATCH_SIZE; i++) {
      entries[i] = createEntry(TABLE_NAME1, i,
          i % 2 != 0 ? KeyValue.Type.Put : KeyValue.Type.DeleteColumn);
    }

    SINK.replicateEntries(entries);
    Scan scan = new Scan();
    ResultScanner scanRes = table1.getScanner(scan);
    assertEquals(BATCH_SIZE/2, scanRes.next(BATCH_SIZE).length);
  }

  /**
   * Insert to 2 different tables.
   * @throws Exception
   */
  @Test
  public void testMixedPutTables() throws Exception {
    HLog.Entry[] entries = new HLog.Entry[BATCH_SIZE];
    for(int i = 0; i < BATCH_SIZE; i++) {
      entries[i] =
          createEntry(i % 2 == 0 ? TABLE_NAME2 : TABLE_NAME1,
              i, KeyValue.Type.Put);
    }

    SINK.replicateEntries(entries);
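    // Even-numbered rows went to table2; verify the scan returns only those.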
    Scan scan = new Scan();
    ResultScanner scanRes = table2.getScanner(scan);
    for(Result res : scanRes) {
      assertTrue(Bytes.toInt(res.getRow()) % 2 == 0);
    }
  }

  /**
   * Insert then do different types of deletes.
   * @throws Exception
   */
  @Test
  public void testMixedDeletes() throws Exception {
    HLog.Entry[] entries = new HLog.Entry[3];
    for(int i = 0; i < 3; i++) {
      entries[i] = createEntry(TABLE_NAME1, i, KeyValue.Type.Put);
    }
    SINK.replicateEntries(entries);
    entries = new HLog.Entry[3];

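    // Remove each row with a different delete type; all three should be gone.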
    entries[0] = createEntry(TABLE_NAME1, 0, KeyValue.Type.DeleteColumn);
    entries[1] = createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily);
    entries[2] = createEntry(TABLE_NAME1, 2, KeyValue.Type.DeleteColumn);

    SINK.replicateEntries(entries);

    Scan scan = new Scan();
    ResultScanner scanRes = table1.getScanner(scan);
    assertEquals(0, scanRes.next(3).length);
  }

  /**
   * Puts are buffered in the sink while deletes are not, so a delete could be
   * applied before the buffered Put it is meant to remove. This test verifies
   * that the sink preserves the order of the batch instead.
   * @throws Exception
   */
  @Test
  public void testApplyDeleteBeforePut() throws Exception {
    HLog.Entry[] entries = new HLog.Entry[5];
    for(int i = 0; i < 2; i++) {
      entries[i] = createEntry(TABLE_NAME1, i, KeyValue.Type.Put);
    }
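    // The DeleteFamily arrives after the Put for row 1 in the same batch; if
    // ordering is respected, row 1 must end up deleted.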
    entries[2] = createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily);
    for(int i = 3; i < 5; i++) {
      entries[i] = createEntry(TABLE_NAME1, i, KeyValue.Type.Put);
    }
    SINK.replicateEntries(entries);
    Get get = new Get(Bytes.toBytes(1));
    Result res = table1.get(get);
    assertEquals(0, res.size());
  }

  private HLog.Entry createEntry(byte[] table, int row, KeyValue.Type type) {
    byte[] fam = Bytes.equals(table, TABLE_NAME1) ? FAM_NAME1 : FAM_NAME2;
    byte[] rowBytes = Bytes.toBytes(row);
    // Sleep a millisecond so two consecutive entries for the same key never
    // share a timestamp.
    try {
      Thread.sleep(1);
    } catch (InterruptedException e) {
      LOG.info("Interrupted while sleeping", e);
    }
    final long now = System.currentTimeMillis();
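    // Build a KeyValue of the requested type; the family name doubles as the
    // column qualifier.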
    KeyValue kv = null;
    if (type.getCode() == KeyValue.Type.Put.getCode()) {
      kv = new KeyValue(rowBytes, fam, fam, now,
          KeyValue.Type.Put, Bytes.toBytes(row));
    } else if (type.getCode() == KeyValue.Type.DeleteColumn.getCode()) {
      kv = new KeyValue(rowBytes, fam, fam,
          now, KeyValue.Type.DeleteColumn);
    } else if (type.getCode() == KeyValue.Type.DeleteFamily.getCode()) {
      kv = new KeyValue(rowBytes, fam, null,
          now, KeyValue.Type.DeleteFamily);
    }

    // Key the edit to the target table and wrap the single KeyValue in a
    // WALEdit, as it would appear in a replicated WAL.
    HLogKey key = new HLogKey(table, table, now, now,
        HConstants.DEFAULT_CLUSTER_ID);

    WALEdit edit = new WALEdit();
    edit.add(kv);

    return new HLog.Entry(key, edit);
  }

  @Rule
  public ResourceCheckerJUnitRule resourceChecker =
      new ResourceCheckerJUnitRule();
}