/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.snapshot;

import static org.junit.Assert.assertEquals;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test clone/restore of flush snapshots from the client.
 *
 * TODO This is essentially a clone of TestRestoreSnapshotFromClient. It is worth refactoring
 * because there will be a few more flavors of snapshots that need to run these tests.
 */
@Category(LargeTests.class)
public class TestRestoreFlushSnapshotFromClient {
  final Log LOG = LogFactory.getLog(getClass());

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private final byte[] FAMILY = Bytes.toBytes("cf");

  private byte[] snapshotName0;
  private byte[] snapshotName1;
  private byte[] snapshotName2;
  private int snapshot0Rows;
  private int snapshot1Rows;
  private byte[] tableName;
  private HBaseAdmin admin;

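  /**
   * Set up the cluster configuration, enable snapshot support, and start a three-node
   * mini cluster.
   */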
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
    TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
    TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
    TEST_UTIL.getConfiguration().setBoolean(
        "hbase.master.enabletable.roundrobin", true);

    // Enable snapshot
    TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);

    TEST_UTIL.startMiniCluster(3);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Initialize the tests with a table filled with some data
   * and two snapshots (snapshotName0, snapshotName1) of different states.
   * The tableName, snapshotNames, and the number of rows in each snapshot are initialized.
   */
  @Before
  public void setup() throws Exception {
    this.admin = TEST_UTIL.getHBaseAdmin();

    long tid = System.currentTimeMillis();
    tableName = Bytes.toBytes("testtb-" + tid);
    snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
    snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
    snapshotName2 = Bytes.toBytes("snaptb2-" + tid);

    // create the test table
    createTable(tableName, FAMILY);
    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
    try {
      loadData(table, 500, FAMILY);
      snapshot0Rows = TEST_UTIL.countRows(table);
      LOG.info("=== before snapshot with 500 rows");
      logFSTree();

      // take a snapshot
      admin.snapshot(Bytes.toString(snapshotName0), Bytes.toString(tableName),
          SnapshotDescription.Type.FLUSH);

      LOG.info("=== after snapshot with 500 rows");
      logFSTree();

      // insert more data
      loadData(table, 500, FAMILY);
      snapshot1Rows = TEST_UTIL.countRows(table);
      LOG.info("=== before snapshot with 1000 rows");
      logFSTree();

      // take a snapshot of the updated table
      admin.snapshot(Bytes.toString(snapshotName1), Bytes.toString(tableName),
          SnapshotDescription.Type.FLUSH);
      LOG.info("=== after snapshot with 1000 rows");
      logFSTree();
    } finally {
      table.close();
    }
  }

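  /**
   * Delete the test table and the snapshots taken in setup, then clear the archive
   * directory so no files are left behind for the next test.
   */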
  @After
  public void tearDown() throws Exception {
    TEST_UTIL.deleteTable(tableName);
    admin.deleteSnapshot(snapshotName0);
    admin.deleteSnapshot(snapshotName1);

    // Ensure the archive directory is empty
    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    mfs.getFileSystem().delete(
      new Path(mfs.getRootDir(), HConstants.HFILE_ARCHIVE_DIRECTORY), true);
  }

  @Test
  public void testTakeFlushSnapshot() throws IOException {
    // Taking the flush snapshots happens in setup(); if that fails, this test fails.
  }

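  /**
   * Restore the table from snapshot-0 and verify the row count, then restore it from
   * snapshot-1 and verify the row count again. The table is disabled before each restore.
   */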
  @Test
  public void testRestoreSnapshot() throws IOException {
    verifyRowCount(tableName, snapshot1Rows);

    // Restore from snapshot-0
    admin.disableTable(tableName);
    admin.restoreSnapshot(snapshotName0);
    logFSTree();
    admin.enableTable(tableName);
    LOG.info("=== after restore with 500 row snapshot");
    logFSTree();
    verifyRowCount(tableName, snapshot0Rows);

    // Restore from snapshot-1
    admin.disableTable(tableName);
    admin.restoreSnapshot(snapshotName1);
    admin.enableTable(tableName);
    verifyRowCount(tableName, snapshot1Rows);
  }

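  /**
   * Cloning a snapshot that was never taken must fail with SnapshotDoesNotExistException.
   */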
  @Test(expected=SnapshotDoesNotExistException.class)
  public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
    String snapshotName = "random-snapshot-" + System.currentTimeMillis();
    String tableName = "random-table-" + System.currentTimeMillis();
    admin.cloneSnapshot(snapshotName, tableName);
  }

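  /**
   * Clone a new table from each of the two snapshots and verify that the clone contains
   * the same number of rows as the snapshot it was created from.
   */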
  @Test
  public void testCloneSnapshot() throws IOException, InterruptedException {
    byte[] clonedTableName = Bytes.toBytes("clonedtb-" + System.currentTimeMillis());
    testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows);
    testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows);
  }

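  /**
   * Clone the given snapshot into a new table, verify the expected row count,
   * and delete the cloned table afterwards.
   */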
  private void testCloneSnapshot(final byte[] tableName, final byte[] snapshotName,
      int snapshotRows) throws IOException, InterruptedException {
    // create a new table from snapshot
    admin.cloneSnapshot(snapshotName, tableName);
    verifyRowCount(tableName, snapshotRows);

    TEST_UTIL.deleteTable(tableName);
  }

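  /**
   * Clone a table from snapshot-0, take a flush snapshot of the clone (snapshot-2),
   * then clone again from snapshot-2 and verify the row count both times.
   */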
  @Test
  public void testRestoreSnapshotOfCloned() throws IOException, InterruptedException {
    byte[] clonedTableName = Bytes.toBytes("clonedtb-" + System.currentTimeMillis());
    admin.cloneSnapshot(snapshotName0, clonedTableName);
    verifyRowCount(clonedTableName, snapshot0Rows);
    admin.snapshot(Bytes.toString(snapshotName2), Bytes.toString(clonedTableName),
        SnapshotDescription.Type.FLUSH);
    TEST_UTIL.deleteTable(clonedTableName);

    admin.cloneSnapshot(snapshotName2, clonedTableName);
    verifyRowCount(clonedTableName, snapshot0Rows);
    TEST_UTIL.deleteTable(clonedTableName);
  }

  // ==========================================================================
  //  Helpers
  // ==========================================================================
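  /**
   * Create a table with the given column families, pre-split on single
   * hex-character boundaries ('0' through 'f').
   */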
  private void createTable(final byte[] tableName, final byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (byte[] family: families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      htd.addFamily(hcd);
    }
    byte[][] splitKeys = new byte[16][];
    byte[] hex = Bytes.toBytes("0123456789abcdef");
    for (int i = 0; i < 16; ++i) {
      splitKeys[i] = new byte[] { hex[i] };
    }
    admin.createTable(htd, splitKeys);
  }

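  /**
   * Load the table with the requested number of rows. Each row key is the MD5 hash of its
   * value, spreading rows across the pre-split regions; writes skip the WAL and are flushed
   * in a single batch at the end.
   */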
  public void loadData(final HTable table, int rows, byte[]... families) throws IOException {
    byte[] qualifier = Bytes.toBytes("q");
    table.setAutoFlush(false);
    while (rows-- > 0) {
      byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows));
      byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value));
      Put put = new Put(key);
      put.setWriteToWAL(false);
      for (byte[] family: families) {
        put.add(family, qualifier, value);
      }
      table.put(put);
    }
    table.flushCommits();
  }

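  /**
   * Dump the current state of the HBase root directory to the log, to help debug
   * snapshot and restore failures.
   */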
  private void logFSTree() throws IOException {
    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    FSUtils.logFileSystemState(mfs.getFileSystem(), mfs.getRootDir(), LOG);
  }

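  /**
   * Assert that the given table contains exactly the expected number of rows.
   */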
  private void verifyRowCount(final byte[] tableName, long expectedRows) throws IOException {
    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
    assertEquals(expectedRows, TEST_UTIL.countRows(table));
    table.close();
  }
}