/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 package org.apache.hadoop.hbase.snapshot;
19
20 import static org.junit.Assert.assertEquals;
21
22 import java.io.IOException;
23
24 import org.apache.commons.logging.Log;
25 import org.apache.commons.logging.LogFactory;
26 import org.apache.hadoop.fs.FileStatus;
27 import org.apache.hadoop.fs.Path;
28 import org.apache.hadoop.hbase.HConstants;
29 import org.apache.hadoop.hbase.HBaseTestingUtility;
30 import org.apache.hadoop.hbase.HColumnDescriptor;
31 import org.apache.hadoop.hbase.HTableDescriptor;
32 import org.apache.hadoop.hbase.LargeTests;
33 import org.apache.hadoop.hbase.client.HBaseAdmin;
34 import org.apache.hadoop.hbase.client.HTable;
35 import org.apache.hadoop.hbase.client.Put;
36 import org.apache.hadoop.hbase.master.MasterFileSystem;
37 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
38 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
39 import org.apache.hadoop.hbase.util.Bytes;
40 import org.apache.hadoop.hbase.util.FSUtils;
41 import org.apache.hadoop.hbase.util.MD5Hash;
42 import org.junit.After;
43 import org.junit.AfterClass;
44 import org.junit.Before;
45 import org.junit.BeforeClass;
46 import org.junit.Test;
47 import org.junit.experimental.categories.Category;
48
49
50
51
52
53
54
55 @Category(LargeTests.class)
56 public class TestRestoreFlushSnapshotFromClient {
57 final Log LOG = LogFactory.getLog(getClass());
58
59 private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
60
61 private final byte[] FAMILY = Bytes.toBytes("cf");
62
63 private byte[] snapshotName0;
64 private byte[] snapshotName1;
65 private byte[] snapshotName2;
66 private int snapshot0Rows;
67 private int snapshot1Rows;
68 private byte[] tableName;
69 private HBaseAdmin admin;
70
71 @BeforeClass
72 public static void setUpBeforeClass() throws Exception {
73 TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
74 TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
75 TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
76 TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
77 TEST_UTIL.getConfiguration().setBoolean(
78 "hbase.master.enabletable.roundrobin", true);
79
80
81 TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
82
83 TEST_UTIL.startMiniCluster(3);
84 }
85
86 @AfterClass
87 public static void tearDownAfterClass() throws Exception {
88 TEST_UTIL.shutdownMiniCluster();
89 }
90
91
92
93
94
95
96 @Before
97 public void setup() throws Exception {
98 this.admin = TEST_UTIL.getHBaseAdmin();
99
100 long tid = System.currentTimeMillis();
101 tableName = Bytes.toBytes("testtb-" + tid);
102 snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
103 snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
104 snapshotName2 = Bytes.toBytes("snaptb2-" + tid);
105
106
107 createTable(tableName, FAMILY);
108 HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
109 try {
110 loadData(table, 500, FAMILY);
111 snapshot0Rows = TEST_UTIL.countRows(table);
112 LOG.info("=== before snapshot with 500 rows");
113 logFSTree();
114
115
116 admin.snapshot(Bytes.toString(snapshotName0), Bytes.toString(tableName),
117 SnapshotDescription.Type.FLUSH);
118
119 LOG.info("=== after snapshot with 500 rows");
120 logFSTree();
121
122
123 loadData(table, 500, FAMILY);
124 snapshot1Rows = TEST_UTIL.countRows(table);
125 LOG.info("=== before snapshot with 1000 rows");
126 logFSTree();
127
128
129 admin.snapshot(Bytes.toString(snapshotName1), Bytes.toString(tableName),
130 SnapshotDescription.Type.FLUSH);
131 LOG.info("=== after snapshot with 1000 rows");
132 logFSTree();
133 } finally {
134 table.close();
135 }
136 }
137
138 @After
139 public void tearDown() throws Exception {
140 TEST_UTIL.deleteTable(tableName);
141 admin.deleteSnapshot(snapshotName0);
142 admin.deleteSnapshot(snapshotName1);
143
144
145 MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
146 mfs.getFileSystem().delete(
147 new Path(mfs.getRootDir(), HConstants.HFILE_ARCHIVE_DIRECTORY), true);
148 }
149
150 @Test
151 public void testTakeFlushSnapshot() throws IOException {
152
153 }
154
155 @Test
156 public void testRestoreSnapshot() throws IOException {
157 verifyRowCount(tableName, snapshot1Rows);
158
159
160 admin.disableTable(tableName);
161 admin.restoreSnapshot(snapshotName0);
162 logFSTree();
163 admin.enableTable(tableName);
164 LOG.info("=== after restore with 500 row snapshot");
165 logFSTree();
166 verifyRowCount(tableName, snapshot0Rows);
167
168
169 admin.disableTable(tableName);
170 admin.restoreSnapshot(snapshotName1);
171 admin.enableTable(tableName);
172 verifyRowCount(tableName, snapshot1Rows);
173 }
174
175 @Test(expected=SnapshotDoesNotExistException.class)
176 public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
177 String snapshotName = "random-snapshot-" + System.currentTimeMillis();
178 String tableName = "random-table-" + System.currentTimeMillis();
179 admin.cloneSnapshot(snapshotName, tableName);
180 }
181
182 @Test
183 public void testCloneSnapshot() throws IOException, InterruptedException {
184 byte[] clonedTableName = Bytes.toBytes("clonedtb-" + System.currentTimeMillis());
185 testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows);
186 testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows);
187 }
188
189 private void testCloneSnapshot(final byte[] tableName, final byte[] snapshotName,
190 int snapshotRows) throws IOException, InterruptedException {
191
192 admin.cloneSnapshot(snapshotName, tableName);
193 verifyRowCount(tableName, snapshotRows);
194
195 TEST_UTIL.deleteTable(tableName);
196 }
197
198 @Test
199 public void testRestoreSnapshotOfCloned() throws IOException, InterruptedException {
200 byte[] clonedTableName = Bytes.toBytes("clonedtb-" + System.currentTimeMillis());
201 admin.cloneSnapshot(snapshotName0, clonedTableName);
202 verifyRowCount(clonedTableName, snapshot0Rows);
203 admin.snapshot(Bytes.toString(snapshotName2), Bytes.toString(clonedTableName), SnapshotDescription.Type.FLUSH);
204 TEST_UTIL.deleteTable(clonedTableName);
205
206 admin.cloneSnapshot(snapshotName2, clonedTableName);
207 verifyRowCount(clonedTableName, snapshot0Rows);
208 TEST_UTIL.deleteTable(clonedTableName);
209 }
210
211
212
213
214 private void createTable(final byte[] tableName, final byte[]... families) throws IOException {
215 HTableDescriptor htd = new HTableDescriptor(tableName);
216 for (byte[] family: families) {
217 HColumnDescriptor hcd = new HColumnDescriptor(family);
218 htd.addFamily(hcd);
219 }
220 byte[][] splitKeys = new byte[16][];
221 byte[] hex = Bytes.toBytes("0123456789abcdef");
222 for (int i = 0; i < 16; ++i) {
223 splitKeys[i] = new byte[] { hex[i] };
224 }
225 admin.createTable(htd, splitKeys);
226 }
227
228 public void loadData(final HTable table, int rows, byte[]... families) throws IOException {
229 byte[] qualifier = Bytes.toBytes("q");
230 table.setAutoFlush(false);
231 while (rows-- > 0) {
232 byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows));
233 byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value));
234 Put put = new Put(key);
235 put.setWriteToWAL(false);
236 for (byte[] family: families) {
237 put.add(family, qualifier, value);
238 }
239 table.put(put);
240 }
241 table.flushCommits();
242 }
243
244 private void logFSTree() throws IOException {
245 MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
246 FSUtils.logFileSystemState(mfs.getFileSystem(), mfs.getRootDir(), LOG);
247 }
248
249 private void verifyRowCount(final byte[] tableName, long expectedRows) throws IOException {
250 HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
251 assertEquals(expectedRows, TEST_UTIL.countRows(table));
252 table.close();
253 }
254 }