package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.assertEquals;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
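/**
 * Verifies that per-mutation Durability settings (USE_DEFAULT, SKIP_WAL, ASYNC_WAL,
 * SYNC_WAL, FSYNC_WAL) interact correctly with the table-level deferred log flush
 * setting, by counting the entries that reach a shared WAL.
 */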
@Category(MediumTests.class)
public class TestDurability {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static FileSystem FS;
  private static MiniDFSCluster CLUSTER;
  private static Configuration CONF;
  private static Path DIR;

  private static byte[] FAMILY = Bytes.toBytes("family");
  private static byte[] ROW = Bytes.toBytes("row");
  private static byte[] COL = Bytes.toBytes("col");

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    CONF = TEST_UTIL.getConfiguration();
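    // Make the deferred (optional) log flush interval very large so deferred edits
    // are only synced when the test calls wal.sync() explicitly.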
    CONF.setLong("hbase.regionserver.optionallogflushinterval", 500 * 1000);
    TEST_UTIL.startMiniDFSCluster(1);

    CLUSTER = TEST_UTIL.getDFSCluster();
    FS = CLUSTER.getFileSystem();
    DIR = TEST_UTIL.getDataTestDirOnTestFS("TestDurability");
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testDurability() throws Exception {
    HLog wal = HLogFactory.createHLog(FS, DIR, "hlogdir",
        "hlogdir_archive", CONF);
    byte[] tableName = Bytes.toBytes("TestDurability");
    HRegion region = createHRegion(tableName, "region", wal, false);
    HRegion deferredRegion = createHRegion(tableName, "deferredRegion", wal, true);

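    // A put to the normal (synchronous) region reaches the WAL immediately.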
    region.put(newPut(null));
    verifyHLogCount(wal, 1);

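    // A put to the deferred-flush region does not reach the WAL immediately,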
    deferredRegion.put(newPut(null));
    verifyHLogCount(wal, 1);
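    // but it does once the WAL is explicitly synced.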
    wal.sync();
    verifyHLogCount(wal, 2);

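    // A pending deferred edit is carried to the WAL by the next synchronous put.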
    deferredRegion.put(newPut(null));
    verifyHLogCount(wal, 2);
    region.put(newPut(null));
    verifyHLogCount(wal, 4);

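    // USE_DEFAULT falls back to the table-level setting: deferred for deferredRegion,
    // synchronous for region.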
    deferredRegion.put(newPut(Durability.USE_DEFAULT));
    verifyHLogCount(wal, 4);
    region.put(newPut(Durability.USE_DEFAULT));
    verifyHLogCount(wal, 6);

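    // SKIP_WAL never writes to the WAL, even after an explicit sync.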
    region.put(newPut(Durability.SKIP_WAL));
    deferredRegion.put(newPut(Durability.SKIP_WAL));
    verifyHLogCount(wal, 6);
    wal.sync();
    verifyHLogCount(wal, 6);

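    // ASYNC_WAL defers the sync for both regions, regardless of the table setting.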
    region.put(newPut(Durability.ASYNC_WAL));
    deferredRegion.put(newPut(Durability.ASYNC_WAL));
    verifyHLogCount(wal, 6);
    wal.sync();
    verifyHLogCount(wal, 8);

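    // SYNC_WAL syncs immediately and overrides the deferred table setting.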
    region.put(newPut(Durability.SYNC_WAL));
    deferredRegion.put(newPut(Durability.SYNC_WAL));
    verifyHLogCount(wal, 10);

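    // FSYNC_WAL likewise reaches the WAL without an explicit sync.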
    region.put(newPut(Durability.FSYNC_WAL));
    deferredRegion.put(newPut(Durability.FSYNC_WAL));
    verifyHLogCount(wal, 12);
  }

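  // Builds a Put on ROW; an explicit Durability is set only when one is given.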
  private Put newPut(Durability durability) {
    Put p = new Put(ROW);
    p.add(FAMILY, COL, COL);
    if (durability != null) {
      p.setDurability(durability);
    }
    return p;
  }

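  // Reads back the WAL's current file and asserts how many entries it contains.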
  private void verifyHLogCount(HLog log, int expected) throws Exception {
    Path walPath = ((FSHLog) log).computeFilename();
    HLog.Reader reader = HLogFactory.createReader(FS, walPath, CONF);
    int count = 0;
    HLog.Entry entry = new HLog.Entry();
    while (reader.next(entry) != null) count++;
    reader.close();
    assertEquals(expected, count);
  }

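  // Creates a fresh region on the shared WAL; deferred log flush is set per the flag.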
  private HRegion createHRegion(byte[] tableName, String callingMethod, HLog log,
      boolean isDeferredLogFlush) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.setDeferredLogFlush(isDeferredLogFlush);
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    Path path = new Path(DIR + callingMethod);
    if (FS.exists(path)) {
      if (!FS.delete(path, true)) {
        throw new IOException("Failed delete of " + path);
      }
    }
    return HRegion.createHRegion(info, path, CONF, htd, log);
  }
}