package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests that a region server aborts when rolling its WAL fails while
 * deferred-flush edits are still unsynced.
 */
@Category(MediumTests.class)
public class TestLogRollAbort {
  private static final Log LOG = LogFactory.getLog(TestLogRollAbort.class);
  private static MiniDFSCluster dfsCluster;
  private static HBaseAdmin admin;
  private static MiniHBaseCluster cluster;
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  // Turn up logging on the classes these tests touch.
  {
    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
        .getLogger().setLevel(Level.ALL);
    ((Log4JLogger)HRegionServer.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)HRegion.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)HLog.LOG).getLogger().setLevel(Level.ALL);
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Tolerate a couple of failed log rolls and keep RPC timeouts short so
    // the test recovers quickly after the datanodes are restarted.
    TEST_UTIL.getConfiguration().setInt(
        "hbase.regionserver.logroll.errors.tolerated", 2);
    TEST_UTIL.getConfiguration().setInt(RpcClient.PING_INTERVAL_NAME, 10 * 1000);
    TEST_UTIL.getConfiguration().setInt(RpcClient.SOCKET_TIMEOUT, 10 * 1000);
    TEST_UTIL.getConfiguration().setInt("hbase.rpc.timeout", 10 * 1000);

    // Increase the pause between client retries.
    TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 5 * 1000);

    // The WAL needs HDFS append/sync support.
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);

    // Lower the namenode and datanode heartbeat intervals so datanode
    // failures are detected quickly.
    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);

    // The namenode may still pick a just-restarted datanode for a pipeline,
    // so retry block writes several times.
    TEST_UTIL.getConfiguration().setInt("dfs.client.block.write.retries", 10);
  }

  @Before
  public void setUp() throws Exception {
    TEST_UTIL.startMiniCluster(2);

    cluster = TEST_UTIL.getHBaseCluster();
    dfsCluster = TEST_UTIL.getDFSCluster();
    admin = TEST_UTIL.getHBaseAdmin();

    // Disable region rebalancing; it interferes with watching the WAL.
    cluster.getMaster().balanceSwitch(false);
  }

  @After
  public void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Tests that the region server aborts when it cannot close the current WAL
   * during a log roll while unflushed edits are outstanding.
   */
  @Test
  public void testRSAbortWithUnflushedEdits() throws Exception {
    LOG.info("Starting testRSAbortWithUnflushedEdits()");

    // When the meta table can be opened, the region servers are running.
    new HTable(TEST_UTIL.getConfiguration(),
        TableName.META_TABLE_NAME).close();

    // Create a deferred-log-flush table so edits are not synced immediately.
    String tableName = this.getClass().getSimpleName();
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    desc.setDeferredLogFlush(true);

    admin.createTable(desc);
    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);

    HRegionServer server = TEST_UTIL.getRSForFirstRegionInTable(Bytes.toBytes(tableName));
    HLog log = server.getWAL();

    assertTrue("Need HDFS-826 for this test", ((FSHLog) log).canGetCurReplicas());

    // Don't run this test without HDFS append support.
    assertTrue("Need append support for this test",
        FSUtils.isAppendSupported(TEST_UTIL.getConfiguration()));

    // Write one edit and sync it so the WAL has an open pipeline.
    Put p = new Put(Bytes.toBytes("row2001"));
    p.add(HConstants.CATALOG_FAMILY, Bytes.toBytes("col"), Bytes.toBytes(2001));
    table.put(p);

    log.sync();

    // Write a second edit but leave it unsynced (deferred log flush).
    p = new Put(Bytes.toBytes("row2002"));
    p.add(HConstants.CATALOG_FAMILY, Bytes.toBytes("col"), Bytes.toBytes(2002));
    table.put(p);

    // Restarting the datanodes breaks the WAL's write pipeline, so closing
    // the current writer during the next log roll should fail.
    dfsCluster.restartDataNodes();
    LOG.info("Restarted datanodes");

    // The roll must fail to close the old writer while the unsynced edit is
    // still reported as outstanding.
    try {
      log.rollWriter(true);
      fail("Log roll should have triggered FailedLogCloseException");
    } catch (FailedLogCloseException flce) {
      assertTrue("Should have deferred flush log edits outstanding",
          ((FSHLog) log).hasUnSyncedEntries());
    }
  }

}