package org.apache.hadoop.hbase.regionserver.wal;

import java.io.IOException;
import java.util.LinkedList;
import java.util.Queue;

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry;
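/**
 * A {@link SequenceFileLogReader} that buffers the whole file on the first read and can be
 * configured, via the "faultysequencefilelogreader.failuretype" property, to throw a fake
 * IOException at the beginning, middle, or end of the file, for exercising read-failure
 * handling in tests.
 */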
public class FaultySequenceFileLogReader extends SequenceFileLogReader {

  // Where in the file the fake read failure is injected.
  enum FailureType {
    BEGINNING, MIDDLE, END, NONE
  }

  // Entries read eagerly from the underlying file on the first call to next().
  Queue<Entry> nextQueue = new LinkedList<Entry>();
  int numberOfFileEntries = 0;

  FailureType getFailureType() {
    return FailureType.valueOf(conf.get("faultysequencefilelogreader.failuretype", "NONE"));
  }

  WALEditCodec codec = new WALEditCodec();

  @Override
  public HLog.Entry next(HLog.Entry reuse) throws IOException {
    this.entryStart = this.reader.getPosition();
    boolean b = true;

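    // On the first call, read every remaining entry from the underlying reader into the queue,
    // so the failure point can be located from the total number of entries in the file.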
    if (nextQueue.isEmpty()) {
      while (b) {
        HLogKey key = HLog.newKey(conf);
        WALEdit val = new WALEdit();
        HLog.Entry e = new HLog.Entry(key, val);
        codec.setCompression(compressionContext);
        e.getEdit().setCodec(codec);
        if (compressionContext != null) {
          e.getKey().setCompressionContext(compressionContext);
        }
        b = this.reader.next(e.getKey(), e.getEdit());
        nextQueue.offer(e);
        numberOfFileEntries++;
      }
    }

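    // Throw the fake exception once the queue has drained to the configured failure point.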
    if (nextQueue.size() == this.numberOfFileEntries
        && getFailureType() == FailureType.BEGINNING) {
      throw this.addFileInfoToException(new IOException("fake Exception"));
    } else if (nextQueue.size() == this.numberOfFileEntries / 2
        && getFailureType() == FailureType.MIDDLE) {
      throw this.addFileInfoToException(new IOException("fake Exception"));
    } else if (nextQueue.size() == 1 && getFailureType() == FailureType.END) {
      throw this.addFileInfoToException(new IOException("fake Exception"));
    }

    if (nextQueue.peek() != null) {
      edit++;
    }

    Entry e = nextQueue.poll();

    // The last entry queued above is empty (the underlying reader had nothing left to read
    // into it), so an empty edit marks end-of-file.
    if (e.getEdit().isEmpty()) {
      return null;
    }
    return e;
  }
}