/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
19 package org.apache.hadoop.hbase.io.hfile.bucket;
20
21 import java.io.IOException;
22 import java.io.RandomAccessFile;
23 import java.nio.ByteBuffer;
24 import java.nio.channels.FileChannel;
25
26 import org.apache.commons.logging.Log;
27 import org.apache.commons.logging.LogFactory;
28 import org.apache.hadoop.classification.InterfaceAudience;
29 import org.apache.hadoop.util.StringUtils;
30
31
32
33
34 @InterfaceAudience.Private
35 public class FileIOEngine implements IOEngine {
36 static final Log LOG = LogFactory.getLog(FileIOEngine.class);
37
38 private FileChannel fileChannel = null;
39
40 public FileIOEngine(String filePath, long fileSize) throws IOException {
41 RandomAccessFile raf = null;
42 try {
43 raf = new RandomAccessFile(filePath, "rw");
44 raf.setLength(fileSize);
45 fileChannel = raf.getChannel();
46 LOG.info("Allocating " + StringUtils.byteDesc(fileSize)
47 + ", on the path:" + filePath);
48 } catch (java.io.FileNotFoundException fex) {
49 LOG.error("Can't create bucket cache file " + filePath, fex);
50 throw fex;
51 } catch (IOException ioex) {
52 LOG.error("Can't extend bucket cache file; insufficient space for "
53 + StringUtils.byteDesc(fileSize), ioex);
54 if (raf != null) raf.close();
55 throw ioex;
56 }
57 }
58
59
60
61
62
63 @Override
64 public boolean isPersistent() {
65 return true;
66 }
67
68
69
70
71
72
73
74
75 @Override
76 public int read(ByteBuffer dstBuffer, long offset) throws IOException {
77 return fileChannel.read(dstBuffer, offset);
78 }
79
80
81
82
83
84
85
86 @Override
87 public void write(ByteBuffer srcBuffer, long offset) throws IOException {
88 fileChannel.write(srcBuffer, offset);
89 }
90
91
92
93
94
95 @Override
96 public void sync() throws IOException {
97 fileChannel.force(true);
98 }
99
100
101
102
103 @Override
104 public void shutdown() {
105 try {
106 fileChannel.close();
107 } catch (IOException ex) {
108 LOG.error("Can't shutdown cleanly", ex);
109 }
110 }
111 }