/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.compactions;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;

/**
 * Compact passed set of files. Create an instance and then call
 * {@link #compact(CompactionRequest)}.
 */
@InterfaceAudience.Private
public class DefaultCompactor extends Compactor {
  public DefaultCompactor(final Configuration conf, final Store store) {
    super(conf, store);
  }

  /**
   * Do a minor/major compaction on an explicit set of storefiles from a Store.
   */
  public List<Path> compact(final CompactionRequest request) throws IOException {
    FileDetails fd = getFileDetails(request.getFiles(), request.isMajor());
    this.progress = new CompactionProgress(fd.maxKeyCount);

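    // Open a StoreFileScanner over each file being compacted; these feed the
    // single merged compaction scanner created below.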
    List<StoreFileScanner> scanners = createFileScanners(request.getFiles());

    StoreFile.Writer writer = null;
    List<Path> newFiles = new ArrayList<Path>();
    // Find the smallest read point across all the Scanners.
    long smallestReadPoint = setSmallestReadPoint();
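    // Cells with an MVCC read point at or above smallestReadPoint may still be
    // visible to in-flight scanners, so their MVCC info must survive the rewrite.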
    try {
      InternalScanner scanner = null;
      try {
        /* Include deletes, unless we are doing a major compaction. */
        ScanType scanType =
            request.isMajor() ? ScanType.COMPACT_DROP_DELETES : ScanType.COMPACT_RETAIN_DELETES;
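        // Coprocessor hooks may wrap or replace the scanner, both before and
        // after the default compaction scanner is created.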
        scanner = preCreateCoprocScanner(request, scanType, fd.earliestPutTs, scanners);
        if (scanner == null) {
          scanner = createScanner(store, scanners, scanType, smallestReadPoint, fd.earliestPutTs);
        }
        scanner = postCreateCoprocScanner(request, scanType, scanner);
        if (scanner == null) {
          // A null scanner returned from the coprocessor hooks means skip normal processing.
          return newFiles;
        }
        // Create the writer even if there are no kvs (an empty store file is also ok),
        // because we need to record the max seq id for the store file; see HBASE-6059.
        writer = store.createWriterInTmp(fd.maxKeyCount, this.compactionCompression, true,
            fd.maxMVCCReadpoint >= smallestReadPoint);
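        // Drain the merged scanner into the writer. A false return means the
        // compaction did not run to completion, so the partial file is aborted.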
        boolean finished = performCompaction(scanner, writer, smallestReadPoint);
        if (!finished) {
          abortWriter(writer);
          writer = null;
          throw new InterruptedIOException("Aborting compaction of store " + store +
              " in region " + store.getRegionInfo().getRegionNameAsString() +
              " because it was interrupted.");
        }
      } finally {
        if (scanner != null) {
          scanner.close();
        }
      }
    } finally {
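      // Seal any writer that is still open: record the max sequence id and the
      // major-compaction flag in its metadata, then expose the new file path.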
      if (writer != null) {
        writer.appendMetadata(fd.maxSeqId, request.isMajor());
        writer.close();
        newFiles.add(writer.getPath());
      }
    }
    return newFiles;
  }
}