
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.compactions;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;

/**
 * Compact the passed set of files. Create an instance and then call
 * {@link #compact(CompactionRequest)}.
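 * <p>
 * A minimal usage sketch, assuming a {@link Configuration}, a {@link Store}, and a
 * {@link CompactionRequest} are already in hand (the variable names are illustrative only):
 * <pre>
 *   DefaultCompactor compactor = new DefaultCompactor(conf, store);
 *   List&lt;Path&gt; newFiles = compactor.compact(request);
 * </pre>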
 */
@InterfaceAudience.Private
public class DefaultCompactor extends Compactor {
  public DefaultCompactor(final Configuration conf, final Store store) {
    super(conf, store);
  }

  /**
   * Do a minor/major compaction on an explicit set of storefiles from a Store.
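   * @param request the compaction request naming the files to compact and whether the
   *          compaction is major
   * @return paths of the newly written store files; empty if a coprocessor hook elected to
   *          skip normal processing
   * @throws IOException on write failure, or an {@link InterruptedIOException} if the
   *          compaction was interrupted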
   */
  public List<Path> compact(final CompactionRequest request) throws IOException {
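    // Gather per-file details (max key count, max seq id, earliest put timestamp, max MVCC
    // read point) that size the writer and drive the scanner setup below.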
    FileDetails fd = getFileDetails(request.getFiles(), request.isMajor());
    this.progress = new CompactionProgress(fd.maxKeyCount);

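    // Open one scanner over each store file being compacted.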
    List<StoreFileScanner> scanners = createFileScanners(request.getFiles());

    StoreFile.Writer writer = null;
    List<Path> newFiles = new ArrayList<Path>();
    // Find the smallest read point across all the Scanners.
    long smallestReadPoint = setSmallestReadPoint();
    try {
      InternalScanner scanner = null;
      try {
        /* Include deletes, unless we are doing a major compaction */
        ScanType scanType =
            request.isMajor() ? ScanType.COMPACT_DROP_DELETES : ScanType.COMPACT_RETAIN_DELETES;
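        // Give coprocessors a chance to supply their own scanner; if none does, fall back
        // to the default store scanner.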
        scanner = preCreateCoprocScanner(request, scanType, fd.earliestPutTs, scanners);
        if (scanner == null) {
          scanner = createScanner(store, scanners, scanType, smallestReadPoint, fd.earliestPutTs);
        }
        scanner = postCreateCoprocScanner(request, scanType, scanner);
        if (scanner == null) {
          // NULL scanner returned from coprocessor hooks means skip normal processing.
          return newFiles;
        }
        // Create the writer even if there are no KVs (an empty store file is also OK),
        // because we need to record the max seq id for the store file; see HBASE-6059.
        writer = store.createWriterInTmp(fd.maxKeyCount, this.compactionCompression, true,
            fd.maxMVCCReadpoint >= smallestReadPoint);
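        // Stream cells from the merged scanner into the new file; a false return means the
        // compaction was interrupted before it finished.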
        boolean finished = performCompaction(scanner, writer, smallestReadPoint);
        if (!finished) {
          abortWriter(writer);
          writer = null;
          throw new InterruptedIOException("Aborting compaction of store " + store +
              " in region " + store.getRegionInfo().getRegionNameAsString() +
              " because it was interrupted.");
        }
      } finally {
        if (scanner != null) {
          scanner.close();
        }
      }
    } finally {
      if (writer != null) {
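        // Record the max seq id and the major-compaction flag in the file metadata before
        // closing, then expose the finished file's path to the caller.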
        writer.appendMetadata(fd.maxSeqId, request.isMajor());
        writer.close();
        newFiles.add(writer.getPath());
      }
    }
    return newFiles;
  }
}