
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.compactions;

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.regionserver.DateTieredMultiFileWriter;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.security.User;

/**
 * This compactor will generate StoreFiles for different time ranges.
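 * Cells are dispatched to one writer per window, as delimited by the given lower boundaries, so a
 * single pass over the input produces a separate output file for each tier (see
 * {@link DateTieredMultiFileWriter}).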
 */
@InterfaceAudience.Private
public class DateTieredCompactor extends AbstractMultiOutputCompactor<DateTieredMultiFileWriter> {

  private static final Log LOG = LogFactory.getLog(DateTieredCompactor.class);

  public DateTieredCompactor(Configuration conf, Store store) {
    super(conf, store);
  }

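  /**
   * Returns true when this request compacts all of the store's current files, in which case an
   * empty file must still be written to retain the store's max sequence id.
   */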
  private boolean needEmptyFile(CompactionRequest request) {
    // if we are going to compact the last N files, then we need to emit an empty file to retain the
    // maxSeqId if we haven't written out anything.
    return store.getMaxSequenceId() == StoreFile.getMaxSequenceIdInList(request.getFiles());
  }

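  /**
   * Runs the compaction, producing one output file per time window. {@code lowerBoundaries} holds
   * the lower timestamp boundary of each window and is forwarded to the multi-file writer, which
   * routes each cell to the writer for its matching window.
   */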
  public List<Path> compact(final CompactionRequest request, final List<Long> lowerBoundaries,
      CompactionThroughputController throughputController, User user) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Executing compaction with " + lowerBoundaries.size()
          + " windows, lower boundaries: " + lowerBoundaries);
    }
    return compact(request, defaultScannerFactory,
      new CellSinkFactory<DateTieredMultiFileWriter>() {

        @Override
        public DateTieredMultiFileWriter createWriter(InternalScanner scanner, FileDetails fd,
            boolean shouldDropBehind) throws IOException {
          DateTieredMultiFileWriter writer = new DateTieredMultiFileWriter(lowerBoundaries,
              needEmptyFile(request));
          initMultiWriter(writer, scanner, fd, shouldDropBehind);
          return writer;
        }
      }, throughputController, user);
  }

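  /**
   * Closes all per-window writers and returns the paths of the committed files, passing along the
   * compaction's max sequence id and whether this was a major compaction.
   */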
  @Override
  protected List<Path> commitWriter(DateTieredMultiFileWriter writer, FileDetails fd,
      CompactionRequest request) throws IOException {
    return writer.commitWriters(fd.maxSeqId, request.isMajor());
  }
}