/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.experimental.categories.Category;

import com.google.common.collect.Lists;

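/**
 * Test fixture for compaction policy tests. It stands up a real {@link HStore}
 * over a test filesystem and provides helpers for building mock store files
 * and for asserting which files the compaction policy selects.
 */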
@Category(SmallTests.class)
public class TestCompactionPolicy {
  private final static Log LOG = LogFactory.getLog(TestCompactionPolicy.class);
  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  protected Configuration conf;
  protected HStore store;
  private static final String DIR =
    TEST_UTIL.getDataTestDir(TestCompactionPolicy.class.getSimpleName()).toString();
  protected static Path TEST_FILE;

  protected static final int minFiles = 3;
  protected static final int maxFiles = 5;

  protected static final long minSize = 10;
  protected static final long maxSize = 2100;

  private HLog hlog;
  private HRegion region;

  @Before
  public void setUp() throws Exception {
    config();
    initialize();
  }

  /**
   * Set up the configuration values the store under test needs.
   */
  protected void config() {
    this.conf = TEST_UTIL.getConfiguration();
    this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
    this.conf.setInt("hbase.hstore.compaction.min", minFiles);
    this.conf.setInt("hbase.hstore.compaction.max", maxFiles);
    this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, minSize);
    this.conf.setLong("hbase.hstore.compaction.max.size", maxSize);
    this.conf.setFloat("hbase.hstore.compaction.ratio", 1.0F);
  }

  /**
   * Set up a store pointing at the test data directory.
   * @throws IOException if region or store setup fails
   */
  protected void initialize() throws IOException {
    Path basedir = new Path(DIR);
    String logName = "logs";
    Path logdir = new Path(DIR, logName);
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
    FileSystem fs = FileSystem.get(conf);

    fs.delete(logdir, true);

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table")));
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);

    hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
    // Create the region on disk once, then reopen it against the WAL above.
    region = HRegion.createHRegion(info, basedir, conf, htd);
    HRegion.closeHRegion(region);
    Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
    region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);

    store = new HStore(region, hcd, conf);

    TEST_FILE = region.getRegionFileSystem().createTempName();
    fs.createNewFile(TEST_FILE);
  }

  @After
  public void tearDown() throws IOException {
    IOException ex = null;
    try {
      region.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      hlog.closeAndDelete();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    if (ex != null) {
      throw ex;
    }
  }

  ArrayList<Long> toArrayList(long... numbers) {
    ArrayList<Long> result = new ArrayList<Long>();
    for (long i : numbers) {
      result.add(i);
    }
    return result;
  }

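  /**
   * Helpers that build lists of {@link MockStoreFile}s for the policy to
   * select from. Sizes are in bytes; on-disk age defaults to zero; the
   * {@code isReference} overloads mark the files as split references.
   */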
  List<StoreFile> sfCreate(long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<Long>();
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(toArrayList(sizes), ageInDisk);
  }

  List<StoreFile> sfCreate(ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
      throws IOException {
    return sfCreate(false, sizes, ageInDisk);
  }

  List<StoreFile> sfCreate(boolean isReference, long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<Long>(sizes.length);
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(isReference, toArrayList(sizes), ageInDisk);
  }

  List<StoreFile> sfCreate(boolean isReference, ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
      throws IOException {
    List<StoreFile> ret = Lists.newArrayList();
    for (int i = 0; i < sizes.size(); i++) {
      ret.add(new MockStoreFile(TEST_UTIL, TEST_FILE,
          sizes.get(i), ageInDisk.get(i), isReference, i));
    }
    return ret;
  }

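  /** Return the on-disk lengths of the given store files, in list order. */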
  long[] getSizes(List<StoreFile> sfList) {
    long[] aNums = new long[sfList.size()];
    for (int i = 0; i < sfList.size(); ++i) {
      aNums[i] = sfList.get(i).getReader().length();
    }
    return aNums;
  }

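  /**
   * Assert that selecting a compaction from {@code candidates} picks exactly
   * the files with the {@code expected} sizes, in order. An illustrative call,
   * with made-up sizes, under the ratio 1.0 / min 3 files config above:
   * <pre>
   *   // 100 and 50 each exceed the sum of the smaller files, so only the
   *   // three smallest are selected
   *   compactEquals(sfCreate(100, 50, 23, 12, 12), 23, 12, 12);
   * </pre>
   */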
  void compactEquals(List<StoreFile> candidates, long... expected) throws IOException {
    compactEquals(candidates, false, false, expected);
  }

  void compactEquals(List<StoreFile> candidates, boolean forcemajor, long... expected)
      throws IOException {
    compactEquals(candidates, forcemajor, false, expected);
  }

  void compactEquals(List<StoreFile> candidates, boolean forcemajor, boolean isOffPeak,
      long... expected) throws IOException {
    store.forceMajor = forcemajor;
    // Exercise the default (ratio-based) compaction selection.
    CompactionRequest result =
        ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy())
            .selectCompaction(candidates, new ArrayList<StoreFile>(), false, isOffPeak, forcemajor);
    List<StoreFile> actual = new ArrayList<StoreFile>(result.getFiles());
    if (isOffPeak && !forcemajor) {
      Assert.assertTrue(result.isOffPeak());
    }
    Assert.assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
    store.forceMajor = false;
  }
}
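
// A minimal sketch of how a concrete test might use this fixture. The subclass
// name and sizes below are illustrative; the actual tests in the HBase source
// tree (e.g. TestDefaultCompactSelection) may differ for this code version.
//
// @Category(SmallTests.class)
// public class TestDefaultCompactSelection extends TestCompactionPolicy {
//   @org.junit.Test
//   public void testCompactionRatio() throws IOException {
//     // maxFiles == 5 caps the selection even though all seven files qualify
//     compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 7, 6, 5, 4, 3);
//   }
// }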