/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;
// this is deliberately not in the o.a.h.h.regionserver package
// in order to make sure all required classes/methods are available

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.ScanInfo;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreScanner;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

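/**
 * Verifies that a region observer coprocessor can replace the scanners used
 * for flushes, compactions, and user scans, and thereby enforce its own
 * per-table TTL and max-versions policy in place of the column family's
 * settings.
 */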
@Category(MediumTests.class)
@RunWith(Parameterized.class)
public class TestCoprocessorScanPolicy {
  final Log LOG = LogFactory.getLog(getClass());
  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final byte[] F = Bytes.toBytes("fam");
  private static final byte[] Q = Bytes.toBytes("qual");
  private static final byte[] R = Bytes.toBytes("row");

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        ScanObserver.class.getName());
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Parameters
  public static Collection<Object[]> parameters() {
    return HBaseTestingUtility.BOOLEAN_PARAMETERIZED;
  }

  public TestCoprocessorScanPolicy(boolean parallelSeekEnable) {
    TEST_UTIL.getMiniHBaseCluster().getConf()
        .setBoolean(StoreScanner.STORESCANNER_PARALLEL_SEEK_ENABLE, parallelSeekEnable);
  }

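  /**
   * The column family allows a single version, but the coprocessor raises the
   * limit to 2 via the "versions" put attribute. Both inserted versions must
   * survive a flush and compaction, and a third insert must still leave only
   * two versions visible.
   */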
  @Test
  public void testBaseCases() throws Exception {
    TableName tableName =
        TableName.valueOf("baseCases");
    if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) {
      TEST_UTIL.deleteTable(tableName);
    }
    HTable t = TEST_UTIL.createTable(tableName, F, 1);
    // set the version override to 2
    Put p = new Put(R);
    p.setAttribute("versions", new byte[]{});
    p.add(F, tableName.getName(), Bytes.toBytes(2));
    t.put(p);

    long now = EnvironmentEdgeManager.currentTimeMillis();

    // insert 2 versions
    p = new Put(R);
    p.add(F, Q, now, Q);
    t.put(p);
    p = new Put(R);
    p.add(F, Q, now + 1, Q);
    t.put(p);
    Get g = new Get(R);
    g.setMaxVersions(10);
    Result r = t.get(g);
    assertEquals(2, r.size());

    TEST_UTIL.flush(tableName);
    TEST_UTIL.compact(tableName, true);

    // both versions are still visible even after a flush/compaction
    g = new Get(R);
    g.setMaxVersions(10);
    r = t.get(g);
    assertEquals(2, r.size());

    // insert a 3rd version
    p = new Put(R);
    p.add(F, Q, now + 2, Q);
    t.put(p);
    g = new Get(R);
    g.setMaxVersions(10);
    r = t.get(g);
    // still only two versions visible
    assertEquals(2, r.size());

    t.close();
  }

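  /**
   * The column family TTL is 1 second, but the coprocessor extends it to 3
   * seconds via the "ttl" put attribute. Cells written 2 seconds in the past
   * must survive flush and compaction, and only expire once the injected
   * clock moves past the overridden TTL.
   */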
  @Test
  public void testTTL() throws Exception {
    TableName tableName =
        TableName.valueOf("testTTL");
    if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) {
      TEST_UTIL.deleteTable(tableName);
    }
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(F)
        .setMaxVersions(10)
        .setTimeToLive(1);
    desc.addFamily(hcd);
    TEST_UTIL.getHBaseAdmin().createTable(desc);
    HTable t = new HTable(new Configuration(TEST_UTIL.getConfiguration()), tableName);
    long now = EnvironmentEdgeManager.currentTimeMillis();
    ManualEnvironmentEdge me = new ManualEnvironmentEdge();
    me.setValue(now);
    EnvironmentEdgeManagerTestHelper.injectEdge(me);
    // 2s in the past
    long ts = now - 2000;
    // Set the TTL override to 3s
    Put p = new Put(R);
    p.setAttribute("ttl", new byte[]{});
    p.add(F, tableName.getName(), Bytes.toBytes(3000L));
    t.put(p);

    p = new Put(R);
    p.add(F, Q, ts, Q);
    t.put(p);
    p = new Put(R);
    p.add(F, Q, ts + 1, Q);
    t.put(p);

    // with timestamps 2s in the past these two would have expired under the
    // family's 1s TTL, but the 3s override keeps them alive
    Get g = new Get(R);
    g.setMaxVersions(10);
    Result r = t.get(g);
    // still there?
    assertEquals(2, r.size());

    TEST_UTIL.flush(tableName);
    TEST_UTIL.compact(tableName, true);

    g = new Get(R);
    g.setMaxVersions(10);
    r = t.get(g);
    // still there?
    assertEquals(2, r.size());

    // roll time forward 2s; the cells are now 4s old, past the 3s override
    me.setValue(now + 2000);
    // now verify that the data eventually does expire
    g = new Get(R);
    g.setMaxVersions(10);
    r = t.get(g);
    // should be gone now
    assertEquals(0, r.size());
    t.close();
  }

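  /**
   * Region observer that replaces the scanners opened for flushes,
   * compactions, and user scans with a StoreScanner built from a ScanInfo
   * carrying the overridden TTL and max-versions settings.
   */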
  public static class ScanObserver extends BaseRegionObserver {
    private Map<TableName, Long> ttls =
        new HashMap<TableName, Long>();
    private Map<TableName, Integer> versions =
        new HashMap<TableName, Integer>();

    // Crude way to communicate with the coprocessor, since it is loaded by a
    // different class loader: the test tags a Put with a "ttl" or "versions"
    // attribute, carrying the table name in the qualifier and the override
    // value in the cell value. The put itself is then bypassed.
    @Override
    public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c, final Put put,
        final WALEdit edit, final Durability durability) throws IOException {
      if (put.getAttribute("ttl") != null) {
        Cell cell = put.getFamilyCellMap().values().iterator().next().get(0);
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        ttls.put(TableName.valueOf(kv.getQualifier()), Bytes.toLong(kv.getValue()));
        c.bypass();
      } else if (put.getAttribute("versions") != null) {
        Cell cell = put.getFamilyCellMap().values().iterator().next().get(0);
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        versions.put(TableName.valueOf(kv.getQualifier()), Bytes.toInt(kv.getValue()));
        c.bypass();
      }
    }

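    // Replace the flush scanner so the flush itself applies the overridden
    // TTL/versions instead of the column family defaults.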
    @Override
    public InternalScanner preFlushScannerOpen(
        final ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
      Long newTtl = ttls.get(store.getTableName());
      if (newTtl != null) {
        System.out.println("PreFlush:" + newTtl);
      }
      Integer newVersions = versions.get(store.getTableName());
      ScanInfo oldSI = store.getScanInfo();
      HColumnDescriptor family = store.getFamily();
      ScanInfo scanInfo = new ScanInfo(family.getName(), family.getMinVersions(),
          newVersions == null ? family.getMaxVersions() : newVersions,
          newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
          oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
      Scan scan = new Scan();
      scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
      return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
          ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
          HConstants.OLDEST_TIMESTAMP);
    }

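    // Do the same for compactions, reusing the scanners, scan type, and
    // earliest put timestamp supplied by the region server.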
    @Override
    public InternalScanner preCompactScannerOpen(
        final ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
        long earliestPutTs, InternalScanner s) throws IOException {
      Long newTtl = ttls.get(store.getTableName());
      Integer newVersions = versions.get(store.getTableName());
      ScanInfo oldSI = store.getScanInfo();
      HColumnDescriptor family = store.getFamily();
      ScanInfo scanInfo = new ScanInfo(family.getName(), family.getMinVersions(),
          newVersions == null ? family.getMaxVersions() : newVersions,
          newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
          oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
      Scan scan = new Scan();
      scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
      return new StoreScanner(store, scanInfo, scan, scanners, scanType,
          store.getSmallestReadPoint(), earliestPutTs);
    }

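    // Replace the scanner for user reads (gets and scans) as well, so the
    // overridden policy is honored before any flush or compaction runs.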
    @Override
    public KeyValueScanner preStoreScannerOpen(
        final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
        final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
      Long newTtl = ttls.get(store.getTableName());
      Integer newVersions = versions.get(store.getTableName());
      ScanInfo oldSI = store.getScanInfo();
      HColumnDescriptor family = store.getFamily();
      ScanInfo scanInfo = new ScanInfo(family.getName(), family.getMinVersions(),
          newVersions == null ? family.getMaxVersions() : newVersions,
          newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
          oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
      return new StoreScanner(store, scanInfo, scan, targetCols);
    }
  }

}