1   /*
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase.util;
// This test is deliberately not in the o.a.h.h.regionserver package
// in order to make sure all required classes/methods are available.
21  
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreScanner;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import static org.junit.Assert.*;
60  
61  @Category(MediumTests.class)
62  public class TestCoprocessorScanPolicy {
63    final Log LOG = LogFactory.getLog(getClass());
64    protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
65    private static final byte[] F = Bytes.toBytes("fam");
66    private static final byte[] Q = Bytes.toBytes("qual");
67    private static final byte[] R = Bytes.toBytes("row");
68  
69  
70    @BeforeClass
71    public static void setUpBeforeClass() throws Exception {
72      Configuration conf = TEST_UTIL.getConfiguration();
73      conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
74          ScanObserver.class.getName());
75      TEST_UTIL.startMiniCluster();
76    }
77  
78    @AfterClass
79    public static void tearDownAfterClass() throws Exception {
80      TEST_UTIL.shutdownMiniCluster();
81    }
82  
83    @Test
84    public void testBaseCases() throws Exception {
85      byte[] tableName = Bytes.toBytes("baseCases");
86      HTable t = TEST_UTIL.createTable(tableName, F, 1);
87      // set the version override to 2
88      Put p = new Put(R);
89      p.setAttribute("versions", new byte[]{});
90      p.add(F, tableName, Bytes.toBytes(2));
91      t.put(p);
92  
93      long now = EnvironmentEdgeManager.currentTimeMillis();
94  
95      // insert 2 versions
96      p = new Put(R);
97      p.add(F, Q, now, Q);
98      t.put(p);
99      p = new Put(R);
100     p.add(F, Q, now+1, Q);
101     t.put(p);
102     Get g = new Get(R);
103     g.setMaxVersions(10);
104     Result r = t.get(g);
105     assertEquals(2, r.size());
106 
107     TEST_UTIL.flush(tableName);
108     TEST_UTIL.compact(tableName, true);
109 
110     // both version are still visible even after a flush/compaction
111     g = new Get(R);
112     g.setMaxVersions(10);
113     r = t.get(g);
114     assertEquals(2, r.size());
115 
116     // insert a 3rd version
117     p = new Put(R);
118     p.add(F, Q, now+2, Q);
119     t.put(p);
120     g = new Get(R);
121     g.setMaxVersions(10);
122     r = t.get(g);
123     // still only two version visible
124     assertEquals(2, r.size());
125 
126     t.close();
127   }
128 
129   @Test
130   public void testTTL() throws Exception {
131     byte[] tableName = Bytes.toBytes("testTTL");
132     HTableDescriptor desc = new HTableDescriptor(tableName);
133     HColumnDescriptor hcd = new HColumnDescriptor(F)
134     .setMaxVersions(10)
135     .setTimeToLive(1);
136     desc.addFamily(hcd);
137     TEST_UTIL.getHBaseAdmin().createTable(desc);
138     HTable t = new HTable(new Configuration(TEST_UTIL.getConfiguration()), tableName);
139     long now = EnvironmentEdgeManager.currentTimeMillis();
140     ManualEnvironmentEdge me = new ManualEnvironmentEdge();
141     me.setValue(now);
142     EnvironmentEdgeManagerTestHelper.injectEdge(me);
143     // 2s in the past
144     long ts = now - 2000;
145     // Set the TTL override to 3s
146     Put p = new Put(R);
147     p.setAttribute("ttl", new byte[]{});
148     p.add(F, tableName, Bytes.toBytes(3000L));
149     t.put(p);
150 
151     p = new Put(R);
152     p.add(F, Q, ts, Q);
153     t.put(p);
154     p = new Put(R);
155     p.add(F, Q, ts+1, Q);
156     t.put(p);
157 
158     // these two should be expired but for the override
159     // (their ts was 2s in the past)
160     Get g = new Get(R);
161     g.setMaxVersions(10);
162     Result r = t.get(g);
163     // still there?
164     assertEquals(2, r.size());
165 
166     TEST_UTIL.flush(tableName);
167     TEST_UTIL.compact(tableName, true);
168 
169     g = new Get(R);
170     g.setMaxVersions(10);
171     r = t.get(g);
172     // still there?
173     assertEquals(2, r.size());
174 
175     // roll time forward 2s.
176     me.setValue(now + 2000);
177     // now verify that data eventually does expire
178     g = new Get(R);
179     g.setMaxVersions(10);
180     r = t.get(g);
181     // should be gone now
182     assertEquals(0, r.size());
183     t.close();
184   }
185 
186   public static class ScanObserver extends BaseRegionObserver {
187     private Map<String, Long> ttls = new HashMap<String,Long>();
188     private Map<String, Integer> versions = new HashMap<String,Integer>();
189 
190     // lame way to communicate with the coprocessor,
191     // since it is loaded by a different class loader
192     @Override
193     public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c, final Put put,
194         final WALEdit edit, final boolean writeToWAL) throws IOException {
195       if (put.getAttribute("ttl") != null) {
196         KeyValue kv = put.getFamilyMap().values().iterator().next().get(0);
197         ttls.put(Bytes.toString(kv.getQualifier()), Bytes.toLong(kv.getValue()));
198         c.bypass();
199       } else if (put.getAttribute("versions") != null) {
200         KeyValue kv = put.getFamilyMap().values().iterator().next().get(0);
201         versions.put(Bytes.toString(kv.getQualifier()), Bytes.toInt(kv.getValue()));
202         c.bypass();
203       }
204     }
205 
206     @Override
207     public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
208         Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
209       Long newTtl = ttls.get(store.getTableName());
210       if (newTtl != null) {
211         System.out.println("PreFlush:" + newTtl);
212       }
213       Integer newVersions = versions.get(store.getTableName());
214       Store.ScanInfo oldSI = store.getScanInfo();
215       HColumnDescriptor family = store.getFamily();
216       Store.ScanInfo scanInfo = new Store.ScanInfo(family.getName(), family.getMinVersions(),
217           newVersions == null ? family.getMaxVersions() : newVersions,
218           newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
219           oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
220       Scan scan = new Scan();
221       scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
222       return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
223           ScanType.MINOR_COMPACT, store.getHRegion().getSmallestReadPoint(),
224           HConstants.OLDEST_TIMESTAMP);
225     }
226 
227     @Override
228     public InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
229         Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
230         long earliestPutTs, InternalScanner s) throws IOException {
231       Long newTtl = ttls.get(store.getTableName());
232       Integer newVersions = versions.get(store.getTableName());
233       Store.ScanInfo oldSI = store.getScanInfo();
234       HColumnDescriptor family = store.getFamily();
235       Store.ScanInfo scanInfo = new Store.ScanInfo(family.getName(), family.getMinVersions(),
236           newVersions == null ? family.getMaxVersions() : newVersions,
237           newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
238           oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
239       Scan scan = new Scan();
240       scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
241       return new StoreScanner(store, scanInfo, scan, scanners, scanType, store.getHRegion()
242           .getSmallestReadPoint(), earliestPutTs);
243     }
244 
245     @Override
246     public KeyValueScanner preStoreScannerOpen(
247         final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
248         final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
249       Long newTtl = ttls.get(store.getTableName());
250       Integer newVersions = versions.get(store.getTableName());
251       Store.ScanInfo oldSI = store.getScanInfo();
252       HColumnDescriptor family = store.getFamily();
253       Store.ScanInfo scanInfo = new Store.ScanInfo(family.getName(), family.getMinVersions(),
254           newVersions == null ? family.getMaxVersions() : newVersions,
255           newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
256           oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
257       return new StoreScanner(store, scanInfo, scan, targetCols);
258     }
259   }
260 
261   @org.junit.Rule
262   public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
263    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
264 }