1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.regionserver;
21  
22  import java.io.IOException;
23  import java.util.NavigableSet;
24  
25  import org.apache.hadoop.hbase.classification.InterfaceAudience;
26  import org.apache.hadoop.hbase.Cell;
27  import org.apache.hadoop.hbase.HConstants;
28  import org.apache.hadoop.hbase.KeepDeletedCells;
29  import org.apache.hadoop.hbase.KeyValue;
30  import org.apache.hadoop.hbase.client.Scan;
31  import org.apache.hadoop.hbase.filter.Filter;
32  import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
33  import org.apache.hadoop.hbase.io.TimeRange;
34  import org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteResult;
35  import org.apache.hadoop.hbase.util.Bytes;
36  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
37  
38  import com.google.common.base.Preconditions;
39  
40  /**
41   * A query matcher that is specifically designed for the scan case.
42   */
43  @InterfaceAudience.Private
44  public class ScanQueryMatcher {
45    // Optimization so we can skip lots of compares when we decide to skip
46    // to the next row.
47    private boolean stickyNextRow;
48    private final byte[] stopRow;
49  
50    private final TimeRange tr;
51  
52    private final Filter filter;
53  
54    /** Keeps track of deletes */
55    private final DeleteTracker deletes;
56  
57    /*
58     * The following three members define how we deal with deletes.
59     * There are three different aspects:
60     * 1. Whether to keep delete markers. This is used in compactions.
61     *    Minor compactions always keep delete markers.
62     * 2. Whether to keep deleted rows. This is also used in compactions,
63     *    if the store is set to keep deleted rows. This implies keeping
64     *    the delete markers as well.
65     *    In this case deleted rows are subject to the normal max version
66     *    and TTL/min version rules just like "normal" rows.
67     * 3. Whether a scan can do time-travel queries to a point in time before a
68     *    delete marker, in order to reach the deleted rows.
69     */
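      // Aspect 1 corresponds to retainDeletesInOutput, aspect 2 to keepDeletedCells,
      // and aspect 3 to seePastDeleteMarkers below.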
70    /** whether to retain delete markers */
71    private boolean retainDeletesInOutput;
72  
73    /** whether to return deleted rows */
74    private final KeepDeletedCells keepDeletedCells;
75    /** whether time range queries can see rows "behind" a delete */
76    private final boolean seePastDeleteMarkers;
77  
78  
79    /** Keeps track of columns and versions */
80    private final ColumnTracker columns;
81  
82    /** Key to seek to in memstore and StoreFiles */
83    private final KeyValue startKey;
84  
85    /** Row comparator for the region this query is for */
86    private final KeyValue.KVComparator rowComparator;
87  
88    /* row is not private for tests */
89    /** Row the query is on */
90    byte [] row;
91    int rowOffset;
92    short rowLength;
93    
94    /**
95     * Oldest put in any of the involved store files.
96     * Used to decide whether it is ok to delete a
97     * family delete marker when this store keeps
98     * deleted KVs.
99     */
100   private final long earliestPutTs;
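      // Note: despite its name, this field holds oldestUnexpiredTS (the oldest
      // timestamp still inside the column family's TTL), not a TTL duration; the
      // constructor assigns it from the oldestUnexpiredTS argument and match()
      // compares it against cell timestamps.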
101   private final long ttl;
102 
103   /** readPoint over which the KVs are unconditionally included */
104   protected long maxReadPointToTrackVersions;
105 
106   private byte[] dropDeletesFromRow = null, dropDeletesToRow = null;
107 
108   /**
109    * This variable indicates whether there is a null column in the query.
110    * A null column always exists in a wildcard column query. In an explicit
111    * column query a null column may exist, depending on whether the first
112    * requested column is empty.
113    */
114   private boolean hasNullColumn = true;
115   
116   private RegionCoprocessorHost regionCoprocessorHost = null;
117 
118   // By default, when hbase.hstore.time.to.purge.deletes is 0ms, a delete
119   // marker is always removed during a major compaction. If set to a non-zero
120   // value, then a major compaction will try to keep a delete marker around for
121   // the given number of milliseconds. We want to keep the delete markers
122   // around a bit longer because old puts might appear out-of-order. For
123   // example, during log replication between two clusters.
124   //
125   // If the delete marker has lived longer than its column-family's TTL then
126   // the delete marker will be removed even if time.to.purge.deletes has not
127   // passed. This is because all the Puts that this delete marker can influence
128   // would have also expired. (Removal of delete markers based on the column
129   // family's TTL will not happen if min-versions is set to a non-zero value.)
130   //
131   // But if time.to.purge.deletes has not expired, then a delete
132   // marker will not be removed just because there are no Puts that it is
133   // currently influencing. This is because Puts that this delete marker can
134   // influence may appear out of order.
135   private final long timeToPurgeDeletes;
136   
137   private final boolean isUserScan;
138 
139   private final boolean isReversed;
140 
141   /**
142    * Construct a QueryMatcher for a scan
143    * @param scan the scan being executed
144    * @param scanInfo The store's immutable scan info
145    * @param columns the columns requested; null or empty means all columns in the family
146    * @param scanType Type of the scan
147    * @param readPointToUse readpoint over which the KVs are unconditionally included
148    * @param earliestPutTs Earliest put seen in any of the store files.
149    * @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL
150    * @param regionCoprocessorHost the coprocessor host; used to instantiate the DeleteTracker
151    * @throws IOException if the DeleteTracker cannot be instantiated
152    */
153   public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet<byte[]> columns,
154       ScanType scanType, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS,
155       RegionCoprocessorHost regionCoprocessorHost) throws IOException {
156     this.tr = scan.getTimeRange();
157     this.rowComparator = scanInfo.getComparator();
158     this.regionCoprocessorHost = regionCoprocessorHost;
159     this.deletes = instantiateDeleteTracker();
160     this.stopRow = scan.getStopRow();
161     this.startKey = KeyValue.createFirstDeleteFamilyOnRow(scan.getStartRow(),
162         scanInfo.getFamily());
163     this.filter = scan.getFilter();
164     this.earliestPutTs = earliestPutTs;
165     this.maxReadPointToTrackVersions = readPointToUse;
166     this.timeToPurgeDeletes = scanInfo.getTimeToPurgeDeletes();
167     this.ttl = oldestUnexpiredTS;
168 
169     /* how to deal with deletes */
170     this.isUserScan = scanType == ScanType.USER_SCAN;
171     // keep deleted cells: if compaction or raw scan
172     this.keepDeletedCells = scan.isRaw() ? KeepDeletedCells.TRUE :
173       isUserScan ? KeepDeletedCells.FALSE : scanInfo.getKeepDeletedCells();
174     // retain deletes: if minor compaction or raw scan
175     this.retainDeletesInOutput = scanType == ScanType.COMPACT_RETAIN_DELETES || scan.isRaw();
176     // seePastDeleteMarker: user initiated scans
177     this.seePastDeleteMarkers =
178         scanInfo.getKeepDeletedCells() != KeepDeletedCells.FALSE && isUserScan;
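        // Net effect of the settings above (a raw scan comes in as ScanType.USER_SCAN):
        //   raw scan:         keepDeletedCells=TRUE,  retainDeletesInOutput=true
        //   other user scan:  keepDeletedCells=FALSE, retainDeletesInOutput=false
        //   minor compaction: keepDeletedCells from ScanInfo, retainDeletesInOutput=true
        //   major compaction: keepDeletedCells from ScanInfo, retainDeletesInOutput=false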
179 
180     int maxVersions =
181         scan.isRaw() ? scan.getMaxVersions() : Math.min(scan.getMaxVersions(),
182           scanInfo.getMaxVersions());
183 
184     // Single branch to deal with two types of reads (columns vs all in family)
185     if (columns == null || columns.size() == 0) {
186       // there is always a null column in the wildcard column query.
187       hasNullColumn = true;
188 
189       // use a specialized scan for wildcard column tracker.
190       this.columns = new ScanWildcardColumnTracker(
191           scanInfo.getMinVersions(), maxVersions, oldestUnexpiredTS);
192     } else {
193       // whether there is null column in the explicit column query
194       hasNullColumn = (columns.first().length == 0);
195 
196       // We can share the ExplicitColumnTracker, diff is we reset
197       // between rows, not between storefiles.
198       byte[] attr = scan.getAttribute(Scan.HINT_LOOKAHEAD);
199       this.columns = new ExplicitColumnTracker(columns, scanInfo.getMinVersions(), maxVersions,
200           oldestUnexpiredTS, attr == null ? 0 : Bytes.toInt(attr));
201     }
202     this.isReversed = scan.isReversed();
203   }
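      // Sketch of how a caller such as StoreScanner is expected to drive this class
      // (names other than setRow/match/MatchCode are illustrative only):
      //
      //   matcher.setRow(row, offset, length);
      //   for (KeyValue kv : cellsOfRow) {
      //     switch (matcher.match(kv)) {
      //       case INCLUDE:       results.add(kv); break;
      //       case SKIP:          break;
      //       case SEEK_NEXT_COL: /* seek using getKeyForNextColumn(kv) */ break;
      //       case SEEK_NEXT_ROW: /* seek using getKeyForNextRow(kv) */ break;
      //       default:            /* DONE, DONE_SCAN, SEEK_NEXT_USING_HINT, ... */ break;
      //     }
      //   }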
204 
205   private DeleteTracker instantiateDeleteTracker() throws IOException {
206     DeleteTracker tracker = new ScanDeleteTracker();
207     if (regionCoprocessorHost != null) {
208       tracker = regionCoprocessorHost.postInstantiateDeleteTracker(tracker);
209     }
210     return tracker;
211   }
212 
213   /**
214    * Construct a QueryMatcher for a scan that drops deletes from a limited range of rows.
215    * @param scan the scan being executed
216    * @param scanInfo The store's immutable scan info
217    * @param columns the columns requested; null or empty means all columns in the family
218    * @param readPointToUse readpoint over which the KVs are unconditionally included
219    * @param earliestPutTs Earliest put seen in any of the store files.
220    * @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL
221    * @param dropDeletesFromRow The inclusive left bound of the range; can be EMPTY_START_ROW.
222    * @param dropDeletesToRow The exclusive right bound of the range; can be EMPTY_END_ROW.
223    * @param regionCoprocessorHost the coprocessor host; used to instantiate the DeleteTracker
224    * @throws IOException if the DeleteTracker cannot be instantiated
225    */
226   public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet<byte[]> columns,
227       long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, byte[] dropDeletesFromRow,
228       byte[] dropDeletesToRow, RegionCoprocessorHost regionCoprocessorHost) throws IOException {
229     this(scan, scanInfo, columns, ScanType.COMPACT_RETAIN_DELETES, readPointToUse, earliestPutTs,
230         oldestUnexpiredTS, regionCoprocessorHost);
231     Preconditions.checkArgument((dropDeletesFromRow != null) && (dropDeletesToRow != null));
232     this.dropDeletesFromRow = dropDeletesFromRow;
233     this.dropDeletesToRow = dropDeletesToRow;
234   }
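      // This variant is intended for compactions that may drop delete markers only
      // within [dropDeletesFromRow, dropDeletesToRow); outside that range the matcher
      // keeps retaining deletes (see checkPartialDropDeleteRange below).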
235 
236   /*
237    * Constructor for tests
238    */
239   ScanQueryMatcher(Scan scan, ScanInfo scanInfo,
240       NavigableSet<byte[]> columns, long oldestUnexpiredTS) throws IOException {
241     this(scan, scanInfo, columns, ScanType.USER_SCAN,
242           Long.MAX_VALUE, /* max Readpoint to track versions */
243         HConstants.LATEST_TIMESTAMP, oldestUnexpiredTS, null);
244   }
245 
246   /**
247    *
248    * @return whether there is a null column in the query
249    */
250   public boolean hasNullColumnInQuery() {
251     return hasNullColumn;
252   }
253 
254   /**
255    * Determines if the caller should do one of several things:
256    * - seek/skip to the next row (MatchCode.SEEK_NEXT_ROW)
257    * - seek/skip to the next column (MatchCode.SEEK_NEXT_COL)
258    * - include the current KeyValue (MatchCode.INCLUDE)
259    * - ignore the current KeyValue (MatchCode.SKIP)
260    * - go to the next row (MatchCode.DONE)
261    *
262    * @param kv KeyValue to check
263    * @return The match code instance.
264    * @throws IOException in case there is an internal consistency problem
265    *      caused by a data corruption.
266    */
267   public MatchCode match(KeyValue kv) throws IOException {
268     if (filter != null && filter.filterAllRemaining()) {
269       return MatchCode.DONE_SCAN;
270     }
271 
272     byte [] bytes = kv.getBuffer();
273     int offset = kv.getOffset();
274 
275     int keyLength = Bytes.toInt(bytes, offset, Bytes.SIZEOF_INT);
276     offset += KeyValue.ROW_OFFSET;
277 
278     int initialOffset = offset;
279 
280     short rowLength = Bytes.toShort(bytes, offset, Bytes.SIZEOF_SHORT);
281     offset += Bytes.SIZEOF_SHORT;
282 
283     int ret = this.rowComparator.compareRows(row, this.rowOffset, this.rowLength,
284         bytes, offset, rowLength);
285     if (!this.isReversed) {
286       if (ret <= -1) {
287         return MatchCode.DONE;
288       } else if (ret >= 1) {
289         // could optimize this, if necessary?
290         // Could also be called SEEK_TO_CURRENT_ROW, but this
291         // should be rare/never happens.
292         return MatchCode.SEEK_NEXT_ROW;
293       }
294     } else {
295       if (ret <= -1) {
296         return MatchCode.SEEK_NEXT_ROW;
297       } else if (ret >= 1) {
298         return MatchCode.DONE;
299       }
300     }
301 
302 
303     // optimize case.
304     if (this.stickyNextRow)
305         return MatchCode.SEEK_NEXT_ROW;
306 
307     if (this.columns.done()) {
308       stickyNextRow = true;
309       return MatchCode.SEEK_NEXT_ROW;
310     }
311 
312     //Passing rowLength
313     offset += rowLength;
314 
315     //Skipping family
316     byte familyLength = bytes [offset];
317     offset += familyLength + 1;
318 
319     int qualLength = keyLength -
320       (offset - initialOffset) - KeyValue.TIMESTAMP_TYPE_SIZE;
321 
322     long timestamp = Bytes.toLong(bytes, initialOffset + keyLength - KeyValue.TIMESTAMP_TYPE_SIZE);
323     // check for early out based on timestamp alone
324     if (columns.isDone(timestamp)) {
325         return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
326     }
327 
328     /*
329      * The delete logic is pretty complicated now.
330      * This is corroborated by the following:
331      * 1. The store might be instructed to keep deleted rows around.
332      * 2. A scan can optionally see past a delete marker now.
333      * 3. If deleted rows are kept, we have to find out when we can
334      *    remove the delete markers.
335      * 4. Family delete markers are always first (regardless of their TS)
336      * 5. Delete markers should not be counted as a version
337      * 6. Delete markers affect puts of the *same* TS
338      * 7. Delete markers need to be version counted together with the puts
339      *    they affect
340      */
341     byte type = bytes[initialOffset + keyLength - 1];
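        // For a delete marker, after possibly recording it in the DeleteTracker, the
        // branch below applies these checks in order:
        //   1) for compactions, include markers younger than time.to.purge.deletes;
        //   2) include markers that must be retained in output or that are newer than
        //      the read point (raw scans instead fall through to the normal version
        //      and time-range checks);
        //   3) if deleted cells are kept and the marker is older than any put in the
        //      involved store files, seek past it; otherwise version-count it;
        //   4) otherwise skip the marker.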
342     if (kv.isDelete()) {
343       if (keepDeletedCells == KeepDeletedCells.FALSE
344           || (keepDeletedCells == KeepDeletedCells.TTL && timestamp < ttl)) {
345         // first ignore delete markers if the scanner can do so, and the
346         // range does not include the marker
347         //
348         // during flushes and compactions also ignore delete markers newer
349         // than the readpoint of any open scanner, this prevents deleted
350         // rows that could still be seen by a scanner from being collected
351         boolean includeDeleteMarker = seePastDeleteMarkers ?
352             tr.withinTimeRange(timestamp) :
353             tr.withinOrAfterTimeRange(timestamp);
354         if (includeDeleteMarker
355             && kv.getMvccVersion() <= maxReadPointToTrackVersions) {
356           this.deletes.add(kv);
357         }
358         // Can't early out now, because DelFam come before any other keys
359       }
360      
361       if ((!isUserScan)
362           && timeToPurgeDeletes > 0
363           && (EnvironmentEdgeManager.currentTimeMillis() - timestamp) <= timeToPurgeDeletes) {
364         return MatchCode.INCLUDE;
365       } else if (retainDeletesInOutput || kv.getMvccVersion() > maxReadPointToTrackVersions) {
366         // always include, or it is not yet time to check whether it is OK
367         // to purge deletes or not
368         if (!isUserScan) {
369           // if this is not a user scan (compaction), we can filter this deletemarker right here
370           // otherwise (i.e. a "raw" scan) we fall through to normal version and timerange checking
371           return MatchCode.INCLUDE;
372         }
373       } else if (keepDeletedCells == KeepDeletedCells.TRUE
374           || (keepDeletedCells == KeepDeletedCells.TTL && timestamp >= ttl)) {
375         if (timestamp < earliestPutTs) {
376           // keeping deleted rows, but there are no puts older than
377           // this delete in the store files.
378           return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
379         }
380         // else: fall through and do version counting on the
381         // delete markers
382       } else {
383         return MatchCode.SKIP;
384       }
385       // note the following "else if"...
386       // delete markers are not subject to other delete markers
387     } else if (!this.deletes.isEmpty()) {
388       DeleteResult deleteResult = deletes.isDeleted(kv);
389       switch (deleteResult) {
390         case FAMILY_DELETED:
391         case COLUMN_DELETED:
392           return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
393         case VERSION_DELETED:
394         case FAMILY_VERSION_DELETED:
395           return MatchCode.SKIP;
396         case NOT_DELETED:
397           break;
398         default:
399           throw new RuntimeException("UNEXPECTED");
400         }
401     }
402 
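        // tr.compare(timestamp) is 1 when the cell is newer than the scan's time range,
        // -1 when it is older, and 0 when it falls inside it. Cells within a column are
        // ordered newest first, so a too-new cell is merely skipped (an older version
        // may still match), while a too-old cell means no remaining version of this
        // column can match and we can seek ahead.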
403     int timestampComparison = tr.compare(timestamp);
404     if (timestampComparison >= 1) {
405       return MatchCode.SKIP;
406     } else if (timestampComparison <= -1) {
407       return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
408     }
409 
410     // STEP 1: Check if the column is part of the requested columns
411     MatchCode colChecker = columns.checkColumn(bytes, offset, qualLength, type);
412     if (colChecker == MatchCode.INCLUDE) {
413       ReturnCode filterResponse = ReturnCode.SKIP;
414       // STEP 2: Yes, the column is part of the requested columns. Check if filter is present
415       if (filter != null) {
416         // STEP 3: Filter the key value and return if it filters out
417         filterResponse = filter.filterKeyValue(kv);
418         switch (filterResponse) {
419         case SKIP:
420           return MatchCode.SKIP;
421         case NEXT_COL:
422           return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
423         case NEXT_ROW:
424           stickyNextRow = true;
425           return MatchCode.SEEK_NEXT_ROW;
426         case SEEK_NEXT_USING_HINT:
427           return MatchCode.SEEK_NEXT_USING_HINT;
428         default:
429           // it is either INCLUDE or INCLUDE_AND_NEXT_COL; fall through
430           break;
431         }
432       }
433       /*
434        * STEP 4: Reaching this step means the column is part of the requested columns and either
435        * the filter is null or the filter has returned INCLUDE or INCLUDE_AND_NEXT_COL response.
436        * Now check the number of versions needed. This method call returns SKIP, INCLUDE,
437        * INCLUDE_AND_SEEK_NEXT_ROW, INCLUDE_AND_SEEK_NEXT_COL.
438        *
439        * FilterResponse            ColumnChecker               Desired behavior
440        * INCLUDE                   SKIP                        row has already been included, SKIP.
441        * INCLUDE                   INCLUDE                     INCLUDE
442        * INCLUDE                   INCLUDE_AND_SEEK_NEXT_COL   INCLUDE_AND_SEEK_NEXT_COL
443        * INCLUDE                   INCLUDE_AND_SEEK_NEXT_ROW   INCLUDE_AND_SEEK_NEXT_ROW
444        * INCLUDE_AND_SEEK_NEXT_COL SKIP                        row has already been included, SKIP.
445        * INCLUDE_AND_SEEK_NEXT_COL INCLUDE                     INCLUDE_AND_SEEK_NEXT_COL
446        * INCLUDE_AND_SEEK_NEXT_COL INCLUDE_AND_SEEK_NEXT_COL   INCLUDE_AND_SEEK_NEXT_COL
447        * INCLUDE_AND_SEEK_NEXT_COL INCLUDE_AND_SEEK_NEXT_ROW   INCLUDE_AND_SEEK_NEXT_ROW
448        *
449        * In all the above scenarios, we return the column checker return value except for
450        * FilterResponse (INCLUDE_AND_SEEK_NEXT_COL) and ColumnChecker(INCLUDE)
451        */
452       colChecker =
453           columns.checkVersions(bytes, offset, qualLength, timestamp, type,
454             kv.getMvccVersion() > maxReadPointToTrackVersions);
455       //Optimize with stickyNextRow
456       stickyNextRow = colChecker == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW ? true : stickyNextRow;
457       return (filterResponse == ReturnCode.INCLUDE_AND_NEXT_COL &&
458           colChecker == MatchCode.INCLUDE) ? MatchCode.INCLUDE_AND_SEEK_NEXT_COL
459           : colChecker;
460     }
461     stickyNextRow = (colChecker == MatchCode.SEEK_NEXT_ROW) ? true
462         : stickyNextRow;
463     return colChecker;
464   }
465 
466   /** Handle partial-drop-deletes. As we match keys in order, when we have a range from which
467    * we can drop deletes, we can set retainDeletesInOutput to false for the duration of this
468    * range only, and maintain consistency. */
469   private void checkPartialDropDeleteRange(byte [] row, int offset, short length) {
470     // If partial-drop-deletes are used, initially, dropDeletesFromRow and dropDeletesToRow
471     // are both set, and the matcher is set to retain deletes. We assume ordered keys. When
472     // dropDeletesFromRow is leq current kv, we start dropping deletes and reset
473     // dropDeletesFromRow; thus the 2nd "if" starts to apply.
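        // For example, with hypothetical bounds dropDeletesFromRow="b" and
        // dropDeletesToRow="d": rows before "b" retain their delete markers, rows in
        // ["b","d") have delete markers dropped from the output, and rows from "d"
        // onwards retain delete markers again.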
474     if ((dropDeletesFromRow != null)
475         && ((dropDeletesFromRow == HConstants.EMPTY_START_ROW)
476           || (Bytes.compareTo(row, offset, length,
477               dropDeletesFromRow, 0, dropDeletesFromRow.length) >= 0))) {
478       retainDeletesInOutput = false;
479       dropDeletesFromRow = null;
480     }
481     // If dropDeletesFromRow is null and dropDeletesToRow is set, we are inside the partial-
482     // drop-deletes range. When dropDeletesToRow is leq current kv, we stop dropping deletes,
483     // and reset dropDeletesToRow so that we don't do any more compares.
484     if ((dropDeletesFromRow == null)
485         && (dropDeletesToRow != null) && (dropDeletesToRow != HConstants.EMPTY_END_ROW)
486         && (Bytes.compareTo(row, offset, length,
487             dropDeletesToRow, 0, dropDeletesToRow.length) >= 0)) {
488       retainDeletesInOutput = true;
489       dropDeletesToRow = null;
490     }
491   }
492 
493   public boolean moreRowsMayExistAfter(KeyValue kv) {
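        // For reversed scans rows are visited in decreasing order, so stopRow acts as a
        // lower bound: once the current row sorts at or below it, no further rows exist.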
494     if (this.isReversed) {
495       if (rowComparator.compareRows(kv.getBuffer(), kv.getRowOffset(),
496           kv.getRowLength(), stopRow, 0, stopRow.length) <= 0) {
497         return false;
498       } else {
499         return true;
500       }
501     }
502     if (!Bytes.equals(stopRow, HConstants.EMPTY_END_ROW) &&
503         rowComparator.compareRows(kv.getBuffer(), kv.getRowOffset(),
504             kv.getRowLength(), stopRow, 0, stopRow.length) >= 0) {
505       // KV >= STOPROW
506       // then NO there is nothing left.
507       return false;
508     } else {
509       return true;
510     }
511   }
512 
513   /**
514    * Set the current row the matcher should match against.
515    * @param row buffer containing the current row, located by offset and length
516    */
517   public void setRow(byte [] row, int offset, short length) {
518     checkPartialDropDeleteRange(row, offset, length);
519     this.row = row;
520     this.rowOffset = offset;
521     this.rowLength = length;
522     reset();
523   }
524 
525   public void reset() {
526     this.deletes.reset();
527     this.columns.reset();
528 
529     stickyNextRow = false;
530   }
531 
532   /**
533    *
534    * @return the start key
535    */
536   public KeyValue getStartKey() {
537     return this.startKey;
538   }
539 
540   /**
541    *
542    * @return the Filter
543    */
544   Filter getFilter() {
545     return this.filter;
546   }
547 
548   public Cell getNextKeyHint(Cell kv) throws IOException {
549     if (filter == null) {
550       return null;
551     } else {
552       return filter.getNextCellHint(kv);
553     }
554   }
555 
556   public KeyValue getKeyForNextColumn(KeyValue kv) {
557     ColumnCount nextColumn = columns.getColumnHint();
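        // No hint: seek to the last possible key of the current row/column so the next
        // read lands on the following column. With a hint: seek straight to the first
        // possible key of the hinted column.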
558     if (nextColumn == null) {
559       return KeyValue.createLastOnRow(
560           kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(),
561           kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength(),
562           kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength());
563     } else {
564       return KeyValue.createFirstOnRow(
565           kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(),
566           kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength(),
567           nextColumn.getBuffer(), nextColumn.getOffset(), nextColumn.getLength());
568     }
569   }
570 
571   public KeyValue getKeyForNextRow(KeyValue kv) {
572     return KeyValue.createLastOnRow(
573         kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(),
574         null, 0, 0,
575         null, 0, 0);
576   }
577 
578   //Used only for testing purposes
579   static MatchCode checkColumn(ColumnTracker columnTracker, byte[] bytes, int offset,
580       int length, long ttl, byte type, boolean ignoreCount) throws IOException {
581     MatchCode matchCode = columnTracker.checkColumn(bytes, offset, length, type);
582     if (matchCode == MatchCode.INCLUDE) {
583       return columnTracker.checkVersions(bytes, offset, length, ttl, type, ignoreCount);
584     }
585     return matchCode;
586   }
587 
588   /**
589    * {@link #match} return codes.  These instruct the scanner moving through
590    * memstores and StoreFiles what to do with the current KeyValue.
591    * <p>
592    * Additionally, this contains "early-out" codes that tell the scanner to
593    * move on to the next file (memstore or StoreFile), or to return immediately.
594    */
595   public static enum MatchCode {
596     /**
597      * Include KeyValue in the returned result
598      */
599     INCLUDE,
600 
601     /**
602      * Do not include KeyValue in the returned result
603      */
604     SKIP,
605 
606     /**
607      * Do not include, jump to next StoreFile or memstore (in time order)
608      */
609     NEXT,
610 
611     /**
612      * Do not include, return current result
613      */
614     DONE,
615 
616     /**
617      * These codes are used by the ScanQueryMatcher
618      */
619 
620     /**
621      * Done with the row, seek there.
622      */
623     SEEK_NEXT_ROW,
624     /**
625      * Done with column, seek to next.
626      */
627     SEEK_NEXT_COL,
628 
629     /**
630      * Done with scan, thanks to the row filter.
631      */
632     DONE_SCAN,
633 
634     /**
635      * Seek to next key which is given as hint.
636      */
637     SEEK_NEXT_USING_HINT,
638 
639     /**
640      * Include KeyValue and done with column, seek to next.
641      */
642     INCLUDE_AND_SEEK_NEXT_COL,
643 
644     /**
645      * Include KeyValue and done with row, seek to next.
646      */
647     INCLUDE_AND_SEEK_NEXT_ROW,
648   }
649 }