/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.NavigableSet;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteResult;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

import com.google.common.base.Preconditions;

/**
 * A query matcher that is specifically designed for the scan case.
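 * <p>
 * A rough, illustrative sketch of how a scanner-like caller could drive this matcher
 * (hypothetical caller code, not the actual StoreScanner loop; {@code results} and
 * {@code seekTo} are made-up names):
 * <pre>{@code
 *   matcher.setRow(rowBytes, rowOffset, rowLength);  // position the matcher on the current row
 *   for (KeyValue kv : cellsOfCurrentRow) {
 *     switch (matcher.match(kv)) {
 *       case INCLUDE:
 *         results.add(kv);
 *         break;
 *       case SEEK_NEXT_COL:
 *         seekTo(matcher.getKeyForNextColumn(kv));
 *         break;
 *       case SEEK_NEXT_ROW:
 *         seekTo(matcher.getKeyForNextRow(kv));
 *         break;
 *       case DONE:
 *       case DONE_SCAN:
 *         return;                                     // nothing more to emit for this call
 *       default:
 *         break;                                      // SKIP and friends: just advance
 *     }
 *   }
 * }</pre>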
 */
@InterfaceAudience.Private
public class ScanQueryMatcher {
  // Optimization so we can skip lots of compares when we decide to skip
  // to the next row.
  private boolean stickyNextRow;
  private final byte[] stopRow;

  private final TimeRange tr;

  private final Filter filter;

  /** Keeps track of deletes */
  private final DeleteTracker deletes;

  /*
   * The following three booleans define how we deal with deletes.
   * There are three different aspects:
   * 1. Whether to keep delete markers. This is used in compactions.
   *    Minor compactions always keep delete markers.
   * 2. Whether to keep deleted rows. This is also used in compactions,
   *    if the store is set to keep deleted rows. This implies keeping
   *    the delete markers as well.
   *    In this case deleted rows are subject to the normal max-version
   *    and TTL/min-version rules just like "normal" rows.
   * 3. Whether a scan can do time-range ("time travel") queries that reach
   *    back before a delete marker, and thus see the deleted rows.
   */
  /** whether to retain delete markers */
  private boolean retainDeletesInOutput;

  /** whether to return deleted rows */
  private final boolean keepDeletedCells;
  /** whether time range queries can see rows "behind" a delete */
  private final boolean seePastDeleteMarkers;


  /** Keeps track of columns and versions */
  private final ColumnTracker columns;

  /** Key to seek to in memstore and StoreFiles */
  private final KeyValue startKey;

  /** Row comparator for the region this query is for */
  private final KeyValue.KVComparator rowComparator;

  /* row is not private for tests */
  /** Row the query is on */
  byte[] row;
  int rowOffset;
  short rowLength;

  /**
   * Oldest put in any of the involved store files.
   * Used to decide whether it is OK to purge
   * a family delete marker when this store keeps
   * deleted KVs.
   */
  private final long earliestPutTs;

  /** readPoint over which the KVs are unconditionally included */
  protected long maxReadPointToTrackVersions;

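  // When both bounds are set (see the drop-deletes constructor), retainDeletesInOutput is turned
  // off for rows in [dropDeletesFromRow, dropDeletesToRow), so delete markers in that range can be
  // purged during a compaction; see checkPartialDropDeleteRange() below.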
  private byte[] dropDeletesFromRow = null, dropDeletesToRow = null;

  /**
   * This variable shows whether there is a null column in the query. There
   * always exists a null column in a wildcard column query.
   * A null column may also exist in an explicit column query, depending on the
   * first column.
   */
  private boolean hasNullColumn = true;

  private RegionCoprocessorHost regionCoprocessorHost = null;

  // By default, when hbase.hstore.time.to.purge.deletes is 0ms, a delete
  // marker is always removed during a major compaction. If set to a non-zero
  // value then a major compaction will try to keep a delete marker around for
  // the given number of milliseconds. We want to keep the delete markers
  // around a bit longer because old puts might appear out-of-order. For
  // example, during log replication between two clusters.
  //
  // If the delete marker has lived longer than its column family's TTL then
  // the delete marker will be removed even if time.to.purge.deletes has not
  // passed. This is because all the Puts that this delete marker can influence
  // would have also expired. (Removal of delete markers on col family TTL will
  // not happen if min-versions is set to non-zero.)
  //
  // But, if time.to.purge.deletes has not expired then a delete
  // marker will not be removed just because there are no Puts that it is
  // currently influencing. This is because Puts that this delete can
  // influence may appear out of order.
  private final long timeToPurgeDeletes;

  private final boolean isUserScan;

  private final boolean isReversed;

  /**
   * Construct a QueryMatcher for a scan
   * @param scan
   * @param scanInfo The store's immutable scan info
   * @param columns
   * @param scanType Type of the scan
   * @param readPointToUse readpoint over which the KVs are unconditionally included
   * @param earliestPutTs Earliest put seen in any of the store files.
   * @param oldestUnexpiredTS the oldest timestamp we are interested in,
   *  based on TTL
   * @param regionCoprocessorHost
   * @throws IOException
   */
  public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet<byte[]> columns,
      ScanType scanType, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS,
      RegionCoprocessorHost regionCoprocessorHost) throws IOException {
    this.tr = scan.getTimeRange();
    this.rowComparator = scanInfo.getComparator();
    this.regionCoprocessorHost = regionCoprocessorHost;
    this.deletes = instantiateDeleteTracker();
    this.stopRow = scan.getStopRow();
    this.startKey = KeyValue.createFirstDeleteFamilyOnRow(scan.getStartRow(),
        scanInfo.getFamily());
    this.filter = scan.getFilter();
    this.earliestPutTs = earliestPutTs;
    this.maxReadPointToTrackVersions = readPointToUse;
    this.timeToPurgeDeletes = scanInfo.getTimeToPurgeDeletes();

    /* how to deal with deletes */
    this.isUserScan = scanType == ScanType.USER_SCAN;
    // keep deleted cells: if compaction or raw scan
    this.keepDeletedCells = (scanInfo.getKeepDeletedCells() && !isUserScan) || scan.isRaw();
    // retain deletes: if minor compaction or raw scan
    this.retainDeletesInOutput = scanType == ScanType.COMPACT_RETAIN_DELETES || scan.isRaw();
    // seePastDeleteMarker: user initiated scans
    this.seePastDeleteMarkers = scanInfo.getKeepDeletedCells() && isUserScan;

    int maxVersions =
        scan.isRaw() ? scan.getMaxVersions() : Math.min(scan.getMaxVersions(),
          scanInfo.getMaxVersions());

    // Single branch to deal with two types of reads (columns vs all in family)
    if (columns == null || columns.size() == 0) {
      // there is always a null column in the wildcard column query.
      hasNullColumn = true;

      // use a specialized scan for wildcard column tracker.
      this.columns = new ScanWildcardColumnTracker(
          scanInfo.getMinVersions(), maxVersions, oldestUnexpiredTS);
    } else {
      // whether there is a null column in the explicit column query
      hasNullColumn = (columns.first().length == 0);

      // We can share the ExplicitColumnTracker, diff is we reset
      // between rows, not between storefiles.
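      // HINT_LOOKAHEAD is a scan attribute a client may set (e.g. via
      // scan.setAttribute(Scan.HINT_LOOKAHEAD, Bytes.toBytes(2))) to suggest how many cells the
      // explicit tracker should SKIP through before falling back to a seek; 0 means no lookahead.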
      byte[] attr = scan.getAttribute(Scan.HINT_LOOKAHEAD);
      this.columns = new ExplicitColumnTracker(columns, scanInfo.getMinVersions(), maxVersions,
          oldestUnexpiredTS, attr == null ? 0 : Bytes.toInt(attr));
    }
    this.isReversed = scan.isReversed();
  }

  private DeleteTracker instantiateDeleteTracker() throws IOException {
    DeleteTracker tracker = new ScanDeleteTracker();
    if (regionCoprocessorHost != null) {
      tracker = regionCoprocessorHost.postInstantiateDeleteTracker(tracker);
    }
    return tracker;
  }

  /**
   * Construct a QueryMatcher for a scan that drops deletes from a limited range of rows.
   * @param scan
   * @param scanInfo The store's immutable scan info
   * @param columns
   * @param readPointToUse readpoint over which the KVs are unconditionally included
   * @param earliestPutTs Earliest put seen in any of the store files.
   * @param oldestUnexpiredTS the oldest timestamp we are interested in,
   *  based on TTL
   * @param dropDeletesFromRow The inclusive left bound of the range; can be EMPTY_START_ROW.
   * @param dropDeletesToRow The exclusive right bound of the range; can be EMPTY_END_ROW.
   * @param regionCoprocessorHost
   * @throws IOException
   */
  public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet<byte[]> columns,
      long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, byte[] dropDeletesFromRow,
      byte[] dropDeletesToRow, RegionCoprocessorHost regionCoprocessorHost) throws IOException {
    this(scan, scanInfo, columns, ScanType.COMPACT_RETAIN_DELETES, readPointToUse, earliestPutTs,
        oldestUnexpiredTS, regionCoprocessorHost);
    Preconditions.checkArgument((dropDeletesFromRow != null) && (dropDeletesToRow != null));
    this.dropDeletesFromRow = dropDeletesFromRow;
    this.dropDeletesToRow = dropDeletesToRow;
  }

  /*
   * Constructor for tests
   */
  ScanQueryMatcher(Scan scan, ScanInfo scanInfo,
      NavigableSet<byte[]> columns, long oldestUnexpiredTS) throws IOException {
    this(scan, scanInfo, columns, ScanType.USER_SCAN,
        Long.MAX_VALUE, /* max Readpoint to track versions */
        HConstants.LATEST_TIMESTAMP, oldestUnexpiredTS, null);
  }

  /**
   *
   * @return whether there is a null column in the query
   */
  public boolean hasNullColumnInQuery() {
    return hasNullColumn;
  }

  /**
   * Determines if the caller should do one of several things:
   * - seek/skip to the next row (MatchCode.SEEK_NEXT_ROW)
   * - seek/skip to the next column (MatchCode.SEEK_NEXT_COL)
   * - include the current KeyValue (MatchCode.INCLUDE)
   * - ignore the current KeyValue (MatchCode.SKIP)
   * - go to the next row (MatchCode.DONE)
   *
   * @param kv KeyValue to check
   * @return The match code instance.
   * @throws IOException in case there is an internal consistency problem
   *      caused by a data corruption.
   */
  public MatchCode match(KeyValue kv) throws IOException {
    if (filter != null && filter.filterAllRemaining()) {
      return MatchCode.DONE_SCAN;
    }

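    // The backing buffer of a KeyValue is laid out as:
    //   <4B key length> <4B value length> <2B row length> <row> <1B family length> <family>
    //   <qualifier> <8B timestamp> <1B type> <value>
    // The parsing below walks this layout by hand instead of using the KeyValue accessors.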
    byte[] bytes = kv.getBuffer();
    int offset = kv.getOffset();

    int keyLength = Bytes.toInt(bytes, offset, Bytes.SIZEOF_INT);
    offset += KeyValue.ROW_OFFSET;

    int initialOffset = offset;

    short rowLength = Bytes.toShort(bytes, offset, Bytes.SIZEOF_SHORT);
    offset += Bytes.SIZEOF_SHORT;

    int ret = this.rowComparator.compareRows(row, this.rowOffset, this.rowLength,
        bytes, offset, rowLength);
    if (!this.isReversed) {
      if (ret <= -1) {
        return MatchCode.DONE;
      } else if (ret >= 1) {
        // could optimize this, if necessary?
        // Could also be called SEEK_TO_CURRENT_ROW, but this
        // should be rare/never happens.
        return MatchCode.SEEK_NEXT_ROW;
      }
    } else {
      if (ret <= -1) {
        return MatchCode.SEEK_NEXT_ROW;
      } else if (ret >= 1) {
        return MatchCode.DONE;
      }
    }


    // optimize case.
    if (this.stickyNextRow)
      return MatchCode.SEEK_NEXT_ROW;

    if (this.columns.done()) {
      stickyNextRow = true;
      return MatchCode.SEEK_NEXT_ROW;
    }

    // Passing rowLength
    offset += rowLength;

    // Skipping family
    byte familyLength = bytes[offset];
    offset += familyLength + 1;

    int qualLength = keyLength -
        (offset - initialOffset) - KeyValue.TIMESTAMP_TYPE_SIZE;

    long timestamp = Bytes.toLong(bytes, initialOffset + keyLength - KeyValue.TIMESTAMP_TYPE_SIZE);
    // check for early out based on timestamp alone
    if (columns.isDone(timestamp)) {
      return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
    }

    /*
     * The delete logic is pretty complicated now.
     * This is caused by the following:
     * 1. The store might be instructed to keep deleted rows around.
     * 2. A scan can optionally see past a delete marker now.
     * 3. If deleted rows are kept, we have to find out when we can
     *    remove the delete markers.
     * 4. Family delete markers are always first (regardless of their TS)
     * 5. Delete markers should not be counted as a version
     * 6. Delete markers affect puts of the *same* TS
     * 7. Delete markers need to be version counted together with the puts
     *    they affect
     */
    byte type = bytes[initialOffset + keyLength - 1];
    if (kv.isDelete()) {
      if (!keepDeletedCells) {
        // first ignore delete markers if the scanner can do so, and the
        // range does not include the marker
        //
        // during flushes and compactions also ignore delete markers newer
        // than the readpoint of any open scanner, this prevents deleted
        // rows that could still be seen by a scanner from being collected
        boolean includeDeleteMarker = seePastDeleteMarkers ?
            tr.withinTimeRange(timestamp) :
            tr.withinOrAfterTimeRange(timestamp);
        if (includeDeleteMarker
            && kv.getMvccVersion() <= maxReadPointToTrackVersions) {
          this.deletes.add(kv);
        }
        // Can't early out now, because DelFam come before any other keys
      }

      if ((!isUserScan)
          && timeToPurgeDeletes > 0
          && (EnvironmentEdgeManager.currentTimeMillis() - timestamp) <= timeToPurgeDeletes) {
        return MatchCode.INCLUDE;
      } else if (retainDeletesInOutput || kv.getMvccVersion() > maxReadPointToTrackVersions) {
        // always include, or it is not yet time to check whether it is OK
        // to purge deletes or not
        if (!isUserScan) {
          // if this is not a user scan (compaction), we can filter this delete marker right here
          // otherwise (i.e. a "raw" scan) we fall through to normal version and timerange checking
          return MatchCode.INCLUDE;
        }
      } else if (keepDeletedCells) {
        if (timestamp < earliestPutTs) {
          // keeping deleted rows, but there are no puts older than
          // this delete in the store files.
          return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
        }
        // else: fall through and do version counting on the
        // delete markers
      } else {
        return MatchCode.SKIP;
      }
      // note the following next else if...
      // delete markers are not subject to other delete markers
    } else if (!this.deletes.isEmpty()) {
      DeleteResult deleteResult = deletes.isDeleted(kv);
      switch (deleteResult) {
        case FAMILY_DELETED:
        case COLUMN_DELETED:
          return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
        case VERSION_DELETED:
        case FAMILY_VERSION_DELETED:
          return MatchCode.SKIP;
        case NOT_DELETED:
          break;
        default:
          throw new RuntimeException("UNEXPECTED");
      }
    }

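    // At this point the cell has survived delete handling; now check it against the scan's
    // time range.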
    int timestampComparison = tr.compare(timestamp);
    if (timestampComparison >= 1) {
      return MatchCode.SKIP;
    } else if (timestampComparison <= -1) {
      return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
    }

    // STEP 1: Check if the column is part of the requested columns
    MatchCode colChecker = columns.checkColumn(bytes, offset, qualLength, type);
    if (colChecker == MatchCode.INCLUDE) {
      ReturnCode filterResponse = ReturnCode.SKIP;
      // STEP 2: Yes, the column is part of the requested columns. Check if filter is present
      if (filter != null) {
        // STEP 3: Filter the key value and return if it filters out
        filterResponse = filter.filterKeyValue(kv);
        switch (filterResponse) {
          case SKIP:
            return MatchCode.SKIP;
          case NEXT_COL:
            return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
          case NEXT_ROW:
            stickyNextRow = true;
            return MatchCode.SEEK_NEXT_ROW;
          case SEEK_NEXT_USING_HINT:
            return MatchCode.SEEK_NEXT_USING_HINT;
          default:
            // It means it is either include or include and seek next
            break;
        }
      }
      /*
       * STEP 4: Reaching this step means the column is part of the requested columns and either
       * the filter is null or the filter has returned INCLUDE or INCLUDE_AND_NEXT_COL response.
       * Now check the number of versions needed. This method call returns SKIP, INCLUDE,
       * INCLUDE_AND_SEEK_NEXT_ROW, INCLUDE_AND_SEEK_NEXT_COL.
       *
       * FilterResponse               ColumnChecker               Desired behavior
       * INCLUDE                      SKIP                        row has already been included, SKIP.
       * INCLUDE                      INCLUDE                     INCLUDE
       * INCLUDE                      INCLUDE_AND_SEEK_NEXT_COL   INCLUDE_AND_SEEK_NEXT_COL
       * INCLUDE                      INCLUDE_AND_SEEK_NEXT_ROW   INCLUDE_AND_SEEK_NEXT_ROW
       * INCLUDE_AND_SEEK_NEXT_COL    SKIP                        row has already been included, SKIP.
       * INCLUDE_AND_SEEK_NEXT_COL    INCLUDE                     INCLUDE_AND_SEEK_NEXT_COL
       * INCLUDE_AND_SEEK_NEXT_COL    INCLUDE_AND_SEEK_NEXT_COL   INCLUDE_AND_SEEK_NEXT_COL
       * INCLUDE_AND_SEEK_NEXT_COL    INCLUDE_AND_SEEK_NEXT_ROW   INCLUDE_AND_SEEK_NEXT_ROW
       *
       * In all the above scenarios, we return the column checker return value except for
       * FilterResponse (INCLUDE_AND_SEEK_NEXT_COL) and ColumnChecker(INCLUDE)
       */
      colChecker =
          columns.checkVersions(bytes, offset, qualLength, timestamp, type,
            kv.getMvccVersion() > maxReadPointToTrackVersions);
      // Optimize with stickyNextRow
      stickyNextRow = colChecker == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW ? true : stickyNextRow;
      return (filterResponse == ReturnCode.INCLUDE_AND_NEXT_COL &&
          colChecker == MatchCode.INCLUDE) ? MatchCode.INCLUDE_AND_SEEK_NEXT_COL
          : colChecker;
    }
    stickyNextRow = (colChecker == MatchCode.SEEK_NEXT_ROW) ? true
        : stickyNextRow;
    return colChecker;
  }

  /** Handle partial-drop-deletes. As we match keys in order, when we have a range from which
   * we can drop deletes, we can set retainDeletesInOutput to false for the duration of this
   * range only, and maintain consistency. */
  private void checkPartialDropDeleteRange(byte[] row, int offset, short length) {
    // If partial-drop-deletes are used, initially, dropDeletesFromRow and dropDeletesToRow
    // are both set, and the matcher is set to retain deletes. We assume ordered keys. When
    // dropDeletesFromRow is leq current kv, we start dropping deletes and reset
    // dropDeletesFromRow; thus the 2nd "if" starts to apply.
    if ((dropDeletesFromRow != null)
        && ((dropDeletesFromRow == HConstants.EMPTY_START_ROW)
            || (Bytes.compareTo(row, offset, length,
                dropDeletesFromRow, 0, dropDeletesFromRow.length) >= 0))) {
      retainDeletesInOutput = false;
      dropDeletesFromRow = null;
    }
    // If dropDeletesFromRow is null and dropDeletesToRow is set, we are inside the partial-
    // drop-deletes range. When dropDeletesToRow is leq current kv, we stop dropping deletes,
    // and reset dropDeletesToRow so that we don't do any more compares.
    if ((dropDeletesFromRow == null)
        && (dropDeletesToRow != null) && (dropDeletesToRow != HConstants.EMPTY_END_ROW)
        && (Bytes.compareTo(row, offset, length,
            dropDeletesToRow, 0, dropDeletesToRow.length) >= 0)) {
      retainDeletesInOutput = true;
      dropDeletesToRow = null;
    }
  }

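  /**
   * @return false if we know for sure there are no more rows of interest after {@code kv}
   *   (i.e. the stop row, if any, has been reached or passed), true otherwise. For reversed
   *   scans the stop row is a lower bound, so the comparison direction flips.
   */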
  public boolean moreRowsMayExistAfter(KeyValue kv) {
    if (this.isReversed) {
      if (rowComparator.compareRows(kv.getBuffer(), kv.getRowOffset(),
          kv.getRowLength(), stopRow, 0, stopRow.length) <= 0) {
        return false;
      } else {
        return true;
      }
    }
    if (!Bytes.equals(stopRow, HConstants.EMPTY_END_ROW) &&
        rowComparator.compareRows(kv.getBuffer(), kv.getRowOffset(),
            kv.getRowLength(), stopRow, 0, stopRow.length) >= 0) {
      // KV >= STOPROW
      // then NO there is nothing left.
      return false;
    } else {
      return true;
    }
  }

  /**
   * Set current row
   * @param row
   * @param offset
   * @param length
   */
  public void setRow(byte[] row, int offset, short length) {
    checkPartialDropDeleteRange(row, offset, length);
    this.row = row;
    this.rowOffset = offset;
    this.rowLength = length;
    reset();
  }

  public void reset() {
    this.deletes.reset();
    this.columns.reset();

    stickyNextRow = false;
  }

  /**
   *
   * @return the start key
   */
  public KeyValue getStartKey() {
    return this.startKey;
  }

  /**
   *
   * @return the Filter
   */
  Filter getFilter() {
    return this.filter;
  }

  public Cell getNextKeyHint(Cell kv) throws IOException {
    if (filter == null) {
      return null;
    } else {
      return filter.getNextCellHint(kv);
    }
  }

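  // Builds a fake seek key: with no column hint we seek past every remaining version of the
  // current column (last-on-row key for the row/family/qualifier); with a hint we seek to the
  // first possible cell of the hinted next column.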
  public KeyValue getKeyForNextColumn(KeyValue kv) {
    ColumnCount nextColumn = columns.getColumnHint();
    if (nextColumn == null) {
      return KeyValue.createLastOnRow(
          kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(),
          kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength(),
          kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength());
    } else {
      return KeyValue.createFirstOnRow(
          kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(),
          kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength(),
          nextColumn.getBuffer(), nextColumn.getOffset(), nextColumn.getLength());
    }
  }

  public KeyValue getKeyForNextRow(KeyValue kv) {
    return KeyValue.createLastOnRow(
        kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(),
        null, 0, 0,
        null, 0, 0);
  }

  // Used only for testing purposes
  static MatchCode checkColumn(ColumnTracker columnTracker, byte[] bytes, int offset,
      int length, long ttl, byte type, boolean ignoreCount) throws IOException {
    MatchCode matchCode = columnTracker.checkColumn(bytes, offset, length, type);
    if (matchCode == MatchCode.INCLUDE) {
      return columnTracker.checkVersions(bytes, offset, length, ttl, type, ignoreCount);
    }
    return matchCode;
  }

  /**
   * {@link #match} return codes. These instruct the scanner moving through
   * memstores and StoreFiles what to do with the current KeyValue.
   * <p>
   * Additionally, this contains "early-out" language to tell the scanner to
   * move on to the next File (memstore or Storefile), or to return immediately.
   */
  public static enum MatchCode {
    /**
     * Include KeyValue in the returned result
     */
    INCLUDE,

    /**
     * Do not include KeyValue in the returned result
     */
    SKIP,

    /**
     * Do not include, jump to next StoreFile or memstore (in time order)
     */
    NEXT,

    /**
     * Do not include, return current result
     */
    DONE,

    /**
     * These codes are used by the ScanQueryMatcher
     */

    /**
     * Done with the current row, seek to the next row.
     */
    SEEK_NEXT_ROW,
    /**
     * Done with column, seek to next.
     */
    SEEK_NEXT_COL,

    /**
     * Done with scan, thanks to the row filter.
     */
    DONE_SCAN,

    /*
     * Seek to next key which is given as hint.
     */
    SEEK_NEXT_USING_HINT,

    /**
     * Include KeyValue and done with column, seek to next.
     */
    INCLUDE_AND_SEEK_NEXT_COL,

    /**
     * Include KeyValue and done with row, seek to next.
     */
    INCLUDE_AND_SEEK_NEXT_ROW,
  }
}