1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase.regionserver;
19  
20  import static org.apache.hadoop.hbase.HBaseTestingUtility.COLUMNS;
21  import static org.junit.Assert.assertArrayEquals;
22  import static org.junit.Assert.assertEquals;
23  import static org.junit.Assert.assertTrue;
24  import static org.junit.Assert.fail;
25  
26  import java.io.IOException;
27  import java.util.ArrayList;
28  import java.util.List;
29  
30  import org.apache.hadoop.hbase.Cell;
31  import org.apache.hadoop.hbase.CellUtil;
32  import org.apache.hadoop.hbase.HBaseTestingUtility;
33  import org.apache.hadoop.hbase.HConstants;
34  import org.apache.hadoop.hbase.HTableDescriptor;
35  import org.apache.hadoop.hbase.KeyValue;
36  import org.apache.hadoop.hbase.SmallTests;
37  import org.apache.hadoop.hbase.client.Delete;
38  import org.apache.hadoop.hbase.client.Get;
39  import org.apache.hadoop.hbase.client.Put;
40  import org.apache.hadoop.hbase.client.Result;
41  import org.apache.hadoop.hbase.client.Scan;
42  import org.apache.hadoop.hbase.util.Bytes;
43  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
44  import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
45  import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
46  import org.junit.After;
47  import org.junit.Before;
48  import org.junit.Rule;
49  import org.junit.Test;
50  import org.junit.experimental.categories.Category;
51  import org.junit.rules.TestName;
52  
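    /**
     * Tests around the KEEP_DELETED_CELLS column family option. With the option
     * enabled, deleted cells and delete markers are retained (still subject to
     * VERSIONS and TTL) and stay visible to time range gets/scans as well as to
     * "raw" scans; without it they are eventually purged by flushes and
     * compactions (see testWithoutKeepingDeletes below).
     * The tests drive an HRegion directly and pass the flag through
     * {@link HBaseTestingUtility#createTableDescriptor}. For reference, the
     * equivalent client-side column family setup would be roughly:
     * <pre>
     *   HColumnDescriptor hcd = new HColumnDescriptor(family);
     *   hcd.setKeepDeletedCells(true);
     * </pre>
     */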
53  @Category(SmallTests.class)
54  public class TestKeepDeletes {
55    HBaseTestingUtility hbu = HBaseTestingUtility.createLocalHTU();
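      // T0..T6 are small byte[] tokens used below as row keys and cell values;
      // c0 and c1 are the first two column families of the test tables.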
56    private final byte[] T0 = Bytes.toBytes("0");
57    private final byte[] T1 = Bytes.toBytes("1");
58    private final byte[] T2 = Bytes.toBytes("2");
59    private final byte[] T3 = Bytes.toBytes("3");
60    private final byte[] T4 = Bytes.toBytes("4");
61    private final byte[] T5 = Bytes.toBytes("5");
62    private final byte[] T6 = Bytes.toBytes("6");
63  
64    private final byte[] c0 = COLUMNS[0];
65    private final byte[] c1 = COLUMNS[1];
66  
67    @Rule public TestName name = new TestName();
68    
69    @Before
70    public void setUp() throws Exception {
71      /* HBASE-6832: [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on
72       * implicit RS timing.
73       * Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete and
74       * compact timestamps are tracked. Otherwise, a forced major compaction will not purge
75       * Deletes having the same timestamp. See ScanQueryMatcher.match():
76       * if (retainDeletesInOutput
77       *     || (!isUserScan && (EnvironmentEdgeManager.currentTimeMillis() - timestamp)
78       *     <= timeToPurgeDeletes) ... )
79       *
80       */
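        // the injected IncrementingEnvironmentEdge advances the returned time on every
        // call, so each operation below gets a distinct, strictly increasing timestamp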
81      EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
82    }
83  
84    @After
85    public void tearDown() throws Exception {
86      EnvironmentEdgeManager.reset();
87    }
88  
89    /**
90     * Make sure that deleted rows are retained.
91     * Family delete markers are deleted.
92     * Column delete markers are versioned.
93     * Time range scans of deleted rows are possible.
94     */
95    @Test
96    public void testBasicScenario() throws Exception {
97      // keep 3 versions, rows do not expire
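        // (createTableDescriptor args: name, minVersions, maxVersions, ttl, keepDeletedCells)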
98      HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
99          HConstants.FOREVER, true);
100     HRegion region = hbu.createLocalHRegion(htd, null, null);
101 
102     long ts = EnvironmentEdgeManager.currentTimeMillis();
103     Put p = new Put(T1, ts);
104     p.add(c0, c0, T1);
105     region.put(p);
106     p = new Put(T1, ts+1);
107     p.add(c0, c0, T2);
108     region.put(p);
109     p = new Put(T1, ts+2);
110     p.add(c0, c0, T3);
111     region.put(p);
112     p = new Put(T1, ts+4);
113     p.add(c0, c0, T4);
114     region.put(p);
115 
116     // now place a delete marker at ts+2
117     Delete d = new Delete(T1, ts+2);
118     region.delete(d);
119 
120     // a raw scan can see the delete markers
121     // (one for each column family)
122     assertEquals(3, countDeleteMarkers(region));
123 
124     // get something *before* the delete marker
125     Get g = new Get(T1);
126     g.setMaxVersions();
127     g.setTimeRange(0L, ts+2);
128     Result r = region.get(g);
129     checkResult(r, c0, c0, T2,T1);
130 
131     // flush
132     region.flushcache();
133 
134     // yep, T2 still there, T1 gone
135     r = region.get(g);
136     checkResult(r, c0, c0, T2);
137 
138     // major compact
139     region.compactStores(true);
140     region.compactStores(true);
141 
142     // one delete marker left (the others did not
143     // have older puts)
144     assertEquals(1, countDeleteMarkers(region));
145 
146     // still there (even after multiple compactions)
147     r = region.get(g);
148     checkResult(r, c0, c0, T2);
149 
150     // a timerange that includes the delete marker won't see past rows
151     g.setTimeRange(0L, ts+4);
152     r = region.get(g);
153     assertTrue(r.isEmpty());
154 
155     // two more puts; these will expire the older puts.
156     p = new Put(T1, ts+5);
157     p.add(c0, c0, T5);
158     region.put(p);
159     p = new Put(T1, ts+6);
160     p.add(c0, c0, T6);
161     region.put(p);
162 
163     // also add an old put again
164     // (which is past the max versions)
165     p = new Put(T1, ts);
166     p.add(c0, c0, T1);
167     region.put(p);
168     r = region.get(g);
169     assertTrue(r.isEmpty());
170 
171     region.flushcache();
172     region.compactStores(true);
173     region.compactStores(true);
174 
175     // verify that the delete marker itself was collected
176     region.put(p);
177     r = region.get(g);
178     checkResult(r, c0, c0, T1);
179     assertEquals(0, countDeleteMarkers(region));
180 
181     HRegion.closeHRegion(region);
182   }
183 
184   /**
185    * Even when the store does not keep deletes, a "raw" scan will
186    * return everything it can find (unless discarding cells is guaranteed
187    * to have no effect).
188    * We assume this is the desired behavior. We could also disallow "raw"
189    * scanning if the store does not have KEEP_DELETED_CELLS enabled.
190    * (This can be changed easily.)
191    */
192   @Test
193   public void testRawScanWithoutKeepingDeletes() throws Exception {
194     // KEEP_DELETED_CELLS is NOT enabled
195     HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
196         HConstants.FOREVER, false);
197     HRegion region = hbu.createLocalHRegion(htd, null, null);
198 
199     long ts = EnvironmentEdgeManager.currentTimeMillis();
200     Put p = new Put(T1, ts);
201     p.add(c0, c0, T1);
202     region.put(p);
203 
204     Delete d = new Delete(T1, ts);
205     d.deleteColumn(c0, c0, ts);
206     region.delete(d);
207 
208     // scan still returns delete markers and deleted rows
209     Scan s = new Scan();
210     s.setRaw(true);
211     s.setMaxVersions();
212     InternalScanner scan = region.getScanner(s);
213     List<Cell> kvs = new ArrayList<Cell>();
214     scan.next(kvs);
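        // expect two cells: the put at ts and the version delete marker that covers it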
215     assertEquals(2, kvs.size());
216 
217     region.flushcache();
218     region.compactStores(true);
219 
220     // after compaction they are gone
221     // (note that this is a test with a Store without
222     //  KEEP_DELETED_CELLS)
223     s = new Scan();
224     s.setRaw(true);
225     s.setMaxVersions();
226     scan = region.getScanner(s);
227     kvs = new ArrayList<Cell>();
228     scan.next(kvs);
229     assertTrue(kvs.isEmpty());
230 
231     HRegion.closeHRegion(region);
232   }
233 
234   /**
235    * Basic verification of existing behavior.
236    */
237   @Test
238   public void testWithoutKeepingDeletes() throws Exception {
239     // KEEP_DELETED_CELLS is NOT enabled
240     HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
241         HConstants.FOREVER, false);
242     HRegion region = hbu.createLocalHRegion(htd, null, null);
243 
244     long ts = EnvironmentEdgeManager.currentTimeMillis();
245     Put p = new Put(T1, ts);
246     p.add(c0, c0, T1);
247     region.put(p);
248     Delete d = new Delete(T1, ts+2);
249     d.deleteColumn(c0, c0, ts);
250     region.delete(d);
251 
252     // "past" get does not see rows behind delete marker
253     Get g = new Get(T1);
254     g.setMaxVersions();
255     g.setTimeRange(0L, ts+1);
256     Result r = region.get(g);
257     assertTrue(r.isEmpty());
258 
259     // "past" scan does not see rows behind delete marker
260     Scan s = new Scan();
261     s.setMaxVersions();
262     s.setTimeRange(0L, ts+1);
263     InternalScanner scanner = region.getScanner(s);
264     List<Cell> kvs = new ArrayList<Cell>();
265     while(scanner.next(kvs));
266     assertTrue(kvs.isEmpty());
267 
268     // flushing and minor compaction keep delete markers
269     region.flushcache();
270     region.compactStores();
271     assertEquals(1, countDeleteMarkers(region));
272     region.compactStores(true);
273     // major compaction deleted it
274     assertEquals(0, countDeleteMarkers(region));
275 
276     HRegion.closeHRegion(region);
277   }
278 
279   /**
280    * The ExplicitColumnTracker does not support "raw" scanning.
281    */
282   @Test
283   public void testRawScanWithColumns() throws Exception {
284     HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
285         HConstants.FOREVER, true);
286     HRegion region = hbu.createLocalHRegion(htd, null, null);
287 
288     Scan s = new Scan();
289     s.setRaw(true);
290     s.setMaxVersions();
291     s.addColumn(c0, c0);
292 
293     try {
294       region.getScanner(s);
295       fail("raw scanner with columns should have failed");
296     } catch (org.apache.hadoop.hbase.DoNotRetryIOException dnre) {
297       // ok!
298     }
299 
300     HRegion.closeHRegion(region);
301   }
302 
303   /**
304    * Verify that "raw" scanning mode returns delete markers and deleted rows.
305    */
306   @Test
307   public void testRawScan() throws Exception {
308     HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
309         HConstants.FOREVER, true);
310     HRegion region = hbu.createLocalHRegion(htd, null, null);
311 
312     long ts = EnvironmentEdgeManager.currentTimeMillis();
313     Put p = new Put(T1, ts);
314     p.add(c0, c0, T1);
315     region.put(p);
316     p = new Put(T1, ts+2);
317     p.add(c0, c0, T2);
318     region.put(p);
319     p = new Put(T1, ts+4);
320     p.add(c0, c0, T3);
321     region.put(p);
322 
323     Delete d = new Delete(T1, ts+1);
324     region.delete(d);
325 
326     d = new Delete(T1, ts+2);
327     d.deleteColumn(c0, c0, ts+2);
328     region.delete(d);
329 
330     d = new Delete(T1, ts+3);
331     d.deleteColumns(c0, c0, ts+3);
332     region.delete(d);
333 
334     Scan s = new Scan();
335     s.setRaw(true);
336     s.setMaxVersions();
337     InternalScanner scan = region.getScanner(s);
338     List<Cell> kvs = new ArrayList<Cell>();
339     scan.next(kvs);
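        // expect 8 cells for row T1: in c0 the family marker (ts+1), the T3 put (ts+4),
        // the column marker (ts+3), the version marker (ts+2), and the T2/T1 puts,
        // plus the family markers that the ts+1 row delete placed in the other two CFs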
340     assertEquals(8, kvs.size());
341     assertTrue(CellUtil.isDeleteFamily(kvs.get(0)));
342     assertArrayEquals(CellUtil.cloneValue(kvs.get(1)), T3);
343     assertTrue(CellUtil.isDelete(kvs.get(2)));
344     assertTrue(CellUtil.isDelete(kvs.get(3)));
345     assertArrayEquals(CellUtil.cloneValue(kvs.get(4)), T2);
346     assertArrayEquals(CellUtil.cloneValue(kvs.get(5)), T1);
347     // we have 3 CFs, so there are two more delete markers
348     assertTrue(CellUtil.isDeleteFamily(kvs.get(6)));
349     assertTrue(CellUtil.isDeleteFamily(kvs.get(7)));
350 
351     // verify that raw scans honor the passed timerange
352     s = new Scan();
353     s.setRaw(true);
354     s.setMaxVersions();
355     s.setTimeRange(0, 1);
356     scan = region.getScanner(s);
357     kvs = new ArrayList<Cell>();
358     scan.next(kvs);
359     // nothing in this interval, not even delete markers
360     assertTrue(kvs.isEmpty());
361 
362     // filter new delete markers
363     s = new Scan();
364     s.setRaw(true);
365     s.setMaxVersions();
366     s.setTimeRange(0, ts+2);
367     scan = region.getScanner(s);
368     kvs = new ArrayList<Cell>();
369     scan.next(kvs);
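        // within [0, ts+2): the three ts+1 family markers (one per CF) and the T1 put at ts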
370     assertEquals(4, kvs.size());
371     assertTrue(CellUtil.isDeleteFamily(kvs.get(0)));
372     assertArrayEquals(CellUtil.cloneValue(kvs.get(1)), T1);
373     // we have 3 CFs
374     assertTrue(CellUtil.isDeleteFamily(kvs.get(2)));
375     assertTrue(CellUtil.isDeleteFamily(kvs.get(3)));
376 
377     // filter old delete markers
378     s = new Scan();
379     s.setRaw(true);
380     s.setMaxVersions();
381     s.setTimeRange(ts+3, ts+5);
382     scan = region.getScanner(s);
383     kvs = new ArrayList<Cell>();
384     scan.next(kvs);
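        // within [ts+3, ts+5): only the T3 put (ts+4) and the column marker (ts+3)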
385     assertEquals(2, kvs.size());
386     assertArrayEquals(CellUtil.cloneValue(kvs.get(0)), T3);
387     assertTrue(CellUtil.isDelete(kvs.get(1)));
388 
389 
390     HRegion.closeHRegion(region);
391   }
392 
393   /**
394    * Verify that delete markers are removed from an otherwise empty store.
395    */
396   @Test
397   public void testDeleteMarkerExpirationEmptyStore() throws Exception {
398     HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1,
399         HConstants.FOREVER, true);
400     HRegion region = hbu.createLocalHRegion(htd, null, null);
401 
402     long ts = EnvironmentEdgeManager.currentTimeMillis();
403 
404     Delete d = new Delete(T1, ts);
405     d.deleteColumns(c0, c0, ts);
406     region.delete(d);
407 
408     d = new Delete(T1, ts);
409     d.deleteFamily(c0);
410     region.delete(d);
411 
412     d = new Delete(T1, ts);
413     d.deleteColumn(c0, c0, ts+1);
414     region.delete(d);
415 
416     d = new Delete(T1, ts);
417     d.deleteColumn(c0, c0, ts+2);
418     region.delete(d);
419 
420     // 1 family marker, 1 column marker, 2 version markers
421     assertEquals(4, countDeleteMarkers(region));
422 
423     // neither flush nor minor compaction removes any marker
424     region.flushcache();
425     assertEquals(4, countDeleteMarkers(region));
426     region.compactStores(false);
427     assertEquals(4, countDeleteMarkers(region));
428 
429     // major compaction removes all, since there are no puts they affect
430     region.compactStores(true);
431     assertEquals(0, countDeleteMarkers(region));
432 
433     HRegion.closeHRegion(region);
434   }
435 
436   /**
437    * Test delete marker removal from store files.
438    */
439   @Test
440   public void testDeleteMarkerExpiration() throws Exception {
441     HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1,
442         HConstants.FOREVER, true);
443     HRegion region = hbu.createLocalHRegion(htd, null, null);
444 
445     long ts = EnvironmentEdgeManager.currentTimeMillis();
446 
447     Put p = new Put(T1, ts);
448     p.add(c0, c0, T1);
449     region.put(p);
450 
451     // a put into another store (CF) should have no effect
452     p = new Put(T1, ts-10);
453     p.add(c1, c0, T1);
454     region.put(p);
455 
456     // all the following deletes affect the put
457     Delete d = new Delete(T1, ts);
458     d.deleteColumns(c0, c0, ts);
459     region.delete(d);
460 
461     d = new Delete(T1, ts);
462     d.deleteFamily(c0, ts);
463     region.delete(d);
464 
465     d = new Delete(T1, ts);
466     d.deleteColumn(c0, c0, ts+1);
467     region.delete(d);
468 
469     d = new Delete(T1, ts);
470     d.deleteColumn(c0, c0, ts+2);
471     region.delete(d);
472 
473     // 1 family marker, 1 column marker, 2 version markers
474     assertEquals(4, countDeleteMarkers(region));
475 
476     region.flushcache();
477     assertEquals(4, countDeleteMarkers(region));
478     region.compactStores(false);
479     assertEquals(4, countDeleteMarkers(region));
480 
481     // another put will push out the earlier put...
482     p = new Put(T1, ts+3);
483     p.add(c0, c0, T1);
484     region.put(p);
485 
486     region.flushcache();
487     // no markers are collected, since there is an affected put
488     region.compactStores(true);
489     assertEquals(4, countDeleteMarkers(region));
490 
491     // the previous compaction collected the earlier put,
492     // so after this compaction all markers are gone
493     region.compactStores(true);
494     assertEquals(0, countDeleteMarkers(region));
495 
496     HRegion.closeHRegion(region);
497   }
498 
499   /**
500    * Verify correct range demarcation
501    */
502   @Test
503   public void testRanges() throws Exception {
504     HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
505         HConstants.FOREVER, true);
506     HRegion region = hbu.createLocalHRegion(htd, null, null);
507 
508     long ts = EnvironmentEdgeManager.currentTimeMillis();
509     Put p = new Put(T1, ts);
510     p.add(c0, c0, T1);
511     p.add(c0, c1, T1);
512     p.add(c1, c0, T1);
513     p.add(c1, c1, T1);
514     region.put(p);
515 
516     p = new Put(T2, ts);
517     p.add(c0, c0, T1);
518     p.add(c0, c1, T1);
519     p.add(c1, c0, T1);
520     p.add(c1, c1, T1);
521     region.put(p);
522 
523     p = new Put(T1, ts+1);
524     p.add(c0, c0, T2);
525     p.add(c0, c1, T2);
526     p.add(c1, c0, T2);
527     p.add(c1, c1, T2);
528     region.put(p);
529 
530     p = new Put(T2, ts+1);
531     p.add(c0, c0, T2);
532     p.add(c0, c1, T2);
533     p.add(c1, c0, T2);
534     p.add(c1, c1, T2);
535     region.put(p);
536 
537     Delete d = new Delete(T1, ts+2);
538     d.deleteColumns(c0, c0, ts+2);
539     region.delete(d);
540 
541     d = new Delete(T1, ts+2);
542     d.deleteFamily(c1, ts+2);
543     region.delete(d);
544 
545     d = new Delete(T2, ts+2);
546     d.deleteFamily(c0, ts+2);
547     region.delete(d);
548 
549     // add an older delete, to make sure it is filtered
550     d = new Delete(T1, ts-10);
551     d.deleteFamily(c1, ts-10);
552     region.delete(d);
553 
554     // ts + 2 does NOT include the delete at ts+2
555     checkGet(region, T1, c0, c0, ts+2, T2, T1);
556     checkGet(region, T1, c0, c1, ts+2, T2, T1);
557     checkGet(region, T1, c1, c0, ts+2, T2, T1);
558     checkGet(region, T1, c1, c1, ts+2, T2, T1);
559 
560     checkGet(region, T2, c0, c0, ts+2, T2, T1);
561     checkGet(region, T2, c0, c1, ts+2, T2, T1);
562     checkGet(region, T2, c1, c0, ts+2, T2, T1);
563     checkGet(region, T2, c1, c1, ts+2, T2, T1);
564 
565     // ts + 3 does
566     checkGet(region, T1, c0, c0, ts+3);
567     checkGet(region, T1, c0, c1, ts+3, T2, T1);
568     checkGet(region, T1, c1, c0, ts+3);
569     checkGet(region, T1, c1, c1, ts+3);
570 
571     checkGet(region, T2, c0, c0, ts+3);
572     checkGet(region, T2, c0, c1, ts+3);
573     checkGet(region, T2, c1, c0, ts+3, T2, T1);
574     checkGet(region, T2, c1, c1, ts+3, T2, T1);
575 
576     HRegion.closeHRegion(region);
577   }
578 
579   /**
580    * Verify that column/version delete markers are sorted
581    * with their respective puts and removed correctly by
582    * versioning (i.e. not relying on the store's earliestPutTs).
583    */
584   @Test
585   public void testDeleteMarkerVersioning() throws Exception {
586     HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1,
587         HConstants.FOREVER, true);
588     HRegion region = hbu.createLocalHRegion(htd, null, null);
589 
590     long ts = EnvironmentEdgeManager.currentTimeMillis();
591     Put p = new Put(T1, ts);
592     p.add(c0, c0, T1);
593     region.put(p);
594 
595     // this prevents marker collection based on earliestPutTs
596     // (we cannot keep the earliest put per column in the store file)
597     p = new Put(T1, ts-10);
598     p.add(c0, c1, T1);
599     region.put(p);
600 
601     Delete d = new Delete(T1, ts);
602     // test corner case (Put and Delete have same TS)
603     d.deleteColumns(c0, c0, ts);
604     region.delete(d);
605 
606     d = new Delete(T1, ts+1);
607     d.deleteColumn(c0, c0, ts+1);
608     region.delete(d);
609 
610     d = new Delete(T1, ts+3);
611     d.deleteColumn(c0, c0, ts+3);
612     region.delete(d);
613 
614     region.flushcache();
615     region.compactStores(true);
616     region.compactStores(true);
617     assertEquals(3, countDeleteMarkers(region));
618 
619     // add two more puts; since max versions is 1,
620     // the 2nd put (and all delete markers following it)
621     // will be removed.
622     p = new Put(T1, ts+2);
623     p.add(c0, c0, T2);
624     region.put(p);
625 
626     // delete, put, delete, delete, put
627     assertEquals(3, countDeleteMarkers(region));
628 
629     p = new Put(T1, ts+3);
630     p.add(c0, c0, T3);
631     region.put(p);
632 
633     // This is potentially questionable behavior.
634     // This could be changed by not letting the ScanQueryMatcher
635     // return SEEK_NEXT_COL if a put is past VERSIONS, but instead
636     // return SKIP if the store has KEEP_DELETED_CELLS set.
637     //
638     // As it stands, the 1 here is correct.
639     // There are two puts, VERSIONS is one, so after the 1st put the scanner
640     // knows that there can be no more KVs (put or delete) that have any effect.
641     //
642     // delete, put, put | delete, delete
643     assertEquals(1, countDeleteMarkers(region));
644 
645     // flush cache only sees what is in the memstore
646     region.flushcache();
647 
648     // Here we have the three markers again, because the flush above
649     // removed the 2nd put before the file was written.
650     // So there's only one put, and hence the deletes already in the store
651     // files cannot be removed safely.
652     // delete, put, delete, delete
653     assertEquals(3, countDeleteMarkers(region));
654 
655     region.compactStores(true);
656     assertEquals(3, countDeleteMarkers(region));
657 
658     // add one more put
659     p = new Put(T1, ts+4);
660     p.add(c0, c0, T4);
661     region.put(p);
662 
663     region.flushcache();
664     // one trailing delete marker remains (but only one)
665     // because delete markers do not increase the version count
666     assertEquals(1, countDeleteMarkers(region));
667     region.compactStores(true);
668     region.compactStores(true);
669     assertEquals(1, countDeleteMarkers(region));
670 
671     HRegion.closeHRegion(region);
672   }
673 
674   /**
675    * Verify scenarios with multiple CFs and columns
676    */
      @Test
677   public void testWithMixedCFs() throws Exception {
678     HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1,
679         HConstants.FOREVER, true);
680     HRegion region = hbu.createLocalHRegion(htd, null, null);
681 
682     long ts = EnvironmentEdgeManager.currentTimeMillis();
683 
684     Put p = new Put(T1, ts);
685     p.add(c0, c0, T1);
686     p.add(c0, c1, T1);
687     p.add(c1, c0, T1);
688     p.add(c1, c1, T1);
689     region.put(p);
690 
691     p = new Put(T2, ts+1);
692     p.add(c0, c0, T2);
693     p.add(c0, c1, T2);
694     p.add(c1, c0, T2);
695     p.add(c1, c1, T2);
696     region.put(p);
697 
698     // the following row deletes place a family marker in each column family
699     Delete d = new Delete(T1, ts+1);
700     region.delete(d);
701 
702     d = new Delete(T2, ts+2);
703     region.delete(d);
704 
705     Scan s = new Scan(T1);
706     s.setTimeRange(0, ts+1);
707     InternalScanner scanner = region.getScanner(s);
708     List<Cell> kvs = new ArrayList<Cell>();
709     scanner.next(kvs);
710     assertEquals(4, kvs.size());
711     scanner.close();
712 
713     s = new Scan(T2);
714     s.setTimeRange(0, ts+2);
715     scanner = region.getScanner(s);
716     kvs = new ArrayList<Cell>();
717     scanner.next(kvs);
718     assertEquals(4, kvs.size());
719     scanner.close();
720 
721     HRegion.closeHRegion(region);
722   }
723 
724   /**
725    * Test keeping deleted rows together with min versions set
726    * @throws Exception
727    */
728   @Test
729   public void testWithMinVersions() throws Exception {
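      // minVersions = 3, up to 1000 versions, TTL = 1 second, KEEP_DELETED_CELLS enabled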
730     HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 3, 1000, 1, true);
731     HRegion region = hbu.createLocalHRegion(htd, null, null);
732 
733     long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; // 2s in the past
734 
735     Put p = new Put(T1, ts);
736     p.add(c0, c0, T3);
737     region.put(p);
738     p = new Put(T1, ts-1);
739     p.add(c0, c0, T2);
740     region.put(p);
741     p = new Put(T1, ts-3);
742     p.add(c0, c0, T1);
743     region.put(p);
744     p = new Put(T1, ts-4);
745     p.add(c0, c0, T0);
746     region.put(p);
747 
748     // all puts are now retained only because of min versions = 3
749 
750     // place a family delete marker
751     Delete d = new Delete(T1, ts-1);
752     region.delete(d);
753     // and a column delete marker
754     d = new Delete(T1, ts-2);
755     d.deleteColumns(c0, c0, ts-1);
756     region.delete(d);
757 
758     Get g = new Get(T1);
759     g.setMaxVersions();
760     g.setTimeRange(0L, ts-2);
761     Result r = region.get(g);
762     checkResult(r, c0, c0, T1,T0);
763 
764     // 3 family delete markers (one per column family), one column delete marker
765     assertEquals(4, countDeleteMarkers(region));
766 
767     region.flushcache();
768     // no delete markers are removed by the flush
769     assertEquals(4, countDeleteMarkers(region));
770 
771     r = region.get(g);
772     checkResult(r, c0, c0, T1);
773     p = new Put(T1, ts+1);
774     p.add(c0, c0, T4);
775     region.put(p);
776     region.flushcache();
777 
778     assertEquals(4, countDeleteMarkers(region));
779 
780     r = region.get(g);
781     checkResult(r, c0, c0, T1);
782 
783     // this will push out the last put before the
784     // family delete marker
785     p = new Put(T1, ts+2);
786     p.add(c0, c0, T5);
787     region.put(p);
788 
789     region.flushcache();
790     region.compactStores(true);
791     // the two family markers without puts are gone
792     assertEquals(2, countDeleteMarkers(region));
793 
794     // the last compactStores updated the earliestPutTs,
795     // so after the next compaction the last family delete marker is also gone
796     region.compactStores(true);
797     assertEquals(0, countDeleteMarkers(region));
798 
799     HRegion.closeHRegion(region);
800   }
801 
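      /**
       * Get all versions of fam:col for the given row within the time range [0, time)
       * and verify that exactly the expected values come back, newest first.
       */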
802   private void checkGet(HRegion region, byte[] row, byte[] fam, byte[] col,
803       long time, byte[]... vals) throws IOException {
804     Get g = new Get(row);
805     g.addColumn(fam, col);
806     g.setMaxVersions();
807     g.setTimeRange(0L, time);
808     Result r = region.get(g);
809     checkResult(r, fam, col, vals);
810 
811   }
812 
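      /**
       * Count the delete markers (family, column and version markers) that a raw scan
       * over the whole region can still see, across memstore and store files.
       */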
813   private int countDeleteMarkers(HRegion region) throws IOException {
814     Scan s = new Scan();
815     s.setRaw(true);
816     // use max versions from the store(s)
817     s.setMaxVersions(region.getStores().values().iterator().next().getScanInfo().getMaxVersions());
818     InternalScanner scan = region.getScanner(s);
819     List<Cell> kvs = new ArrayList<Cell>();
820     int res = 0;
821     boolean hasMore;
822     do {
823       hasMore = scan.next(kvs);
824       for (Cell kv : kvs) {
825         if(CellUtil.isDelete(kv)) res++;
826       }
827       kvs.clear();
828     } while (hasMore);
829     scan.close();
830     return res;
831   }
832 
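      /**
       * Assert that the result contains exactly the given values for fam:col,
       * in order (newest first).
       */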
833   private void checkResult(Result r, byte[] fam, byte[] col, byte[] ... vals) {
834     assertEquals(r.size(), vals.length);
835     List<Cell> kvs = r.getColumnCells(fam, col);
836     assertEquals(kvs.size(), vals.length);
837     for (int i=0;i<vals.length;i++) {
838       assertArrayEquals(CellUtil.cloneValue(kvs.get(i)), vals[i]);
839     }
840   }
841 
842 
843 }
844