/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;


import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.NavigableMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestCase.FlushCache;
import org.apache.hadoop.hbase.HBaseTestCase.HTableIncommon;
import org.apache.hadoop.hbase.HBaseTestCase.Incommon;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Port of the old TestScanMultipleVersions, TestTimestamp and TestGetRowVersions
 * tests from the old testing framework to {@link HBaseTestingUtility}.
 */
@Category(MediumTests.class)
public class TestMultiVersions {
  private static final Log LOG = LogFactory.getLog(TestMultiVersions.class);
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private Admin admin;

  private static final int NUM_SLAVES = 3;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    UTIL.startMiniCluster(NUM_SLAVES);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  @Before
  public void before()
  throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
    this.admin = new HBaseAdmin(UTIL.getConfiguration());
  }

  @After
  public void after() throws IOException {
    this.admin.close();
  }

  /**
   * Tests user-specifiable timestamps for putting, getting and scanning.  Also
   * tests the same in the presence of deletes.  Test cores are written so they can be
   * run against an HRegion and against an HTable: i.e. both local and remote.
   *
   * <p>Port of the old TestTimestamp test to here so we can better utilize the spun-up
   * cluster by running more than a single test per spin-up.  Keep the old tests'
   * craziness.
   */
  @Test
  public void testTimestamps() throws Exception {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("testTimestamps"));
    HColumnDescriptor hcd = new HColumnDescriptor(TimestampTestBase.FAMILY_NAME);
    hcd.setMaxVersions(3);
    desc.addFamily(hcd);
    this.admin.createTable(desc);
    Table table = new HTable(UTIL.getConfiguration(), desc.getTableName());
    // TODO: Remove these deprecated classes or pull them in here if this is
    // the only test using them.
    Incommon incommon = new HTableIncommon(table);
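    // The Incommon adapter lets the TimestampTestBase cores run against either an HRegion or,
    // as here, a live table; the FlushCache callbacks below flush every region's memstore so
    // the timestamp checks also exercise reads from store files.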
    TimestampTestBase.doTestDelete(incommon, new FlushCache() {
      public void flushcache() throws IOException {
        UTIL.getHBaseCluster().flushcache();
      }
    });

    // Perhaps drop and re-add the table between tests so the former does
    // not pollute the latter?  Or put these into separate tests.
    TimestampTestBase.doTestTimestampScanning(incommon, new FlushCache() {
      public void flushcache() throws IOException {
        UTIL.getMiniHBaseCluster().flushcache();
      }
    });

    table.close();
  }

  /**
   * Verifies versions across a cluster restart.
   * Port of the old TestGetRowVersions test to here so we can better utilize the spun-up
   * cluster by running more than a single test per spin-up.  Keep the old tests'
   * craziness.
   */
  @Test
  public void testGetRowVersions() throws Exception {
    final String tableName = "testGetRowVersions";
    final byte [] contents = Bytes.toBytes("contents");
    final byte [] row = Bytes.toBytes("row");
    final byte [] value1 = Bytes.toBytes("value1");
    final byte [] value2 = Bytes.toBytes("value2");
    final long timestamp1 = 100L;
    final long timestamp2 = 200L;
    final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor hcd = new HColumnDescriptor(contents);
    hcd.setMaxVersions(3);
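    // Keep up to three versions per cell so both the value written before the restart
    // (timestamp 100) and the one written afterwards (timestamp 200) survive.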
    desc.addFamily(hcd);
    this.admin.createTable(desc);
    Put put = new Put(row, timestamp1);
    put.add(contents, contents, value1);
    Table table = new HTable(UTIL.getConfiguration(), desc.getTableName());
    table.put(put);
    // Shut down and restart the HBase cluster
    table.close();
    UTIL.shutdownMiniHBaseCluster();
    LOG.debug("HBase cluster shut down -- restarting");
    UTIL.startMiniHBaseCluster(1, NUM_SLAVES);
    // Make a new connection.  Use a new Configuration instance because the old one
    // is tied to an HConnection that has since gone stale.
    table = new HTable(new Configuration(UTIL.getConfiguration()), desc.getTableName());
    // Overwrite previous value
    put = new Put(row, timestamp2);
    put.add(contents, contents, value2);
    table.put(put);
    // Now verify that getRow(row, column, latest) works
    Get get = new Get(row);
    // Should get one version by default
    Result r = table.get(get);
    assertNotNull(r);
    assertFalse(r.isEmpty());
    assertTrue(r.size() == 1);
    byte [] value = r.getValue(contents, contents);
    assertTrue(value.length != 0);
    assertTrue(Bytes.equals(value, value2));
    // Now check getRow with multiple versions
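    // setMaxVersions() with no argument asks for all available versions; the family keeps at
    // most three, and only two were written, so two cells come back.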
    get = new Get(row);
    get.setMaxVersions();
    r = table.get(get);
    assertTrue(r.size() == 2);
    value = r.getValue(contents, contents);
    assertTrue(value.length != 0);
    assertTrue(Bytes.equals(value, value2));
    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map =
      r.getMap();
    NavigableMap<byte[], NavigableMap<Long, byte[]>> familyMap =
      map.get(contents);
    NavigableMap<Long, byte[]> versionMap = familyMap.get(contents);
    assertTrue(versionMap.size() == 2);
    assertTrue(Bytes.equals(value1, versionMap.get(timestamp1)));
    assertTrue(Bytes.equals(value2, versionMap.get(timestamp2)));
    table.close();
  }

  /**
   * Port of the old TestScanMultipleVersions test to here so we can better utilize the
   * spun-up cluster by running more than just a single test.  Keep the old tests'
   * craziness.
   *
   * <p>Tests five cases of scans and timestamps.
   * @throws Exception
   */
  @Test
  public void testScanMultipleVersions() throws Exception {
    final TableName tableName = TableName.valueOf("testScanMultipleVersions");
    final HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    final byte [][] rows = new byte[][] {
      Bytes.toBytes("row_0200"),
      Bytes.toBytes("row_0800")
    };
    final byte [][] splitRows = new byte[][] {Bytes.toBytes("row_0500")};
    final long [] timestamp = new long[] {100L, 1000L};
    this.admin.createTable(desc, splitRows);
    HTable table = new HTable(UTIL.getConfiguration(), tableName);
    // Assert we got the region layout wanted.
    NavigableMap<HRegionInfo, ServerName> locations = table.getRegionLocations();
    assertEquals(2, locations.size());
    int index = 0;
    for (HRegionInfo hri: locations.keySet()) {
      if (index == 0) {
        assertTrue(Bytes.equals(HConstants.EMPTY_START_ROW, hri.getStartKey()));
        assertTrue(Bytes.equals(hri.getEndKey(), splitRows[0]));
      } else if (index == 1) {
        assertTrue(Bytes.equals(splitRows[0], hri.getStartKey()));
        assertTrue(Bytes.equals(hri.getEndKey(), HConstants.EMPTY_END_ROW));
      }
      index++;
    }
    // Insert data
    for (int i = 0; i < locations.size(); i++) {
      for (int j = 0; j < timestamp.length; j++) {
        Put put = new Put(rows[i], timestamp[j]);
        put.add(HConstants.CATALOG_FAMILY, null, timestamp[j],
            Bytes.toBytes(timestamp[j]));
        table.put(put);
      }
    }
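    // Each of the two regions now holds one of the rows, and each row has a cell in the
    // catalog family at timestamp 100 and another at timestamp 1000.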
    // Before exercising the five scan cases below, verify that a Get at each exact
    // timestamp returns exactly one cell per row.
    for (int i = 0; i < rows.length; i++) {
      for (int j = 0; j < timestamp.length; j++) {
        Get get = new Get(rows[i]);
        get.addFamily(HConstants.CATALOG_FAMILY);
        get.setTimeStamp(timestamp[j]);
        Result result = table.get(get);
        int cellCount = 0;
        for (@SuppressWarnings("unused") Cell kv : result.listCells()) {
          cellCount++;
        }
        assertTrue(cellCount == 1);
      }
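      // flushCommits() pushes any puts still sitting in the HTable client-side write buffer
      // out to the region servers before the scan cases run.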
      table.flushCommits();
    }

    // Case 1: scan with LATEST_TIMESTAMP. Should get two rows
    int count = 0;
    Scan scan = new Scan();
    scan.addFamily(HConstants.CATALOG_FAMILY);
    ResultScanner s = table.getScanner(scan);
    try {
      for (Result rr = null; (rr = s.next()) != null;) {
        System.out.println(rr.toString());
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }

    // Case 2: Scan with a time range starting at the most recent timestamp
    // (in this case >= 1000 and < LATEST_TIMESTAMP). Should get 2 rows.

    count = 0;
    scan = new Scan();
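    // Scan time ranges are half-open: the minimum is inclusive and the maximum exclusive,
    // so [1000, Long.MAX_VALUE) still matches the cells written at timestamp 1000.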
    scan.setTimeRange(1000L, Long.MAX_VALUE);
    scan.addFamily(HConstants.CATALOG_FAMILY);

    s = table.getScanner(scan);
    try {
      while (s.next() != null) {
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }

    // Case 3: scan with a timestamp equal to the most recent timestamp
    // (in this case == 1000). Should get 2 rows.

    count = 0;
    scan = new Scan();
    scan.setTimeStamp(1000L);
    scan.addFamily(HConstants.CATALOG_FAMILY);

    s = table.getScanner(scan);
    try {
      while (s.next() != null) {
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }

    // Case 4: scan with a time range that covers the first timestamp but stops short of
    // the second ([100, 1000)). Should get 2 rows.

    count = 0;
    scan = new Scan();
    scan.setTimeRange(100L, 1000L);
    scan.addFamily(HConstants.CATALOG_FAMILY);

    s = table.getScanner(scan);
    try {
      while (s.next() != null) {
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }

    // Case 5: scan with timestamp equal to first timestamp (100).
    // Should get 2 rows.

    count = 0;
    scan = new Scan();
    scan.setTimeStamp(100L);
    scan.addFamily(HConstants.CATALOG_FAMILY);

    s = table.getScanner(scan);
    try {
      while (s.next() != null) {
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }
  }

}