/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Random;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileReaderV2;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

/**
 * Tests {@link HFile} cache-on-write functionality for data blocks, non-root
 * index blocks, and Bloom filter blocks, as specified by the column family.
 */
@RunWith(Parameterized.class)
@Category(MediumTests.class)
public class TestCacheOnWriteInSchema {

  private static final Log LOG = LogFactory.getLog(TestCacheOnWriteInSchema.class);
  @Rule public TestName name = new TestName();

  private static final HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();
  private static final String DIR = TEST_UTIL.getDataTestDir("TestCacheOnWriteInSchema").toString();
  private static byte [] table;
  private static byte [] family = Bytes.toBytes("family");
  private static final int NUM_KV = 25000;
  private static final Random rand = new Random(12983177L);
  /** The number of valid key types possible in a store file */
  private static final int NUM_VALID_KEY_TYPES =
      KeyValue.Type.values().length - 2;

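  /**
   * Maps each cache-on-write setting under test to the block types that the
   * setting is expected to place in the block cache during a write, and knows
   * how to enable that setting on a column family descriptor.
   */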
  private static enum CacheOnWriteType {
    DATA_BLOCKS(BlockType.DATA, BlockType.ENCODED_DATA),
    BLOOM_BLOCKS(BlockType.BLOOM_CHUNK),
    INDEX_BLOCKS(BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX);

    private final BlockType blockType1;
    private final BlockType blockType2;

    private CacheOnWriteType(BlockType blockType) {
      this(blockType, blockType);
    }

    private CacheOnWriteType(BlockType blockType1, BlockType blockType2) {
      this.blockType1 = blockType1;
      this.blockType2 = blockType2;
    }

    public boolean shouldBeCached(BlockType blockType) {
      return blockType == blockType1 || blockType == blockType2;
    }

    public void modifyFamilySchema(HColumnDescriptor family) {
      switch (this) {
      case DATA_BLOCKS:
        family.setCacheDataOnWrite(true);
        break;
      case BLOOM_BLOCKS:
        family.setCacheBloomsOnWrite(true);
        break;
      case INDEX_BLOCKS:
        family.setCacheIndexesOnWrite(true);
        break;
      }
    }
  }

  private final CacheOnWriteType cowType;
  private Configuration conf;
  private final String testDescription;
  private HRegion region;
  private HStore store;
  private HLog hlog;
  private FileSystem fs;

  public TestCacheOnWriteInSchema(CacheOnWriteType cowType) {
    this.cowType = cowType;
    testDescription = "[cacheOnWrite=" + cowType + "]";
    System.out.println(testDescription);
  }

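  /** Runs the test once per cache-on-write setting: data, Bloom, and index blocks. */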
  @Parameters
  public static Collection<Object[]> getParameters() {
    List<Object[]> cowTypes = new ArrayList<Object[]>();
    for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
      cowTypes.add(new Object[] { cowType });
    }
    return cowTypes;
  }

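  /**
   * Builds a column family descriptor with only the cache-on-write flag under
   * test enabled, then creates a region and store backed by that schema. The
   * global cache-on-write settings are explicitly disabled, so any caching
   * observed must come from the family schema.
   */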
  @Before
  public void setUp() throws IOException {
    // Parameterized tests append a "[...]" suffix to the method name;
    // replace the brackets so the result is a valid table name.
    table = Bytes.toBytes(name.getMethodName().replaceAll("[\\[\\]]", "_"));

    conf = TEST_UTIL.getConfiguration();
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, false);

    fs = HFileSystem.get(conf);

    // Create the schema
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setBloomFilterType(BloomType.ROWCOL);
    cowType.modifyFamilySchema(hcd);
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
    htd.addFamily(hcd);

    // Create a store based on the schema
    Path basedir = new Path(DIR);
    String logName = "logs";
    Path logdir = new Path(DIR, logName);
    fs.delete(logdir, true);

    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    hlog = HLogFactory.createHLog(fs, basedir, logName, conf);

    region = TEST_UTIL.createLocalHRegion(info, htd, hlog);
    store = new HStore(region, hcd, conf);
  }

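  /**
   * Closes the region and log and deletes the test directory. Cleanup
   * continues past individual failures; the last exception caught, if any,
   * is rethrown at the end.
   */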
  @After
  public void tearDown() throws IOException {
    IOException ex = null;
    try {
      region.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      hlog.closeAndDelete();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      fs.delete(new Path(DIR), true);
    } catch (IOException e) {
      LOG.error("Could not delete " + DIR, e);
      ex = e;
    }
    if (ex != null) {
      throw ex;
    }
  }

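  /**
   * Writes a store file with cache-on-write configured only in the family
   * schema, then reads it back and verifies that exactly the expected block
   * types were cached during the write.
   */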
  @Test
  public void testCacheOnWriteInSchema() throws IOException {
    // Write some random data into the store
    StoreFile.Writer writer = store.createWriterInTmp(Integer.MAX_VALUE,
        HFile.DEFAULT_COMPRESSION_ALGORITHM, false, true, false);
    writeStoreFile(writer);
    writer.close();
    // Verify the block types of interest were cached on write
    readStoreFile(writer.getPath());
  }

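  /**
   * Walks every block in the store file up to the load-on-open section and
   * asserts that a block is present in the cache if and only if its type is
   * one the current cache-on-write setting should have cached.
   */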
  private void readStoreFile(Path path) throws IOException {
    CacheConfig cacheConf = store.getCacheConfig();
    BlockCache cache = cacheConf.getBlockCache();
    StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
      BloomType.ROWCOL);
    HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
    try {
      // Open a scanner with (on read) caching disabled
      HFileScanner scanner = reader.getScanner(false, false);
      assertTrue(testDescription, scanner.seekTo());
      // Cribbed from io.hfile.TestCacheOnWrite
      long offset = 0;
      HFileBlock prevBlock = null;
      while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
        long onDiskSize = -1;
        if (prevBlock != null) {
          onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
        }
        // Flags: don't cache the block, use pread, this is not a compaction.
        // Also, pass null for expected block type to avoid checking it.
        HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
          false, true, null);
        BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
          offset);
        boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
        boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
        if (shouldBeCached != isCached) {
          throw new AssertionError(
            "shouldBeCached: " + shouldBeCached + "\n" +
            "isCached: " + isCached + "\n" +
            "Test description: " + testDescription + "\n" +
            "block: " + block + "\n" +
            "blockCacheKey: " + blockCacheKey);
        }
        prevBlock = block;
        offset += block.getOnDiskSizeWithHeader();
      }
    } finally {
      reader.close();
    }
  }

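  /**
   * Picks a random key type: Put half of the time, otherwise any valid type.
   * Minimum and Maximum are excluded because they never appear in store files;
   * the runtime check below guards against changes to the enum layout.
   */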
  private static KeyValue.Type generateKeyType(Random rand) {
    if (rand.nextBoolean()) {
      // Let's make half of KVs puts.
      return KeyValue.Type.Put;
    } else {
      KeyValue.Type keyType =
          KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
      if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) {
        throw new RuntimeException("Generated an invalid key type: " + keyType
            + ". Probably the layout of KeyValue.Type has changed.");
      }
      return keyType;
    }
  }

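  /**
   * Appends NUM_KV KeyValues with random ordered keys, random values, and
   * random key types. Each 32-byte row is carved from the front of the
   * generated key; the remainder is split between family and qualifier.
   */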
  private void writeStoreFile(StoreFile.Writer writer) throws IOException {
    final int rowLen = 32;
    for (int i = 0; i < NUM_KV; ++i) {
      byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i);
      byte[] v = TestHFileWriterV2.randomValue(rand);
      int cfLen = rand.nextInt(k.length - rowLen + 1);
      KeyValue kv = new KeyValue(
          k, 0, rowLen,
          k, rowLen, cfLen,
          k, rowLen + cfLen, k.length - rowLen - cfLen,
          rand.nextLong(),
          generateKeyType(rand),
          v, 0, v.length);
      writer.append(kv);
    }
  }

}