/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.wal.WALSplitter;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

/**
 * Tests around replay of recovered.edits content.
 */
@Category({MediumTests.class})
public class TestRecoveredEdits {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final Log LOG = LogFactory.getLog(TestRecoveredEdits.class);
  @Rule public TestName testName = new TestName();

  /**
   * HBASE-12782 ITBLL fails for me if generator does anything but 5M per maptask.
   * Create a region. Close it. Then copy into place a file of edits to replay, one that is bigger
   * than the configured flush size so we bring on lots of flushes. Then reopen the region and
   * confirm all edits made it in.
   * @throws IOException
   */
  @Test (timeout=60000)
  public void testReplayWorksThoughLotsOfFlushing() throws IOException {
    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
    // Set it so we flush every 1M or so. That's a lot.
    conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
    // The file of recovered edits has a column family of 'meta'. It also has an encoded region
    // name of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay.
    final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f";
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(testName.getMethodName()));
    final String columnFamily = "meta";
    byte [][] columnFamilyAsByteArray = new byte [][] {Bytes.toBytes(columnFamily)};
    htd.addFamily(new HColumnDescriptor(columnFamily));
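    // Use a region info whose encoded name matches the one carried inside the canned
    // recovered.edits file; edits keyed to a different encoded region name would be skipped
    // on replay.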
    HRegionInfo hri = new HRegionInfo(htd.getTableName()) {
      @Override
      public synchronized String getEncodedName() {
        return encodedRegionName;
      }

      // Cache the name because lots of lookups.
      private byte [] encodedRegionNameAsBytes = null;
      @Override
      public synchronized byte[] getEncodedNameAsBytes() {
        if (encodedRegionNameAsBytes == null) {
          this.encodedRegionNameAsBytes = Bytes.toBytes(getEncodedName());
        }
        return this.encodedRegionNameAsBytes;
      }
    };
    Path hbaseRootDir = TEST_UTIL.getDataTestDir();
    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    Path tableDir = FSUtils.getTableDir(hbaseRootDir, htd.getTableName());
    HRegionFileSystem hrfs =
        new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tableDir, hri);
    if (fs.exists(hrfs.getRegionDir())) {
      LOG.info("Region directory already exists. Deleting.");
      fs.delete(hrfs.getRegionDir(), true);
    }
    HRegion region = HRegion.createHRegion(hri, hbaseRootDir, conf, htd, null);
    assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
    List<String> storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
    // There should be no store files.
    assertTrue(storeFiles.isEmpty());
    region.close();
    Path regionDir = region.getRegionDir(hbaseRootDir, hri);
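    // recovered.edits is the per-region directory that WAL splitting writes edits into; any
    // files found there are replayed the next time the region is opened.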
    Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regionDir);
    // This is a little fragile: the path must resolve to the bundled test resource holding ~10MB
    // of edits.
    Path recoveredEditsFile = new Path(
      System.getProperty("test.build.classes", "target/test-classes"),
        "0000000000000016310");
    // Copy this file under the region's recovered.edits dir so it is replayed on reopen.
    Path destination = new Path(recoveredEditsDir, recoveredEditsFile.getName());
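    // No mini cluster appears to be started in this test, so the filesystem obtained above is
    // the local one; copyToLocalFile therefore just places the edits file under recovered.edits.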
    fs.copyToLocalFile(recoveredEditsFile, destination);
    assertTrue(fs.exists(destination));
    // Now that the file 0000000000000016310 is under recovered.edits, reopen the region to
    // replay it.
    region = HRegion.openHRegion(region, null);
    assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
    storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
    // Our 0000000000000016310 file is 10MB and most of its edits are for this one region.
    // Flushing every 1MB, the replay should leave behind well more than ten flushed store files.
    assertTrue("Files count=" + storeFiles.size(), storeFiles.size() > 10);
    // Now verify all edits made it into the region.
    int count = verifyAllEditsMadeItIn(fs, conf, recoveredEditsFile, region);
    LOG.info("Checked " + count + " edits made it in");
  }

  /**
   * @param fs filesystem the edits file lives on
   * @param conf configuration used to create the WAL reader
   * @param edits path to the recovered edits file
   * @param region region to verify the edits against
   * @return How many edits were seen.
   * @throws IOException
   */
  private int verifyAllEditsMadeItIn(final FileSystem fs, final Configuration conf,
      final Path edits, final HRegion region)
  throws IOException {
    int count = 0;
    // Based on HRegion#replayRecoveredEdits
    WAL.Reader reader = null;
    try {
      reader = WALFactory.createReader(fs, edits, conf);
      WAL.Entry entry;
      while ((entry = reader.next()) != null) {
        WALKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        count++;
        // Check this edit is for this region.
        if (!Bytes.equals(key.getEncodedRegionName(),
            region.getRegionInfo().getEncodedNameAsBytes())) {
          continue;
        }
        Cell previous = null;
        for (Cell cell: val.getCells()) {
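          // Skip WALEdit metadata markers; they carry the special METAFAMILY family and are not
          // user data, so they will never show up in a Get against the region.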
          if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) continue;
          if (previous != null && CellComparator.compareRows(previous, cell) == 0) continue;
          previous = cell;
          Get g = new Get(CellUtil.cloneRow(cell));
          Result r = region.get(g);
          boolean found = false;
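          // Look for a cell in the Get result that matches the cell from the WAL edit; the
          // boolean argument asks the comparator to ignore the sequence id.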
          for (CellScanner scanner = r.cellScanner(); scanner.advance();) {
            Cell current = scanner.current();
            if (CellComparator.compare(cell, current, true) == 0) {
              found = true;
              break;
            }
          }
          assertTrue("Failed to find " + cell, found);
        }
      }
    } finally {
      if (reader != null) reader.close();
    }
    return count;
  }
}