/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.snapshot;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test that {@link SnapshotLogSplitter} splits a WAL into per-region recovered
 * edits containing only the entries for the requested table and regions.
 */
@Category(SmallTests.class)
public class TestSnapshotLogSplitter {
  final Log LOG = LogFactory.getLog(getClass());

  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private byte[] TEST_QUALIFIER = Bytes.toBytes("q");
  private byte[] TEST_FAMILY = Bytes.toBytes("f");

  private Configuration conf;
  private FileSystem fs;
  private Path logFile;

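  /*
   * Each test runs against a single log file written by writeTestLog(), which
   * contains entries for several tables and regions.
   */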
  @Before
  public void setup() throws Exception {
    conf = TEST_UTIL.getConfiguration();
    fs = FileSystem.get(conf);
    logFile = new Path(TEST_UTIL.getDataTestDir(), "test.log");
    writeTestLog(logFile);
  }

  @After
  public void tearDown() throws Exception {
    fs.delete(logFile, false);
  }

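  /*
   * Split the log with an empty regions map: only entries for the requested
   * table should appear in the output, with region names left unchanged.
   */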
  @Test
  public void testSplitLogs() throws IOException {
    Map<byte[], byte[]> regionsMap = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
    splitTestLogs(getTableName(5), regionsMap);
  }

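  /*
   * Split the log with a regions map that renames every region of the table:
   * the recovered edits should end up under the new region names.
   */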
  @Test
  public void testSplitLogsOnDifferentTable() throws IOException {
    TableName tableName = getTableName(1);
    Map<byte[], byte[]> regionsMap = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
    for (int j = 0; j < 10; ++j) {
      byte[] regionName = getRegionName(tableName, j);
      byte[] newRegionName = getNewRegionName(tableName, j);
      regionsMap.put(regionName, newRegionName);
    }
    splitTestLogs(tableName, regionsMap);
  }

  /*
   * Split the test log for the specified table and verify the recovered edits.
   */
  private void splitTestLogs(final TableName tableName,
                             final Map<byte[], byte[]> regionsMap) throws IOException {
    Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDataTestDir(), tableName);
    SnapshotLogSplitter logSplitter = new SnapshotLogSplitter(conf, fs, tableDir,
      tableName, regionsMap);
    try {
      logSplitter.splitLog(logFile);
    } finally {
      logSplitter.close();
    }
    verifyRecoverEdits(tableDir, tableName, regionsMap);
  }

  /*
   * Verify that every log in the table directory contains only the specified
   * table and regions.
   */
  private void verifyRecoverEdits(final Path tableDir, final TableName tableName,
      final Map<byte[], byte[]> regionsMap) throws IOException {
    for (FileStatus regionStatus: FSUtils.listStatus(fs, tableDir)) {
      assertTrue(regionStatus.getPath().getName().startsWith(tableName.getNameAsString()));
      Path regionEdits = HLogUtil.getRegionDirRecoveredEditsDir(regionStatus.getPath());
      byte[] regionName = Bytes.toBytes(regionStatus.getPath().getName());
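      // regionsMap maps original region names to new ones, so no output
      // directory should still carry one of the original (remapped) names.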
      assertFalse(regionsMap.containsKey(regionName));
      for (FileStatus logStatus: FSUtils.listStatus(fs, regionEdits)) {
        HLog.Reader reader = HLogFactory.createReader(fs, logStatus.getPath(), conf);
        try {
          HLog.Entry entry;
          while ((entry = reader.next()) != null) {
            HLogKey key = entry.getKey();
            assertEquals(tableName, key.getTablename());
            assertArrayEquals(regionName, key.getEncodedRegionName());
          }
        } finally {
          reader.close();
        }
      }
    }
  }

  /*
   * Write some entries to the log file:
   * 7 different tables named "testtb-%d",
   * 10 regions per table named "tableName-region-%d",
   * 50 entries per region with row keys "row-%d".
   */
  private void writeTestLog(final Path logFile) throws IOException {
    fs.mkdirs(logFile.getParent());
    HLog.Writer writer = HLogFactory.createWALWriter(fs, logFile, conf);
    try {
      for (int i = 0; i < 7; ++i) {
        TableName tableName = getTableName(i);
        for (int j = 0; j < 10; ++j) {
          byte[] regionName = getRegionName(tableName, j);
          for (int k = 0; k < 50; ++k) {
            byte[] rowkey = Bytes.toBytes("row-" + k);
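            // Use the loop index as the log sequence number; each edit stores
            // the row key itself under the test family/qualifier.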
            HLogKey key = new HLogKey(regionName, tableName, (long)k,
              System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
            WALEdit edit = new WALEdit();
            edit.add(new KeyValue(rowkey, TEST_FAMILY, TEST_QUALIFIER, rowkey));
            writer.append(new HLog.Entry(key, edit));
          }
        }
      }
    } finally {
      writer.close();
    }
  }

  private TableName getTableName(int tableId) {
    return TableName.valueOf("testtb-" + tableId);
  }

  private byte[] getRegionName(final TableName tableName, int regionId) {
    return Bytes.toBytes(tableName + "-region-" + regionId);
  }

  private byte[] getNewRegionName(final TableName tableName, int regionId) {
    return Bytes.toBytes(tableName + "-new-region-" + regionId);
  }
}