/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.*;

import java.security.Key;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.List;

import javax.crypto.spec.SecretKeySpec;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
import org.apache.hadoop.hbase.io.crypto.aes.AES;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

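/**
 * Tests rotation of transparent encryption keys: a column family data key can be
 * replaced through an online schema change followed by a major compaction, and the
 * cluster master (key-wrapping) key can be swapped for a new one while the old key
 * remains configured as an alternate for unwrapping existing store file keys.
 */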
@Category(MediumTests.class)
public class TestEncryptionKeyRotation {
  private static final Log LOG = LogFactory.getLog(TestEncryptionKeyRotation.class);
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final Configuration conf = TEST_UTIL.getConfiguration();
  private static final Key initialCFKey;
  private static final Key secondCFKey;
  static {
    // Create the test encryption keys
    SecureRandom rng = new SecureRandom();
    byte[] keyBytes = new byte[AES.KEY_LENGTH];
    rng.nextBytes(keyBytes);
    initialCFKey = new SecretKeySpec(keyBytes, "AES");
    rng.nextBytes(keyBytes);
    secondCFKey = new SecretKeySpec(keyBytes, "AES");
  }

  @BeforeClass
  public static void setUp() throws Exception {
    conf.setInt("hfile.format.version", 3);
    conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
    conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
    // Enable online schema updates
    conf.setBoolean("hbase.online.schema.update.enable", true);

    // Start the minicluster
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

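  /**
   * Rotates the column family key: writes data under the initial key, swaps in the
   * second key via an online schema change, and verifies that after a major
   * compaction every remaining store file is encrypted with the new key only.
   */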
  @Test
  public void testCFKeyRotation() throws Exception {
    // Create the table schema
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("default",
      "testCFKeyRotation"));
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    hcd.setEncryptionType("AES");
    hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", initialCFKey));
    htd.addFamily(hcd);

    // Create the table and some on disk files
    createTableAndFlush(htd);

    // Verify we have store file(s) with the initial key
    final List<Path> initialPaths = findStorefilePaths(htd.getTableName());
    assertTrue(initialPaths.size() > 0);
    for (Path path: initialPaths) {
      assertTrue("Store file " + path + " has incorrect key",
        Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
    }

    // Update the schema with a new encryption key
    hcd = htd.getFamily(Bytes.toBytes("cf"));
    hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf,
      conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()),
      secondCFKey));
    TEST_UTIL.getHBaseAdmin().modifyColumn(htd.getName(), hcd);
    Thread.sleep(5000); // Need a predicate for online schema change

    // And major compact
    TEST_UTIL.getHBaseAdmin().majorCompact(htd.getName());
    TEST_UTIL.waitFor(30000, 1000, true, new Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        // When compaction has finished, all of the original files will be
        // gone
        boolean found = false;
        for (Path path: initialPaths) {
          found = TEST_UTIL.getTestFileSystem().exists(path);
          if (found) {
            LOG.info("Found " + path);
            break;
          }
        }
        return !found;
      }
    });

    // Verify we have store file(s) with only the new key
    List<Path> pathsAfterCompaction = findStorefilePaths(htd.getTableName());
    assertTrue(pathsAfterCompaction.size() > 0);
    for (Path path: pathsAfterCompaction) {
      assertFalse("Store file " + path + " retains initial key",
        Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
      assertTrue("Store file " + path + " has incorrect key",
        Bytes.equals(secondCFKey.getEncoded(), extractHFileKey(path)));
    }
  }

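  /**
   * Rotates the cluster master key: writes data wrapped with the "hbase" master key,
   * restarts the cluster with a new master key configured and the old one registered
   * as the alternate, and verifies the existing store file keys can still be unwrapped.
   */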
  @Test
  public void testMasterKeyRotation() throws Exception {
    // Create the table schema
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("default",
      "testMasterKeyRotation"));
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    hcd.setEncryptionType("AES");
    hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", initialCFKey));
    htd.addFamily(hcd);

    // Create the table and some on disk files
    createTableAndFlush(htd);

    // Verify we have store file(s) with the initial key
    List<Path> storeFilePaths = findStorefilePaths(htd.getTableName());
    assertTrue(storeFilePaths.size() > 0);
    for (Path path: storeFilePaths) {
      assertTrue("Store file " + path + " has incorrect key",
        Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
    }

    // Now shut down the HBase cluster
    TEST_UTIL.shutdownMiniHBaseCluster();

    // "Rotate" the master key
    conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "other");
    conf.set(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY, "hbase");

    // Start the cluster back up
    TEST_UTIL.startMiniHBaseCluster(1, 1);
    // Verify the table can still be loaded
    TEST_UTIL.waitTableAvailable(htd.getName(), 5000);
    // Double check that the store file keys can be unwrapped
    storeFilePaths = findStorefilePaths(htd.getTableName());
    assertTrue(storeFilePaths.size() > 0);
    for (Path path: storeFilePaths) {
      assertTrue("Store file " + path + " has incorrect key",
        Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
    }
  }

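  /**
   * Collects the paths of all store files currently online for the given table on
   * the region server hosting its first region.
   */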
  private static List<Path> findStorefilePaths(TableName tableName) throws Exception {
    List<Path> paths = new ArrayList<Path>();
    for (HRegion region:
        TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(tableName)) {
      for (Store store: region.getStores().values()) {
        for (StoreFile storefile: store.getStorefiles()) {
          paths.add(storefile.getPath());
        }
      }
    }
    return paths;
  }

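  /**
   * Creates the table described by the given descriptor, writes a single row to its
   * first column family, and flushes so that at least one encrypted store file
   * exists on disk.
   */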
  private void createTableAndFlush(HTableDescriptor htd) throws Exception {
    HColumnDescriptor hcd = htd.getFamilies().iterator().next();
    // Create the test table
    TEST_UTIL.getHBaseAdmin().createTable(htd);
    TEST_UTIL.waitTableAvailable(htd.getName(), 5000);
    // Create a store file
    HTable table = new HTable(conf, htd.getName());
    try {
      table.put(new Put(Bytes.toBytes("testrow"))
        .add(hcd.getName(), Bytes.toBytes("q"), Bytes.toBytes("value")));
    } finally {
      table.close();
    }
    TEST_UTIL.getHBaseAdmin().flush(htd.getName());
  }

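  /**
   * Opens the HFile at the given path and returns the raw bytes of the data
   * encryption key recorded in its encryption context, asserting that the file
   * actually carries one.
   */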
  private static byte[] extractHFileKey(Path path) throws Exception {
    HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
      new CacheConfig(conf), conf);
    try {
      reader.loadFileInfo();
      Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
      assertNotNull("Reader has a null crypto context", cryptoContext);
      Key key = cryptoContext.getKey();
      assertNotNull("Crypto context has no key", key);
      return key.getEncoded();
    } finally {
      reader.close();
    }
  }

}