/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.*;

import java.security.Key;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.List;

import javax.crypto.spec.SecretKeySpec;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
import org.apache.hadoop.hbase.io.crypto.aes.AES;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

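/**
 * Tests that the keys used for HFile encryption can be rotated without losing access to
 * previously written data: rotating the column family data key via an online schema change,
 * and rotating the cluster master (key-wrapping) key across a cluster restart.
 */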
@Category(MediumTests.class)
public class TestEncryptionKeyRotation {
  private static final Log LOG = LogFactory.getLog(TestEncryptionKeyRotation.class);
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final Configuration conf = TEST_UTIL.getConfiguration();
  private static final Key initialCFKey;
  private static final Key secondCFKey;
  static {
    // Create the test encryption keys
    SecureRandom rng = new SecureRandom();
    byte[] keyBytes = new byte[AES.KEY_LENGTH];
    rng.nextBytes(keyBytes);
    initialCFKey = new SecretKeySpec(keyBytes, "AES");
    rng.nextBytes(keyBytes);
    secondCFKey = new SecretKeySpec(keyBytes, "AES");
  }

  @BeforeClass
  public static void setUp() throws Exception {
    conf.setInt("hfile.format.version", 3);
    conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
    conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
    // Enable online schema updates
    conf.setBoolean("hbase.online.schema.update.enable", true);

    // Start the minicluster
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

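  /**
   * Rotate the column family key: create an encrypted table, flush store files written with
   * the initial key, switch the family to a second key via an online schema change, major
   * compact, and verify the rewritten store files are encrypted only with the new key.
   */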
  @Test
  public void testCFKeyRotation() throws Exception {
    // Create the table schema
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("default",
      "testCFKeyRotation"));
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    hcd.setEncryptionType("AES");
    hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", initialCFKey));
    htd.addFamily(hcd);

    // Create the table and some on disk files
    createTableAndFlush(htd);

    // Verify we have store file(s) with the initial key
    final List<Path> initialPaths = findStorefilePaths(htd.getTableName());
    assertTrue(initialPaths.size() > 0);
    for (Path path: initialPaths) {
      assertTrue("Store file " + path + " has incorrect key",
        Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
    }

    // Update the schema with a new encryption key
    hcd = htd.getFamily(Bytes.toBytes("cf"));
    hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf,
      conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()),
      secondCFKey));
    TEST_UTIL.getHBaseAdmin().modifyColumn(htd.getTableName(), hcd);
    Thread.sleep(5000); // Need a predicate for online schema change

    // And major compact
    TEST_UTIL.getHBaseAdmin().majorCompact(htd.getTableName());
    TEST_UTIL.waitFor(30000, 1000, true, new Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        // When compaction has finished, all of the original files will be gone
        boolean found = false;
        for (Path path: initialPaths) {
          found = TEST_UTIL.getTestFileSystem().exists(path);
          if (found) {
            LOG.info("Found " + path);
            break;
          }
        }
        return !found;
      }
    });

    // Verify we have store file(s) with only the new key
    List<Path> pathsAfterCompaction = findStorefilePaths(htd.getTableName());
    assertTrue(pathsAfterCompaction.size() > 0);
    for (Path path: pathsAfterCompaction) {
      assertFalse("Store file " + path + " retains initial key",
        Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
      assertTrue("Store file " + path + " has incorrect key",
        Bytes.equals(secondCFKey.getEncoded(), extractHFileKey(path)));
    }
  }

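  /**
   * Rotate the cluster master (key-wrapping) key: write store files wrapped with the "hbase"
   * master key, restart the cluster with a new master key name and the old name configured as
   * the alternate, and verify the store file data keys can still be unwrapped and matched.
   */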
  @Test
  public void testMasterKeyRotation() throws Exception {
    // Create the table schema
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("default",
      "testMasterKeyRotation"));
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    hcd.setEncryptionType("AES");
    hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", initialCFKey));
    htd.addFamily(hcd);

    // Create the table and some on disk files
    createTableAndFlush(htd);

    // Verify we have store file(s) with the initial key
    List<Path> storeFilePaths = findStorefilePaths(htd.getTableName());
    assertTrue(storeFilePaths.size() > 0);
    for (Path path: storeFilePaths) {
      assertTrue("Store file " + path + " has incorrect key",
        Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
    }

    // Now shut down the HBase cluster
    TEST_UTIL.shutdownMiniHBaseCluster();

    // "Rotate" the master key
    conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "other");
    conf.set(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY, "hbase");

    // Start the cluster back up
    TEST_UTIL.startMiniHBaseCluster(1, 1);
    // Verify the table can still be loaded
    TEST_UTIL.waitTableAvailable(htd.getName(), 5000);
    // Double check that the store file keys can be unwrapped
    storeFilePaths = findStorefilePaths(htd.getTableName());
    assertTrue(storeFilePaths.size() > 0);
    for (Path path: storeFilePaths) {
      assertTrue("Store file " + path + " has incorrect key",
        Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
    }
  }

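  /** Collects the paths of all store files for the online regions of the given table. */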
  private static List<Path> findStorefilePaths(TableName tableName) throws Exception {
    List<Path> paths = new ArrayList<Path>();
    for (HRegion region:
        TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(tableName)) {
      for (Store store: region.getStores().values()) {
        for (StoreFile storefile: store.getStorefiles()) {
          paths.add(storefile.getPath());
        }
      }
    }
    return paths;
  }

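  /** Creates the table described by the descriptor, writes one row, and flushes it to disk. */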
  private void createTableAndFlush(HTableDescriptor htd) throws Exception {
    HColumnDescriptor hcd = htd.getFamilies().iterator().next();
    // Create the test table
    TEST_UTIL.getHBaseAdmin().createTable(htd);
    TEST_UTIL.waitTableAvailable(htd.getName(), 5000);
    // Create a store file
    Table table = new HTable(conf, htd.getTableName());
    try {
      table.put(new Put(Bytes.toBytes("testrow"))
        .add(hcd.getName(), Bytes.toBytes("q"), Bytes.toBytes("value")));
    } finally {
      table.close();
    }
    TEST_UTIL.getHBaseAdmin().flush(htd.getTableName());
  }

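  /** Opens the HFile at the given path and returns the encoded key from its encryption context. */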
  private static byte[] extractHFileKey(Path path) throws Exception {
    HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
      new CacheConfig(conf), conf);
    try {
      reader.loadFileInfo();
      Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
      assertNotNull("Reader has a null crypto context", cryptoContext);
      Key key = cryptoContext.getKey();
      assertNotNull("Crypto context has no key", key);
      return key.getEncoded();
    } finally {
      reader.close();
    }
  }

}