View Javadoc

1   /**
2    * Copyright The Apache Software Foundation
3    *
4    * Licensed to the Apache Software Foundation (ASF) under one
5    * or more contributor license agreements.  See the NOTICE file
6    * distributed with this work for additional information
7    * regarding copyright ownership.  The ASF licenses this file
8    * to you under the Apache License, Version 2.0 (the
9    * "License"); you may not use this file except in compliance
10   * with the License.  You may obtain a copy of the License at
11   *
12   *     http://www.apache.org/licenses/LICENSE-2.0
13   *
14   * Unless required by applicable law or agreed to in writing, software
15   * distributed under the License is distributed on an "AS IS" BASIS,
16   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17   * See the License for the specific language governing permissions and
18   * limitations under the License.
19   */
20  package org.apache.hadoop.hbase.migration;
21  
22  import static org.junit.Assert.assertEquals;
23  import static org.junit.Assert.assertTrue;
24  import static org.junit.Assert.assertFalse;
25  
26  import java.io.File;
27  import java.io.IOException;
28  import java.util.ArrayList;
29  import java.util.List;
30  
31  import org.apache.commons.logging.Log;
32  import org.apache.commons.logging.LogFactory;
33  import org.apache.hadoop.conf.Configuration;
34  import org.apache.hadoop.fs.FileSystem;
35  import org.apache.hadoop.fs.FileUtil;
36  import org.apache.hadoop.fs.FsShell;
37  import org.apache.hadoop.fs.Path;
38  import org.apache.hadoop.hbase.HBaseTestingUtility;
39  import org.apache.hadoop.hbase.HConstants;
40  import org.apache.hadoop.hbase.MediumTests;
41  import org.apache.hadoop.hbase.NamespaceDescriptor;
42  import org.apache.hadoop.hbase.TableName;
43  import org.apache.hadoop.hbase.Waiter;
44  import org.apache.hadoop.hbase.client.HTable;
45  import org.apache.hadoop.hbase.client.Result;
46  import org.apache.hadoop.hbase.client.ResultScanner;
47  import org.apache.hadoop.hbase.client.Scan;
48  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
49  import org.apache.hadoop.hbase.regionserver.HRegion;
50  import org.apache.hadoop.hbase.security.access.AccessControlLists;
51  import org.apache.hadoop.hbase.util.Bytes;
52  import org.apache.hadoop.hbase.util.FSUtils;
53  import org.apache.hadoop.util.ToolRunner;
54  import org.junit.AfterClass;
55  import org.junit.Assert;
56  import org.junit.BeforeClass;
57  import org.junit.Test;
58  import org.junit.experimental.categories.Category;
59  
/**
 * Test upgrade from no namespace in 0.94 to namespace directory structure.
 * Mainly tests that tables are migrated and consistent. Also verifies
 * that snapshots have been migrated correctly.
 *
 * <p>Uses a tarball which is an image of an 0.94 hbase.rootdir.
 *
 * <p>Contains tables with currentKeys as the stored keys:
 * foo, ns1.foo, ns.two.foo
 *
 * <p>Contains snapshots with snapshot{num}Keys as the contents:
 * snapshot1Keys, snapshot2Keys
 *
 * Image also contains _acl_ table with one region and two storefiles.
 * This is needed to test the acl table migration.
 *
 * <p>NOTE: the test methods share cluster state created once in
 * {@link #setUpBeforeClass()}; they are written to be order-independent
 * (each uses its own namespaces/clones), but all depend on the migrated
 * image being in place.
 */
@Category(MediumTests.class)
public class TestNamespaceUpgrade {
  static final Log LOG = LogFactory.getLog(TestNamespaceUpgrade.class);
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  // Row keys stored in each table as captured by the two snapshots in the
  // 0.94 image. snapshot1 has ten rows ("10" sorts between "1" and "2"),
  // snapshot2 has nine.
  private final static String snapshot1Keys[] =
      {"1","10","2","3","4","5","6","7","8","9"};
  private final static String snapshot2Keys[] =
      {"1","2","3","4","5","6","7","8","9"};
  // Row keys present in the live tables of the image (post-snapshot state).
  private final static String currentKeys[] =
      {"1","2","3","4","5","6","7","8","9","A"};
  // Table names inside the 0.94 image. Dots were legal in 0.94 table names;
  // the migration must keep these names intact (they are NOT split into
  // namespaces by the upgrade).
  private final static String tables[] = {"foo", "ns1.foo","ns.two.foo"};

  /**
   * One-shot, order-sensitive migration setup shared by all tests:
   * start ZK + DFS, untar the 0.94 image locally, fix up the snapshot dir
   * for Hadoop 2.x, copy the image into HDFS, run the NamespaceUpgrade tool,
   * then start HBase on the migrated rootdir and sanity-check the result.
   */
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Start up our mini cluster on top of an 0.94 root.dir that has data from
    // a 0.94 hbase run and see if we can migrate to 0.96
    TEST_UTIL.startMiniZKCluster();
    TEST_UTIL.startMiniDFSCluster(1);
    Path testdir = TEST_UTIL.getDataTestDir("TestNamespaceUpgrade");
    // Untar our test dir.
    File untar = untar(new File(testdir.toString()));
    // Now copy the untar up into hdfs so when we start hbase, we'll run from it.
    Configuration conf = TEST_UTIL.getConfiguration();
    FsShell shell = new FsShell(conf);
    FileSystem fs = FileSystem.get(conf);
    // find where hbase will root itself, so we can copy filesystem there
    Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
    if (!fs.isDirectory(hbaseRootDir.getParent())) {
      // mkdir at first
      fs.mkdirs(hbaseRootDir.getParent());
    }
    // The snapshot directory name changed between Hadoop 1.x and 2.x
    // expectations; rename it in the local untarred image BEFORE the
    // -put below so the copy lands with the right layout.
    if(org.apache.hadoop.util.VersionInfo.getVersion().startsWith("2.")) {
      LOG.info("Hadoop version is 2.x, pre-migrating snapshot dir");
      FileSystem localFS = FileSystem.getLocal(conf);
      if(!localFS.rename(new Path(untar.toString(), HConstants.OLD_SNAPSHOT_DIR_NAME),
          new Path(untar.toString(), HConstants.SNAPSHOT_DIR_NAME))) {
        throw new IllegalStateException("Failed to move snapshot dir to 2.x expectation");
      }
    }
    doFsCommand(shell,
      new String [] {"-put", untar.toURI().toString(), hbaseRootDir.toString()});
    // See what's in minihdfs.
    doFsCommand(shell, new String [] {"-lsr", "/"});
    // NOTE(review): toolConf aliases conf (getConfiguration() returns the
    // same instance), so the conf.set below is visible to the tool run.
    Configuration toolConf = TEST_UTIL.getConfiguration();
    conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDefaultRootDirPath().toString());
    // Run the actual migration under test.
    ToolRunner.run(toolConf, new NamespaceUpgrade(), new String[]{"--upgrade"});
    // Upgrade must have stamped the rootdir with the current FS version.
    assertTrue(FSUtils.getVersion(fs, hbaseRootDir).equals(HConstants.FILE_SYSTEM_VERSION));
    doFsCommand(shell, new String [] {"-lsr", "/"});
    // Only now is it safe to start HBase on the migrated layout.
    TEST_UTIL.startMiniHBaseCluster(1, 1);

    // Every migrated table must still scan out exactly currentKeys, in order.
    for(String table: tables) {
      int count = 0;
      for(Result res: new HTable(TEST_UTIL.getConfiguration(), table).getScanner(new Scan())) {
        assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
      }
      Assert.assertEquals(currentKeys.length, count);
    }
    // Presumably the two expected namespaces are the built-in default and
    // system ones (the dotted table names above stay un-namespaced) —
    // TODO(review): confirm against NamespaceUpgrade semantics.
    assertEquals(2, TEST_UTIL.getHBaseAdmin().listNamespaceDescriptors().length);

    //verify ACL table is migrated
    HTable secureTable = new HTable(conf, AccessControlLists.ACL_TABLE_NAME);
    ResultScanner scanner = secureTable.getScanner(new Scan());
    int count = 0;
    for(Result r : scanner) {
      count++;
    }
    // Three ACL rows are expected from the image's _acl_ table content.
    assertEquals(3, count);
    // The old pre-namespace "_acl_" name must no longer exist.
    assertFalse(TEST_UTIL.getHBaseAdmin().tableExists("_acl_"));

    //verify ACL table was compacted
    // (image ships two storefiles; migration should leave one store per
    // region after compaction)
    List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(secureTable.getName());
    for(HRegion region : regions) {
      assertEquals(1, region.getStores().size());
    }
  }

  /**
   * Untars the bundled 0.94 rootdir image (src/test/data/TestNamespaceUpgrade.tgz)
   * into {@code testdir}, deleting any previous extraction first.
   *
   * @param testdir directory to extract into
   * @return the extracted "hbase" rootdir
   * @throws IOException if the tarball is missing or a stale dir can't be removed
   */
  private static File untar(final File testdir) throws IOException {
    // Find the src data under src/test/data
    final String datafile = "TestNamespaceUpgrade";
    File srcTarFile = new File(
      System.getProperty("project.build.testSourceDirectory", "src/test") +
      File.separator + "data" + File.separator + datafile + ".tgz");
    File homedir = new File(testdir.toString());
    File tgtUntarDir = new File(homedir, "hbase");
    if (tgtUntarDir.exists()) {
      if (!FileUtil.fullyDelete(tgtUntarDir)) {
        throw new IOException("Failed delete of " + tgtUntarDir.toString());
      }
    }
    if (!srcTarFile.exists()) {
      throw new IOException(srcTarFile+" does not exist");
    }
    LOG.info("Untarring " + srcTarFile + " into " + homedir.toString());
    FileUtil.unTar(srcTarFile, homedir);
    Assert.assertTrue(tgtUntarDir.exists());
    return tgtUntarDir;
  }

  /**
   * Runs an FsShell command, turning a non-zero exit code into an IOException.
   */
  private static void doFsCommand(final FsShell shell, final String [] args)
  throws Exception {
    // Run the 'put' command.
    int errcode = shell.run(args);
    if (errcode != 0) throw new IOException("Failed put; errcode=" + errcode);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Verify both pre-migration snapshots are usable post-migration: clone
   * each and check the clone scans out exactly the keys the snapshot held.
   */
  @Test
  public void testSnapshots() throws IOException, InterruptedException {
    String snapshots[][] = {snapshot1Keys, snapshot2Keys};
    // i is 1-based to match the "<table>_snapshot<i>" naming in the image.
    for(int i = 1; i <= snapshots.length; i++) {
      for(String table: tables) {
        TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot"+i, table+"_clone"+i);
        FSUtils.logFileSystemState(FileSystem.get(TEST_UTIL.getConfiguration()),
            FSUtils.getRootDir(TEST_UTIL.getConfiguration()),
            LOG);
        int count = 0;
        for(Result res: new HTable(TEST_UTIL.getConfiguration(), table+"_clone"+i).getScanner(new
            Scan())) {
          assertEquals(snapshots[i-1][count++], Bytes.toString(res.getRow()));
        }
        Assert.assertEquals(table+"_snapshot"+i, snapshots[i-1].length, count);
      }
    }
  }

  /**
   * Verify snapshot/clone round-trips work across namespaces post-migration:
   * snapshot each migrated table, clone it into a new namespace, check
   * contents, compact the clone, then snapshot/clone a second hop into
   * another namespace and check again.
   */
  @Test
  public void testRenameUsingSnapshots() throws Exception {
    String newNS = "newNS";
    TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(newNS).build());
    for(String table: tables) {
      // Original table must still hold exactly currentKeys.
      int count = 0;
      for(Result res: new HTable(TEST_UTIL.getConfiguration(), table).getScanner(new
          Scan())) {
        assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
      }
      TEST_UTIL.getHBaseAdmin().snapshot(table + "_snapshot3", table);
      final String newTableName = newNS + TableName.NAMESPACE_DELIM + table + "_clone3";
      TEST_UTIL.getHBaseAdmin().cloneSnapshot(table + "_snapshot3", newTableName);
      // NOTE(review): fixed sleep to let the clone settle before scanning.
      Thread.sleep(1000);
      count = 0;
      for(Result res: new HTable(TEST_UTIL.getConfiguration(), newTableName).getScanner(new
          Scan())) {
        assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
      }
      FSUtils.logFileSystemState(TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath()
          , LOG);
      Assert.assertEquals(newTableName, currentKeys.length, count);
      // Flush + major-compact the clone, then wait (up to 2s) for the
      // compaction to report NONE before taking the next snapshot of it.
      TEST_UTIL.getHBaseAdmin().flush(newTableName);
      TEST_UTIL.getHBaseAdmin().majorCompact(newTableName);
      TEST_UTIL.waitFor(2000, new Waiter.Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          try {
            return TEST_UTIL.getHBaseAdmin().getCompactionState(newTableName) ==
                AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
          } catch (InterruptedException e) {
            throw new IOException(e);
          }
        }
      });
    }

    // Second hop: snapshot each newNS clone and clone that into nextNS.
    String nextNS = "nextNS";
    TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(nextNS).build());
    for(String table: tables) {
      String srcTable = newNS + TableName.NAMESPACE_DELIM + table + "_clone3";
      TEST_UTIL.getHBaseAdmin().snapshot(table + "_snapshot4", srcTable);
      String newTableName = nextNS + TableName.NAMESPACE_DELIM + table + "_clone4";
      TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot4", newTableName);
      FSUtils.logFileSystemState(TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(),
        LOG);
      int count = 0;
      for(Result res: new HTable(TEST_UTIL.getConfiguration(), newTableName).getScanner(new
          Scan())) {
        assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
      }
      Assert.assertEquals(newTableName, currentKeys.length, count);
    }
  }

  /**
   * Post-migration, the old non-user-table dirs that the upgrade renames
   * must be gone from the rootdir (the ones the upgrade leaves in place are
   * excluded from the check).
   */
  @Test
  public void testOldDirsAreGonePostMigration() throws IOException {
    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
    List <String> dirs = new ArrayList<String>(NamespaceUpgrade.NON_USER_TABLE_DIRS);
    // Remove those that are not renamed
    dirs.remove(HConstants.HBCK_SIDELINEDIR_NAME);
    dirs.remove(HConstants.SNAPSHOT_DIR_NAME);
    dirs.remove(HConstants.HBASE_TEMP_DIRECTORY);
    for (String dir: dirs) {
      assertFalse(fs.exists(new Path(hbaseRootDir, dir)));
    }
  }

  /**
   * Post-migration, the new layout dirs (namespace data dir, WAL dir) must
   * exist under the rootdir.
   */
  @Test
  public void testNewDirsArePresentPostMigration() throws IOException {
    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    // Below list does not include 'corrupt' because there is no 'corrupt' in the tgz
    String [] newdirs = new String [] {HConstants.BASE_NAMESPACE_DIR,
      HConstants.HREGION_LOGDIR_NAME};
    Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
    for (String dir: newdirs) {
      assertTrue(dir, fs.exists(new Path(hbaseRootDir, dir)));
    }
  }
}