/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.util.ReflectionUtils;

/**
 * Abstract base class for HBase cluster JUnit tests.  Spins up an HBase
 * cluster in {@link #setUp()} and tears it down again in {@link #tearDown()}.
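 * <p>
 * A minimal usage sketch follows; the subclass name is hypothetical and only
 * illustrates the intended shape of a test built on this base class:
 * <pre>
 *   public class TestMyFeature extends HBaseClusterTestCase {
 *     public TestMyFeature() {
 *       super(1);  // one in-process region server, plus a MiniDFSCluster
 *     }
 *
 *     public void testClusterIsUp() throws Exception {
 *       // setUp() has already started the mini cluster; use the inherited conf
 *       new HTable(conf, HConstants.META_TABLE_NAME);
 *     }
 *   }
 * </pre>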
 * @deprecated Use junit4 and {@link HBaseTestingUtility}
 */
public abstract class HBaseClusterTestCase extends HBaseTestCase {
  private static final Log LOG = LogFactory.getLog(HBaseClusterTestCase.class);
  public MiniHBaseCluster cluster;
  protected MiniDFSCluster dfsCluster;
  protected MiniZooKeeperCluster zooKeeperCluster;
  protected int regionServers;
  protected boolean startDfs;
  private boolean openMetaTable = true;

  /** default constructor */
  public HBaseClusterTestCase() {
    this(1);
  }

  /**
   * Start a MiniHBaseCluster with <code>regionServers</code> region servers
   * in-process. Also starts a MiniDFSCluster before starting the HBase
   * cluster. The configuration used will be edited so that this works
   * correctly.
   * @param regionServers number of region servers to start.
   */
  public HBaseClusterTestCase(int regionServers) {
    this(regionServers, true);
  }

  /**
   * Start a MiniHBaseCluster with <code>regionServers</code> region servers
   * in-process. Optionally, startDfs indicates if a MiniDFSCluster should be
   * started. If startDfs is false, the assumption is that an external DFS is
   * configured in hbase-site.xml and is already started, or you have started a
   * MiniDFSCluster on your own and edited the configuration in memory. (You
   * can modify the config used by overriding the preHBaseClusterSetup method.)
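   * <p>
   * A rough sketch of such an override; the DFS URI below is a hypothetical
   * placeholder, not a value this class supplies:
   * <pre>
   *   protected void preHBaseClusterSetup() throws Exception {
   *     // point HBase at a DFS that was started outside this test case
   *     conf.set("fs.defaultFS", "hdfs://localhost:8020");
   *     conf.set(HConstants.HBASE_DIR, "hdfs://localhost:8020/hbase");
   *   }
   * </pre>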
   * @param regionServers number of region servers to start.
   * @param startDfs set to true if MiniDFS should be started
   */
  public HBaseClusterTestCase(int regionServers, boolean startDfs) {
    super();
    this.startDfs = startDfs;
    this.regionServers = regionServers;
  }

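  /**
   * Set whether the META table should be opened once the mini cluster is up,
   * as a check that the cluster is actually running.  If disabled here, the
   * META table is opened in {@link #tearDown()} instead, before shutdown.
   * @param val true (the default) to open the META table during setup
   */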
  protected void setOpenMetaTable(boolean val) {
    openMetaTable = val;
  }

  /**
   * Subclass hook.
   *
   * Run after dfs is ready but before hbase cluster is started up.
   */
  protected void preHBaseClusterSetup() throws Exception {
    // continue
  }

  /**
   * Actually start the MiniZooKeeperCluster and the MiniHBaseCluster.
   */
  protected void hBaseClusterSetup() throws Exception {
    File testDir = new File(getUnitTestdir(getName()).toString());

    // Note that this is done before we create the MiniHBaseCluster because we
    // need to edit the config to add the ZooKeeper servers.
    this.zooKeeperCluster = new MiniZooKeeperCluster();
    int clientPort = this.zooKeeperCluster.startup(testDir);
    conf.set("hbase.zookeeper.property.clientPort", Integer.toString(clientPort));

    // start the mini cluster
    this.cluster = new MiniHBaseCluster(conf, regionServers);

    if (openMetaTable) {
      // opening the META table ensures that cluster is running
      new HTable(conf, HConstants.META_TABLE_NAME);
    }
  }

  /**
   * Run after hbase cluster is started up.
   */
  protected void postHBaseClusterSetup() throws Exception {
    // continue
  }

  @Override
  protected void setUp() throws Exception {
    try {
      if (this.startDfs) {
        // This spews a bunch of warnings about missing scheme. TODO: fix.
        this.dfsCluster = new MiniDFSCluster(0, this.conf, 2, true, true, true,
          null, null, null, null);

        // mangle the conf so that the fs parameter points to the minidfs we
        // just started up
        FileSystem filesystem = dfsCluster.getFileSystem();
        conf.set("fs.defaultFS", filesystem.getUri().toString());
        Path parentdir = filesystem.getHomeDirectory();
        conf.set(HConstants.HBASE_DIR, parentdir.toString());
        filesystem.mkdirs(parentdir);
        FSUtils.setVersion(filesystem, parentdir);
      }

      // do the super setup now. if we had done it first, then we would have
      // gotten our conf all mangled and a local fs started up.
      super.setUp();

      // run the pre-cluster setup
      preHBaseClusterSetup();

      // start the instance
      hBaseClusterSetup();

      // run post-cluster setup
      postHBaseClusterSetup();
    } catch (Exception e) {
      LOG.error("Exception in setup!", e);
      if (cluster != null) {
        cluster.shutdown();
      }
      if (zooKeeperCluster != null) {
        zooKeeperCluster.shutdown();
      }
      if (dfsCluster != null) {
        shutdownDfs(dfsCluster);
      }
      throw e;
    }
  }

  @Override
  protected void tearDown() throws Exception {
    if (!openMetaTable) {
      // open the META table now to ensure cluster is running before shutdown.
      new HTable(conf, HConstants.META_TABLE_NAME);
    }
    super.tearDown();
    try {
      HConnectionManager.deleteConnectionInfo(conf, true);
      if (this.cluster != null) {
        try {
          this.cluster.shutdown();
        } catch (Exception e) {
          LOG.warn("Closing mini hbase cluster", e);
        }
        try {
          this.zooKeeperCluster.shutdown();
        } catch (IOException e) {
          LOG.warn("Shutting down ZooKeeper cluster", e);
        }
      }
      if (startDfs) {
        shutdownDfs(dfsCluster);
      }
    } catch (Exception e) {
      LOG.error(e);
    }
    // ReflectionUtils.printThreadInfo(new PrintWriter(System.out),
    //  "Temporary end-of-test thread dump debugging HADOOP-2040: " + getName());
  }

  /**
   * Use this utility method to debug why the cluster will not go down.
   * Periodically it prints a thread dump.  The method ends when all cluster
   * regionservers and master threads are no longer alive.
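   * <p>
   * A typical (hypothetical) use is to call it after asking the mini cluster
   * to stop, to see which threads are keeping it alive:
   * <pre>
   *   this.cluster.shutdown();
   *   threadDumpingJoin();
   * </pre>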
   */
  public void threadDumpingJoin() {
    if (this.cluster.getRegionServerThreads() != null) {
      for (Thread t: this.cluster.getRegionServerThreads()) {
        threadDumpingJoin(t);
      }
    }
    threadDumpingJoin(this.cluster.getMaster());
  }

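  /**
   * Join on the passed thread, printing a full thread dump to stdout every 60
   * seconds until the thread exits.
   * @param t thread to join on; a null thread is ignored.
   */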
  protected void threadDumpingJoin(final Thread t) {
    if (t == null) {
      return;
    }
    long startTime = System.currentTimeMillis();
    while (t.isAlive()) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        LOG.info("Continuing...", e);
      }
      if (System.currentTimeMillis() - startTime > 60000) {
        startTime = System.currentTimeMillis();
        ReflectionUtils.printThreadInfo(new PrintWriter(System.out),
            "Automatic Stack Trace every 60 seconds waiting on " +
            t.getName());
      }
    }
  }
}