
/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hdfs.DistributedFileSystem;

import java.net.URI;

/**
 * Compression validation test.  Checks that compression is working.  Be sure
 * to run it on every node in your cluster.
 */
public class CompressionTest {
  // Scratch HFile used for the write/read round trip; the relative path is
  // resolved against the filesystem's working directory.
  protected static Path path = new Path(".hfile-comp-test");

  public static void usage() {
    System.err.println("Usage: CompressionTest HDFS_PATH none|gz|lzo");
    System.exit(1);
  }

  /** Opens a DistributedFileSystem for the given HDFS URI using a default Configuration. */
  protected static DistributedFileSystem openConnection(String urlString)
  throws java.net.URISyntaxException, java.io.IOException {
    URI dfsUri = new URI(urlString);
    Configuration dfsConf = new Configuration();
    DistributedFileSystem dfs = new DistributedFileSystem();
    dfs.initialize(dfsUri, dfsConf);
    return dfs;
  }

  /** Closes the filesystem; returns true if it closed cleanly or was already null. */
  protected static boolean closeConnection(DistributedFileSystem dfs) {
    if (dfs != null) {
      try {
        dfs.close();
        dfs = null;
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
    return dfs == null;
  }

  public static void main(String[] args) {
    if (args.length != 2) usage();
    try {
      DistributedFileSystem dfs = openConnection(args[0]);
      dfs.delete(path, false);
      // Write a single key/value pair with the requested compression codec.
      HFile.Writer writer = new HFile.Writer(dfs, path,
        HFile.DEFAULT_BLOCKSIZE, args[1], null);
      writer.append(Bytes.toBytes("testkey"), Bytes.toBytes("testval"));
      writer.appendFileInfo(Bytes.toBytes("infokey"), Bytes.toBytes("infoval"));
      writer.close();

      // Read the file back and check that the key survived the round trip.
      HFile.Reader reader = new HFile.Reader(dfs, path, null, false);
      reader.loadFileInfo();
      byte[] key = reader.getFirstKey();
      boolean rc = Bytes.toString(key).equals("testkey");
      reader.close();

      dfs.delete(path, false);
      closeConnection(dfs);

      if (rc) System.exit(0);
    } catch (Exception e) {
      e.printStackTrace();
    }
    System.out.println("FAILED");
    System.exit(1);
  }
}
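
For reference, a minimal sketch of how the test might be driven in-process. The NameNode URI and codec name below are placeholder assumptions; on a real cluster the class is normally launched as its own process on each node, since main() calls System.exit().

// Hypothetical driver class; not part of the listing above.
public class CompressionTestDriver {
  public static void main(String[] args) {
    // Placeholder NameNode URI and codec name; adjust to match your cluster.
    org.apache.hadoop.hbase.util.CompressionTest.main(
        new String[] { "hdfs://namenode:9000", "gz" });
  }
}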