/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.compress.Compressor;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
/**
 * Compression validation test.  Checks compression is working.  Be sure to run
 * on every node in your cluster.
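 *
 * <p>Example invocation (the host, port, and filesystem URI are illustrative):
 * <pre>
 *   $ hbase org.apache.hadoop.hbase.util.CompressionTest hdfs://namenode:8020/ gz
 * </pre>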
 */
public class CompressionTest {
  static final Log LOG = LogFactory.getLog(CompressionTest.class);

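  /**
   * Checks that the named codec can be resolved and instantiated on this node.
   * Resolution and instantiation failures are logged and reported as a false
   * return rather than an exception.
   */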
  public static boolean testCompression(String codec) {
    codec = codec.toLowerCase();

    Compression.Algorithm a;

    try {
      a = Compression.getCompressionAlgorithmByName(codec);
    } catch (IllegalArgumentException e) {
      LOG.warn("Codec type: " + codec + " is not known");
      return false;
    }

    try {
      testCompression(a);
      return true;
    } catch (IOException ignored) {
      LOG.warn("Can't instantiate codec: " + codec, ignored);
      return false;
    }
  }
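
  // A caller might probe several codecs in one pass (a sketch only; this
  // codec list is an assumption, not something this class prescribes):
  //
  //   for (String c : new String[] { "none", "gz", "lzo" }) {
  //     LOG.info(c + " -> " + (testCompression(c) ? "usable" : "unusable"));
  //   }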
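  // One slot per algorithm: null means "not yet tested"; once tested, the
  // slot memoizes the outcome so each codec is exercised at most once per JVM.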
  private final static Boolean[] compressionTestResults
      = new Boolean[Compression.Algorithm.values().length];
  static {
    for (int i = 0; i < compressionTestResults.length; ++i) {
      compressionTestResults[i] = null;
    }
  }
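  /**
   * Runs the round-trip compressor check for the given algorithm, caching
   * the result so repeated calls neither re-test nor mask earlier failures.
   *
   * @throws IOException if the algorithm fails the test now, or failed it
   *           on a previous call
   */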
  public static void testCompression(Compression.Algorithm algo)
      throws IOException {
    if (compressionTestResults[algo.ordinal()] != null) {
      if (compressionTestResults[algo.ordinal()]) {
        return; // already passed test, don't do it again.
      } else {
        // failed.
        throw new IOException("Compression algorithm '" + algo.getName() +
            "' previously failed test.");
      }
    }

    try {
      Compressor c = algo.getCompressor();
      algo.returnCompressor(c);
      compressionTestResults[algo.ordinal()] = true; // passes
    } catch (Throwable t) {
      compressionTestResults[algo.ordinal()] = false; // failure
      throw new IOException(t);
    }
  }

  // Scratch file written and re-read by main(); deleted before and after use.
  protected static Path path = new Path(".hfile-comp-test");

  public static void usage() {
    System.err.println("Usage: CompressionTest HDFS_PATH none|gz|lzo");
    System.exit(1);
  }
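  /**
   * Opens a DistributedFileSystem handle against the given HDFS URI
   * (e.g. "hdfs://namenode:8020/"; the host and port are illustrative).
   */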
  protected static DistributedFileSystem openConnection(String urlString)
      throws URISyntaxException, IOException {
    URI dfsUri = new URI(urlString);
    Configuration dfsConf = new Configuration();
    DistributedFileSystem dfs = new DistributedFileSystem();
    dfs.initialize(dfsUri, dfsConf);
    return dfs;
  }
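  /**
   * Closes the filesystem handle if it is open.  Returns true when the handle
   * was closed cleanly (or was already null), false if close() threw.
   */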
  protected static boolean closeConnection(DistributedFileSystem dfs) {
    if (dfs != null) {
      try {
        dfs.close();
      } catch (Exception e) {
        e.printStackTrace();
        return false;
      }
    }
    return true;
  }
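  /**
   * Writes a one-entry HFile with the requested codec, reads it back, and
   * exits 0 on success; on any failure, prints FAILED and exits 1.
   */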
  public static void main(String[] args) {
    if (args.length != 2) usage();
    try {
      DistributedFileSystem dfs = openConnection(args[0]);
      dfs.delete(path, false);

      // Write a single key/value pair using the requested codec.
      HFile.Writer writer = new HFile.Writer(dfs, path,
        HFile.DEFAULT_BLOCKSIZE, args[1], null);
      writer.append(Bytes.toBytes("testkey"), Bytes.toBytes("testval"));
      writer.appendFileInfo(Bytes.toBytes("infokey"), Bytes.toBytes("infoval"));
      writer.close();

      // Read it back; success means the codec round-tripped the data.
      HFile.Reader reader = new HFile.Reader(dfs, path, null, false);
      reader.loadFileInfo();
      byte[] key = reader.getFirstKey();
      boolean rc = Bytes.toString(key).equals("testkey");
      reader.close();

      dfs.delete(path, false);
      closeConnection(dfs);

      if (rc) System.exit(0);
    } catch (Exception e) {
      e.printStackTrace();
    }
    System.out.println("FAILED");
    System.exit(1);
  }
}