
/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce.replication;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HConnectable;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeer;
import org.apache.hadoop.hbase.replication.ReplicationPeers;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * This map-only job compares the data from a local table with a remote one.
 * Every cell is compared and must have exactly the same keys (even timestamp)
 * as well as the same value. It is possible to restrict the job by time range
 * and families. The peer id that's provided must match the one given when the
 * replication stream was set up.
 * <p>
 * Two counters are provided, Verifier.Counters.GOODROWS and BADROWS. The reason
 * why a row is different is shown in the map's log.
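 * <p>
 * Example invocation, taken from the usage message printed by this tool (the
 * timestamps, peer id and table name are placeholders):
 * <pre>
 * $ bin/hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \
 *     --starttime=1265875194289 --endtime=1265878794289 5 TestTable
 * </pre>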
 */
public class VerifyReplication extends Configured implements Tool {

  private static final Log LOG =
      LogFactory.getLog(VerifyReplication.class);

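  // Command-line options, populated by doCommandLine(). The time range,
  // versions and families are propagated to the mappers through the job
  // configuration under the NAME prefix.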
  public final static String NAME = "verifyrep";
  static long startTime = 0;
  static long endTime = Long.MAX_VALUE;
  static int versions = -1;
  static String tableName = null;
  static String families = null;
  static String peerId = null;

  /**
   * Map-only comparator for 2 tables
   */
  public static class Verifier
      extends TableMapper<ImmutableBytesWritable, Put> {

    public static enum Counters {GOODROWS, BADROWS}

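    // Scanner over the peer table, opened lazily on the first call to map()
    // and positioned at the current row key.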
    private ResultScanner replicatedScanner;

    /**
     * Map method that compares every scanned row with the equivalent from
     * a distant cluster.
     * @param row  The current table row key.
     * @param value  The columns.
     * @param context  The current context.
     * @throws IOException When something is broken with the data.
     */
    @Override
    public void map(ImmutableBytesWritable row, final Result value,
                    Context context)
        throws IOException {
      if (replicatedScanner == null) {
        final Configuration conf = context.getConfiguration();
        final Scan scan = new Scan();
        scan.setCaching(conf.getInt(TableInputFormat.SCAN_CACHEDROWS, 1));
        long startTime = conf.getLong(NAME + ".startTime", 0);
        long endTime = conf.getLong(NAME + ".endTime", Long.MAX_VALUE);
        int versions = conf.getInt(NAME + ".versions", -1);
        String families = conf.get(NAME + ".families", null);
        if (families != null) {
          String[] fams = families.split(",");
          for (String fam : fams) {
            scan.addFamily(Bytes.toBytes(fam));
          }
        }
        scan.setTimeRange(startTime, endTime);
        if (versions >= 0) {
          scan.setMaxVersions(versions);
        }
        HConnectionManager.execute(new HConnectable<Void>(conf) {
          @Override
          public Void connect(HConnection conn) throws IOException {
            String zkClusterKey = conf.get(NAME + ".peerQuorumAddress");
            Configuration peerConf = HBaseConfiguration.create(conf);
            ZKUtil.applyClusterKeyToConf(peerConf, zkClusterKey);

            HTable replicatedTable = new HTable(peerConf, conf.get(NAME + ".tableName"));
            scan.setStartRow(value.getRow());
            replicatedScanner = replicatedTable.getScanner(scan);
            return null;
          }
        });
      }
      Result res = replicatedScanner.next();
      try {
        Result.compareResults(value, res);
        context.getCounter(Counters.GOODROWS).increment(1);
      } catch (Exception e) {
        LOG.warn("Bad row", e);
        context.getCounter(Counters.BADROWS).increment(1);
      }
    }

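    /**
     * Closes the scanner on the peer table, if one was opened.
     */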
    @Override
    protected void cleanup(Context context) {
      if (replicatedScanner != null) {
        replicatedScanner.close();
        replicatedScanner = null;
      }
    }
  }

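  /**
   * Looks up the cluster key (ZooKeeper quorum address) of the configured peer
   * cluster by reading the replication peer's configuration from the local
   * cluster's ZooKeeper.
   */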
  private static String getPeerQuorumAddress(final Configuration conf) throws IOException {
    ZooKeeperWatcher localZKW = null;
    ReplicationPeer peer = null;
    try {
      localZKW = new ZooKeeperWatcher(conf, "VerifyReplication",
          new Abortable() {
            @Override public void abort(String why, Throwable e) {}
            @Override public boolean isAborted() {return false;}
          });

      ReplicationPeers rp = ReplicationFactory.getReplicationPeers(localZKW, conf, localZKW);
      rp.init();

      Configuration peerConf = rp.getPeerConf(peerId);
      if (peerConf == null) {
        throw new IOException("Couldn't get peer conf!");
      }

      return ZKUtil.getZooKeeperClusterKey(peerConf);
    } catch (ReplicationException e) {
      throw new IOException(
          "An error occurred while trying to connect to the remote peer cluster", e);
    } finally {
      if (peer != null) {
        peer.close();
      }
      if (localZKW != null) {
        localZKW.close();
      }
    }
  }

  /**
   * Sets up the actual job.
   *
   * @param conf  The current configuration.
   * @param args  The command line parameters.
   * @return The newly created job.
   * @throws java.io.IOException When setting up the job fails.
   */
  public static Job createSubmittableJob(Configuration conf, String[] args)
  throws IOException {
    if (!doCommandLine(args)) {
      return null;
    }
    if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY,
        HConstants.REPLICATION_ENABLE_DEFAULT)) {
      throw new IOException("Replication needs to be enabled to verify it.");
    }
    conf.set(NAME + ".peerId", peerId);
    conf.set(NAME + ".tableName", tableName);
    conf.setLong(NAME + ".startTime", startTime);
    conf.setLong(NAME + ".endTime", endTime);
    // The mappers run in separate JVMs, so versions must travel through the
    // job configuration rather than the static field.
    conf.setInt(NAME + ".versions", versions);
    if (families != null) {
      conf.set(NAME + ".families", families);
    }

    String peerQuorumAddress = getPeerQuorumAddress(conf);
    conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress);
    LOG.info("Peer Quorum Address: " + peerQuorumAddress);

    Job job = new Job(conf, NAME + "_" + tableName);
    job.setJarByClass(VerifyReplication.class);

    Scan scan = new Scan();
    scan.setTimeRange(startTime, endTime);
    if (versions >= 0) {
      scan.setMaxVersions(versions);
    }
    if (families != null) {
      String[] fams = families.split(",");
      for (String fam : fams) {
        scan.addFamily(Bytes.toBytes(fam));
      }
    }
    TableMapReduceUtil.initTableMapperJob(tableName, scan,
        Verifier.class, null, null, job);

    // Obtain the auth token from peer cluster
    TableMapReduceUtil.initCredentialsForCluster(job, peerQuorumAddress);

    job.setOutputFormatClass(NullOutputFormat.class);
    job.setNumReduceTasks(0);
    return job;
  }

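  /**
   * Parses the command line into the static option fields.
   *
   * @param args The command line parameters.
   * @return false if the arguments could not be parsed or help was requested.
   */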
  private static boolean doCommandLine(final String[] args) {
    if (args.length < 2) {
      printUsage(null);
      return false;
    }
    try {
      for (int i = 0; i < args.length; i++) {
        String cmd = args[i];
        if (cmd.equals("-h") || cmd.startsWith("--h")) {
          printUsage(null);
          return false;
        }

        final String startTimeArgKey = "--starttime=";
        if (cmd.startsWith(startTimeArgKey)) {
          startTime = Long.parseLong(cmd.substring(startTimeArgKey.length()));
          continue;
        }

        final String endTimeArgKey = "--endtime=";
        if (cmd.startsWith(endTimeArgKey)) {
          endTime = Long.parseLong(cmd.substring(endTimeArgKey.length()));
          continue;
        }

        final String versionsArgKey = "--versions=";
        if (cmd.startsWith(versionsArgKey)) {
          versions = Integer.parseInt(cmd.substring(versionsArgKey.length()));
          continue;
        }

        final String familiesArgKey = "--families=";
        if (cmd.startsWith(familiesArgKey)) {
          families = cmd.substring(familiesArgKey.length());
          continue;
        }

        if (i == args.length - 2) {
          peerId = cmd;
        }

        if (i == args.length - 1) {
          tableName = cmd;
        }
      }
    } catch (Exception e) {
      e.printStackTrace();
      printUsage("Can't start because " + e.getMessage());
      return false;
    }
    return true;
  }

  /*
   * @param errorMsg Error message.  Can be null.
   */
  private static void printUsage(final String errorMsg) {
    if (errorMsg != null && errorMsg.length() > 0) {
      System.err.println("ERROR: " + errorMsg);
    }
    System.err.println("Usage: verifyrep [--starttime=X]" +
        " [--endtime=Y] [--versions=N] [--families=A] <peerid> <tablename>");
    System.err.println();
    System.err.println("Options:");
    System.err.println(" starttime    beginning of the time range");
    System.err.println("              without endtime means from starttime to forever");
    System.err.println(" endtime      end of the time range");
    System.err.println(" versions     number of cell versions to verify");
    System.err.println(" families     comma-separated list of families to verify");
    System.err.println();
    System.err.println("Args:");
    System.err.println(" peerid       Id of the peer used for verification, must match the one given for replication");
    System.err.println(" tablename    Name of the table to verify");
    System.err.println();
    System.err.println("Examples:");
    System.err.println(" To verify the data replicated from TestTable for a 1 hour window with peer #5 ");
    System.err.println(" $ bin/hbase " +
        "org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication" +
        " --starttime=1265875194289 --endtime=1265878794289 5 TestTable ");
  }

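  /**
   * Runs the verification job. Note that a zero exit code only means the job
   * completed; check the GOODROWS/BADROWS counters for the comparison result.
   *
   * @param args The command line parameters.
   * @return 0 if the job completed successfully, 1 otherwise.
   * @throws Exception When running the job fails.
   */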
  @Override
  public int run(String[] args) throws Exception {
    Configuration conf = this.getConf();
    Job job = createSubmittableJob(conf, args);
    if (job != null) {
      return job.waitForCompletion(true) ? 0 : 1;
    }
    return 1;
  }

  /**
   * Main entry point.
   *
   * @param args  The command line parameters.
   * @throws Exception When running the job fails.
   */
  public static void main(String[] args) throws Exception {
    int res = ToolRunner.run(HBaseConfiguration.create(), new VerifyReplication(), args);
    System.exit(res);
  }
}