1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  
19  package org.apache.hadoop.hbase.snapshot;
20  
21  import java.io.FileNotFoundException;
22  import java.io.IOException;
23  import java.util.ArrayList;
24  import java.util.Collections;
25  import java.util.Comparator;
26  import java.util.LinkedList;
27  import java.util.List;
28  
29  import org.apache.commons.logging.Log;
30  import org.apache.commons.logging.LogFactory;
31  
32  import org.apache.hadoop.classification.InterfaceAudience;
33  import org.apache.hadoop.classification.InterfaceStability;
34  import org.apache.hadoop.conf.Configuration;
35  import org.apache.hadoop.conf.Configured;
36  import org.apache.hadoop.fs.FSDataInputStream;
37  import org.apache.hadoop.fs.FSDataOutputStream;
38  import org.apache.hadoop.fs.FileChecksum;
39  import org.apache.hadoop.fs.FileStatus;
40  import org.apache.hadoop.fs.FileSystem;
41  import org.apache.hadoop.fs.FileUtil;
42  import org.apache.hadoop.fs.Path;
43  import org.apache.hadoop.fs.permission.FsPermission;
44  import org.apache.hadoop.hbase.HBaseConfiguration;
45  import org.apache.hadoop.hbase.HConstants;
46  import org.apache.hadoop.hbase.io.HFileLink;
47  import org.apache.hadoop.hbase.io.HLogLink;
48  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
49  import org.apache.hadoop.hbase.snapshot.ExportSnapshotException;
50  import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
51  import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
52  import org.apache.hadoop.hbase.util.Bytes;
53  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
54  import org.apache.hadoop.hbase.util.FSUtils;
55  import org.apache.hadoop.hbase.util.Pair;
56  import org.apache.hadoop.io.NullWritable;
57  import org.apache.hadoop.io.SequenceFile;
58  import org.apache.hadoop.io.Text;
59  import org.apache.hadoop.mapreduce.Job;
60  import org.apache.hadoop.mapreduce.Mapper;
61  import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
62  import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
63  import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
64  import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
65  import org.apache.hadoop.util.StringUtils;
66  import org.apache.hadoop.util.Tool;
67  import org.apache.hadoop.util.ToolRunner;
68  
69  /**
70   * Export the specified snapshot to a given FileSystem.
71   *
72   * The .snapshot/name folder is copied to the destination cluster, and then all the
73   * hfiles/hlogs are copied into the .archive/ location using a Map-Reduce job.
74   * When everything is done, the second cluster can restore the snapshot.
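 *
 * A hypothetical invocation (the snapshot name and destination address are placeholders,
 * mirroring the usage text this tool prints):
 *
 *   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
 *     -snapshot MySnapshot -copy-to hdfs://srv2:8082/hbase -mappers 16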
75   */
76  @InterfaceAudience.Public
77  @InterfaceStability.Evolving
78  public final class ExportSnapshot extends Configured implements Tool {
79    private static final Log LOG = LogFactory.getLog(ExportSnapshot.class);
80  
81    private static final String CONF_TMP_DIR = "hbase.tmp.dir";
82    private static final String CONF_FILES_USER = "snapshot.export.files.attributes.user";
83    private static final String CONF_FILES_GROUP = "snapshot.export.files.attributes.group";
84    private static final String CONF_FILES_MODE = "snapshot.export.files.attributes.mode";
85    private static final String CONF_CHECKSUM_VERIFY = "snapshot.export.checksum.verify";
86    private static final String CONF_OUTPUT_ROOT = "snapshot.export.output.root";
87    private static final String CONF_INPUT_ROOT = "snapshot.export.input.root";
88  
89    private static final String INPUT_FOLDER_PREFIX = "export-files.";
90  
91    // Export Map-Reduce Counters, to keep track of the progress
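  // MISSING_FILES: source files that could not be opened; COPY_FAILED: copies that failed or
  // did not match the expected length; BYTES_EXPECTED/BYTES_COPIED: bytes planned vs. bytes
  // actually written.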
92    public enum Counter { MISSING_FILES, COPY_FAILED, BYTES_EXPECTED, BYTES_COPIED };
93  
94    private static class ExportMapper extends Mapper<Text, NullWritable, NullWritable, NullWritable> {
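    // REPORT_SIZE: update the BYTES_COPIED counter and task status roughly once per this many
    // copied bytes. BUFFER_SIZE: size of the buffer used to stream data in copyData().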
95      final static int REPORT_SIZE = 1 * 1024 * 1024;
96      final static int BUFFER_SIZE = 64 * 1024;
97  
98      private boolean verifyChecksum;
99      private String filesGroup;
100     private String filesUser;
101     private short filesMode;
102 
103     private FileSystem outputFs;
104     private Path outputArchive;
105     private Path outputRoot;
106 
107     private FileSystem inputFs;
108     private Path inputArchive;
109     private Path inputRoot;
110 
111     @Override
112     public void setup(Context context) {
113       Configuration conf = context.getConfiguration();
114       verifyChecksum = conf.getBoolean(CONF_CHECKSUM_VERIFY, true);
115 
116       filesGroup = conf.get(CONF_FILES_GROUP);
117       filesUser = conf.get(CONF_FILES_USER);
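      // filesMode comes from the -chmod option as an octal value; 0 (the default) means
      // "preserve the source file permission" (see preserveAttributes()).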
118       filesMode = (short)conf.getInt(CONF_FILES_MODE, 0);
119       outputRoot = new Path(conf.get(CONF_OUTPUT_ROOT));
120       inputRoot = new Path(conf.get(CONF_INPUT_ROOT));
121 
122       inputArchive = new Path(inputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY);
123       outputArchive = new Path(outputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY);
124 
125       try {
126         inputFs = FileSystem.get(inputRoot.toUri(), conf);
127       } catch (IOException e) {
128         throw new RuntimeException("Could not get the input FileSystem with root=" + inputRoot, e);
129       }
130 
131       try {
132         outputFs = FileSystem.get(outputRoot.toUri(), conf);
133       } catch (IOException e) {
134         throw new RuntimeException("Could not get the output FileSystem with root="+ outputRoot, e);
135       }
136     }
137 
138     @Override
139     public void map(Text key, NullWritable value, Context context)
140         throws InterruptedException, IOException {
141       Path inputPath = new Path(key.toString());
142       Path outputPath = getOutputPath(inputPath);
143 
144       LOG.info("copy file input=" + inputPath + " output=" + outputPath);
145       if (copyFile(context, inputPath, outputPath)) {
146         LOG.info("copy completed for input=" + inputPath + " output=" + outputPath);
147       }
148     }
149 
150     /**
151      * Returns the location where the inputPath will be copied.
152      *  - hfiles are encoded as hfile links hfile-region-table
153      *  - logs are encoded as serverName/logName
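     *
     * Purely illustrative: for an hfile link the table, region and hfile names are taken from
     * the link name and the family from the parent directory, so the copy lands under
     * outputArchive/table/region/family/hfile; a log file keeps its name under the
     * old-logs directory (HConstants.HREGION_OLDLOGDIR_NAME).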
154      */
155     private Path getOutputPath(final Path inputPath) throws IOException {
156       Path path;
157       if (HFileLink.isHFileLink(inputPath)) {
158         String family = inputPath.getParent().getName();
159         String table = HFileLink.getReferencedTableName(inputPath.getName());
160         String region = HFileLink.getReferencedRegionName(inputPath.getName());
161         String hfile = HFileLink.getReferencedHFileName(inputPath.getName());
162         path = new Path(table, new Path(region, new Path(family, hfile)));
163       } else if (isHLogLinkPath(inputPath)) {
164         String logName = inputPath.getName();
165         path = new Path(new Path(outputRoot, HConstants.HREGION_OLDLOGDIR_NAME), logName);
166       } else {
167         path = inputPath;
168       }
169       return new Path(outputArchive, path);
170     }
171 
172     private boolean copyFile(final Context context, final Path inputPath, final Path outputPath)
173         throws IOException {
174       FSDataInputStream in = openSourceFile(inputPath);
175       if (in == null) {
176         context.getCounter(Counter.MISSING_FILES).increment(1);
177         return false;
178       }
179 
180       try {
181         // Verify if the input file exists
182         FileStatus inputStat = getFileStatus(inputFs, inputPath);
183         if (inputStat == null) return false;
184 
185         // Verify if the output file exists and is the same that we want to copy
186         FileStatus outputStat = getFileStatus(outputFs, outputPath);
187         if (outputStat != null && sameFile(inputStat, outputStat)) {
188           LOG.info("Skip copy " + inputPath + " to " + outputPath + ", same file.");
189           return true;
190         }
191 
192         context.getCounter(Counter.BYTES_EXPECTED).increment(inputStat.getLen());
193 
194         // Ensure that the output folder is there and copy the file
195         outputFs.mkdirs(outputPath.getParent());
196         FSDataOutputStream out = outputFs.create(outputPath, true);
197         try {
198           if (!copyData(context, inputPath, in, outputPath, out, inputStat.getLen()))
199             return false;
200         } finally {
201           out.close();
202         }
203 
204         // Preserve attributes
205         return preserveAttributes(outputPath, inputStat);
206       } finally {
207         in.close();
208       }
209     }
210 
211     /**
212      * Preserve the file attributes selected by the user, copying them from the source file.
213      */
214     private boolean preserveAttributes(final Path path, final FileStatus refStat) {
215       FileStatus stat;
216       try {
217         stat = outputFs.getFileStatus(path);
218       } catch (IOException e) {
219         LOG.warn("Unable to get the status for file=" + path);
220         return false;
221       }
222 
223       try {
224         if (filesMode > 0 && stat.getPermission().toShort() != filesMode) {
225           outputFs.setPermission(path, new FsPermission(filesMode));
226         } else if (!stat.getPermission().equals(refStat.getPermission())) {
227           outputFs.setPermission(path, refStat.getPermission());
228         }
229       } catch (IOException e) {
230         LOG.error("Unable to set the permission for file=" + path, e);
231         return false;
232       }
233 
234       try {
235         String user = (filesUser != null) ? filesUser : refStat.getOwner();
236         String group = (filesGroup != null) ? filesGroup : refStat.getGroup();
237         if (!(user.equals(stat.getOwner()) && group.equals(stat.getGroup()))) {
238           outputFs.setOwner(path, user, group);
239         }
240       } catch (IOException e) {
241         LOG.error("Unable to set the owner/group for file=" + path, e);
242         return false;
243       }
244 
245       return true;
246     }
247 
248     private boolean copyData(final Context context,
249         final Path inputPath, final FSDataInputStream in,
250         final Path outputPath, final FSDataOutputStream out,
251         final long inputFileSize) {
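      // statusMessage has two placeholders: %s for the human-readable count of bytes written
      // so far, and %.3f for the completion percentage.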
252       final String statusMessage = "copied %s/" + StringUtils.humanReadableInt(inputFileSize) +
253                                    " (%.3f%%) from " + inputPath + " to " + outputPath;
254 
255       try {
256         byte[] buffer = new byte[BUFFER_SIZE];
257         long totalBytesWritten = 0;
258         int reportBytes = 0;
259         int bytesRead;
260 
261         while ((bytesRead = in.read(buffer)) > 0) {
262           out.write(buffer, 0, bytesRead);
263           totalBytesWritten += bytesRead;
264           reportBytes += bytesRead;
265 
266           if (reportBytes >= REPORT_SIZE) {
267             context.getCounter(Counter.BYTES_COPIED).increment(reportBytes);
268             context.setStatus(String.format(statusMessage,
269                               StringUtils.humanReadableInt(totalBytesWritten),
270                               (totalBytesWritten/(float)inputFileSize) * 100.0f));
271             reportBytes = 0;
272           }
273         }
274 
275         context.getCounter(Counter.BYTES_COPIED).increment(reportBytes);
276         context.setStatus(String.format(statusMessage,
277                           StringUtils.humanReadableInt(totalBytesWritten),
278                           (totalBytesWritten/(float)inputFileSize) * 100.0f));
279 
280         // Verify that the written size matches
281         if (totalBytesWritten != inputFileSize) {
282           LOG.error("number of bytes copied not matching copied=" + totalBytesWritten +
283                     " expected=" + inputFileSize + " for file=" + inputPath);
284           context.getCounter(Counter.COPY_FAILED).increment(1);
285           return false;
286         }
287 
288         return true;
289       } catch (IOException e) {
290         LOG.error("Error copying " + inputPath + " to " + outputPath, e);
291         context.getCounter(Counter.COPY_FAILED).increment(1);
292         return false;
293       }
294     }
295 
296     private FSDataInputStream openSourceFile(final Path path) {
297       try {
298         if (HFileLink.isHFileLink(path)) {
299           return new HFileLink(inputRoot, inputArchive, path).open(inputFs);
300         } else if (isHLogLinkPath(path)) {
301           String serverName = path.getParent().getName();
302           String logName = path.getName();
303           return new HLogLink(inputRoot, serverName, logName).open(inputFs);
304         }
305         return inputFs.open(path);
306       } catch (IOException e) {
307         LOG.error("Unable to open source file=" + path, e);
308         return null;
309       }
310     }
311 
312     private FileStatus getFileStatus(final FileSystem fs, final Path path) {
313       try {
314         if (HFileLink.isHFileLink(path)) {
315           HFileLink link = new HFileLink(inputRoot, inputArchive, path);
316           return link.getFileStatus(fs);
317         } else if (isHLogLinkPath(path)) {
318           String serverName = path.getParent().getName();
319           String logName = path.getName();
320           return new HLogLink(inputRoot, serverName, logName).getFileStatus(fs);
321         }
322         return fs.getFileStatus(path);
323       } catch (IOException e) {
324         LOG.warn("Unable to get the status for file=" + path);
325         return null;
326       }
327     }
328 
329     private FileChecksum getFileChecksum(final FileSystem fs, final Path path) {
330       try {
331         return fs.getFileChecksum(path);
332       } catch (IOException e) {
333         LOG.warn("Unable to get checksum for file=" + path, e);
334         return null;
335       }
336     }
337 
338     /**
339      * Check if the two files are equal by looking at the file length,
340      * and at the checksum (if user has specified the verifyChecksum flag).
341      */
342     private boolean sameFile(final FileStatus inputStat, final FileStatus outputStat) {
343       // Not matching length
344       if (inputStat.getLen() != outputStat.getLen()) return false;
345 
346       // Consider the files equal, since the user asked to skip checksum verification
347       if (!verifyChecksum) return true;
348 
349       // If checksums are not available, files are not the same.
350       FileChecksum inChecksum = getFileChecksum(inputFs, inputStat.getPath());
351       if (inChecksum == null) return false;
352 
353       FileChecksum outChecksum = getFileChecksum(outputFs, outputStat.getPath());
354       if (outChecksum == null) return false;
355 
356       return inChecksum.equals(outChecksum);
357     }
358 
359     /**
360      * HLog files are encoded as serverName/logName
361      * and since all the other files should be in /hbase/table/..path..
362      * we can rely on the depth, for now.
363      */
364     private static boolean isHLogLinkPath(final Path path) {
365       return path.depth() == 2;
366     }
367   }
368 
369   /**
370    * Extract the list of files (HFiles/HLogs) to copy using Map-Reduce.
371    * @return list of files referenced by the snapshot (pair of path and size)
372    */
373   private List<Pair<Path, Long>> getSnapshotFiles(final FileSystem fs, final Path snapshotDir)
374       throws IOException {
375     SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
376 
377     final List<Pair<Path, Long>> files = new ArrayList<Pair<Path, Long>>();
378     final String table = snapshotDesc.getTable();
379     final Configuration conf = getConf();
380 
381     // Get snapshot files
382     SnapshotReferenceUtil.visitReferencedFiles(fs, snapshotDir,
383       new SnapshotReferenceUtil.FileVisitor() {
384         public void storeFile (final String region, final String family, final String hfile)
385             throws IOException {
386           Path path = new Path(family, HFileLink.createHFileLinkName(table, region, hfile));
387           long size = new HFileLink(conf, path).getFileStatus(fs).getLen();
388           files.add(new Pair<Path, Long>(path, size));
389         }
390 
391         public void recoveredEdits (final String region, final String logfile)
392             throws IOException {
393           // copied with the snapshot references
394         }
395 
396         public void logFile (final String server, final String logfile)
397             throws IOException {
398           long size = new HLogLink(conf, server, logfile).getFileStatus(fs).getLen();
399           files.add(new Pair<Path, Long>(new Path(server, logfile), size));
400         }
401     });
402 
403     return files;
404   }
405 
406   /**
407    * Given a list of file paths and sizes, create around ngroups splits that are as
408    * balanced as possible. The groups created will contain similar amounts of bytes.
409    * <p>
410    * The algorithm used is pretty straightforward: the file list is sorted by size,
411    * and then each group fetches the biggest file still available, iterating through
412    * the groups while alternating direction.
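   *
   * Worked example (hypothetical sizes, not taken from the source): splitting files of sizes
   * {13, 8, 5, 3, 2, 1} into 3 groups assigns, from biggest to smallest, 13 to g0, 8 to g1,
   * 5 to g2, then reverses direction: 3 to g2, 2 to g1, 1 to g0, giving group totals of
   * 14, 10 and 8.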
413    */
414   static List<List<Path>> getBalancedSplits(final List<Pair<Path, Long>> files, int ngroups) {
415     // Sort files by size, from small to big
416     Collections.sort(files, new Comparator<Pair<Path, Long>>() {
417       public int compare(Pair<Path, Long> a, Pair<Path, Long> b) {
418         long r = a.getSecond() - b.getSecond();
419         return (r < 0) ? -1 : ((r > 0) ? 1 : 0);
420       }
421     });
422 
423     // create balanced groups
424     List<List<Path>> fileGroups = new LinkedList<List<Path>>();
425     long[] sizeGroups = new long[ngroups];
426     int hi = files.size() - 1;
427     int lo = 0;
428 
429     List<Path> group;
430     int dir = 1;
431     int g = 0;
432 
433     while (hi >= lo) {
434       if (g == fileGroups.size()) {
435         group = new LinkedList<Path>();
436         fileGroups.add(group);
437       } else {
438         group = fileGroups.get(g);
439       }
440 
441       Pair<Path, Long> fileInfo = files.get(hi--);
442 
443       // add the hi one
444       sizeGroups[g] += fileInfo.getSecond();
445       group.add(fileInfo.getFirst());
446 
447       // change direction when at the end or the beginning
448       g += dir;
449       if (g == ngroups) {
450         dir = -1;
451         g = ngroups - 1;
452       } else if (g < 0) {
453         dir = 1;
454         g = 0;
455       }
456     }
457 
458     if (LOG.isDebugEnabled()) {
459       for (int i = 0; i < sizeGroups.length; ++i) {
460         LOG.debug("export split=" + i + " size=" + StringUtils.humanReadableInt(sizeGroups[i]));
461       }
462     }
463 
464     return fileGroups;
465   }
466 
467   private static Path getInputFolderPath(final FileSystem fs, final Configuration conf)
468       throws IOException, InterruptedException {
469     String stagingName = "exportSnapshot-" + EnvironmentEdgeManager.currentTimeMillis();
470     Path stagingDir = new Path(conf.get(CONF_TMP_DIR), stagingName);
471     fs.mkdirs(stagingDir);
472     return new Path(stagingDir, INPUT_FOLDER_PREFIX +
473       String.valueOf(EnvironmentEdgeManager.currentTimeMillis()));
474   }
475 
476   /**
477    * Create the input files, with the paths to copy, for the MR job.
478    * Each input file contains n files, and each one holds a similar amount of data to copy.
479    * The number of input files created is based on the number of mappers provided as an
480    * argument and the number of files to copy.
481    */
482   private static Path[] createInputFiles(final Configuration conf,
483       final List<Pair<Path, Long>> snapshotFiles, int mappers)
484       throws IOException, InterruptedException {
485     FileSystem fs = FileSystem.get(conf);
486     Path inputFolderPath = getInputFolderPath(fs, conf);
487     LOG.debug("Input folder location: " + inputFolderPath);
488 
489     List<List<Path>> splits = getBalancedSplits(snapshotFiles, mappers);
490     Path[] inputFiles = new Path[splits.size()];
491 
492     Text key = new Text();
493     for (int i = 0; i < inputFiles.length; i++) {
494       List<Path> files = splits.get(i);
495       inputFiles[i] = new Path(inputFolderPath, String.format("export-%d.seq", i));
496       SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, inputFiles[i],
497         Text.class, NullWritable.class);
498       LOG.debug("Input split: " + i);
499       try {
500         for (Path file: files) {
501           LOG.debug(file.toString());
502           key.set(file.toString());
503           writer.append(key, NullWritable.get());
504         }
505       } finally {
506         writer.close();
507       }
508     }
509 
510     return inputFiles;
511   }
512 
513   /**
514    * Run Map-Reduce Job to perform the files copy.
515    */
516   private boolean runCopyJob(final Path inputRoot, final Path outputRoot,
517       final List<Pair<Path, Long>> snapshotFiles, final boolean verifyChecksum,
518       final String filesUser, final String filesGroup, final int filesMode,
519       final int mappers) throws IOException, InterruptedException, ClassNotFoundException {
520     Configuration conf = getConf();
521     if (filesGroup != null) conf.set(CONF_FILES_GROUP, filesGroup);
522     if (filesUser != null) conf.set(CONF_FILES_USER, filesUser);
523     conf.setInt(CONF_FILES_MODE, filesMode);
524     conf.setBoolean(CONF_CHECKSUM_VERIFY, verifyChecksum);
525     conf.set(CONF_OUTPUT_ROOT, outputRoot.toString());
526     conf.set(CONF_INPUT_ROOT, inputRoot.toString());
527     conf.setInt("mapreduce.job.maps", mappers);
528 
529     // job.setMapSpeculativeExecution(false)
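    // Speculative execution is turned off because two task attempts copying to the same
    // destination file would conflict; both the old (mapred.*) and new (mapreduce.*)
    // property names are set to cover different MapReduce versions.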
530     conf.setBoolean("mapreduce.map.speculative", false);
531     conf.setBoolean("mapreduce.reduce.speculative", false);
532     conf.setBoolean("mapred.map.tasks.speculative.execution", false);
533     conf.setBoolean("mapred.reduce.tasks.speculative.execution", false);
534 
535     Job job = new Job(conf);
536     job.setJobName("ExportSnapshot");
537     job.setJarByClass(ExportSnapshot.class);
538     job.setMapperClass(ExportMapper.class);
539     job.setInputFormatClass(SequenceFileInputFormat.class);
540     job.setOutputFormatClass(NullOutputFormat.class);
541     job.setNumReduceTasks(0);
542     for (Path path: createInputFiles(conf, snapshotFiles, mappers)) {
543       LOG.debug("Add Input Path=" + path);
544       SequenceFileInputFormat.addInputPath(job, path);
545     }
546 
547     return job.waitForCompletion(true);
548   }
549 
550   /**
551    * Execute the export snapshot by copying the snapshot metadata, hfiles and hlogs.
552    * @return 0 on success, and != 0 upon failure.
553    */
554   @Override
555   public int run(String[] args) throws Exception {
556     boolean verifyChecksum = true;
557     String snapshotName = null;
558     String filesGroup = null;
559     String filesUser = null;
560     Path outputRoot = null;
561     int filesMode = 0;
562     int mappers = getConf().getInt("mapreduce.job.maps", 1);
563 
564     // Process command line args
565     for (int i = 0; i < args.length; i++) {
566       String cmd = args[i];
567       try {
568         if (cmd.equals("-snapshot")) {
569           snapshotName = args[++i];
570         } else if (cmd.equals("-copy-to")) {
571           outputRoot = new Path(args[++i]);
572         } else if (cmd.equals("-no-checksum-verify")) {
573           verifyChecksum = false;
574         } else if (cmd.equals("-mappers")) {
575           mappers = Integer.parseInt(args[++i]);
576         } else if (cmd.equals("-chuser")) {
577           filesUser = args[++i];
578         } else if (cmd.equals("-chgroup")) {
579           filesGroup = args[++i];
580         } else if (cmd.equals("-chmod")) {
581           filesMode = Integer.parseInt(args[++i], 8);
582         } else if (cmd.equals("-h") || cmd.equals("--help")) {
583           printUsageAndExit();
584         } else {
585           System.err.println("UNEXPECTED: " + cmd);
586           printUsageAndExit();
587         }
588       } catch (Exception e) {
589         printUsageAndExit();
590       }
591     }
592 
593     // Check user options
594     if (snapshotName == null) {
595       System.err.println("Snapshot name not provided.");
596       printUsageAndExit();
597     }
598 
599     if (outputRoot == null) {
600       System.err.println("Destination file-system not provided.");
601       printUsageAndExit();
602     }
603 
604     Configuration conf = getConf();
605     Path inputRoot = FSUtils.getRootDir(conf);
606     FileSystem inputFs = FileSystem.get(conf);
607     FileSystem outputFs = FileSystem.get(outputRoot.toUri(), conf);
608 
609     Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot);
610     Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshotName, outputRoot);
611     Path outputSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, outputRoot);
612 
613     // Check if the snapshot already exists
614     if (outputFs.exists(outputSnapshotDir)) {
615       System.err.println("The snapshot '" + snapshotName +
616         "' already exists in the destination: " + outputSnapshotDir);
617       return 1;
618     }
619 
620     // Check if the snapshot is already in progress
621     if (outputFs.exists(snapshotTmpDir)) {
622       System.err.println("A snapshot with the same name '" + snapshotName + "' is in-progress");
623       return 1;
624     }
625 
626     // Step 0 - Extract snapshot files to copy
627     final List<Pair<Path, Long>> files = getSnapshotFiles(inputFs, snapshotDir);
628 
629     // Step 1 - Copy fs1:/.snapshot/<snapshot> to fs2:/.snapshot/.tmp/<snapshot>
630     // The snapshot references must be copied before the hfiles, otherwise the cleaner
631     // will remove them because they are unreferenced.
632     try {
633       FileUtil.copy(inputFs, snapshotDir, outputFs, snapshotTmpDir, false, false, conf);
634     } catch (IOException e) {
635       System.err.println("Failed to copy the snapshot directory: from=" + snapshotDir +
636         " to=" + snapshotTmpDir);
637       e.printStackTrace(System.err);
638       return 1;
639     }
640 
641     // Step 2 - Start MR Job to copy files
642     // The snapshot references must be copied before the files, otherwise the files get removed
643     // by the HFileArchiver, since they have no references.
644     try {
645       if (files.size() == 0) {
646         LOG.warn("There are 0 store files to be copied. There may be no data in the table.");
647       } else {
648         if (!runCopyJob(inputRoot, outputRoot, files, verifyChecksum,
649             filesUser, filesGroup, filesMode, mappers)) {
650           throw new ExportSnapshotException("Snapshot export failed!");
651         }
652       }
653 
654       // Step 3 - Rename fs2:/.snapshot/.tmp/<snapshot> to fs2:/.snapshot/<snapshot>
655       if (!outputFs.rename(snapshotTmpDir, outputSnapshotDir)) {
656         System.err.println("Snapshot export failed!");
657         System.err.println("Unable to rename snapshot directory from=" +
658                            snapshotTmpDir + " to=" + outputSnapshotDir);
659         return 1;
660       }
661 
662       return 0;
663     } catch (Exception e) {
664       System.err.println("Snapshot export failed!");
665       e.printStackTrace(System.err);
666       outputFs.delete(outputSnapshotDir, true);
667       return 1;
668     }
669   }
670 
671   // ExportSnapshot
672   private void printUsageAndExit() {
673     System.err.printf("Usage: bin/hbase %s [options]%n", getClass().getName());
674     System.err.println(" where [options] are:");
675     System.err.println("  -h|-help                Show this help and exit.");
676     System.err.println("  -snapshot NAME          Snapshot to restore.");
677     System.err.println("  -copy-to NAME           Remote destination hdfs://");
678     System.err.println("  -no-checksum-verify     Do not verify checksum.");
679     System.err.println("  -chuser USERNAME        Change the owner of the files to the specified one.");
680     System.err.println("  -chgroup GROUP          Change the group of the files to the specified one.");
681     System.err.println("  -chmod MODE             Change the permission of the files to the specified one.");
682     System.err.println("  -mappers                Number of mappers to use during the copy (mapreduce.job.maps).");
683     System.err.println();
684     System.err.println("Examples:");
685     System.err.println("  hbase " + getClass() + " \\");
686     System.err.println("    -snapshot MySnapshot -copy-to hdfs:///srv2:8082/hbase \\");
687     System.err.println("    -chuser MyUser -chgroup MyGroup -chmod 700 -mappers 16");
688     System.exit(1);
689   }
690 
691   /**
692    * The guts of the {@link #main} method.
693    * Call this method to avoid the {@link #main(String[])} System.exit.
694   * @param args the command line arguments
695    * @return errCode
696    * @throws Exception
697    */
698   static int innerMain(final Configuration conf, final String [] args) throws Exception {
699     return ToolRunner.run(conf, new ExportSnapshot(), args);
700   }
701 
702   public static void main(String[] args) throws Exception {
703      System.exit(innerMain(HBaseConfiguration.create(), args));
704   }
705 }