
/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.io.SequenceFile;

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.Map;
/**
 * Utility methods for interacting with the underlying file system.
 */
public class FSUtils {
  private static final Log LOG = LogFactory.getLog(FSUtils.class);

  /**
   * Not instantiable
   */
  private FSUtils() {
    super();
  }

  /**
   * Delete if exists.
   * @param fs filesystem object
   * @param dir directory to delete
   * @return True if deleted <code>dir</code>
   * @throws IOException e
   */
  public static boolean deleteDirectory(final FileSystem fs, final Path dir)
  throws IOException {
    return fs.exists(dir) && fs.delete(dir, true);
  }

  /**
   * Check if directory exists.  If it does not, create it.
   * @param fs filesystem object
   * @param dir path to check
   * @return the passed <code>dir</code>
   * @throws IOException e
   */
  public static Path checkdir(final FileSystem fs, final Path dir)
  throws IOException {
    if (!fs.exists(dir)) {
      fs.mkdirs(dir);
    }
    return dir;
  }

  /**
   * Create file.
   * @param fs filesystem object
   * @param p path to create
   * @return Path
   * @throws IOException e
   */
  public static Path create(final FileSystem fs, final Path p)
  throws IOException {
    if (fs.exists(p)) {
      throw new IOException("File already exists " + p.toString());
    }
    if (!fs.createNewFile(p)) {
      throw new IOException("Failed create of " + p);
    }
    return p;
  }
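
  // Illustrative usage of the helpers above (a sketch, not part of this
  // class; "conf" is an assumed caller-supplied Configuration):
  //
  //   FileSystem fs = FileSystem.get(conf);
  //   Path dir = FSUtils.checkdir(fs, new Path("/hbase/.tmp")); // mkdir -p
  //   Path marker = FSUtils.create(fs, new Path(dir, "marker")); // must not exist
  //   FSUtils.deleteDirectory(fs, dir); // true only if dir existed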

  /**
   * Checks to see if the specified file system is available
   *
   * @param fs filesystem
   * @throws IOException e
   */
  public static void checkFileSystemAvailable(final FileSystem fs)
  throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    IOException exception = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      if (dfs.exists(new Path("/"))) {
        return;
      }
    } catch (IOException e) {
      exception = RemoteExceptionHandler.checkIOException(e);
    }
    try {
      fs.close();
    } catch (Exception e) {
      LOG.error("file system close failed: ", e);
    }
    IOException io = new IOException("File system is not available");
    io.initCause(exception);
    throw io;
  }
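
  // A sketch of typical startup use (illustrative; "conf" and the error
  // handling are assumptions, not prescribed by this class):
  //
  //   try {
  //     FSUtils.checkFileSystemAvailable(FileSystem.get(conf));
  //   } catch (IOException e) {
  //     // DFS is unreachable; abort startup rather than limp along.
  //   }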

  /**
   * Reads the current version of the file system from the version file
   * under the hbase root directory.
   *
   * @param fs filesystem object
   * @param rootdir root hbase directory
   * @return null if no version file exists, version string otherwise.
   * @throws IOException e
   */
  public static String getVersion(FileSystem fs, Path rootdir)
  throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    String version = null;
    if (fs.exists(versionFile)) {
      FSDataInputStream s = fs.open(versionFile);
      try {
        version = DataInputStream.readUTF(s);
      } catch (EOFException eof) {
        LOG.warn("Version file was empty, odd, will try to set it.");
      } finally {
        s.close();
      }
    }
    return version;
  }

  /**
   * Verifies current version of file system
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @param message if true, issues a message on System.out
   *
   * @throws IOException e
   */
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message) throws IOException {
    checkVersion(fs, rootdir, message, 0);
  }

  /**
   * Verifies current version of file system
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @param message if true, issues a message on System.out
   * @param wait wait interval for retry if > 0
   *
   * @throws IOException e
   */
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message, int wait) throws IOException {
    String version = getVersion(fs, rootdir);

    if (version == null) {
      if (!rootRegionExists(fs, rootdir)) {
        // rootDir is empty (no version file and no root region)
        // just create new version file (HBASE-1195)
        FSUtils.setVersion(fs, rootdir, wait);
        return;
      }
    } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) {
      return;
    }

    // Version is deprecated; migration is required.
    // Output on stdout so user sees it in terminal.
    String msg = "File system needs to be upgraded."
      + "  You have version " + version
      + " and I want version " + HConstants.FILE_SYSTEM_VERSION
      + ".  Run the '${HBASE_HOME}/bin/hbase migrate' script.";
    if (message) {
      System.out.println("WARNING! " + msg);
    }
    throw new FileSystemVersionException(msg);
  }
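
  // Startup-time sketch (illustrative; "conf" is an assumed cluster
  // Configuration and the root dir comes from hbase.rootdir):
  //
  //   Path root = FSUtils.getRootDir(conf);
  //   FileSystem fs = root.getFileSystem(conf);
  //   // Prints an upgrade warning and throws FileSystemVersionException
  //   // if the on-disk layout version does not match this code.
  //   FSUtils.checkVersion(fs, root, true);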

  /**
   * Sets version of file system
   *
   * @param fs filesystem object
   * @param rootdir hbase root
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir)
  throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0);
  }

  /**
   * Sets version of file system
   *
   * @param fs filesystem object
   * @param rootdir hbase root
   * @param wait time to wait for retry
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir, int wait)
  throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait);
  }

  /**
   * Sets version of file system
   *
   * @param fs filesystem object
   * @param rootdir hbase root directory
   * @param version version to set
   * @param wait time to wait between retries; if not positive, fail
   * immediately instead of retrying
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir, String version,
      int wait) throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    while (true) {
      try {
        FSDataOutputStream s = fs.create(versionFile);
        s.writeUTF(version);
        LOG.debug("Created version file at " + rootdir.toString() +
            " with version=" + version);
        s.close();
        return;
      } catch (IOException e) {
        if (wait > 0) {
          LOG.warn("Unable to create version file at " + rootdir.toString() +
              ", retrying: " + e.getMessage());
          fs.delete(versionFile, false);
          try {
            Thread.sleep(wait);
          } catch (InterruptedException ex) {
            // Preserve the interrupt for callers further up the stack.
            Thread.currentThread().interrupt();
          }
        } else {
          // Without a wait interval, retrying would busy-loop forever;
          // surface the failure to the caller instead.
          throw e;
        }
      }
    }
  }

  /**
   * Verifies root directory path is a valid URI with a scheme
   *
   * @param root root directory path
   * @return Passed <code>root</code> argument.
   * @throws IOException if not a valid URI with a scheme
   */
  public static Path validateRootPath(Path root) throws IOException {
    try {
      URI rootURI = new URI(root.toString());
      String scheme = rootURI.getScheme();
      if (scheme == null) {
        throw new IOException("Root directory does not have a scheme");
      }
      return root;
    } catch (URISyntaxException e) {
      IOException io = new IOException("Root directory path is not a valid " +
        "URI -- check your " + HConstants.HBASE_DIR + " configuration");
      io.initCause(e);
      throw io;
    }
  }
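
  // For example (illustrative values):
  //   validateRootPath(new Path("hdfs://nn:9000/hbase")) returns the path;
  //   validateRootPath(new Path("/hbase")) throws IOException because the
  //   path carries no scheme.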

  /**
   * If DFS, check safe mode and if so, wait until we clear it.
   * @param conf configuration
   * @param wait Sleep between retries
   * @throws IOException e
   */
  public static void waitOnSafeMode(final Configuration conf,
    final long wait)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) return;
    DistributedFileSystem dfs = (DistributedFileSystem)fs;
    // Make sure dfs is not in safe mode
    while (dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET)) {
      LOG.info("Waiting for dfs to exit safe mode...");
      try {
        Thread.sleep(wait);
      } catch (InterruptedException e) {
        //continue
      }
    }
  }
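
  // A sketch of use during startup (the 10s poll interval is an assumption
  // for illustration, not a value this class mandates):
  //
  //   FSUtils.waitOnSafeMode(conf, 10 * 1000); // blocks until the NameNode
  //                                            // leaves safe mode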

  /**
   * Return the 'path' component of a Path.  In Hadoop, Path is a URI.  This
   * method returns the 'path' component of a Path's URI: e.g. If a Path is
   * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>,
   * this method returns <code>/hbase_trunk/TestTable/compaction.dir</code>.
   * This method is useful if you want to print out a Path without the
   * qualifying FileSystem instance.
   * @param p FileSystem Path whose 'path' component we are to return.
   * @return Path portion of the FileSystem
   */
  public static String getPath(Path p) {
    return p.toUri().getPath();
  }

  /**
   * @param c configuration
   * @return Path to hbase root directory: i.e. <code>hbase.rootdir</code> from
   * configuration as a qualified Path.
   * @throws IOException e
   */
  public static Path getRootDir(final Configuration c) throws IOException {
    Path p = new Path(c.get(HConstants.HBASE_DIR));
    FileSystem fs = p.getFileSystem(c);
    return p.makeQualified(fs);
  }
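
  // For example (illustrative): with hbase.rootdir set to "/hbase" and a
  // default filesystem of hdfs://nn:9000, this returns the qualified path
  // hdfs://nn:9000/hbase.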

  /**
   * Checks if root region exists
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @return true if exists
   * @throws IOException e
   */
  public static boolean rootRegionExists(FileSystem fs, Path rootdir)
  throws IOException {
    Path rootRegionDir =
      HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
    return fs.exists(rootRegionDir);
  }

  /**
   * Runs through the hbase rootdir and checks all stores have only
   * one file in them -- that is, they've been major compacted.  Looks
   * at root and meta tables too.
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return True if this hbase install is major compacted.
   * @throws IOException e
   */
  public static boolean isMajorCompacted(final FileSystem fs,
      final Path hbaseRootDir)
  throws IOException {
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      // Skip the .log directory.  All others should be tables.  Inside a table,
      // there are compaction.dir directories to skip.  Otherwise, all else
      // should be regions.  Then in each region, should only be family
      // directories.  Under each of these, should be one file only.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it's a region name.  Now look in region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          // Now in family make sure only one file.
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
                " files.");
            return false;
          }
        }
      }
    }
    return true;
  }
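
  // The on-disk layout the walk above assumes (a sketch; the table, region
  // and file names are invented examples):
  //
  //   hbase.rootdir/
  //     .logs/                      <- skipped
  //     TestTable/                  <- table directory
  //       compaction.dir/           <- skipped
  //       1028785192/               <- region directory
  //         info/                   <- column family directory
  //           394441777420588974    <- exactly one store file when
  //                                    major compacted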

  // TODO move this method OUT of FSUtils. No dependencies to HMaster
  /**
   * Returns the total overall fragmentation percentage. Includes .META. and
   * -ROOT- as well.
   *
   * @param master  The master defining the HBase root and file system.
   * @return The overall fragmentation percentage, or -1 if it could not be
   * determined.
   * @throws IOException When scanning the directory fails.
   */
  public static int getTotalTableFragmentation(final HMaster master)
  throws IOException {
    Map<String, Integer> map = getTableFragmentation(master);
    return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
  }

  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them. Checks -ROOT- and .META. too. The total
   * percentage across all tables is stored under the special key "-TOTAL-".
   *
   * @param master  The master defining the HBase root and file system.
   * @return A map of table name to fragmentation percentage.
   * @throws IOException When scanning the directory fails.
   */
  public static Map<String, Integer> getTableFragmentation(
    final HMaster master)
  throws IOException {
    Path path = getRootDir(master.getConfiguration());
    // since HMaster.getFileSystem() is package private
    FileSystem fs = path.getFileSystem(master.getConfiguration());
    return getTableFragmentation(fs, path);
  }

  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them. Checks -ROOT- and .META. too. The total
   * percentage across all tables is stored under the special key "-TOTAL-".
   *
   * @param fs  The file system to use.
   * @param hbaseRootDir  The root directory to scan.
   * @return A map of table name to fragmentation percentage.
   * @throws IOException When scanning the directory fails.
   */
  public static Map<String, Integer> getTableFragmentation(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException {
    Map<String, Integer> frags = new HashMap<String, Integer>();
    int cfCountTotal = 0;
    int cfFragTotal = 0;
    DirFilter df = new DirFilter(fs);
    // presumes any directory under hbase.rootdir is a table
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (FileStatus tableDir : tableDirs) {
      // Skip the .log directory.  All others should be tables.  Inside a table,
      // there are compaction.dir directories to skip.  Otherwise, all else
      // should be regions.  Then in each region, should only be family
      // directories.  Under each of these, should be one file only.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      int cfCount = 0;
      int cfFrag = 0;
      FileStatus[] regionDirs = fs.listStatus(d, df);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // else it's a region name, now look in region for families
        FileStatus[] familyDirs = fs.listStatus(dd, df);
        for (FileStatus familyDir : familyDirs) {
          cfCount++;
          cfCountTotal++;
          Path family = familyDir.getPath();
          // now in family make sure only one file
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            cfFrag++;
            cfFragTotal++;
          }
        }
      }
      // compute percentage per table and store in result list
      frags.put(d.getName(), Math.round((float) cfFrag / cfCount * 100));
    }
    // set overall percentage for all tables
    frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
    return frags;
  }
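
  // Worked example (illustrative numbers): a table with 4 column family
  // directories, 1 of which holds more than one store file, scores
  // round(1 / 4 * 100) = 25, i.e. 25% fragmented.  "-TOTAL-" applies the
  // same formula to the counts summed across every table scanned.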

  /**
   * Expects to find -ROOT- directory.
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return True if this is a pre-0.20 layout.
   * @throws IOException e
   */
  public static boolean isPre020FileLayout(final FileSystem fs,
    final Path hbaseRootDir)
  throws IOException {
    Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
      "70236052"), "info"), "mapfiles");
    return fs.exists(mapfiles);
  }

  /**
   * Runs through the hbase rootdir and checks all stores have only
   * one file in them -- that is, they've been major compacted.  Looks
   * at root and meta tables too.  This version differs from
   * {@link #isMajorCompacted(FileSystem, Path)} in that it expects a
   * pre-0.20.0 hbase layout on the filesystem.  Used when migrating.
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return True if this hbase install is major compacted.
   * @throws IOException e
   */
  public static boolean isMajorCompactedPre020(final FileSystem fs,
      final Path hbaseRootDir)
  throws IOException {
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      // Inside a table, there are compaction.dir directories to skip.
      // Otherwise, all else should be regions.  Then in each region, should
      // only be family directories.  Under each of these, should be a mapfile
      // and info directory and in these only one file.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it's a region name.  Now look in region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          FileStatus[] infoAndMapfile = fs.listStatus(family);
          if (infoAndMapfile.length == 0) {
            // Empty family directory; nothing to check.
            continue;
          }
          // Assert that only info and mapfile in family dir.
          if (infoAndMapfile.length != 2) {
            LOG.debug(family.toString() +
                " has more than just info and mapfile: " + infoAndMapfile.length);
            return false;
          }
          // Make sure directories are named info or mapfiles.
          for (int ll = 0; ll < infoAndMapfile.length; ll++) {
            if (infoAndMapfile[ll].getPath().getName().equals("info") ||
                infoAndMapfile[ll].getPath().getName().equals("mapfiles")) {
              continue;
            }
            LOG.debug("Unexpected directory name: " +
                infoAndMapfile[ll].getPath());
            return false;
          }
          // Now in family, there are 'mapfile' and 'info' subdirs.  Just
          // look in the 'mapfile' subdir.
          FileStatus[] familyStatus =
              fs.listStatus(new Path(family, "mapfiles"));
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
                " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

  /**
   * A {@link PathFilter} that returns directories.
   */
  public static class DirFilter implements PathFilter {
    private final FileSystem fs;

    public DirFilter(final FileSystem fs) {
      this.fs = fs;
    }

    public boolean accept(Path p) {
      boolean isdir = false;
      try {
        isdir = this.fs.getFileStatus(p).isDir();
      } catch (IOException e) {
        LOG.warn("Failed to stat path " + p, e);
      }
      return isdir;
    }
  }
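
  // Typical use (illustrative; "dir" is an assumed Path):
  //
  //   FileStatus[] subdirs = fs.listStatus(dir, new FSUtils.DirFilter(fs));
  //   // subdirs now holds only the child directories of dir; files and
  //   // unreadable entries are filtered out.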

  /**
   * Heuristic to determine whether it is safe to open a file for append.
   * Looks both at dfs.support.append and uses reflection to search
   * for SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush().
   * @param conf configuration
   * @return True if append is supported
   */
  public static boolean isAppendSupported(final Configuration conf) {
    boolean append = conf.getBoolean("dfs.support.append", false);
    if (append) {
      try {
        // TODO: The implementation that comes back when we do a createWriter
        // may not be using SequenceFile so the below is not a definitive test.
        // Will do for now (hdfs-200).
        SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
        append = true;
      } catch (SecurityException e) {
      } catch (NoSuchMethodException e) {
        append = false;
      }
    } else {
      // Look for the newer-style append evidence: hflush on
      // FSDataOutputStream.
      try {
        FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
        append = true;
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    return append;
  }
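
  // A sketch of how a caller might gate durability-sensitive work on this
  // check (illustrative; the log message is an assumption):
  //
  //   if (!FSUtils.isAppendSupported(conf)) {
  //     LOG.warn("HLog sync/append not supported; edits may be lost " +
  //         "if a regionserver crashes");
  //   }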

  /**
   * @param conf configuration
   * @return True if the filesystem's scheme is 'hdfs'.
   * @throws IOException e
   */
  public static boolean isHDFS(final Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    String scheme = fs.getUri().getScheme();
    return scheme.equalsIgnoreCase("hdfs");
  }

  /**
   * Recover the lease on a file.  Used when a file may have been left open
   * by another process that died while holding it.
   * @param fs filesystem
   * @param p path of the file whose lease to recover
   * @param conf configuration
   * @throws IOException e
   */
  public static void recoverFileLease(final FileSystem fs, final Path p,
      Configuration conf)
  throws IOException {
    if (!isAppendSupported(conf)) {
      LOG.warn("Running on HDFS without append enabled may result in data loss");
      return;
    }
    // Lease recovery not needed for the local file system case.
    // Currently, the local file system doesn't implement append either.
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    LOG.info("Recovering file " + p);
    long startWaiting = System.currentTimeMillis();

    // Trying recovery
    boolean recovered = false;
    while (!recovered) {
      try {
        try {
          if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem)fs;
            DistributedFileSystem.class.getMethod("recoverLease",
              new Class[] {Path.class}).invoke(dfs, p);
          } else {
            throw new Exception("Not a DistributedFileSystem");
          }
        } catch (InvocationTargetException ite) {
          // function was properly called, but threw its own exception
          throw (IOException) ite.getCause();
        } catch (Exception e) {
          LOG.debug("Failed fs.recoverLease invocation, " + e.toString() +
            ", trying fs.append instead");
          FSDataOutputStream out = fs.append(p);
          out.close();
        }
        recovered = true;
      } catch (IOException e) {
        e = RemoteExceptionHandler.checkIOException(e);
        if (e instanceof AlreadyBeingCreatedException) {
          // We expect that we'll get this message while the lease is still
          // within its soft limit, but if we get it past that, it means
          // that the RS is holding onto the file even though it lost its
          // znode. We could potentially abort after some time here.
          long waitedFor = System.currentTimeMillis() - startWaiting;
          if (waitedFor > FSConstants.LEASE_SOFTLIMIT_PERIOD) {
            LOG.warn("Waited " + waitedFor + "ms for lease recovery on " + p +
              ":" + e.getMessage());
          }
        } else if (e instanceof LeaseExpiredException &&
            e.getMessage().contains("File does not exist")) {
          // This exception comes out instead of FNFE, fix it
          throw new FileNotFoundException(
              "The given HLog wasn't found at " + p.toString());
        } else {
          throw new IOException("Failed to open " + p + " for append", e);
        }
      }
      if (!recovered) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ex) {
          // Actually throw the exception; previously it was constructed
          // and then dropped on the floor.
          InterruptedIOException iioe = new InterruptedIOException();
          iioe.initCause(ex);
          throw iioe;
        }
      }
    }
    LOG.info("Finished lease recovery attempt for " + p);
  }
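
  // A sketch of the intended call site (illustrative; log splitting is the
  // typical caller, and the path here is an example only):
  //
  //   Path hlog = new Path("/hbase/.logs/server,60020,123/hlog.1234");
  //   FSUtils.recoverFileLease(fs, hlog, conf); // blocks until the dead
  //                                             // writer's lease is released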
}