/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.util.StringUtils;

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.Map;

/**
 * Utility methods for interacting with the underlying file system.
 */
public class FSUtils {
  private static final Log LOG = LogFactory.getLog(FSUtils.class);

  /**
   * Not instantiable
   */
  private FSUtils() {
    super();
  }

  /**
   * Delete if exists.
   * @param fs filesystem object
   * @param dir directory to delete
   * @return True if deleted <code>dir</code>
   * @throws IOException e
   */
  public static boolean deleteDirectory(final FileSystem fs, final Path dir)
  throws IOException {
    return fs.exists(dir) && fs.delete(dir, true);
  }

  /**
   * Check if directory exists.  If it does not, create it.
   * @param fs filesystem object
   * @param dir path to check
   * @return Path
   * @throws IOException e
   */
  public static Path checkdir(final FileSystem fs, final Path dir) throws IOException {
    if (!fs.exists(dir)) {
      fs.mkdirs(dir);
    }
    return dir;
  }

  /**
   * Create the specified file on the filesystem.  Fails if the file already
   * exists.
   * @param fs filesystem object
   * @param p path to create
   * @return Path
   * @throws IOException e
   */
  public static Path create(final FileSystem fs, final Path p)
  throws IOException {
    if (fs.exists(p)) {
      throw new IOException("File already exists " + p.toString());
    }
    if (!fs.createNewFile(p)) {
      throw new IOException("Failed to create " + p);
    }
    return p;
  }
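
  /**
   * Usage sketch (illustrative only, not part of the original API): ensures a
   * working directory exists and creates a marker file inside it.  The path
   * <code>/hbase/.example</code> is a hypothetical value.
   */
  private static Path exampleCheckdirAndCreate(final FileSystem fs)
  throws IOException {
    Path dir = checkdir(fs, new Path("/hbase/.example"));
    // create() throws if the marker already exists, so callers that may race
    // should be prepared to catch IOException here.
    return create(fs, new Path(dir, "marker"));
  }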

  /**
   * Checks to see if the specified file system is available
   *
   * @param fs filesystem
   * @throws IOException e
   */
  public static void checkFileSystemAvailable(final FileSystem fs)
  throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    IOException exception = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      if (dfs.exists(new Path("/"))) {
        return;
      }
    } catch (IOException e) {
      exception = RemoteExceptionHandler.checkIOException(e);
    }
    try {
      fs.close();
    } catch (Exception e) {
      LOG.error("file system close failed: ", e);
    }
    IOException io = new IOException("File system is not available");
    io.initCause(exception);
    throw io;
  }

  /**
   * Returns the current version of the file system, as recorded in the
   * hbase.version file under <code>rootdir</code>.
   *
   * @param fs filesystem object
   * @param rootdir root hbase directory
   * @return null if no version file exists, version string otherwise.
   * @throws IOException e
   */
  public static String getVersion(FileSystem fs, Path rootdir)
  throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    String version = null;
    if (fs.exists(versionFile)) {
      FSDataInputStream s =
        fs.open(versionFile);
      try {
        version = DataInputStream.readUTF(s);
      } catch (EOFException eof) {
        LOG.warn("Version file was empty; will try to set it.");
      } finally {
        s.close();
      }
    }
    return version;
  }

  /**
   * Verifies current version of file system
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @param message if true, issues a message on System.out
   *
   * @throws IOException e
   */
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message) throws IOException {
    checkVersion(fs, rootdir, message, 0);
  }

  /**
   * Verifies current version of file system
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @param message if true, issues a message on System.out
   * @param wait wait interval for retry if > 0
   *
   * @throws IOException e
   */
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message, int wait) throws IOException {
    String version = getVersion(fs, rootdir);

    if (version == null) {
      if (!rootRegionExists(fs, rootdir)) {
        // rootDir is empty (no version file and no root region)
        // just create new version file (HBASE-1195)
        FSUtils.setVersion(fs, rootdir, wait);
        return;
      }
    } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) {
      return;
    }

    // Version is deprecated; migration is required.
    // Output on stdout so user sees it in terminal.
    String msg = "File system needs to be upgraded."
      + "  You have version " + version
      + " and I want version " + HConstants.FILE_SYSTEM_VERSION
      + ".  Run the '${HBASE_HOME}/bin/hbase migrate' script.";
    if (message) {
      System.out.println("WARNING! " + msg);
    }
    throw new FileSystemVersionException(msg);
  }
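
  /**
   * Usage sketch (illustrative only, not part of the original API): a typical
   * startup-time version check against the configured root directory.  The
   * 10s retry interval is a hypothetical value.
   */
  private static void exampleCheckVersionAtStartup(final Configuration conf)
  throws IOException {
    Path rootdir = getRootDir(conf);
    FileSystem fs = rootdir.getFileSystem(conf);
    // Prints a warning on stdout and throws FileSystemVersionException if the
    // on-disk layout needs migration; retries version-file creation if absent.
    checkVersion(fs, rootdir, true, 10 * 1000);
  }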

  /**
   * Sets version of file system
   *
   * @param fs filesystem object
   * @param rootdir hbase root
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir)
  throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0);
  }

  /**
   * Sets version of file system
   *
   * @param fs filesystem object
   * @param rootdir hbase root
   * @param wait time to wait for retry
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir, int wait)
  throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait);
  }

  /**
   * Sets version of file system
   *
   * @param fs filesystem object
   * @param rootdir hbase root directory
   * @param version version to set
   * @param wait time to wait for retry
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir, String version,
      int wait) throws IOException {
    while (true) {
      try {
        FSDataOutputStream s =
          fs.create(new Path(rootdir, HConstants.VERSION_FILE_NAME));
        s.writeUTF(version);
        s.close();
        LOG.debug("Created version file at " + rootdir.toString() +
          " with version " + version);
        return;
      } catch (IOException e) {
        if (wait > 0) {
          LOG.warn("Unable to create version file at " + rootdir.toString() +
            ", retrying: " + StringUtils.stringifyException(e));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException ex) {
            // ignore
          }
        } else {
          // rethrow
          throw e;
        }
      }
    }
  }

  /**
   * Verifies root directory path is a valid URI with a scheme
   *
   * @param root root directory path
   * @return Passed <code>root</code> argument.
   * @throws IOException if not a valid URI with a scheme
   */
  public static Path validateRootPath(Path root) throws IOException {
    try {
      URI rootURI = new URI(root.toString());
      String scheme = rootURI.getScheme();
      if (scheme == null) {
        throw new IOException("Root directory does not have a scheme");
      }
      return root;
    } catch (URISyntaxException e) {
      IOException io = new IOException("Root directory path is not a valid " +
        "URI -- check your " + HConstants.HBASE_DIR + " configuration");
      io.initCause(e);
      throw io;
    }
  }

  /**
   * If DFS, check safe mode and if so, wait until we clear it.
   * @param conf configuration
   * @param wait Sleep between retries
   * @throws IOException e
   */
  public static void waitOnSafeMode(final Configuration conf,
    final long wait)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) return;
    DistributedFileSystem dfs = (DistributedFileSystem)fs;
    // Make sure dfs is not in safe mode
    while (dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET)) {
      LOG.info("Waiting for dfs to exit safe mode...");
      try {
        Thread.sleep(wait);
      } catch (InterruptedException e) {
        // continue
      }
    }
  }
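
  /**
   * Usage sketch (illustrative only, not part of the original API): a startup
   * sequence that blocks until HDFS leaves safe mode and then verifies the
   * filesystem is reachable.  The 10s retry interval is a hypothetical value.
   */
  private static void exampleWaitForDfs(final Configuration conf)
  throws IOException {
    waitOnSafeMode(conf, 10 * 1000);
    checkFileSystemAvailable(FileSystem.get(conf));
  }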

  /**
   * Return the 'path' component of a Path.  In Hadoop, Path is a URI.  This
   * method returns the 'path' component of a Path's URI: e.g. If a Path is
   * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>,
   * this method returns <code>/hbase_trunk/TestTable/compaction.dir</code>.
   * This method is useful if you want to print out a Path without the
   * qualifying FileSystem instance.
   * @param p Filesystem Path whose 'path' component we are to return.
   * @return Path portion of the Filesystem
   */
  public static String getPath(Path p) {
    return p.toUri().getPath();
  }

  /**
   * @param c configuration
   * @return Path to hbase root directory: i.e. <code>hbase.rootdir</code> from
   * configuration as a qualified Path.
   * @throws IOException e
   */
  public static Path getRootDir(final Configuration c) throws IOException {
    Path p = new Path(c.get(HConstants.HBASE_DIR));
    FileSystem fs = p.getFileSystem(c);
    return p.makeQualified(fs);
  }
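
  /**
   * Usage sketch (illustrative only, not part of the original API): resolves
   * the configured root directory and logs just its path component.
   */
  private static void exampleLogRootDir(final Configuration conf)
  throws IOException {
    Path rootdir = getRootDir(conf);
    // For hbase.rootdir = hdfs://nn:9000/hbase this logs "/hbase".
    LOG.info("hbase.rootdir path component: " + getPath(rootdir));
  }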

  /**
   * Checks if root region exists
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @return true if exists
   * @throws IOException e
   */
  public static boolean rootRegionExists(FileSystem fs, Path rootdir)
  throws IOException {
    Path rootRegionDir =
      HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
    return fs.exists(rootRegionDir);
  }

  /**
   * Runs through the hbase rootdir and checks all stores have only
   * one file in them -- that is, they've been major compacted.  Looks
   * at root and meta tables too.
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return True if this hbase install is major compacted.
   * @throws IOException e
   */
  public static boolean isMajorCompacted(final FileSystem fs,
      final Path hbaseRootDir)
  throws IOException {
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      // Skip the .log directory.  All others should be tables.  Inside a table,
      // there are compaction.dir directories to skip.  Otherwise, all else
      // should be regions.  Then in each region, should only be family
      // directories.  Under each of these, should be one file only.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it's a region name.  Now look in region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          // Now in family make sure only one file.
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
                " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

  // TODO move this method OUT of FSUtils.  FSUtils should have no dependency
  // on HMaster.
  /**
   * Returns the total overall fragmentation percentage.  Includes .META. and
   * -ROOT- as well.
   *
   * @param master  The master defining the HBase root and file system.
   * @return The overall fragmentation percentage, or -1 if it could not be
   *   determined.
   * @throws IOException When scanning the directory fails.
   */
  public static int getTotalTableFragmentation(final HMaster master)
  throws IOException {
    Map<String, Integer> map = getTableFragmentation(master);
    return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
  }

  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them. Checks -ROOT- and .META. too. The total
   * percentage across all tables is stored under the special key "-TOTAL-".
   *
   * @param master  The master defining the HBase root and file system.
   * @return A map for each table and its percentage.
   * @throws IOException When scanning the directory fails.
   */
  public static Map<String, Integer> getTableFragmentation(
    final HMaster master)
  throws IOException {
    Path path = getRootDir(master.getConfiguration());
    // since HMaster.getFileSystem() is package private
    FileSystem fs = path.getFileSystem(master.getConfiguration());
    return getTableFragmentation(fs, path);
  }

  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them. Checks -ROOT- and .META. too. The total
   * percentage across all tables is stored under the special key "-TOTAL-".
   *
   * @param fs  The file system to use.
   * @param hbaseRootDir  The root directory to scan.
   * @return A map for each table and its percentage.
   * @throws IOException When scanning the directory fails.
   */
  public static Map<String, Integer> getTableFragmentation(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException {
    Map<String, Integer> frags = new HashMap<String, Integer>();
    int cfCountTotal = 0;
    int cfFragTotal = 0;
    DirFilter df = new DirFilter(fs);
    // presumes any directory under hbase.rootdir is a table
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (FileStatus tableDir : tableDirs) {
      // Skip the .log directory.  All others should be tables.  Inside a table,
      // there are compaction.dir directories to skip.  Otherwise, all else
      // should be regions.  Then in each region, should only be family
      // directories.  Under each of these, should be one file only.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      int cfCount = 0;
      int cfFrag = 0;
      FileStatus[] regionDirs = fs.listStatus(d, df);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // else it's a region name, now look in region for families
        FileStatus[] familyDirs = fs.listStatus(dd, df);
        for (FileStatus familyDir : familyDirs) {
          cfCount++;
          cfCountTotal++;
          Path family = familyDir.getPath();
          // now in family make sure only one file
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            cfFrag++;
            cfFragTotal++;
          }
        }
      }
      // compute percentage per table and store in result list
      frags.put(d.getName(), Math.round((float) cfFrag / cfCount * 100));
    }
    // set overall percentage for all tables
    frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
    return frags;
  }
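
  /**
   * Usage sketch (illustrative only, not part of the original API): dumps the
   * per-table fragmentation computed above, including the cross-table total
   * stored under the special "-TOTAL-" key.
   */
  private static void exampleLogFragmentation(final FileSystem fs,
      final Path hbaseRootDir)
  throws IOException {
    Map<String, Integer> frags = getTableFragmentation(fs, hbaseRootDir);
    for (Map.Entry<String, Integer> e : frags.entrySet()) {
      LOG.info("Fragmentation of " + e.getKey() + ": " + e.getValue() + "%");
    }
  }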

  /**
   * Expects to find -ROOT- directory.
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return True if this is a pre-0.20 layout.
   * @throws IOException e
   */
  public static boolean isPre020FileLayout(final FileSystem fs,
    final Path hbaseRootDir)
  throws IOException {
    Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
      "70236052"), "info"), "mapfiles");
    return fs.exists(mapfiles);
  }

  /**
   * Runs through the hbase rootdir and checks all stores have only
   * one file in them -- that is, they've been major compacted.  Looks
   * at root and meta tables too.  This version differs from
   * {@link #isMajorCompacted(FileSystem, Path)} in that it expects a
   * pre-0.20.0 hbase layout on the filesystem.  Used when migrating.
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return True if this hbase install is major compacted.
   * @throws IOException e
   */
  public static boolean isMajorCompactedPre020(final FileSystem fs,
      final Path hbaseRootDir)
  throws IOException {
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      // Inside a table, there are compaction.dir directories to skip.
      // Otherwise, all else should be regions.  Then in each region, should
      // only be family directories.  Under each of these, should be a mapfile
      // and info directory and in these only one file.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it's a region name.  Now look in region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          FileStatus[] infoAndMapfile = fs.listStatus(family);
          // Assert that only info and mapfiles are in the family dir.
          if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) {
            LOG.debug(family.toString() +
                " does not contain exactly the info and mapfiles entries: " +
                infoAndMapfile.length);
            return false;
          }
          // Make sure each entry is named info or mapfiles.  Iterating over
          // the actual length also avoids indexing past the end when the
          // family directory is empty.
          for (int ll = 0; ll < infoAndMapfile.length; ll++) {
            if (infoAndMapfile[ll].getPath().getName().equals("info") ||
                infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
              continue;
            LOG.debug("Unexpected directory name: " +
                infoAndMapfile[ll].getPath());
            return false;
          }
          // Now in family, there are 'mapfiles' and 'info' subdirs.  Just
          // look in the 'mapfiles' subdir.
          FileStatus[] familyStatus =
              fs.listStatus(new Path(family, "mapfiles"));
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
                " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

  /**
   * A {@link PathFilter} that accepts only directories.
   */
  public static class DirFilter implements PathFilter {
    private final FileSystem fs;

    public DirFilter(final FileSystem fs) {
      this.fs = fs;
    }

    public boolean accept(Path p) {
      boolean isdir = false;
      try {
        isdir = this.fs.getFileStatus(p).isDir();
      } catch (IOException e) {
        LOG.warn("Failed to get status of path " + p, e);
      }
      return isdir;
    }
  }
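
  /**
   * Usage sketch (illustrative only, not part of the original API): lists the
   * table directories directly under the hbase root, skipping plain files.
   */
  private static FileStatus[] exampleListTableDirs(final FileSystem fs,
      final Path hbaseRootDir)
  throws IOException {
    // DirFilter consults the filesystem per candidate, so this costs one
    // getFileStatus call for every child of hbaseRootDir.
    return fs.listStatus(hbaseRootDir, new DirFilter(fs));
  }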

  /**
   * Heuristic to determine whether it is safe to open a file for append.
   * Looks for dfs.support.append and uses reflection to search for
   * SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush().
   * @param conf configuration to check
   * @return True if append is supported
   */
  public static boolean isAppendSupported(final Configuration conf) {
    boolean append = conf.getBoolean("dfs.support.append", false);
    if (append) {
      try {
        // TODO: The implementation that comes back when we do a createWriter
        // may not be using SequenceFile so the below is not a definitive test.
        // Will do for now (hdfs-200).
        SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
        append = true;
      } catch (SecurityException e) {
        // Could not inspect the method; leave append as-is.
      } catch (NoSuchMethodException e) {
        append = false;
      }
    } else {
      // Look for the newer-style append evidence: FSDataOutputStream.hflush().
      try {
        FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
        append = true;
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    return append;
  }
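
  /**
   * Usage sketch (illustrative only, not part of the original API): logs a
   * warning at startup when neither append nor hflush support is detected.
   */
  private static void exampleWarnIfNoAppend(final Configuration conf) {
    if (!isAppendSupported(conf)) {
      LOG.warn("Neither dfs.support.append nor hflush is available; " +
        "log recovery will be unreliable");
    }
  }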

  /**
   * @param conf configuration
   * @return True if the filesystem scheme is 'hdfs'.
   * @throws IOException e
   */
  public static boolean isHDFS(final Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    String scheme = fs.getUri().getScheme();
    return "hdfs".equalsIgnoreCase(scheme);
  }

  /**
   * Recover the lease on a file.  Used when a file may have been left open by
   * another, possibly dead, process: e.g. an HLog a crashed region server had
   * open.
   * @param fs filesystem object
   * @param p path of the file whose lease is to be recovered
   * @param conf configuration; used to check whether append is supported
   * @throws IOException e
   */
  public static void recoverFileLease(final FileSystem fs, final Path p, Configuration conf)
  throws IOException {
    if (!isAppendSupported(conf)) {
      LOG.warn("Running on HDFS without append enabled may result in data loss");
      return;
    }
    // Lease recovery is not needed for the local file system case.
    // Currently, the local file system doesn't implement append either.
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    LOG.info("Recovering file " + p);
    long startWaiting = System.currentTimeMillis();

    // Trying recovery
    boolean recovered = false;
    while (!recovered) {
      try {
        try {
          if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem)fs;
            DistributedFileSystem.class.getMethod("recoverLease",
              new Class[] {Path.class}).invoke(dfs, p);
          } else {
            throw new Exception("Not a DistributedFileSystem");
          }
        } catch (InvocationTargetException ite) {
          // The method was properly called, but threw its own exception.
          throw (IOException) ite.getCause();
        } catch (Exception e) {
          LOG.debug("Failed fs.recoverLease invocation, " + e.toString() +
            ", trying fs.append instead");
          FSDataOutputStream out = fs.append(p);
          out.close();
        }
        recovered = true;
      } catch (IOException e) {
        e = RemoteExceptionHandler.checkIOException(e);
        if (e instanceof AlreadyBeingCreatedException) {
          // We expect that we'll get this message while the lease is still
          // within its soft limit, but if we get it past that, it means
          // that the RS is holding onto the file even though it lost its
          // znode. We could potentially abort after some time here.
          long waitedFor = System.currentTimeMillis() - startWaiting;
          if (waitedFor > FSConstants.LEASE_SOFTLIMIT_PERIOD) {
            LOG.warn("Waited " + waitedFor + "ms for lease recovery on " + p +
              ":" + e.getMessage());
          }
        } else if (e instanceof LeaseExpiredException &&
            e.getMessage().contains("File does not exist")) {
          // This exception comes out instead of FNFE, fix it
          throw new FileNotFoundException(
              "The given HLog wasn't found at " + p.toString());
        } else {
          throw new IOException("Failed to open " + p + " for append", e);
        }
      }
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ex) {
        // Propagate the interruption to the caller as an IOException.
        InterruptedIOException iioe = new InterruptedIOException();
        iioe.initCause(ex);
        throw iioe;
      }
    }
    LOG.info("Finished lease recovery attempt for " + p);
  }
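
  /**
   * Usage sketch (illustrative only, not part of the original API): recovers
   * the lease on a (hypothetical) log file before reading it, as a log
   * splitter would before replaying a dead server's HLog.
   */
  private static void exampleRecoverBeforeRead(final Configuration conf,
      final Path logFile)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    // Blocks until the lease is recovered (or throws); only then is it safe
    // to open the file and trust its visible length.
    recoverFileLease(fs, logFile, conf);
    FSDataInputStream in = fs.open(logFile);
    in.close();
  }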
}