/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Method;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseFileSystem;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;

/**
 * Utility methods for interacting with the underlying file system.
 */
public abstract class FSUtils {
  private static final Log LOG = LogFactory.getLog(FSUtils.class);

  /** Full access permissions (starting point for a umask) */
  private static final String FULL_RWX_PERMISSIONS = "777";

  protected FSUtils() {
    super();
  }

  public static FSUtils getInstance(FileSystem fs, Configuration conf) {
    String scheme = fs.getUri().getScheme();
    if (scheme == null) {
      LOG.warn("Could not find scheme for uri " +
          fs.getUri() + ", defaulting to hdfs");
      scheme = "hdfs";
    }
    Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
        scheme + ".impl", FSHDFSUtils.class); // Default to HDFS impl
    FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
    return fsUtils;
  }
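
  // Usage sketch (illustrative only): getInstance() resolves a scheme-specific
  // implementation via the "hbase.fsutil.<scheme>.impl" configuration key and
  // falls back to FSHDFSUtils. The WAL path below is hypothetical.
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   FileSystem fs = FileSystem.get(conf);
  //   FSUtils fsUtils = FSUtils.getInstance(fs, conf); // FSHDFSUtils for "hdfs"
  //   fsUtils.recoverFileLease(fs, new Path("/hbase/.logs/example-wal"), conf);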

  /**
   * Delete if exists.
   * @param fs filesystem object
   * @param dir directory to delete
   * @return True if deleted <code>dir</code>
   * @throws IOException e
   */
  public static boolean deleteDirectory(final FileSystem fs, final Path dir)
  throws IOException {
    return fs.exists(dir) && fs.delete(dir, true);
  }

  /**
   * Check if directory exists.  If it does not, create it.
   * @param fs filesystem object
   * @param dir path to check
   * @return the <code>dir</code> path, which is guaranteed to exist on return
   * @throws IOException e
   */
  public Path checkdir(final FileSystem fs, final Path dir) throws IOException {
    if (!fs.exists(dir)) {
      HBaseFileSystem.makeDirOnFileSystem(fs, dir);
    }
    return dir;
  }

  /**
   * Create the specified file on the filesystem. By default, this will:
   * <ol>
   * <li>overwrite the file if it exists</li>
   * <li>apply the umask in the configuration (if it is enabled)</li>
   * <li>use the fs configured buffer size (or 4096 if not set)</li>
   * <li>use the default replication</li>
   * <li>use the default block size</li>
   * <li>not track progress</li>
   * </ol>
   *
   * @param fs {@link FileSystem} on which to write the file
   * @param path {@link Path} to the file to write
   * @param perm permissions to apply to the created file
   * @return output stream to the created file
   * @throws IOException if the file cannot be created
   */
  public static FSDataOutputStream create(FileSystem fs, Path path,
      FsPermission perm) throws IOException {
    return create(fs, path, perm, true);
  }

  /**
   * Create the specified file on the filesystem. By default, this will:
   * <ol>
   * <li>apply the umask in the configuration (if it is enabled)</li>
   * <li>use the fs configured buffer size (or 4096 if not set)</li>
   * <li>use the default replication</li>
   * <li>use the default block size</li>
   * <li>not track progress</li>
   * </ol>
   *
   * @param fs {@link FileSystem} on which to write the file
   * @param path {@link Path} to the file to write
   * @param perm permissions to apply to the created file
   * @param overwrite whether to overwrite the file if it already exists
   * @return output stream to the created file
   * @throws IOException if the file cannot be created
   */
  public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm,
      boolean overwrite) throws IOException {
    LOG.debug("Creating file=" + path + " with permission=" + perm);
    return HBaseFileSystem.createPathWithPermsOnFileSystem(fs, path, perm, overwrite);
  }

  /**
   * Get the file permissions specified in the configuration, if they are
   * enabled.
   *
   * @param fs filesystem that the file will be created on.
   * @param conf configuration to read for determining if permissions are
   *          enabled and which to use
   * @param permissionConfKey property key in the configuration to use when
   *          finding the permission
   * @return the permission to use when creating a new file on the fs. If
   *         special permissions are not specified in the configuration, then
   *         the default permissions on the fs will be returned.
   */
  public static FsPermission getFilePermissions(final FileSystem fs,
      final Configuration conf, final String permissionConfKey) {
    boolean enablePermissions = conf.getBoolean(
        HConstants.ENABLE_DATA_FILE_UMASK, false);

    if (enablePermissions) {
      try {
        FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
        // make sure that we have a mask, if not, go default.
        String mask = conf.get(permissionConfKey);
        if (mask == null)
          return FsPermission.getDefault();
        // apply the umask
        FsPermission umask = new FsPermission(mask);
        return perm.applyUMask(umask);
      } catch (IllegalArgumentException e) {
        LOG.warn(
            "Invalid umask configured: "
                + conf.get(permissionConfKey)
                + ", using default file permissions.", e);
        return FsPermission.getDefault();
      }
    }
    return FsPermission.getDefault();
  }
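
  // A worked example (a sketch; the umask key name and value are assumptions).
  // With HConstants.ENABLE_DATA_FILE_UMASK set, the returned permission is
  // FULL_RWX_PERMISSIONS (777) with the configured umask applied, e.g.
  // 777 with umask 022 yields 755 (rwxr-xr-x):
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
  //   conf.set("hbase.data.umask", "022");
  //   FsPermission perm = FSUtils.getFilePermissions(fs, conf, "hbase.data.umask");
  //   // perm.toString() -> "rwxr-xr-x"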

  /**
   * Checks to see if the specified file system is available
   *
   * @param fs filesystem
   * @throws IOException e
   */
  public static void checkFileSystemAvailable(final FileSystem fs)
  throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    IOException exception = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      if (dfs.exists(new Path("/"))) {
        return;
      }
    } catch (IOException e) {
      exception = RemoteExceptionHandler.checkIOException(e);
    }
    try {
      fs.close();
    } catch (Exception e) {
      LOG.error("file system close failed: ", e);
    }
    IOException io = new IOException("File system is not available");
    io.initCause(exception);
    throw io;
  }

  /**
   * We use reflection because
   * {@link DistributedFileSystem#setSafeMode(org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction, boolean)}
   * is not in hadoop 1.1
   *
   * @param dfs the distributed file system to check
   * @return whether we're in safe mode
   * @throws IOException e
   */
  private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
    boolean inSafeMode = false;
    try {
      Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
      inSafeMode = (Boolean) m.invoke(dfs,
        org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
    } catch (Exception e) {
      if (e instanceof IOException) throw (IOException) e;

      // Check whether dfs is in safe mode.
      inSafeMode = dfs.setSafeMode(
        org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
    }
    return inSafeMode;
  }

  /**
   * Check whether dfs is in safe mode.
   * @param conf configuration to get the file system from
   * @throws IOException if dfs is in safe mode
   */
  public static void checkDfsSafeMode(final Configuration conf)
  throws IOException {
    boolean isInSafeMode = false;
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem)fs;
      isInSafeMode = isInSafeMode(dfs);
    }
    if (isInSafeMode) {
      throw new IOException("File system is in safe mode and cannot be written to now");
    }
  }
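
  // Usage sketch (illustrative only): a typical startup-style check that the
  // cluster's file system is reachable and writable before proceeding.
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   FSUtils.checkFileSystemAvailable(FileSystem.get(conf)); // throws if down
  //   FSUtils.checkDfsSafeMode(conf);                         // throws if in safe mode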

  /**
   * Returns the current version of the file system, as recorded in the
   * version file under the HBase root directory.
   *
   * @param fs filesystem object
   * @param rootdir root hbase directory
   * @return null if no version file exists, version string otherwise.
   * @throws IOException e
   */
  public static String getVersion(FileSystem fs, Path rootdir)
  throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    String version = null;
    if (fs.exists(versionFile)) {
      FSDataInputStream s =
        fs.open(versionFile);
      try {
        version = DataInputStream.readUTF(s);
      } catch (EOFException eof) {
        LOG.warn("Version file was empty, odd, will try to set it.");
      } finally {
        s.close();
      }
    }
    return version;
  }

  /**
   * Verifies current version of file system
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @param message if true, issues a message on System.out
   *
   * @throws IOException e
   */
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message) throws IOException {
    checkVersion(fs, rootdir, message, 0,
        HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
  }

  /**
   * Verifies current version of file system
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @param message if true, issues a message on System.out
   * @param wait wait interval
   * @param retries number of times to retry
   *
   * @throws IOException e
   */
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message, int wait, int retries) throws IOException {
    String version = getVersion(fs, rootdir);

    if (version == null) {
      if (!rootRegionExists(fs, rootdir)) {
        // rootDir is empty (no version file and no root region)
        // just create new version file (HBASE-1195)
        FSUtils.setVersion(fs, rootdir, wait, retries);
        return;
      }
    } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) {
      return;
    }

    // Version is deprecated; migration is required.
    // Output on stdout so user sees it in terminal.
    String msg = "HBase file layout needs to be upgraded."
      + "  You have version " + version
      + " and I want version " + HConstants.FILE_SYSTEM_VERSION
      + ".  Is your hbase.rootdir valid?  If so, you may need to run "
      + "'hbase hbck -fixVersionFile'.";
    if (message) {
      System.out.println("WARNING! " + msg);
    }
    throw new FileSystemVersionException(msg);
  }

  /**
   * Sets version of file system
   *
   * @param fs filesystem object
   * @param rootdir hbase root
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir)
  throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
        HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
  }

  /**
   * Sets version of file system
   *
   * @param fs filesystem object
   * @param rootdir hbase root
   * @param wait time to wait for retry
   * @param retries number of times to retry before failing
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
  throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
  }

  /**
   * Return the number of bytes that large input files should be optimally
   * split into to minimize i/o time.
   *
   * Uses reflection to search for getDefaultBlockSize(Path f);
   * if the method doesn't exist, falls back to using getDefaultBlockSize().
   *
   * @param fs filesystem object
   * @param path path whose filesystem's block size is wanted
   * @return the default block size for the path's filesystem
   * @throws IOException e
   */
  public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
    Method m = null;
    Class<? extends FileSystem> cls = fs.getClass();
    try {
      m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
    } catch (NoSuchMethodException e) {
      LOG.info("FileSystem doesn't support getDefaultBlockSize");
    } catch (SecurityException e) {
      LOG.info("No access to getDefaultBlockSize on FileSystem", e);
      m = null; // could happen on setAccessible()
    }
    if (m == null) {
      return fs.getDefaultBlockSize();
    } else {
      try {
        Object ret = m.invoke(fs, path);
        return ((Long)ret).longValue();
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
  }
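
  // Usage sketch (illustrative only): on Hadoop versions that provide the
  // per-path overload, the reflective call is used; otherwise this degrades
  // to the filesystem-wide default. The path below is hypothetical.
  //
  //   long blockSize = FSUtils.getDefaultBlockSize(fs, new Path("/hbase"));
  //   short replication = FSUtils.getDefaultReplication(fs, new Path("/hbase"));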

  /**
   * Get the default replication.
   *
   * Uses reflection to search for getDefaultReplication(Path f);
   * if the method doesn't exist, falls back to using getDefaultReplication().
   *
   * @param fs filesystem object
   * @param path path of file
   * @return default replication for the path's filesystem
   * @throws IOException e
   */
  public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
    Method m = null;
    Class<? extends FileSystem> cls = fs.getClass();
    try {
      m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
    } catch (NoSuchMethodException e) {
      LOG.info("FileSystem doesn't support getDefaultReplication");
    } catch (SecurityException e) {
      LOG.info("No access to getDefaultReplication on FileSystem", e);
      m = null; // could happen on setAccessible()
    }
    if (m == null) {
      return fs.getDefaultReplication();
    } else {
      try {
        Object ret = m.invoke(fs, path);
        return ((Number)ret).shortValue();
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
  }

  /**
   * Returns the default buffer size to use during writes.
   *
   * The size of the buffer should probably be a multiple of hardware
   * page size (4096 on Intel x86), and it determines how much data is
   * buffered during read and write operations.
   *
   * @param fs filesystem object
   * @return default buffer size to use during writes
   */
  public static int getDefaultBufferSize(final FileSystem fs) {
    return fs.getConf().getInt("io.file.buffer.size", 4096);
  }
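
  // Configuration sketch (illustrative only): the buffer size comes from the
  // standard Hadoop key "io.file.buffer.size", defaulting to 4096 bytes.
  //
  //   conf.setInt("io.file.buffer.size", 65536); // 64 KB buffers
  //   int bufferSize = FSUtils.getDefaultBufferSize(fs); // -> 65536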

  /**
   * Sets version of file system
   *
   * @param fs filesystem object
   * @param rootdir hbase root directory
   * @param version version to set
   * @param wait time to wait for retry
   * @param retries number of times to retry before throwing an IOException
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir, String version,
      int wait, int retries) throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    while (true) {
      try {
        FSDataOutputStream s = fs.create(versionFile);
        s.writeUTF(version);
        LOG.debug("Created version file at " + rootdir.toString() +
            " with version=" + version);
        s.close();
        return;
      } catch (IOException e) {
        if (retries > 0) {
          LOG.warn("Unable to create version file at " + rootdir.toString() +
              ", retrying: " + e.getMessage());
          fs.delete(versionFile, false);
          try {
            if (wait > 0) {
              Thread.sleep(wait);
            }
          } catch (InterruptedException ex) {
            // ignore
          }
          retries--;
        } else {
          throw e;
        }
      }
    }
  }
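
  // Usage sketch (illustrative only): write the version file, sleeping one
  // second between attempts and rethrowing after ten failed attempts.
  //
  //   FSUtils.setVersion(fs, rootdir, 1000, 10);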

  /**
   * Checks that a cluster ID file exists in the HBase root directory
   * @param fs the root directory FileSystem
   * @param rootdir the HBase root directory in HDFS
   * @param wait how long (in milliseconds) to wait between retries
   * @return <code>true</code> if the file exists, otherwise <code>false</code>
   * @throws IOException if checking the FileSystem fails
   */
  public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
      int wait) throws IOException {
    while (true) {
      try {
        Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        return fs.exists(filePath);
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
              ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException ie) {
            // Restore the interrupt status and give up.
            Thread.currentThread().interrupt();
            break;
          }
        } else {
          throw ioe;
        }
      }
    }
    return false;
  }

  /**
   * Returns the value of the unique cluster ID stored for this HBase instance.
   * @param fs the root directory FileSystem
   * @param rootdir the path to the HBase root directory
   * @return the unique cluster identifier
   * @throws IOException if reading the cluster ID file fails
   */
  public static String getClusterId(FileSystem fs, Path rootdir)
      throws IOException {
    Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
    String clusterId = null;
    if (fs.exists(idPath)) {
      FSDataInputStream in = fs.open(idPath);
      try {
        clusterId = in.readUTF();
      } catch (EOFException eof) {
        LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
      } finally {
        in.close();
      }
    } else {
      LOG.warn("Cluster ID file does not exist at " + idPath.toString());
    }
    return clusterId;
  }

  /**
   * Writes a new unique identifier for this cluster to the "hbase.id" file
   * in the HBase root directory
   * @param fs the root directory FileSystem
   * @param rootdir the path to the HBase root directory
   * @param clusterId the unique identifier to store
   * @param wait how long (in milliseconds) to wait between retries
   * @throws IOException if writing to the FileSystem fails and no wait
   *           interval is configured
   */
  public static void setClusterId(FileSystem fs, Path rootdir, String clusterId,
      int wait) throws IOException {
    while (true) {
      try {
        Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        FSDataOutputStream s = fs.create(filePath);
        s.writeUTF(clusterId);
        s.close();
        if (LOG.isDebugEnabled()) {
          LOG.debug("Created cluster ID file at " + filePath.toString() +
              " with ID: " + clusterId);
        }
        return;
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
              ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException ie) {
            // Restore the interrupt status and give up.
            Thread.currentThread().interrupt();
            break;
          }
        } else {
          throw ioe;
        }
      }
    }
  }

  /**
   * Verifies root directory path is a valid URI with a scheme
   *
   * @param root root directory path
   * @return Passed <code>root</code> argument.
   * @throws IOException if not a valid URI with a scheme
   */
  public static Path validateRootPath(Path root) throws IOException {
    try {
      URI rootURI = new URI(root.toString());
      String scheme = rootURI.getScheme();
      if (scheme == null) {
        throw new IOException("Root directory does not have a scheme");
      }
      return root;
    } catch (URISyntaxException e) {
      IOException io = new IOException("Root directory path is not a valid " +
        "URI -- check your " + HConstants.HBASE_DIR + " configuration");
      io.initCause(e);
      throw io;
    }
  }

  /**
   * If DFS, check whether it is in safe mode and, if so, wait until it exits.
   * @param conf configuration
   * @param wait milliseconds to sleep between retries
   * @throws IOException e
   */
  public static void waitOnSafeMode(final Configuration conf,
    final long wait)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) return;
    DistributedFileSystem dfs = (DistributedFileSystem)fs;
    // Make sure dfs is not in safe mode
    while (isInSafeMode(dfs)) {
      LOG.info("Waiting for dfs to exit safe mode...");
      try {
        Thread.sleep(wait);
      } catch (InterruptedException e) {
        // continue
      }
    }
  }

  /**
   * Return the 'path' component of a Path.  In Hadoop, Path is a URI.  This
   * method returns the 'path' component of a Path's URI: e.g. If a Path is
   * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>,
   * this method returns <code>/hbase_trunk/TestTable/compaction.dir</code>.
   * This method is useful if you want to print out a Path without the
   * qualifying FileSystem instance.
   * @param p Filesystem Path whose 'path' component we are to return.
   * @return the 'path' component of the Path's URI
   */
  public static String getPath(Path p) {
    return p.toUri().getPath();
  }

  /**
   * @param c configuration
   * @return Path to hbase root directory: i.e. <code>hbase.rootdir</code> from
   * configuration as a qualified Path.
   * @throws IOException e
   */
  public static Path getRootDir(final Configuration c) throws IOException {
    Path p = new Path(c.get(HConstants.HBASE_DIR));
    FileSystem fs = p.getFileSystem(c);
    return p.makeQualified(fs);
  }

  public static void setRootDir(final Configuration c, final Path root) throws IOException {
    c.set(HConstants.HBASE_DIR, root.toString());
  }
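
  // Round-trip sketch (illustrative only): getRootDir() qualifies the bare
  // hbase.rootdir value with the scheme and authority of its filesystem. The
  // URI below is hypothetical.
  //
  //   conf.set(HConstants.HBASE_DIR, "/hbase");
  //   Path root = FSUtils.getRootDir(conf); // e.g. hdfs://namenode:8020/hbase
  //   FSUtils.setRootDir(conf, root);       // stores the qualified form back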

  /**
   * Checks if root region exists
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @return true if exists
   * @throws IOException e
   */
  public static boolean rootRegionExists(FileSystem fs, Path rootdir)
  throws IOException {
    Path rootRegionDir =
      HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
    return fs.exists(rootRegionDir);
  }

  /**
   * Compute HDFS blocks distribution of a given file, or a portion of the file
   * @param fs file system
   * @param status file status of the file
   * @param start start position of the portion
   * @param length length of the portion
   * @return The HDFS blocks distribution
   * @throws IOException if the block locations cannot be fetched
   */
  public static HDFSBlocksDistribution computeHDFSBlocksDistribution(
    final FileSystem fs, FileStatus status, long start, long length)
    throws IOException {
    HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
    BlockLocation [] blockLocations =
      fs.getFileBlockLocations(status, start, length);
    for (BlockLocation bl : blockLocations) {
      String [] hosts = bl.getHosts();
      long len = bl.getLength();
      blocksDistribution.addHostsAndBlockWeight(hosts, len);
    }

    return blocksDistribution;
  }
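
  // Locality sketch (illustrative only): compute how much of a store file is
  // local to a given host. getBlockLocalityIndex() is assumed to be available
  // on HDFSBlocksDistribution; the host name is hypothetical.
  //
  //   FileStatus status = fs.getFileStatus(storeFilePath);
  //   HDFSBlocksDistribution dist =
  //       FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
  //   float locality = dist.getBlockLocalityIndex("regionserver-1.example.org");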

  /**
   * Runs through the hbase rootdir and checks all stores have only
   * one file in them -- that is, they've been major compacted.  Looks
   * at root and meta tables too.
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return True if this hbase install is major compacted.
   * @throws IOException e
   */
  public static boolean isMajorCompacted(final FileSystem fs,
      final Path hbaseRootDir)
  throws IOException {
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      // Skip the .log directory.  All others should be tables.  Inside a table,
      // there are compaction.dir directories to skip.  Otherwise, all else
      // should be regions.  Then in each region, should only be family
      // directories.  Under each of these, should be one file only.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it's a region name.  Now look in region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          // Now in family make sure only one file.
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
                " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

  // TODO move this method OUT of FSUtils. No dependencies to HMaster
  /**
   * Returns the total overall fragmentation percentage. Includes .META. and
   * -ROOT- as well.
   *
   * @param master  The master defining the HBase root and file system.
   * @return The overall fragmentation percentage, or -1 if it could not be
   *   determined.
   * @throws IOException When scanning the directory fails.
   */
  public static int getTotalTableFragmentation(final HMaster master)
  throws IOException {
    Map<String, Integer> map = getTableFragmentation(master);
    return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
  }

  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them. Checks -ROOT- and .META. too. The total
   * percentage across all tables is stored under the special key "-TOTAL-".
   *
   * @param master  The master defining the HBase root and file system.
   * @return A map for each table and its percentage.
   * @throws IOException When scanning the directory fails.
   */
  public static Map<String, Integer> getTableFragmentation(
    final HMaster master)
  throws IOException {
    Path path = getRootDir(master.getConfiguration());
    // since HMaster.getFileSystem() is package private
    FileSystem fs = path.getFileSystem(master.getConfiguration());
    return getTableFragmentation(fs, path);
  }

  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them. Checks -ROOT- and .META. too. The total
   * percentage across all tables is stored under the special key "-TOTAL-".
   *
   * @param fs  The file system to use.
   * @param hbaseRootDir  The root directory to scan.
   * @return A map for each table and its percentage.
   * @throws IOException When scanning the directory fails.
   */
  public static Map<String, Integer> getTableFragmentation(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException {
    Map<String, Integer> frags = new HashMap<String, Integer>();
    int cfCountTotal = 0;
    int cfFragTotal = 0;
    DirFilter df = new DirFilter(fs);
    // presumes any directory under hbase.rootdir is a table
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (FileStatus tableDir : tableDirs) {
      // Skip the .log directory.  All others should be tables.  Inside a table,
      // there are compaction.dir directories to skip.  Otherwise, all else
      // should be regions.  Then in each region, should only be family
      // directories.  Under each of these, should be one file only.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      int cfCount = 0;
      int cfFrag = 0;
      FileStatus[] regionDirs = fs.listStatus(d, df);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // else it's a region name, now look in region for families
        FileStatus[] familyDirs = fs.listStatus(dd, df);
        for (FileStatus familyDir : familyDirs) {
          cfCount++;
          cfCountTotal++;
          Path family = familyDir.getPath();
          // now in family make sure only one file
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            cfFrag++;
            cfFragTotal++;
          }
        }
      }
      // compute percentage per table and store in result list
      frags.put(d.getName(), Math.round((float) cfFrag / cfCount * 100));
    }
    // set overall percentage for all tables
    frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
    return frags;
  }
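
  // Worked example of the fragmentation arithmetic above: a table with
  // cfCount = 8 column family directories, of which cfFrag = 2 contain more
  // than one store file, scores round(2 / 8 * 100) = 25 percent. The "-TOTAL-"
  // entry applies the same formula to the counts summed across all tables.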

  /**
   * Expects to find -ROOT- directory.
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return True if this is a pre-0.20 layout.
   * @throws IOException e
   */
  public static boolean isPre020FileLayout(final FileSystem fs,
    final Path hbaseRootDir)
  throws IOException {
    Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
      "70236052"), "info"), "mapfiles");
    return fs.exists(mapfiles);
  }

  /**
   * Runs through the hbase rootdir and checks all stores have only
   * one file in them -- that is, they've been major compacted.  Looks
   * at root and meta tables too.  This version differs from
   * {@link #isMajorCompacted(FileSystem, Path)} in that it expects a
   * pre-0.20.0 hbase layout on the filesystem.  Used when migrating.
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return True if this hbase install is major compacted.
   * @throws IOException e
   */
  public static boolean isMajorCompactedPre020(final FileSystem fs,
      final Path hbaseRootDir)
  throws IOException {
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      // Inside a table, there are compaction.dir directories to skip.
      // Otherwise, all else should be regions.  Then in each region, should
      // only be family directories.  Under each of these, should be a mapfile
      // and info directory and in these only one file.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it's a region name.  Now look in region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          FileStatus[] infoAndMapfile = fs.listStatus(family);
          // Assert that only info and mapfile in family dir.
          if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) {
            LOG.debug(family.toString() +
                " has more than just info and mapfile: " + infoAndMapfile.length);
            return false;
          }
          // An empty family dir has nothing to check (and would otherwise
          // cause an ArrayIndexOutOfBoundsException below).
          if (infoAndMapfile.length == 0) {
            continue;
          }
          // Make sure directory named info or mapfile.
          for (int ll = 0; ll < 2; ll++) {
            if (infoAndMapfile[ll].getPath().getName().equals("info") ||
                infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
              continue;
            LOG.debug("Unexpected directory name: " +
                infoAndMapfile[ll].getPath());
            return false;
          }
          // Now in family, there are 'mapfile' and 'info' subdirs.  Just
          // look in the 'mapfile' subdir.
          FileStatus[] familyStatus =
              fs.listStatus(new Path(family, "mapfiles"));
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
                " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

  /**
   * A {@link PathFilter} that returns only regular files.
   */
  static class FileFilter implements PathFilter {
    private final FileSystem fs;

    public FileFilter(final FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path p) {
      try {
        return fs.isFile(p);
      } catch (IOException e) {
        LOG.debug("unable to verify if path=" + p + " is a regular file", e);
        return false;
      }
    }
  }

  /**
   * A {@link PathFilter} that returns directories.
   */
  public static class DirFilter implements PathFilter {
    private final FileSystem fs;

    public DirFilter(final FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path p) {
      boolean isValid = false;
      try {
        // Compare by name: the constant holds directory names (Strings),
        // so comparing the Path itself would never match.
        if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p.getName())) {
          isValid = false;
        } else {
          isValid = this.fs.getFileStatus(p).isDir();
        }
      } catch (IOException e) {
        LOG.warn("Could not determine if " + p + " is a directory; excluding it", e);
      }
      return isValid;
    }
  }
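
  // Filter sketch (illustrative only): the filters in this class compose with
  // FileSystem.listStatus() to walk the layout one level at a time.
  //
  //   FileStatus[] tables = fs.listStatus(rootdir, new DirFilter(fs));
  //   FileStatus[] regions = fs.listStatus(tables[0].getPath(),
  //       new RegionDirFilter(fs)); // RegionDirFilter is defined below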

  /**
   * Heuristic to determine whether it is safe to open a file for append.
   * Looks for dfs.support.append and uses reflection to search for
   * SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush().
   * @param conf configuration to check for append support
   * @return True if append is supported
   */
  public static boolean isAppendSupported(final Configuration conf) {
    boolean append = conf.getBoolean("dfs.support.append", false);
    if (append) {
      try {
        // TODO: The implementation that comes back when we do a createWriter
        // may not be using SequenceFile so the below is not a definitive test.
        // Will do for now (hdfs-200).
        SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
        append = true;
      } catch (SecurityException e) {
        // ignore; fall through to the hflush check below
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    if (!append) {
      // Look for the 0.21, 0.22, new-style append evidence.
      try {
        FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
        append = true;
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    return append;
  }

  /**
   * @param conf configuration to get the file system from
   * @return True if the configured filesystem's scheme is 'hdfs'.
   * @throws IOException e
   */
  public static boolean isHDFS(final Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    String scheme = fs.getUri().getScheme();
    return scheme.equalsIgnoreCase("hdfs");
  }

  /**
   * Recover file lease. Used when a file is suspected of having been left
   * open by another process.
   * @param fs FileSystem handle
   * @param p Path of file to recover lease
   * @param conf Configuration handle
   * @throws IOException e
   */
  public abstract void recoverFileLease(final FileSystem fs, final Path p,
      Configuration conf) throws IOException;

  /**
   * @param fs file system
   * @param rootdir hbase root directory
   * @return All the table directories under <code>rootdir</code>. Ignores
   * non-table hbase directories such as .logs, .oldlogs, .corrupt, .META.,
   * and -ROOT-.
   * @throws IOException e
   */
  public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
  throws IOException {
    // presumes any directory under hbase.rootdir is a table
    FileStatus [] dirs = fs.listStatus(rootdir, new DirFilter(fs));
    List<Path> tabledirs = new ArrayList<Path>(dirs.length);
    for (FileStatus dir: dirs) {
      Path p = dir.getPath();
      String tableName = p.getName();
      if (!HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName)) {
        tabledirs.add(p);
      }
    }
    return tabledirs;
  }

  public static Path getTablePath(Path rootdir, byte [] tableName) {
    return getTablePath(rootdir, Bytes.toString(tableName));
  }

  public static Path getTablePath(Path rootdir, final String tableName) {
    return new Path(rootdir, tableName);
  }
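
  // Layout sketch (illustrative only): table data lives directly under the
  // root directory, so getTablePath() is a simple concatenation. All names
  // below are hypothetical.
  //
  //   hdfs://namenode:8020/hbase      (hbase.rootdir)
  //     MyTable/                      (getTablePath(root, "MyTable"))
  //       3f2a.../                    (region directory, hex-encoded)
  //         cf1/                      (column family directory)
  //           3944417774205889744     (store file)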

  /**
   * Filter for region directories: accepts 0.90+ style hex-encoded region
   * names as well as older numeric region directory names.
   */
  public static class RegionDirFilter implements PathFilter {
    // This pattern will accept 0.90+ style hex region dirs and older numeric region dir names.
    final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
    final FileSystem fs;

    public RegionDirFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path rd) {
      if (!regionDirPattern.matcher(rd.getName()).matches()) {
        return false;
      }

      try {
        return fs.getFileStatus(rd).isDir();
      } catch (IOException ioe) {
        // Maybe the file was moved or the fs was disconnected.
        LOG.warn("Skipping file " + rd + " due to IOException", ioe);
        return false;
      }
    }
  }

  /**
   * Given a particular table dir, return all the regiondirs inside it, excluding files such as
   * .tableinfo
   * @param fs A file system for the Path
   * @param tableDir Path to a specific table directory {@code <hbase.rootdir>/<tabledir>}
   * @return List of paths to valid region directories in table dir.
   * @throws IOException e
   */
  public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
    // assumes we are in a table dir.
    FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
    List<Path> regionDirs = new ArrayList<Path>(rds.length);
    for (FileStatus rdfs: rds) {
      Path rdPath = rdfs.getPath();
      regionDirs.add(rdPath);
    }
    return regionDirs;
  }

  /**
   * Filter for all dirs that are legal column family names.  This is generally used for colfam
   * dirs {@code <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>}.
   */
  public static class FamilyDirFilter implements PathFilter {
    final FileSystem fs;

    public FamilyDirFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path rd) {
      try {
        // throws IAE if invalid
        HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
      } catch (IllegalArgumentException iae) {
        // path name is an invalid family name and thus is excluded.
        return false;
      }

      try {
        return fs.getFileStatus(rd).isDir();
      } catch (IOException ioe) {
        // Maybe the file was moved or the fs was disconnected.
        LOG.warn("Skipping file " + rd + " due to IOException", ioe);
        return false;
      }
    }
  }

  /**
   * Given a particular region dir, return all the familydirs inside it
   *
   * @param fs A file system for the Path
   * @param regionDir Path to a specific region directory
   * @return List of paths to valid family directories in region dir.
   * @throws IOException e
   */
  public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
    // assumes we are in a region dir.
    FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
    List<Path> familyDirs = new ArrayList<Path>(fds.length);
    for (FileStatus fdfs: fds) {
      Path fdPath = fdfs.getPath();
      familyDirs.add(fdPath);
    }
    return familyDirs;
  }

  /**
   * Filter for HFiles that excludes reference files.
   */
  public static class HFileFilter implements PathFilter {
    // This pattern will accept 0.90+ style hex hfile names but reject reference files
    final public static Pattern hfilePattern = Pattern.compile("^([0-9a-f]+)$");

    final FileSystem fs;

    public HFileFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path rd) {
      if (!hfilePattern.matcher(rd.getName()).matches()) {
        return false;
      }

      try {
        // only files
        return !fs.getFileStatus(rd).isDir();
      } catch (IOException ioe) {
        // Maybe the file was moved or the fs was disconnected.
        LOG.warn("Skipping file " + rd + " due to IOException", ioe);
        return false;
      }
    }
  }

  /**
   * @param conf configuration holding <code>hbase.rootdir</code>
   * @return Returns the filesystem of the hbase rootdir.
   * @throws IOException e
   */
  public static FileSystem getCurrentFileSystem(Configuration conf)
  throws IOException {
    return getRootDir(conf).getFileSystem(conf);
  }

  /**
   * Runs through the HBase rootdir and creates a reverse lookup map for
   * table StoreFile names to the full Path.
   * <br>
   * Example...<br>
   * Key = 3944417774205889744  <br>
   * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
   *
   * @param fs  The file system to use.
   * @param hbaseRootDir  The root directory to scan.
   * @return Map keyed by StoreFile name with a value of the full Path.
   * @throws IOException When scanning the directory fails.
   */
  public static Map<String, Path> getTableStoreFilePathMap(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException {
    Map<String, Path> map = new HashMap<String, Path>();

    // if this method looks similar to 'getTableFragmentation' that is because
    // it was borrowed from it.

    DirFilter df = new DirFilter(fs);
    // presumes any directory under hbase.rootdir is a table
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (FileStatus tableDir : tableDirs) {
      // Skip the .log and other non-table directories.  All others should be tables.
      // Inside a table, there are compaction.dir directories to skip.  Otherwise, all else
      // should be regions.
      Path d = tableDir.getPath();
      if (HConstants.HBASE_NON_TABLE_DIRS.contains(d.getName())) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, df);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // else it's a region name, now look in region for families
        FileStatus[] familyDirs = fs.listStatus(dd, df);
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          // now in family, iterate over the StoreFiles and
          // put in map
          FileStatus[] familyStatus = fs.listStatus(family);
          for (FileStatus sfStatus : familyStatus) {
            Path sf = sfStatus.getPath();
            map.put(sf.getName(), sf);
          }
        }
      }
    }
    return map;
  }

  /**
   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
   * This accommodates differences between hadoop versions.
   *
   * @param fs file system
   * @param dir directory
   * @param filter path filter
   * @return null if dir is empty or doesn't exist, otherwise FileStatus array
   */
  public static FileStatus [] listStatus(final FileSystem fs,
      final Path dir, final PathFilter filter) throws IOException {
    FileStatus [] status = null;
    try {
      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
    } catch (FileNotFoundException fnfe) {
      // if directory doesn't exist, return null
      LOG.debug(dir + " doesn't exist");
    }
    if (status == null || status.length < 1) return null;
    return status;
  }
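
  // Usage sketch (illustrative only): unlike FileSystem.listStatus(), this
  // wrapper returns null for a missing or empty directory instead of throwing,
  // so callers null-check rather than catch an exception. logDir is hypothetical.
  //
  //   FileStatus[] wals = FSUtils.listStatus(fs, logDir, null);
  //   if (wals == null) {
  //     return; // nothing to process
  //   }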

  /**
   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
   * This accommodates differences between hadoop versions.
   *
   * @param fs file system
   * @param dir directory
   * @return null if dir is empty or doesn't exist, otherwise FileStatus array
   */
  public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
    return listStatus(fs, dir, null);
  }

  /**
   * Calls fs.delete() and returns the value returned by the fs.delete()
   *
   * @param fs file system
   * @param path path to delete
   * @param recursive whether to delete directory contents recursively
   * @return the value returned by fs.delete()
   * @throws IOException e
   */
  public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
      throws IOException {
    return fs.delete(path, recursive);
  }

  /**
   * Throw an exception if an action is not permitted by a user on a file.
   *
   * @param user the user
   * @param file the file
   * @param action the action
   */
  public static void checkAccess(User user, FileStatus file,
      FsAction action) throws AccessControlException {
    // See HBASE-7814. UserGroupInformation from hadoop 0.20.x may not support getShortName().
    String username = user.getShortName();
    if (username.equals(file.getOwner())) {
      if (file.getPermission().getUserAction().implies(action)) {
        return;
      }
    } else if (contains(user.getGroupNames(), file.getGroup())) {
      if (file.getPermission().getGroupAction().implies(action)) {
        return;
      }
    } else if (file.getPermission().getOtherAction().implies(action)) {
      return;
    }
    throw new AccessControlException("Permission denied:" + " action=" + action
        + " path=" + file.getPath() + " user=" + username);
  }
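
  // Usage sketch (illustrative only): fail fast if the current user cannot
  // write to a file, mirroring the owner/group/other checks above.
  // User.getCurrent() is assumed to resolve the caller's identity.
  //
  //   FileStatus status = fs.getFileStatus(path);
  //   FSUtils.checkAccess(User.getCurrent(), status, FsAction.WRITE);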

  private static boolean contains(String[] groups, String user) {
    for (String group : groups) {
      if (group.equals(user)) {
        return true;
      }
    }
    return false;
  }

  /**
   * Calls fs.exists(). Checks if the specified path exists
   *
   * @param fs file system
   * @param path path to check
   * @return the value returned by fs.exists()
   * @throws IOException e
   */
  public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
    return fs.exists(path);
  }

  /**
   * Log the current state of the filesystem from a certain root directory
   * @param fs filesystem to investigate
   * @param root root file/directory to start logging from
   * @param LOG log to output information
   * @throws IOException if an unexpected exception occurs
   */
  public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
      throws IOException {
    LOG.debug("Current file system:");
    logFSTree(LOG, fs, root, "|-");
  }

  /**
   * Recursive helper to log the state of the FS
   * @see #logFileSystemState(FileSystem, Path, Log)
   */
  private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
      throws IOException {
    FileStatus[] files = FSUtils.listStatus(fs, root, null);
    if (files == null) return;

    for (FileStatus file : files) {
      if (file.isDir()) {
        LOG.debug(prefix + file.getPath().getName() + "/");
        logFSTree(LOG, fs, file.getPath(), prefix + "---");
      } else {
        LOG.debug(prefix + file.getPath().getName());
      }
    }
  }
}
1386 }