/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Method;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseFileSystem;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;

/**
 * Utility methods for interacting with the underlying file system.
 */
public abstract class FSUtils {
  private static final Log LOG = LogFactory.getLog(FSUtils.class);

  /** Full access permissions (starting point for a umask) */
  private static final String FULL_RWX_PERMISSIONS = "777";

  protected FSUtils() {
    super();
  }

  /**
   * Returns the scheme-specific FSUtils implementation for the given
   * filesystem, looked up under "hbase.fsutil.&lt;scheme&gt;.impl" in the
   * configuration and defaulting to the HDFS implementation.
   * @param fs filesystem whose scheme selects the implementation
   * @param conf configuration to consult for the implementation class
   * @return an FSUtils instance appropriate for <code>fs</code>
   */
  public static FSUtils getInstance(FileSystem fs, Configuration conf) {
    String scheme = fs.getUri().getScheme();
    if (scheme == null) {
      LOG.warn("Could not find scheme for uri " +
          fs.getUri() + ", default to hdfs");
      scheme = "hdfs";
    }
    Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
        scheme + ".impl", FSHDFSUtils.class); // Default to HDFS impl
    FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
    return fsUtils;
  }
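
  /**
   * Illustrative sketch only (not part of the original class): shows how a
   * caller typically obtains the scheme-specific implementation.  For an
   * hdfs:// root this resolves "hbase.fsutil.hdfs.impl" and falls back to
   * FSHDFSUtils when the property is unset.
   */
  private static FSUtils exampleGetInstance(Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    return FSUtils.getInstance(fs, conf);
  }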

  /**
   * Delete if exists.
   * @param fs filesystem object
   * @param dir directory to delete
   * @return True if deleted <code>dir</code>
   * @throws IOException e
   */
  public static boolean deleteDirectory(final FileSystem fs, final Path dir)
  throws IOException {
    return fs.exists(dir) && fs.delete(dir, true);
  }

  /**
   * Check if directory exists.  If it does not, create it.
   * @param fs filesystem object
   * @param dir path to check
   * @return Path
   * @throws IOException e
   */
  public Path checkdir(final FileSystem fs, final Path dir) throws IOException {
    if (!fs.exists(dir)) {
      HBaseFileSystem.makeDirOnFileSystem(fs, dir);
    }
    return dir;
  }

  /**
   * Create the specified file on the filesystem. By default, this will:
   * <ol>
   * <li>overwrite the file if it exists</li>
   * <li>apply the umask in the configuration (if it is enabled)</li>
   * <li>use the fs configured buffer size (or 4096 if not set)</li>
   * <li>use the default replication</li>
   * <li>use the default block size</li>
   * <li>not track progress</li>
   * </ol>
   *
   * @param fs {@link FileSystem} on which to write the file
   * @param path {@link Path} to the file to write
   * @param perm permissions to apply to the created file
   * @return output stream to the created file
   * @throws IOException if the file cannot be created
   */
  public static FSDataOutputStream create(FileSystem fs, Path path,
      FsPermission perm) throws IOException {
    return create(fs, path, perm, true);
  }

  /**
   * Create the specified file on the filesystem. By default, this will:
   * <ol>
   * <li>apply the umask in the configuration (if it is enabled)</li>
   * <li>use the fs configured buffer size (or 4096 if not set)</li>
   * <li>use the default replication</li>
   * <li>use the default block size</li>
   * <li>not track progress</li>
   * </ol>
   *
   * @param fs {@link FileSystem} on which to write the file
   * @param path {@link Path} to the file to write
   * @param perm permissions to apply to the created file
   * @param overwrite Whether or not the created file should be overwritten.
   * @return output stream to the created file
   * @throws IOException if the file cannot be created
   */
  public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm,
      boolean overwrite) throws IOException {
    LOG.debug("Creating file=" + path + " with permission=" + perm);
    return HBaseFileSystem.createPathWithPermsOnFileSystem(fs, path, perm, overwrite);
  }

  /**
   * Get the file permissions specified in the configuration, if they are
   * enabled.
   *
   * @param fs filesystem that the file will be created on.
   * @param conf configuration to read for determining if permissions are
   *          enabled and which to use
   * @param permissionConfKey property key in the configuration to use when
   *          finding the permission
   * @return the permission to use when creating a new file on the fs. If
   *         special permissions are not specified in the configuration, then
   *         the default permissions on the fs will be returned.
   */
  public static FsPermission getFilePermissions(final FileSystem fs,
      final Configuration conf, final String permissionConfKey) {
    boolean enablePermissions = conf.getBoolean(
        HConstants.ENABLE_DATA_FILE_UMASK, false);

    if (enablePermissions) {
      try {
        FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
        // make sure that we have a mask, if not, go default.
        String mask = conf.get(permissionConfKey);
        if (mask == null)
          return FsPermission.getDefault();
        // apply the umask
        FsPermission umask = new FsPermission(mask);
        return perm.applyUMask(umask);
      } catch (IllegalArgumentException e) {
        LOG.warn(
            "Incorrect umask attempted to be created: "
                + conf.get(permissionConfKey)
                + ", using default file permissions.", e);
        return FsPermission.getDefault();
      }
    }
    return FsPermission.getDefault();
  }
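
  /**
   * Illustrative sketch only: demonstrates the umask arithmetic performed by
   * {@link #getFilePermissions(FileSystem, Configuration, String)}.  The
   * "hbase.data.umask" key is assumed for the example; pass whichever key
   * your deployment actually uses.
   */
  private static FsPermission examplePermissionsWithUmask(FileSystem fs, Configuration conf) {
    conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
    conf.set("hbase.data.umask", "022");
    // "777" with a "022" umask applied yields "755" (rwxr-xr-x).
    return getFilePermissions(fs, conf, "hbase.data.umask");
  }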

  /**
   * Checks to see if the specified file system is available
   *
   * @param fs filesystem
   * @throws IOException e
   */
  public static void checkFileSystemAvailable(final FileSystem fs)
  throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    IOException exception = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      if (dfs.exists(new Path("/"))) {
        return;
      }
    } catch (IOException e) {
      exception = RemoteExceptionHandler.checkIOException(e);
    }
    try {
      fs.close();
    } catch (Exception e) {
      LOG.error("file system close failed: ", e);
    }
    IOException io = new IOException("File system is not available");
    io.initCause(exception);
    throw io;
  }

  /**
   * We use reflection because the two-argument
   * {@code DistributedFileSystem#setSafeMode(SafeModeAction, boolean)}
   * is not available in Hadoop 1.1.
   *
   * @param dfs the distributed file system to query
   * @return whether we're in safe mode
   * @throws IOException
   */
  private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
    boolean inSafeMode = false;
    try {
      Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
      inSafeMode = (Boolean) m.invoke(dfs,
        org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
    } catch (Exception e) {
      if (e instanceof IOException) throw (IOException) e;

      // Check whether dfs is in safe mode.
      inSafeMode = dfs.setSafeMode(
        org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
    }
    return inSafeMode;
  }

  /**
   * Check whether dfs is in safe mode.
   * @param conf configuration to get the file system from
   * @throws IOException if dfs is in safe mode
   */
  public static void checkDfsSafeMode(final Configuration conf)
  throws IOException {
    boolean isInSafeMode = false;
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem)fs;
      isInSafeMode = isInSafeMode(dfs);
    }
    if (isInSafeMode) {
      throw new IOException("File system is in safe mode, it can't be written to now");
    }
  }
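
  /**
   * Illustrative sketch only: a typical startup guard built from the two safe
   * mode helpers.  The first call fails fast while dfs is in safe mode; the
   * second polls until safe mode clears.  The 1000ms interval is arbitrary.
   */
  private static void exampleSafeModeGuard(Configuration conf) throws IOException {
    checkDfsSafeMode(conf);     // throws IOException while dfs is in safe mode
    waitOnSafeMode(conf, 1000); // alternatively, block until safe mode clears
  }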

  /**
   * Reads the current version of the file system from the version file.
   *
   * @param fs filesystem object
   * @param rootdir root hbase directory
   * @return null if no version file exists, version string otherwise.
   * @throws IOException e
   */
  public static String getVersion(FileSystem fs, Path rootdir)
  throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    String version = null;
    if (fs.exists(versionFile)) {
      FSDataInputStream s =
        fs.open(versionFile);
      try {
        version = DataInputStream.readUTF(s);
      } catch (EOFException eof) {
        LOG.warn("Version file was empty, odd, will try to set it.");
      } finally {
        s.close();
      }
    }
    return version;
  }

  /**
   * Verifies current version of file system
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @param message if true, issues a message on System.out
   *
   * @throws IOException e
   */
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message) throws IOException {
    checkVersion(fs, rootdir, message, 0,
        HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
  }

  /**
   * Verifies current version of file system
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @param message if true, issues a message on System.out
   * @param wait wait interval
   * @param retries number of times to retry
   *
   * @throws IOException e
   */
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message, int wait, int retries) throws IOException {
    String version = getVersion(fs, rootdir);

    if (version == null) {
      if (!rootRegionExists(fs, rootdir)) {
        // rootDir is empty (no version file and no root region)
        // just create new version file (HBASE-1195)
        FSUtils.setVersion(fs, rootdir, wait, retries);
        return;
      }
    } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) {
      return;
    }

    // Version is deprecated; migration is required.
    // Output on stdout so user sees it in terminal.
    String msg = "HBase file layout needs to be upgraded."
      + "  You have version " + version
      + " and I want version " + HConstants.FILE_SYSTEM_VERSION
      + ".  Is your hbase.rootdir valid?  If so, you may need to run "
      + "'hbase hbck -fixVersionFile'.";
    if (message) {
      System.out.println("WARNING! " + msg);
    }
    throw new FileSystemVersionException(msg);
  }

  /**
   * Sets version of file system
   *
   * @param fs filesystem object
   * @param rootdir hbase root
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir)
  throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
        HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
  }

  /**
   * Sets version of file system
   *
   * @param fs filesystem object
   * @param rootdir hbase root
   * @param wait time to wait for retry
   * @param retries number of times to retry before failing
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
  throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
  }

  /**
   * Return the number of bytes that large input files should optimally
   * be split into to minimize i/o time.
   *
   * Uses reflection to search for getDefaultBlockSize(Path f);
   * if the method doesn't exist, falls back to using getDefaultBlockSize().
   *
   * @param fs filesystem object
   * @param path path whose filesystem's block size is wanted
   * @return the default block size for the path's filesystem
   * @throws IOException e
   */
  public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
    Method m = null;
    Class<? extends FileSystem> cls = fs.getClass();
    try {
      m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
    } catch (NoSuchMethodException e) {
      LOG.info("FileSystem doesn't support getDefaultBlockSize");
    } catch (SecurityException e) {
      LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
      m = null; // could happen on setAccessible()
    }
    if (m == null) {
      return fs.getDefaultBlockSize();
    } else {
      try {
        Object ret = m.invoke(fs, path);
        return ((Long)ret).longValue();
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
  }

  /**
   * Get the default replication.
   *
   * Uses reflection to search for getDefaultReplication(Path f);
   * if the method doesn't exist, falls back to using getDefaultReplication().
   *
   * @param fs filesystem object
   * @param path path whose filesystem's replication is wanted
   * @return default replication for the path's filesystem
   * @throws IOException e
   */
  public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
    Method m = null;
    Class<? extends FileSystem> cls = fs.getClass();
    try {
      m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
    } catch (NoSuchMethodException e) {
      LOG.info("FileSystem doesn't support getDefaultReplication");
    } catch (SecurityException e) {
      LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
      m = null; // could happen on setAccessible()
    }
    if (m == null) {
      return fs.getDefaultReplication();
    } else {
      try {
        Object ret = m.invoke(fs, path);
        return ((Number)ret).shortValue();
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
  }

  /**
   * Returns the default buffer size to use during writes.
   *
   * The size of the buffer should probably be a multiple of hardware
   * page size (4096 on Intel x86), and it determines how much data is
   * buffered during read and write operations.
   *
   * @param fs filesystem object
   * @return default buffer size to use during writes
   */
  public static int getDefaultBufferSize(final FileSystem fs) {
    return fs.getConf().getInt("io.file.buffer.size", 4096);
  }

  /**
   * Sets version of file system
   *
   * @param fs filesystem object
   * @param rootdir hbase root directory
   * @param version version to set
   * @param wait time to wait for retry
   * @param retries number of times to retry before throwing an IOException
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir, String version,
      int wait, int retries) throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    while (true) {
      try {
        FSDataOutputStream s = fs.create(versionFile);
        s.writeUTF(version);
        LOG.debug("Created version file at " + rootdir.toString() +
            ", set its version to:" + version);
        s.close();
        return;
      } catch (IOException e) {
        if (retries > 0) {
          LOG.warn("Unable to create version file at " + rootdir.toString() +
              ", retrying: " + e.getMessage());
          fs.delete(versionFile, false);
          try {
            if (wait > 0) {
              Thread.sleep(wait);
            }
          } catch (InterruptedException ex) {
            // ignore
          }
          retries--;
        } else {
          throw e;
        }
      }
    }
  }
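
  /**
   * Illustrative sketch only: the bootstrap sequence a caller might run
   * against a root directory.  checkVersion() writes the version file itself
   * when the directory is empty, so the explicit setVersion() is shown for
   * completeness.  The retry values are arbitrary for the example.
   */
  private static void exampleVersionBootstrap(FileSystem fs, Path rootdir) throws IOException {
    if (getVersion(fs, rootdir) == null) {
      // No version file yet: write the current layout version, retrying up
      // to 3 times with a 10 second sleep between attempts.
      setVersion(fs, rootdir, 10 * 1000, 3);
    }
    // Throws FileSystemVersionException if the layout needs migration.
    checkVersion(fs, rootdir, true);
  }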

  /**
   * Checks that a cluster ID file exists in the HBase root directory
   * @param fs the root directory FileSystem
   * @param rootdir the HBase root directory in HDFS
   * @param wait how long to wait between retries
   * @return <code>true</code> if the file exists, otherwise <code>false</code>
   * @throws IOException if checking the FileSystem fails
   */
  public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
      int wait) throws IOException {
    while (true) {
      try {
        Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        return fs.exists(filePath);
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
              ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException ie) {
            Thread.interrupted();
            break;
          }
        } else {
          throw ioe;
        }
      }
    }
    return false;
  }

  /**
   * Returns the value of the unique cluster ID stored for this HBase instance.
   * @param fs the root directory FileSystem
   * @param rootdir the path to the HBase root directory
   * @return the unique cluster identifier
   * @throws IOException if reading the cluster ID file fails
   */
  public static String getClusterId(FileSystem fs, Path rootdir)
      throws IOException {
    Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
    String clusterId = null;
    if (fs.exists(idPath)) {
      FSDataInputStream in = fs.open(idPath);
      try {
        clusterId = in.readUTF();
      } catch (EOFException eof) {
        LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
      } finally {
        in.close();
      }
    } else {
      LOG.warn("Cluster ID file does not exist at " + idPath.toString());
    }
    return clusterId;
  }

  /**
   * Writes a new unique identifier for this cluster to the "hbase.id" file
   * in the HBase root directory
   * @param fs the root directory FileSystem
   * @param rootdir the path to the HBase root directory
   * @param clusterId the unique identifier to store
   * @param wait how long (in milliseconds) to wait between retries
   * @throws IOException if writing to the FileSystem fails and no wait value
   *           was given for retries
   */
  public static void setClusterId(FileSystem fs, Path rootdir, String clusterId,
      int wait) throws IOException {
    while (true) {
      try {
        Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        FSDataOutputStream s = fs.create(filePath);
        s.writeUTF(clusterId);
        s.close();
        if (LOG.isDebugEnabled()) {
          LOG.debug("Created cluster ID file at " + filePath.toString() +
              " with ID: " + clusterId);
        }
        return;
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
              ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException ie) {
            Thread.interrupted();
            break;
          }
        } else {
          throw ioe;
        }
      }
    }
  }

  /**
   * Verifies root directory path is a valid URI with a scheme
   *
   * @param root root directory path
   * @return Passed <code>root</code> argument.
   * @throws IOException if not a valid URI with a scheme
   */
  public static Path validateRootPath(Path root) throws IOException {
    try {
      URI rootURI = new URI(root.toString());
      String scheme = rootURI.getScheme();
      if (scheme == null) {
        throw new IOException("Root directory does not have a scheme");
      }
      return root;
    } catch (URISyntaxException e) {
      IOException io = new IOException("Root directory path is not a valid " +
        "URI -- check your " + HConstants.HBASE_DIR + " configuration");
      io.initCause(e);
      throw io;
    }
  }

  /**
   * If DFS, check safe mode and if so, wait until we clear it.
   * @param conf configuration
   * @param wait Sleep between retries
   * @throws IOException e
   */
  public static void waitOnSafeMode(final Configuration conf,
    final long wait)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) return;
    DistributedFileSystem dfs = (DistributedFileSystem)fs;
    // Make sure dfs is not in safe mode
    while (isInSafeMode(dfs)) {
      LOG.info("Waiting for dfs to exit safe mode...");
      try {
        Thread.sleep(wait);
      } catch (InterruptedException e) {
        // continue
      }
    }
  }

  /**
   * Return the 'path' component of a Path.  In Hadoop, Path is a URI.  This
   * method returns the 'path' component of a Path's URI: e.g. If a Path is
   * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>,
   * this method returns <code>/hbase_trunk/TestTable/compaction.dir</code>.
   * This method is useful if you want to print out a Path without qualifying
   * it with its FileSystem instance.
   * @param p Filesystem Path whose 'path' component we are to return.
   * @return 'path' component of the Path's URI.
   */
  public static String getPath(Path p) {
    return p.toUri().getPath();
  }

  /**
   * @param c configuration
   * @return Path to hbase root directory: i.e. <code>hbase.rootdir</code> from
   * configuration as a qualified Path.
   * @throws IOException e
   */
  public static Path getRootDir(final Configuration c) throws IOException {
    Path p = new Path(c.get(HConstants.HBASE_DIR));
    FileSystem fs = p.getFileSystem(c);
    return p.makeQualified(fs);
  }

  public static void setRootDir(final Configuration c, final Path root) throws IOException {
    c.set(HConstants.HBASE_DIR, root.toString());
  }

  public static void setFsDefault(final Configuration c, final Path root) throws IOException {
    c.set("fs.defaultFS", root.toString());    // for hadoop 0.21+
    c.set("fs.default.name", root.toString()); // for hadoop 0.20
  }
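
  /**
   * Illustrative sketch only: resolving and re-pointing the HBase root
   * directory.  The hdfs://namenode:8020/hbase URI is a hypothetical example.
   */
  private static Path exampleRootDir(Configuration conf) throws IOException {
    setRootDir(conf, new Path("hdfs://namenode:8020/hbase"));
    setFsDefault(conf, new Path("hdfs://namenode:8020/"));
    // Returns hbase.rootdir qualified against its own filesystem.
    return getRootDir(conf);
  }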

  /**
   * Checks if root region exists
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @return true if exists
   * @throws IOException e
   */
  public static boolean rootRegionExists(FileSystem fs, Path rootdir)
  throws IOException {
    Path rootRegionDir =
      HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
    return fs.exists(rootRegionDir);
  }

  /**
   * Compute HDFS blocks distribution of a given file, or a portion of the file
   * @param fs file system
   * @param status file status of the file
   * @param start start position of the portion
   * @param length length of the portion
   * @return The HDFS blocks distribution
   * @throws IOException if fetching the block locations fails
   */
  public static HDFSBlocksDistribution computeHDFSBlocksDistribution(
    final FileSystem fs, FileStatus status, long start, long length)
    throws IOException {
    HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
    BlockLocation [] blockLocations =
      fs.getFileBlockLocations(status, start, length);
    for (BlockLocation bl : blockLocations) {
      String [] hosts = bl.getHosts();
      long len = bl.getLength();
      blocksDistribution.addHostsAndBlockWeight(hosts, len);
    }

    return blocksDistribution;
  }
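
  /**
   * Illustrative sketch only: computing what fraction of a file's bytes are
   * local to a given host, which is how region locality is typically derived
   * from an HDFSBlocksDistribution.
   */
  private static float exampleLocality(FileSystem fs, Path file, String host)
      throws IOException {
    FileStatus status = fs.getFileStatus(file);
    HDFSBlocksDistribution distribution =
        computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
    // 1.0f means every block has a replica on the given host.
    return distribution.getBlockLocalityIndex(host);
  }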

  /**
   * Runs through the hbase rootdir and checks all stores have only
   * one file in them -- that is, they've been major compacted.  Looks
   * at root and meta tables too.
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return True if this hbase install is major compacted.
   * @throws IOException e
   */
  public static boolean isMajorCompacted(final FileSystem fs,
      final Path hbaseRootDir)
  throws IOException {
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      // Skip the .log directory.  All others should be tables.  Inside a table,
      // there are compaction.dir directories to skip.  Otherwise, all else
      // should be regions.  Then in each region, should only be family
      // directories.  Under each of these, should be one file only.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else its a region name.  Now look in region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          // Now in family make sure only one file.
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
                " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

  // TODO move this method OUT of FSUtils. No dependencies on HMaster
  /**
   * Returns the total overall fragmentation percentage. Includes .META. and
   * -ROOT- as well.
   *
   * @param master  The master defining the HBase root and file system.
   * @return The total fragmentation percentage, or -1 if nothing was scanned.
   * @throws IOException When scanning the directory fails.
   */
  public static int getTotalTableFragmentation(final HMaster master)
  throws IOException {
    Map<String, Integer> map = getTableFragmentation(master);
    return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
  }

  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them. Checks -ROOT- and .META. too. The total
   * percentage across all tables is stored under the special key "-TOTAL-".
   *
   * @param master  The master defining the HBase root and file system.
   * @return A map for each table and its percentage.
   * @throws IOException When scanning the directory fails.
   */
  public static Map<String, Integer> getTableFragmentation(
    final HMaster master)
  throws IOException {
    Path path = getRootDir(master.getConfiguration());
    // since HMaster.getFileSystem() is package private
    FileSystem fs = path.getFileSystem(master.getConfiguration());
    return getTableFragmentation(fs, path);
  }

  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them. Checks -ROOT- and .META. too. The total
   * percentage across all tables is stored under the special key "-TOTAL-".
   *
   * @param fs  The file system to use.
   * @param hbaseRootDir  The root directory to scan.
   * @return A map for each table and its percentage.
   * @throws IOException When scanning the directory fails.
   */
  public static Map<String, Integer> getTableFragmentation(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException {
    Map<String, Integer> frags = new HashMap<String, Integer>();
    int cfCountTotal = 0;
    int cfFragTotal = 0;
    DirFilter df = new DirFilter(fs);
    // presumes any directory under hbase.rootdir is a table
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (FileStatus tableDir : tableDirs) {
      // Skip the .log directory.  All others should be tables.  Inside a table,
      // there are compaction.dir directories to skip.  Otherwise, all else
      // should be regions.  Then in each region, should only be family
      // directories.  Under each of these, should be one file only.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      int cfCount = 0;
      int cfFrag = 0;
      FileStatus[] regionDirs = fs.listStatus(d, df);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // else its a region name, now look in region for families
        FileStatus[] familyDirs = fs.listStatus(dd, df);
        for (FileStatus familyDir : familyDirs) {
          cfCount++;
          cfCountTotal++;
          Path family = familyDir.getPath();
          // now in family make sure only one file
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            cfFrag++;
            cfFragTotal++;
          }
        }
      }
      // compute percentage per table and store in result list
      frags.put(d.getName(), Math.round((float) cfFrag / cfCount * 100));
    }
    // set overall percentage for all tables
    frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
    return frags;
  }
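
  /**
   * Illustrative sketch only: consuming the fragmentation map produced above.
   * The special "-TOTAL-" key always carries the percentage across all tables.
   */
  private static void exampleLogFragmentation(FileSystem fs, Path rootdir)
      throws IOException {
    Map<String, Integer> frags = getTableFragmentation(fs, rootdir);
    for (Map.Entry<String, Integer> entry : frags.entrySet()) {
      LOG.info(entry.getKey() + ": " + entry.getValue() +
          "% of stores have more than one file");
    }
  }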

  /**
   * Expects to find the -ROOT- directory.
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return True if this is a pre-0.20 layout.
   * @throws IOException e
   */
  public static boolean isPre020FileLayout(final FileSystem fs,
    final Path hbaseRootDir)
  throws IOException {
    Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
      "70236052"), "info"), "mapfiles");
    return fs.exists(mapfiles);
  }

  /**
   * Runs through the hbase rootdir and checks all stores have only
   * one file in them -- that is, they've been major compacted.  Looks
   * at root and meta tables too.  This version differs from
   * {@link #isMajorCompacted(FileSystem, Path)} in that it expects a
   * pre-0.20.0 hbase layout on the filesystem.  Used when migrating.
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return True if this hbase install is major compacted.
   * @throws IOException e
   */
  public static boolean isMajorCompactedPre020(final FileSystem fs,
      final Path hbaseRootDir)
  throws IOException {
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      // Inside a table, there are compaction.dir directories to skip.
      // Otherwise, all else should be regions.  Then in each region, should
      // only be family directories.  Under each of these, should be a mapfile
      // and info directory and in these only one file.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else its a region name.  Now look in region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          FileStatus[] infoAndMapfile = fs.listStatus(family);
          // An empty family dir is acceptable; skip it rather than index
          // into a zero-length array below.
          if (infoAndMapfile.length == 0) {
            continue;
          }
          // Assert that only info and mapfile in family dir.
          if (infoAndMapfile.length != 2) {
            LOG.debug(family.toString() +
                " has more than just info and mapfile: " + infoAndMapfile.length);
            return false;
          }
          // Make sure directory named info or mapfile.
          for (int ll = 0; ll < 2; ll++) {
            if (infoAndMapfile[ll].getPath().getName().equals("info") ||
                infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
              continue;
            LOG.debug("Unexpected directory name: " +
                infoAndMapfile[ll].getPath());
            return false;
          }
          // Now in family, there are 'mapfile' and 'info' subdirs.  Just
          // look in the 'mapfile' subdir.
          FileStatus[] familyStatus =
              fs.listStatus(new Path(family, "mapfiles"));
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
                " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

  /**
   * A {@link PathFilter} that returns only regular files.
   */
  static class FileFilter implements PathFilter {
    private final FileSystem fs;

    public FileFilter(final FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path p) {
      try {
        return fs.isFile(p);
      } catch (IOException e) {
        LOG.debug("unable to verify if path=" + p + " is a regular file", e);
        return false;
      }
    }
  }

  /**
   * A {@link PathFilter} that returns directories.
   */
  public static class DirFilter implements PathFilter {
    private final FileSystem fs;

    public DirFilter(final FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path p) {
      boolean isValid = false;
      try {
        // Compare the directory name, not the Path object, against the set
        // of reserved (non user-table) directory names.
        if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p.getName())) {
          isValid = false;
        } else {
          isValid = this.fs.getFileStatus(p).isDir();
        }
      } catch (IOException e) {
        LOG.warn("Could not verify that " + p + " is a directory;"
            + " treating it as not valid", e);
      }
      return isValid;
    }
  }
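
  /**
   * Illustrative sketch only: using DirFilter to walk just the directories
   * under a path, as the scanning utilities in this class do internally.
   */
  private static void exampleListDirectories(FileSystem fs, Path dir) throws IOException {
    FileStatus[] dirs = fs.listStatus(dir, new DirFilter(fs));
    for (FileStatus d : dirs) {
      LOG.debug("Found directory: " + d.getPath().getName());
    }
  }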

  /**
   * Heuristic to determine whether it is safe to open a file for append.
   * Looks both for dfs.support.append and uses reflection to search
   * for SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush().
   * @param conf configuration to check for append support
   * @return True if append is supported
   */
  public static boolean isAppendSupported(final Configuration conf) {
    boolean append = conf.getBoolean("dfs.support.append", false);
    if (append) {
      try {
        // TODO: The implementation that comes back when we do a createWriter
        // may not be using SequenceFile so the below is not a definitive test.
        // Will do for now (hdfs-200).
        SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
        append = true;
      } catch (SecurityException e) {
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    if (!append) {
      // Look for the 0.21, 0.22, new-style append evidence.
      try {
        FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
        append = true;
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    return append;
  }
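
  /**
   * Illustrative sketch only: a startup check that refuses to run on an HDFS
   * deployment without a durable sync, which is the usual way this heuristic
   * is consumed.
   */
  private static void exampleRequireAppend(Configuration conf) throws IOException {
    if (isHDFS(conf) && !isAppendSupported(conf)) {
      throw new IOException("HDFS does not support append/sync;"
          + " WAL durability cannot be guaranteed");
    }
  }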

  /**
   * @param conf configuration to get the file system from
   * @return True if this filesystem's scheme is 'hdfs'.
   * @throws IOException
   */
  public static boolean isHDFS(final Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    String scheme = fs.getUri().getScheme();
    return scheme.equalsIgnoreCase("hdfs");
  }

  /**
   * Recover file lease. Used when a file is suspected
   * to have been left open by another process.
   * @param fs FileSystem handle
   * @param p Path of file to recover lease
   * @param conf Configuration handle
   * @throws IOException
   */
  public abstract void recoverFileLease(final FileSystem fs, final Path p,
      Configuration conf) throws IOException;

  /**
   * @param fs file system
   * @param rootdir hbase root directory
   * @return All the table directories under <code>rootdir</code>.  Ignores
   * non-table hbase folders such as .logs, .oldlogs, .corrupt, .META., and -ROOT-.
   * @throws IOException
   */
  public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
  throws IOException {
    // presumes any directory under hbase.rootdir is a table
    FileStatus [] dirs = fs.listStatus(rootdir, new DirFilter(fs));
    List<Path> tabledirs = new ArrayList<Path>(dirs.length);
    for (FileStatus dir: dirs) {
      Path p = dir.getPath();
      String tableName = p.getName();
      if (!HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName)) {
        tabledirs.add(p);
      }
    }
    return tabledirs;
  }

  public static Path getTablePath(Path rootdir, byte [] tableName) {
    return getTablePath(rootdir, Bytes.toString(tableName));
  }

  public static Path getTablePath(Path rootdir, final String tableName) {
    return new Path(rootdir, tableName);
  }

  /**
   * Filter for region directories: accepts only directory names that look
   * like region encodings, skipping files and dot-prefixed dirs.
   */
  public static class RegionDirFilter implements PathFilter {
    // This pattern will accept 0.90+ style hex region dirs and older numeric region dir names.
    final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
    final FileSystem fs;

    public RegionDirFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path rd) {
      if (!regionDirPattern.matcher(rd.getName()).matches()) {
        return false;
      }

      try {
        return fs.getFileStatus(rd).isDir();
      } catch (IOException ioe) {
        // Maybe the file was moved or the fs was disconnected.
        LOG.warn("Skipping file " + rd + " due to IOException", ioe);
        return false;
      }
    }
  }

  /**
   * Given a particular table dir, return all the regiondirs inside it, excluding files such as
   * .tableinfo
   * @param fs A file system for the Path
   * @param tableDir Path to a specific table directory {@code <hbase.rootdir>/<tabledir>}
   * @return List of paths to valid region directories in table dir.
   * @throws IOException
   */
  public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
    // assumes we are in a table dir.
    FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
    List<Path> regionDirs = new ArrayList<Path>(rds.length);
    for (FileStatus rdfs: rds) {
      Path rdPath = rdfs.getPath();
      regionDirs.add(rdPath);
    }
    return regionDirs;
  }

  /**
   * Filter for all dirs that are legal column family names.  This is generally used for colfam
   * dirs {@code <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>}.
   */
  public static class FamilyDirFilter implements PathFilter {
    final FileSystem fs;

    public FamilyDirFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path rd) {
      try {
        // throws IAE if invalid
        HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
      } catch (IllegalArgumentException iae) {
        // path name is an invalid family name and thus is excluded.
        return false;
      }

      try {
        return fs.getFileStatus(rd).isDir();
      } catch (IOException ioe) {
        // Maybe the file was moved or the fs was disconnected.
        LOG.warn("Skipping file " + rd + " due to IOException", ioe);
        return false;
      }
    }
  }

  /**
   * Given a particular region dir, return all the familydirs inside it
   *
   * @param fs A file system for the Path
   * @param regionDir Path to a specific region directory
   * @return List of paths to valid family directories in region dir.
   * @throws IOException
   */
  public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
    // assumes we are in a region dir.
    FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
    List<Path> familyDirs = new ArrayList<Path>(fds.length);
    for (FileStatus fdfs: fds) {
      Path fdPath = fdfs.getPath();
      familyDirs.add(fdPath);
    }
    return familyDirs;
  }

  /**
   * Filter for HFiles that excludes reference files.
   */
  public static class HFileFilter implements PathFilter {
    // This pattern will accept 0.90+ style hex hfile names but reject reference files
    final public static Pattern hfilePattern = Pattern.compile("^([0-9a-f]+)$");

    final FileSystem fs;

    public HFileFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path rd) {
      if (!hfilePattern.matcher(rd.getName()).matches()) {
        return false;
      }

      try {
        // only files
        return !fs.getFileStatus(rd).isDir();
      } catch (IOException ioe) {
        // Maybe the file was moved or the fs was disconnected.
        LOG.warn("Skipping file " + rd + " due to IOException", ioe);
        return false;
      }
    }
  }
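
  /**
   * Illustrative sketch only: composing the filters above to enumerate every
   * store file under a table, mirroring what getTableStoreFilePathMap() does
   * with DirFilter.
   */
  private static List<Path> exampleListStoreFiles(final FileSystem fs, final Path tableDir)
      throws IOException {
    List<Path> storeFiles = new ArrayList<Path>();
    for (Path regionDir : getRegionDirs(fs, tableDir)) {
      for (Path familyDir : getFamilyDirs(fs, regionDir)) {
        FileStatus[] hfiles = listStatus(fs, familyDir, new HFileFilter(fs));
        if (hfiles == null) continue;
        for (FileStatus hfile : hfiles) {
          storeFiles.add(hfile.getPath());
        }
      }
    }
    return storeFiles;
  }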

  /**
   * @param conf configuration holding <code>hbase.rootdir</code>
   * @return Returns the filesystem of the hbase rootdir.
   * @throws IOException
   */
  public static FileSystem getCurrentFileSystem(Configuration conf)
  throws IOException {
    return getRootDir(conf).getFileSystem(conf);
  }

  /**
   * Runs through the HBase rootdir and creates a reverse lookup map for
   * table StoreFile names to the full Path.
   * <br>
   * Example...<br>
   * Key = 3944417774205889744  <br>
   * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
   *
   * @param fs  The file system to use.
   * @param hbaseRootDir  The root directory to scan.
   * @return Map keyed by StoreFile name with a value of the full Path.
   * @throws IOException When scanning the directory fails.
   */
  public static Map<String, Path> getTableStoreFilePathMap(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException {
    Map<String, Path> map = new HashMap<String, Path>();

    // if this method looks similar to 'getTableFragmentation' that is because
    // it was borrowed from it.

    DirFilter df = new DirFilter(fs);
    // presumes any directory under hbase.rootdir is a table
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (FileStatus tableDir : tableDirs) {
      // Skip the .log and other non-table directories.  All others should be tables.
      // Inside a table, there are compaction.dir directories to skip.  Otherwise, all else
      // should be regions.
      Path d = tableDir.getPath();
      if (HConstants.HBASE_NON_TABLE_DIRS.contains(d.getName())) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, df);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // else its a region name, now look in region for families
        FileStatus[] familyDirs = fs.listStatus(dd, df);
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          // now in family, iterate over the StoreFiles and
          // put in map
          FileStatus[] familyStatus = fs.listStatus(family);
          for (FileStatus sfStatus : familyStatus) {
            Path sf = sfStatus.getPath();
            map.put(sf.getName(), sf);
          }
        }
      }
    }
    return map;
  }

  /**
   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
   * This accommodates differences between hadoop versions.
   *
   * @param fs file system
   * @param dir directory
   * @param filter path filter
   * @return null if dir is empty or doesn't exist, otherwise FileStatus array
   */
  public static FileStatus [] listStatus(final FileSystem fs,
      final Path dir, final PathFilter filter) throws IOException {
    FileStatus [] status = null;
    try {
      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
    } catch (FileNotFoundException fnfe) {
      // if directory doesn't exist, return null
      LOG.debug(dir + " doesn't exist");
    }
    if (status == null || status.length < 1) return null;
    return status;
  }

  /**
   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal.
   * This accommodates differences between hadoop versions.
   *
   * @param fs file system
   * @param dir directory
   * @return null if dir is empty or doesn't exist, otherwise FileStatus array
   */
  public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
    return listStatus(fs, dir, null);
  }

  /**
   * Calls fs.delete() and returns the value returned by fs.delete().
   *
   * @param fs file system
   * @param path path to delete
   * @param recursive whether to delete directory contents recursively
   * @return the value returned by fs.delete()
   * @throws IOException
   */
  public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
      throws IOException {
    return fs.delete(path, recursive);
  }

  /**
   * Throw an exception if an action is not permitted by a user on a file.
   *
   * @param user the user
   * @param file the file
   * @param action the action
   */
  public static void checkAccess(User user, FileStatus file,
      FsAction action) throws AccessControlException {
    // See HBASE-7814. UserGroupInformation from hadoop 0.20.x may not support getShortName().
    String username = user.getShortName();
    if (username.equals(file.getOwner())) {
      if (file.getPermission().getUserAction().implies(action)) {
        return;
      }
    } else if (contains(user.getGroupNames(), file.getGroup())) {
      if (file.getPermission().getGroupAction().implies(action)) {
        return;
      }
    } else if (file.getPermission().getOtherAction().implies(action)) {
      return;
    }
    throw new AccessControlException("Permission denied:" + " action=" + action
        + " path=" + file.getPath() + " user=" + username);
  }

  private static boolean contains(String[] groups, String user) {
    for (String group : groups) {
      if (group.equals(user)) {
        return true;
      }
    }
    return false;
  }
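
  /**
   * Illustrative sketch only: guarding a read with the owner/group/other
   * evaluation performed by checkAccess().
   */
  private static void exampleCheckRead(User user, FileStatus file)
      throws AccessControlException {
    // Throws AccessControlException unless the user, one of its groups, or
    // the "other" bits grant READ on the file.
    checkAccess(user, file, FsAction.READ);
  }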

  /**
   * Calls fs.exists(). Checks if the specified path exists.
   *
   * @param fs file system
   * @param path path to check
   * @return the value returned by fs.exists()
   * @throws IOException
   */
  public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
    return fs.exists(path);
  }

  /**
   * Log the current state of the filesystem from a certain root directory
   * @param fs filesystem to investigate
   * @param root root file/directory to start logging from
   * @param LOG log to output information
   * @throws IOException if an unexpected exception occurs
   */
  public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
      throws IOException {
    LOG.debug("Current file system:");
    logFSTree(LOG, fs, root, "|-");
  }

  /**
   * Recursive helper to log the state of the FS
   * @see #logFileSystemState(FileSystem, Path, Log)
   */
  private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
      throws IOException {
    FileStatus[] files = FSUtils.listStatus(fs, root, null);
    if (files == null) return;

    for (FileStatus file : files) {
      if (file.isDir()) {
        LOG.debug(prefix + file.getPath().getName() + "/");
        logFSTree(LOG, fs, file.getPath(), prefix + "---");
      } else {
        LOG.debug(prefix + file.getPath().getName());
      }
    }
  }
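
  /**
   * Illustrative sketch only: dumping the tree under the HBase root at debug
   * level, e.g. while diagnosing a layout problem.
   */
  private static void exampleDumpRoot(Configuration conf) throws IOException {
    FileSystem fs = getCurrentFileSystem(conf);
    logFileSystemState(fs, getRootDir(conf), LOG);
  }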
}