package org.apache.hadoop.hbase.util;

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Method;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseFileSystem;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;

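/**
 * Utility methods for interacting with the underlying file system.
 */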
public abstract class FSUtils {
  private static final Log LOG = LogFactory.getLog(FSUtils.class);

  private static final String FULL_RWX_PERMISSIONS = "777";

  protected FSUtils() {
    super();
  }

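  /**
   * Returns the {@link FSUtils} implementation registered for the scheme of the
   * given file system under <code>hbase.fsutil.&lt;scheme&gt;.impl</code>,
   * defaulting to {@link FSHDFSUtils}.
   */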
  public static FSUtils getInstance(FileSystem fs, Configuration conf) {
    String scheme = fs.getUri().getScheme();
    if (scheme == null) {
      LOG.warn("Could not find scheme for uri " +
          fs.getUri() + ", default to hdfs");
      scheme = "hdfs";
    }
    Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
        scheme + ".impl", FSHDFSUtils.class);
    FSUtils fsUtils = (FSUtils) ReflectionUtils.newInstance(fsUtilsClass, conf);
    return fsUtils;
  }

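  /**
   * Deletes the given directory and all of its contents.
   *
   * @return true if the directory existed and was deleted, otherwise false
   * @throws IOException if the delete fails
   */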
  public static boolean deleteDirectory(final FileSystem fs, final Path dir)
      throws IOException {
    return fs.exists(dir) && fs.delete(dir, true);
  }

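  /**
   * Checks that the given directory exists on the file system, creating it if it
   * does not.
   *
   * @return the directory that was checked or created
   */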
  public Path checkdir(final FileSystem fs, final Path dir) throws IOException {
    if (!fs.exists(dir)) {
      HBaseFileSystem.makeDirOnFileSystem(fs, dir);
    }
    return dir;
  }

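  /**
   * Creates (and overwrites) a file at the given path with the given permissions.
   *
   * @see #create(FileSystem, Path, FsPermission, boolean)
   */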
  public static FSDataOutputStream create(FileSystem fs, Path path,
      FsPermission perm) throws IOException {
    return create(fs, path, perm, true);
  }

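  /**
   * Creates a file at the given path with the given permissions, optionally
   * overwriting an existing file.
   */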
  public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm,
      boolean overwrite) throws IOException {
    LOG.debug("Creating file=" + path + " with permission=" + perm);
    return HBaseFileSystem.createPathWithPermsOnFileSystem(fs, path, perm, overwrite);
  }

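  /**
   * Returns the permissions to apply to data files. Unless
   * {@link HConstants#ENABLE_DATA_FILE_UMASK} is set and a umask is configured
   * under the given key, the file system default permissions are returned;
   * otherwise "777" with the configured umask applied.
   */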
  public static FsPermission getFilePermissions(final FileSystem fs,
      final Configuration conf, final String permissionConfKey) {
    boolean enablePermissions = conf.getBoolean(
        HConstants.ENABLE_DATA_FILE_UMASK, false);

    if (enablePermissions) {
      try {
        FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
        String mask = conf.get(permissionConfKey);
        if (mask == null) {
          return FsPermission.getDefault();
        }
        FsPermission umask = new FsPermission(mask);
        return perm.applyUMask(umask);
      } catch (IllegalArgumentException e) {
        LOG.warn(
            "Incorrect umask attempted to be created: "
                + conf.get(permissionConfKey)
                + ", using default file permissions.", e);
        return FsPermission.getDefault();
      }
    }
    return FsPermission.getDefault();
  }

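  /**
   * Checks to see if the specified file system is available.
   *
   * @param fs the file system to check
   * @throws IOException if the file system is not available
   */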
  public static void checkFileSystemAvailable(final FileSystem fs)
      throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    IOException exception = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      if (dfs.exists(new Path("/"))) {
        return;
      }
    } catch (IOException e) {
      exception = RemoteExceptionHandler.checkIOException(e);
    }
    try {
      fs.close();
    } catch (Exception e) {
      LOG.error("file system close failed: ", e);
    }
    IOException io = new IOException("File system is not available");
    io.initCause(exception);
    throw io;
  }

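  /**
   * Returns whether the given DistributedFileSystem is in safe mode. Prefers the
   * two-argument setSafeMode(SafeModeAction, boolean) overload via reflection
   * when it is available, and falls back to the single-argument overload on
   * Hadoop versions that do not have it.
   */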
  private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
    boolean inSafeMode = false;
    try {
      Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?>[] {
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class });
      inSafeMode = (Boolean) m.invoke(dfs,
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
    } catch (Exception e) {
      if (e instanceof IOException) throw (IOException) e;

      inSafeMode = dfs.setSafeMode(
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
    }
    return inSafeMode;
  }

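  /**
   * Checks whether dfs is in safemode.
   *
   * @throws IOException if dfs is in safemode
   */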
  public static void checkDfsSafeMode(final Configuration conf)
      throws IOException {
    boolean isInSafeMode = false;
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      isInSafeMode = isInSafeMode(dfs);
    }
    if (isInSafeMode) {
      throw new IOException("File system is in safemode, it can't be written now");
    }
  }

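  /**
   * Reads the current version of the file system layout from the version file
   * under the given HBase root directory.
   *
   * @return the version string, or null if no version file exists
   */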
  public static String getVersion(FileSystem fs, Path rootdir)
      throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    String version = null;
    if (fs.exists(versionFile)) {
      FSDataInputStream s = fs.open(versionFile);
      try {
        version = DataInputStream.readUTF(s);
      } catch (EOFException eof) {
        LOG.warn("Version file was empty, odd, will try to set it.");
      } finally {
        s.close();
      }
    }
    return version;
  }

  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message) throws IOException {
    checkVersion(fs, rootdir, message, 0,
        HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
  }

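  /**
   * Verifies that the version file under the root directory matches
   * {@link HConstants#FILE_SYSTEM_VERSION}, writing it when the layout is brand
   * new (no root region yet).
   *
   * @param message whether to print a warning to stdout on mismatch
   * @param wait milliseconds to sleep between version file write attempts
   * @param retries number of attempts to write the version file
   * @throws FileSystemVersionException if the version does not match
   */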
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message, int wait, int retries) throws IOException {
    String version = getVersion(fs, rootdir);

    if (version == null) {
      if (!rootRegionExists(fs, rootdir)) {
        FSUtils.setVersion(fs, rootdir, wait, retries);
        return;
      }
    } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0)
      return;

    String msg = "HBase file layout needs to be upgraded."
        + " You have version " + version
        + " and I want version " + HConstants.FILE_SYSTEM_VERSION
        + ". Is your hbase.rootdir valid? If so, you may need to run "
        + "'hbase hbck -fixVersionFile'.";
    if (message) {
      System.out.println("WARNING! " + msg);
    }
    throw new FileSystemVersionException(msg);
  }

  public static void setVersion(FileSystem fs, Path rootdir)
      throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
        HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
  }

  public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
      throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
  }

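  /**
   * Writes the given version string to the version file under the root directory,
   * retrying up to the given number of times and sleeping the given number of
   * milliseconds between attempts.
   */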
  public static void setVersion(FileSystem fs, Path rootdir, String version,
      int wait, int retries) throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    while (true) {
      try {
        FSDataOutputStream s = fs.create(versionFile);
        s.writeUTF(version);
        LOG.debug("Created version file at " + rootdir.toString() +
            " with version=" + version);
        s.close();
        return;
      } catch (IOException e) {
        if (retries > 0) {
          LOG.warn("Unable to create version file at " + rootdir.toString() +
              ", retrying: " + e.getMessage());
          fs.delete(versionFile, false);
          try {
            if (wait > 0) {
              Thread.sleep(wait);
            }
          } catch (InterruptedException ex) {
          }
          retries--;
        } else {
          throw e;
        }
      }
    }
  }

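  /**
   * Checks whether the cluster ID file exists under the root directory, retrying
   * every <code>wait</code> milliseconds on IOException when a positive wait
   * time is given.
   */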
  public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
      int wait) throws IOException {
    while (true) {
      try {
        Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        return fs.exists(filePath);
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
              ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException ie) {
            Thread.interrupted();
            break;
          }
        } else {
          throw ioe;
        }
      }
    }
    return false;
  }

  public static String getClusterId(FileSystem fs, Path rootdir)
      throws IOException {
    Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
    String clusterId = null;
    if (fs.exists(idPath)) {
      FSDataInputStream in = fs.open(idPath);
      try {
        clusterId = in.readUTF();
      } catch (EOFException eof) {
        LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
      } finally {
        in.close();
      }
    } else {
      LOG.warn("Cluster ID file does not exist at " + idPath.toString());
    }
    return clusterId;
  }

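  /**
   * Writes the given cluster ID to the cluster ID file under the root directory,
   * retrying every <code>wait</code> milliseconds on failure when a positive
   * wait time is given.
   */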
  public static void setClusterId(FileSystem fs, Path rootdir, String clusterId,
      int wait) throws IOException {
    while (true) {
      try {
        Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        FSDataOutputStream s = fs.create(filePath);
        s.writeUTF(clusterId);
        s.close();
        if (LOG.isDebugEnabled()) {
          LOG.debug("Created cluster ID file at " + filePath.toString() +
              " with ID: " + clusterId);
        }
        return;
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
              ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException ie) {
            Thread.interrupted();
            break;
          }
        } else {
          throw ioe;
        }
      }
    }
  }

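  /**
   * Verifies that the root path is a valid URI with a scheme.
   *
   * @return the path if it is valid
   * @throws IOException if the path is missing a scheme or is not a valid URI
   */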
  public static Path validateRootPath(Path root) throws IOException {
    try {
      URI rootURI = new URI(root.toString());
      String scheme = rootURI.getScheme();
      if (scheme == null) {
        throw new IOException("Root directory does not have a scheme");
      }
      return root;
    } catch (URISyntaxException e) {
      IOException io = new IOException("Root directory path is not a valid " +
          "URI -- check your " + HConstants.HBASE_DIR + " configuration");
      io.initCause(e);
      throw io;
    }
  }

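  /**
   * If running on a DistributedFileSystem, waits for it to exit safe mode,
   * rechecking every <code>wait</code> milliseconds.
   */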
  public static void waitOnSafeMode(final Configuration conf,
      final long wait)
      throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) return;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    while (isInSafeMode(dfs)) {
      LOG.info("Waiting for dfs to exit safe mode...");
      try {
        Thread.sleep(wait);
      } catch (InterruptedException e) {
      }
    }
  }

  public static String getPath(Path p) {
    return p.toUri().getPath();
  }

  public static Path getRootDir(final Configuration c) throws IOException {
    Path p = new Path(c.get(HConstants.HBASE_DIR));
    FileSystem fs = p.getFileSystem(c);
    return p.makeQualified(fs);
  }

  public static void setRootDir(final Configuration c, final Path root) throws IOException {
    c.set(HConstants.HBASE_DIR, root.toString());
  }

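  /**
   * Checks if the root region directory already exists under the given HBase
   * root directory.
   */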
  public static boolean rootRegionExists(FileSystem fs, Path rootdir)
      throws IOException {
    Path rootRegionDir =
        HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
    return fs.exists(rootRegionDir);
  }

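  /**
   * Computes the HDFS block distribution of the given file over the given byte
   * range, weighting each host by the length of the blocks it holds.
   */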
  static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
      final FileSystem fs, FileStatus status, long start, long length)
      throws IOException {
    HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
    BlockLocation[] blockLocations =
        fs.getFileBlockLocations(status, start, length);
    for (BlockLocation bl : blockLocations) {
      String[] hosts = bl.getHosts();
      long len = bl.getLength();
      blocksDistribution.addHostsAndBlockWeight(hosts, len);
    }

    return blocksDistribution;
  }

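  /**
   * Returns whether every column family of every region of every table under the
   * root directory has at most one store file, i.e. everything is major compacted.
   */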
  public static boolean isMajorCompacted(final FileSystem fs,
      final Path hbaseRootDir)
      throws IOException {
    FileStatus[] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
                " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

  public static int getTotalTableFragmentation(final HMaster master)
      throws IOException {
    Map<String, Integer> map = getTableFragmentation(master);
    return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
  }

  public static Map<String, Integer> getTableFragmentation(
      final HMaster master)
      throws IOException {
    Path path = getRootDir(master.getConfiguration());
    FileSystem fs = path.getFileSystem(master.getConfiguration());
    return getTableFragmentation(fs, path);
  }

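  /**
   * Runs through the HBase root directory and computes, per table, the
   * percentage of column families whose regions have more than one store file
   * (i.e. are "fragmented"), plus an overall "-TOTAL-" entry.
   */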
  public static Map<String, Integer> getTableFragmentation(
      final FileSystem fs, final Path hbaseRootDir)
      throws IOException {
    Map<String, Integer> frags = new HashMap<String, Integer>();
    int cfCountTotal = 0;
    int cfFragTotal = 0;
    DirFilter df = new DirFilter(fs);
    FileStatus[] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (FileStatus tableDir : tableDirs) {
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      int cfCount = 0;
      int cfFrag = 0;
      FileStatus[] regionDirs = fs.listStatus(d, df);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        FileStatus[] familyDirs = fs.listStatus(dd, df);
        for (FileStatus familyDir : familyDirs) {
          cfCount++;
          cfCountTotal++;
          Path family = familyDir.getPath();
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            cfFrag++;
            cfFragTotal++;
          }
        }
      }
      frags.put(d.getName(), Math.round((float) cfFrag / cfCount * 100));
    }
    frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
    return frags;
  }

  public static boolean isPre020FileLayout(final FileSystem fs,
      final Path hbaseRootDir)
      throws IOException {
    Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
        "70236052"), "info"), "mapfiles");
    return fs.exists(mapfiles);
  }

  public static boolean isMajorCompactedPre020(final FileSystem fs,
      final Path hbaseRootDir)
      throws IOException {
    FileStatus[] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          FileStatus[] infoAndMapfile = fs.listStatus(family);
          // Skip empty family directories; the checks below assume exactly
          // an 'info' and a 'mapfiles' entry.
          if (infoAndMapfile.length == 0) {
            continue;
          }
          if (infoAndMapfile.length != 2) {
            LOG.debug(family.toString() +
                " has more than just info and mapfile: " + infoAndMapfile.length);
            return false;
          }
          // Make sure the two entries are named info or mapfiles.
          for (int ll = 0; ll < 2; ll++) {
            if (infoAndMapfile[ll].getPath().getName().equals("info") ||
                infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
              continue;
            LOG.debug("Unexpected directory name: " +
                infoAndMapfile[ll].getPath());
            return false;
          }
          // Count the store files under 'mapfiles'.
          FileStatus[] familyStatus =
              fs.listStatus(new Path(family, "mapfiles"));
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
                " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

  static class FileFilter implements PathFilter {
    private final FileSystem fs;

    public FileFilter(final FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path p) {
      try {
        return fs.isFile(p);
      } catch (IOException e) {
        LOG.debug("unable to verify if path=" + p + " is a regular file", e);
        return false;
      }
    }
  }

  /**
   * A {@link PathFilter} that accepts directories, excluding the known
   * non-user-table directories.
   */
  public static class DirFilter implements PathFilter {
    private final FileSystem fs;

    public DirFilter(final FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path p) {
      boolean isValid = false;
      try {
        // HBASE_NON_USER_TABLE_DIRS holds directory names (strings), so compare
        // against the path's name; comparing the Path object itself never matches.
        if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p.getName())) {
          isValid = false;
        } else {
          isValid = this.fs.getFileStatus(p).isDir();
        }
      } catch (IOException e) {
        // Log instead of dumping the stack trace to stderr.
        LOG.warn("Unable to verify if " + p + " is a valid directory", e);
      }
      return isValid;
    }
  }

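  /**
   * Heuristic to determine whether the configured file system supports
   * append/sync: true if dfs.support.append is enabled and SequenceFile.Writer
   * has syncFs(), or if FSDataOutputStream has hflush().
   */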
  public static boolean isAppendSupported(final Configuration conf) {
    boolean append = conf.getBoolean("dfs.support.append", false);
    if (append) {
      try {
        SequenceFile.Writer.class.getMethod("syncFs", new Class<?>[] {});
        append = true;
      } catch (SecurityException e) {
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    if (!append) {
      try {
        FSDataOutputStream.class.getMethod("hflush", new Class<?>[] {});
        append = true;
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    return append;
  }

  public static boolean isHDFS(final Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    String scheme = fs.getUri().getScheme();
    return scheme.equalsIgnoreCase("hdfs");
  }

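  /**
   * Recovers the file system lease on the given file so it can be safely opened,
   * e.g. after the process that held it open has died. Implementations are file
   * system specific.
   */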
  public abstract void recoverFileLease(final FileSystem fs, final Path p,
      Configuration conf) throws IOException;

  public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
      throws IOException {
    FileStatus[] dirs = fs.listStatus(rootdir, new DirFilter(fs));
    List<Path> tabledirs = new ArrayList<Path>(dirs.length);
    for (FileStatus dir : dirs) {
      Path p = dir.getPath();
      String tableName = p.getName();
      if (!HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName)) {
        tabledirs.add(p);
      }
    }
    return tabledirs;
  }

  public static Path getTablePath(Path rootdir, byte[] tableName) {
    return getTablePath(rootdir, Bytes.toString(tableName));
  }

  public static Path getTablePath(Path rootdir, final String tableName) {
    return new Path(rootdir, tableName);
  }

  public static class RegionDirFilter implements PathFilter {

    final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
    final FileSystem fs;

    public RegionDirFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path rd) {
      if (!regionDirPattern.matcher(rd.getName()).matches()) {
        return false;
      }

      try {
        return fs.getFileStatus(rd).isDir();
      } catch (IOException ioe) {
        LOG.warn("Skipping file " + rd + " due to IOException", ioe);
        return false;
      }
    }
  }

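  /**
   * Returns all region directories under the given table directory, i.e. every
   * entry accepted by {@link RegionDirFilter}.
   */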
  public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
    FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
    List<Path> regionDirs = new ArrayList<Path>(rds.length);
    for (FileStatus rdfs : rds) {
      Path rdPath = rdfs.getPath();
      regionDirs.add(rdPath);
    }
    return regionDirs;
  }

  public static class FamilyDirFilter implements PathFilter {
    final FileSystem fs;

    public FamilyDirFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path rd) {
      try {
        HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
      } catch (IllegalArgumentException iae) {
        return false;
      }

      try {
        return fs.getFileStatus(rd).isDir();
      } catch (IOException ioe) {
        LOG.warn("Skipping file " + rd + " due to IOException", ioe);
        return false;
      }
    }
  }

  public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
    FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
    List<Path> familyDirs = new ArrayList<Path>(fds.length);
    for (FileStatus fdfs : fds) {
      Path fdPath = fdfs.getPath();
      familyDirs.add(fdPath);
    }
    return familyDirs;
  }

  public static class HFileFilter implements PathFilter {

    final public static Pattern hfilePattern = Pattern.compile("^([0-9a-f]+)$");

    final FileSystem fs;

    public HFileFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path rd) {
      if (!hfilePattern.matcher(rd.getName()).matches()) {
        return false;
      }

      try {
        return !fs.getFileStatus(rd).isDir();
      } catch (IOException ioe) {
        LOG.warn("Skipping file " + rd + " due to IOException", ioe);
        return false;
      }
    }
  }

  public static FileSystem getCurrentFileSystem(Configuration conf)
      throws IOException {
    return getRootDir(conf).getFileSystem(conf);
  }

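  /**
   * Runs through the HBase root directory and creates a reverse lookup map for
   * table store files: file name to full Path.
   */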
  public static Map<String, Path> getTableStoreFilePathMap(
      final FileSystem fs, final Path hbaseRootDir)
      throws IOException {
    Map<String, Path> map = new HashMap<String, Path>();

    DirFilter df = new DirFilter(fs);
    FileStatus[] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (FileStatus tableDir : tableDirs) {
      Path d = tableDir.getPath();
      if (HConstants.HBASE_NON_TABLE_DIRS.contains(d.getName())) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, df);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        FileStatus[] familyDirs = fs.listStatus(dd, df);
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          FileStatus[] familyStatus = fs.listStatus(family);
          for (FileStatus sfStatus : familyStatus) {
            Path sf = sfStatus.getPath();
            map.put(sf.getName(), sf);
          }
        }
      }
    }
    return map;
  }

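  /**
   * Calls fs.listStatus() and treats an empty or missing directory the same way:
   * by returning null instead of throwing FileNotFoundException or returning a
   * zero-length array.
   */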
  public static FileStatus[] listStatus(final FileSystem fs,
      final Path dir, final PathFilter filter) throws IOException {
    FileStatus[] status = null;
    try {
      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
    } catch (FileNotFoundException fnfe) {
      LOG.debug(dir + " doesn't exist");
    }
    if (status == null || status.length < 1) return null;
    return status;
  }

  public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
    return listStatus(fs, dir, null);
  }

  public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
      throws IOException {
    return fs.delete(path, recursive);
  }

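  /**
   * Throws an exception if the given user does not have permission to perform
   * the given action on the file, checking owner, group, and other permissions
   * in turn.
   *
   * @throws AccessControlException if the action is not permitted
   */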
  public static void checkAccess(User user, FileStatus file,
      FsAction action) throws AccessControlException {
    String username = user.getShortName();
    if (username.equals(file.getOwner())) {
      if (file.getPermission().getUserAction().implies(action)) {
        return;
      }
    } else if (contains(user.getGroupNames(), file.getGroup())) {
      if (file.getPermission().getGroupAction().implies(action)) {
        return;
      }
    } else if (file.getPermission().getOtherAction().implies(action)) {
      return;
    }
    throw new AccessControlException("Permission denied:" + " action=" + action
        + " path=" + file.getPath() + " user=" + username);
  }

  private static boolean contains(String[] groups, String user) {
    for (String group : groups) {
      if (group.equals(user)) {
        return true;
      }
    }
    return false;
  }

  public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
    return fs.exists(path);
  }

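  /**
   * Logs the current state of the file system, recursively from the given root,
   * at DEBUG level on the supplied logger.
   */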
  public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
      throws IOException {
    LOG.debug("Current file system:");
    logFSTree(LOG, fs, root, "|-");
  }

  private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
      throws IOException {
    FileStatus[] files = FSUtils.listStatus(fs, root, null);
    if (files == null) return;

    for (FileStatus file : files) {
      if (file.isDir()) {
        LOG.debug(prefix + file.getPath().getName() + "/");
        logFSTree(LOG, fs, file.getPath(), prefix + "---");
      } else {
        LOG.debug(prefix + file.getPath().getName());
      }
    }
  }
}