1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20 package org.apache.hadoop.hbase.util;
21
22 import java.io.DataInputStream;
23 import java.io.EOFException;
24 import java.io.FileNotFoundException;
25 import java.io.IOException;
26 import java.lang.reflect.Method;
27 import java.net.URI;
28 import java.net.URISyntaxException;
29 import java.util.ArrayList;
30 import java.util.HashMap;
31 import java.util.List;
32 import java.util.Map;
33 import java.util.regex.Pattern;
34
35 import org.apache.commons.logging.Log;
36 import org.apache.commons.logging.LogFactory;
37 import org.apache.hadoop.conf.Configuration;
38 import org.apache.hadoop.fs.BlockLocation;
39 import org.apache.hadoop.fs.FSDataInputStream;
40 import org.apache.hadoop.fs.FSDataOutputStream;
41 import org.apache.hadoop.fs.FileStatus;
42 import org.apache.hadoop.fs.FileSystem;
43 import org.apache.hadoop.fs.Path;
44 import org.apache.hadoop.fs.PathFilter;
45 import org.apache.hadoop.fs.permission.FsAction;
46 import org.apache.hadoop.fs.permission.FsPermission;
47 import org.apache.hadoop.hbase.HBaseFileSystem;
48 import org.apache.hadoop.hbase.HColumnDescriptor;
49 import org.apache.hadoop.hbase.HConstants;
50 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
51 import org.apache.hadoop.hbase.HRegionInfo;
52 import org.apache.hadoop.hbase.RemoteExceptionHandler;
53 import org.apache.hadoop.hbase.master.HMaster;
54 import org.apache.hadoop.hbase.regionserver.HRegion;
55 import org.apache.hadoop.hbase.security.User;
56 import org.apache.hadoop.hdfs.DistributedFileSystem;
57 import org.apache.hadoop.io.SequenceFile;
58 import org.apache.hadoop.security.AccessControlException;
59 import org.apache.hadoop.util.ReflectionUtils;
60 import org.apache.hadoop.util.StringUtils;
61
62
63
64
/**
 * Utility methods for interacting with the underlying file system.
 * Scheme-specific behavior is provided by subclasses obtained via
 * {@link #getInstance(FileSystem, Configuration)}.
 */
public abstract class FSUtils {
  private static final Log LOG = LogFactory.getLog(FSUtils.class);

  // Full rwx access; used as the base permission a configured umask is applied to.
  private static final String FULL_RWX_PERMISSIONS = "777";

  protected FSUtils() {
    super();
  }
74
75 public static FSUtils getInstance(FileSystem fs, Configuration conf) {
76 String scheme = fs.getUri().getScheme();
77 if (scheme == null) {
78 LOG.warn("Could not find scheme for uri " +
79 fs.getUri() + ", default to hdfs");
80 scheme = "hdfs";
81 }
82 Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
83 scheme + ".impl", FSHDFSUtils.class);
84 FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
85 return fsUtils;
86 }
87
88
89
90
91
92
93
94
95 public static boolean deleteDirectory(final FileSystem fs, final Path dir)
96 throws IOException {
97 return fs.exists(dir) && fs.delete(dir, true);
98 }
99
100
101
102
103
104
105
106
107 public Path checkdir(final FileSystem fs, final Path dir) throws IOException {
108 if (!fs.exists(dir)) {
109 HBaseFileSystem.makeDirOnFileSystem(fs, dir);
110 }
111 return dir;
112 }
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131 public static FSDataOutputStream create(FileSystem fs, Path path,
132 FsPermission perm) throws IOException {
133 return create(fs, path, perm, true);
134 }
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154 public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm,
155 boolean overwrite) throws IOException {
156 LOG.debug("Creating file=" + path + " with permission=" + perm);
157 return HBaseFileSystem.createPathWithPermsOnFileSystem(fs, path, perm, overwrite);
158 }
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173 public static FsPermission getFilePermissions(final FileSystem fs,
174 final Configuration conf, final String permssionConfKey) {
175 boolean enablePermissions = conf.getBoolean(
176 HConstants.ENABLE_DATA_FILE_UMASK, false);
177
178 if (enablePermissions) {
179 try {
180 FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
181
182 String mask = conf.get(permssionConfKey);
183 if (mask == null)
184 return FsPermission.getDefault();
185
186 FsPermission umask = new FsPermission(mask);
187 return perm.applyUMask(umask);
188 } catch (IllegalArgumentException e) {
189 LOG.warn(
190 "Incorrect umask attempted to be created: "
191 + conf.get(permssionConfKey)
192 + ", using default file permissions.", e);
193 return FsPermission.getDefault();
194 }
195 }
196 return FsPermission.getDefault();
197 }
198
199
200
201
202
203
204
  /**
   * Checks to see if the specified file system is available.  Non-DFS
   * filesystems are assumed available.  If DFS is unreachable, the handle is
   * closed (best effort) and an IOException carrying the original cause is thrown.
   *
   * @param fs filesystem to probe
   * @throws IOException when the DFS root cannot be reached
   */
  public static void checkFileSystemAvailable(final FileSystem fs)
  throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    IOException exception = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      // Probing the root path exercises the namenode connection.
      if (dfs.exists(new Path("/"))) {
        return;
      }
    } catch (IOException e) {
      exception = RemoteExceptionHandler.checkIOException(e);
    }
    try {
      // Close the dead handle so it is not reused; failure here is only logged.
      fs.close();
    } catch (Exception e) {
      LOG.error("file system close failed: ", e);
    }
    IOException io = new IOException("File system is not available");
    io.initCause(exception);
    throw io;
  }
228
229
230
231
232
233
234
235
236
  /**
   * Returns whether the given DFS instance is currently in safe mode.
   * Prefers, via reflection, the two-argument
   * {@code setSafeMode(SafeModeAction, boolean)} overload present on newer
   * HDFS versions (the boolean presumably scopes the check -- confirm against
   * the running HDFS version's docs), falling back to the one-argument form.
   *
   * @param dfs the distributed filesystem to query
   * @return true when the namenode reports safe mode
   * @throws IOException when the safe-mode query itself fails
   */
  private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
    boolean inSafeMode = false;
    try {
      Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
      inSafeMode = (Boolean) m.invoke(dfs,
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
    } catch (Exception e) {
      // Reflection failures (NoSuchMethod etc.) mean an older HDFS; but a real
      // IOException from the invocation must still propagate.
      if (e instanceof IOException) throw (IOException) e;

      // Fall back to the single-argument overload available on older HDFS.
      inSafeMode = dfs.setSafeMode(
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
    }
    return inSafeMode;
  }
253
254
255
256
257
258
259 public static void checkDfsSafeMode(final Configuration conf)
260 throws IOException {
261 boolean isInSafeMode = false;
262 FileSystem fs = FileSystem.get(conf);
263 if (fs instanceof DistributedFileSystem) {
264 DistributedFileSystem dfs = (DistributedFileSystem)fs;
265 isInSafeMode = isInSafeMode(dfs);
266 }
267 if (isInSafeMode) {
268 throw new IOException("File system is in safemode, it can't be written now");
269 }
270 }
271
272
273
274
275
276
277
278
279
280 public static String getVersion(FileSystem fs, Path rootdir)
281 throws IOException {
282 Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
283 String version = null;
284 if (fs.exists(versionFile)) {
285 FSDataInputStream s =
286 fs.open(versionFile);
287 try {
288 version = DataInputStream.readUTF(s);
289 } catch (EOFException eof) {
290 LOG.warn("Version file was empty, odd, will try to set it.");
291 } finally {
292 s.close();
293 }
294 }
295 return version;
296 }
297
298
299
300
301
302
303
304
305
306
307 public static void checkVersion(FileSystem fs, Path rootdir,
308 boolean message) throws IOException {
309 checkVersion(fs, rootdir, message, 0,
310 HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
311 }
312
313
314
315
316
317
318
319
320
321
322
323
  /**
   * Verifies current version of file system.  If no version file exists and
   * the root region is absent too (fresh install), a version file is written;
   * otherwise a mismatching or missing version raises
   * {@link FileSystemVersionException}.
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @param message if true, issues a warning on System.out as well
   * @param wait wait interval (ms) between version-file write attempts
   * @param retries number of times to retry writing the version file
   * @throws IOException e
   */
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message, int wait, int retries) throws IOException {
    String version = getVersion(fs, rootdir);

    if (version == null) {
      if (!rootRegionExists(fs, rootdir)) {
        // rootDir is empty (no version file and no root region): fresh
        // install, so just create a new version file.
        FSUtils.setVersion(fs, rootdir, wait, retries);
        return;
      }
    } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0)
      return;

    // Version file exists but is missing or stale relative to what this code
    // expects: the layout needs migration/repair.
    String msg = "HBase file layout needs to be upgraded."
      + " You have version " + version
      + " and I want version " + HConstants.FILE_SYSTEM_VERSION
      + ". Is your hbase.rootdir valid? If so, you may need to run "
      + "'hbase hbck -fixVersionFile'.";
    if (message) {
      System.out.println("WARNING! " + msg);
    }
    throw new FileSystemVersionException(msg);
  }
350
351
352
353
354
355
356
357
358 public static void setVersion(FileSystem fs, Path rootdir)
359 throws IOException {
360 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
361 HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
362 }
363
364
365
366
367
368
369
370
371
372
373 public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
374 throws IOException {
375 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
376 }
377
378
379
380
381
382
383
384
385
386
387
388
  /**
   * Returns the default block size for the given path on the given filesystem.
   * Uses reflection to invoke the per-path {@code getDefaultBlockSize(Path)}
   * overload when the running Hadoop version provides it, falling back to the
   * no-argument form otherwise.
   *
   * @param fs filesystem object
   * @param path path the block size applies to (ignored by the fallback)
   * @return the default block size for the path's filesystem
   * @throws IOException when the reflective invocation fails
   */
  public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
    Method m = null;
    Class<? extends FileSystem> cls = fs.getClass();
    try {
      m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
    } catch (NoSuchMethodException e) {
      // Older Hadoop: the per-path overload does not exist.
      LOG.info("FileSystem doesn't support getDefaultBlockSize");
    } catch (SecurityException e) {
      LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
      m = null; // fall through to the no-argument form
    }
    if (m == null) {
      return fs.getDefaultBlockSize();
    } else {
      try {
        Object ret = m.invoke(fs, path);
        return ((Long)ret).longValue();
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
  }
411
412
413
414
415
416
417
418
419
420
421
422
  /**
   * Returns the default replication for the given path on the given filesystem.
   * Uses reflection to invoke the per-path {@code getDefaultReplication(Path)}
   * overload when the running Hadoop version provides it, falling back to the
   * no-argument form otherwise.
   *
   * @param fs filesystem object
   * @param path path the replication applies to (ignored by the fallback)
   * @return the default replication for the path's filesystem
   * @throws IOException when the reflective invocation fails
   */
  public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
    Method m = null;
    Class<? extends FileSystem> cls = fs.getClass();
    try {
      m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
    } catch (NoSuchMethodException e) {
      // Older Hadoop: the per-path overload does not exist.
      LOG.info("FileSystem doesn't support getDefaultReplication");
    } catch (SecurityException e) {
      LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
      m = null; // fall through to the no-argument form
    }
    if (m == null) {
      return fs.getDefaultReplication();
    } else {
      try {
        Object ret = m.invoke(fs, path);
        // Narrow via Number: the reflective result may box as Short or Integer.
        return ((Number)ret).shortValue();
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
  }
445
446
447
448
449
450
451
452
453
454
455
456 public static int getDefaultBufferSize(final FileSystem fs) {
457 return fs.getConf().getInt("io.file.buffer.size", 4096);
458 }
459
460
461
462
463
464
465
466
467
468
469
470 public static void setVersion(FileSystem fs, Path rootdir, String version,
471 int wait, int retries) throws IOException {
472 Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
473 Path tmpFile = new Path(new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY), HConstants.VERSION_FILE_NAME);
474 while (true) {
475 try {
476 FSDataOutputStream s = fs.create(tmpFile);
477 s.writeUTF(version);
478 s.close();
479 if (!fs.rename(tmpFile, versionFile)) {
480 throw new IOException("Unable to move temp version file to " + versionFile);
481 }
482 LOG.debug("Created version file at " + rootdir.toString() +
483 " set its version at:" + version);
484 return;
485 } catch (IOException e) {
486 if (retries > 0) {
487 LOG.warn("Unable to create version file at " + rootdir.toString() +
488 ", retrying: " + e.getMessage());
489 fs.delete(versionFile, false);
490 try {
491 if (wait > 0) {
492 Thread.sleep(wait);
493 }
494 } catch (InterruptedException ex) {
495
496 }
497 retries--;
498 } else {
499 throw e;
500 }
501 }
502 }
503 }
504
505
506
507
508
509
510
511
512
513 public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
514 int wait) throws IOException {
515 while (true) {
516 try {
517 Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
518 return fs.exists(filePath);
519 } catch (IOException ioe) {
520 if (wait > 0) {
521 LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
522 ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
523 try {
524 Thread.sleep(wait);
525 } catch (InterruptedException ie) {
526 Thread.interrupted();
527 break;
528 }
529 } else {
530 throw ioe;
531 }
532 }
533 }
534 return false;
535 }
536
537
538
539
540
541
542
543
544 public static String getClusterId(FileSystem fs, Path rootdir)
545 throws IOException {
546 Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
547 String clusterId = null;
548 if (fs.exists(idPath)) {
549 FSDataInputStream in = fs.open(idPath);
550 try {
551 clusterId = in.readUTF();
552 } catch (EOFException eof) {
553 LOG.warn("Cluster ID file "+idPath.toString()+" was empty");
554 } finally{
555 in.close();
556 }
557 } else {
558 LOG.warn("Cluster ID file does not exist at " + idPath.toString());
559 }
560 return clusterId;
561 }
562
563
564
565
566
567
568
569
570
571
572 public static void setClusterId(FileSystem fs, Path rootdir, String clusterId,
573 int wait) throws IOException {
574 Path idFfile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
575 Path tmpFile = new Path(new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY), HConstants.CLUSTER_ID_FILE_NAME);
576 while (true) {
577 try {
578 FSDataOutputStream s = fs.create(tmpFile);
579 s.writeUTF(clusterId);
580 s.close();
581 if (!fs.rename(tmpFile, idFfile)) {
582 throw new IOException("Unable to move temp version file to " + idFfile);
583 }
584 if (LOG.isDebugEnabled()) {
585 LOG.debug("Created cluster ID file at " + idFfile.toString() +
586 " with ID: " + clusterId);
587 }
588 return;
589 } catch (IOException ioe) {
590 if (wait > 0) {
591 LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
592 ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
593 try {
594 Thread.sleep(wait);
595 } catch (InterruptedException ie) {
596 Thread.interrupted();
597 break;
598 }
599 } else {
600 throw ioe;
601 }
602 }
603 }
604 }
605
606
607
608
609
610
611
612
613 public static Path validateRootPath(Path root) throws IOException {
614 try {
615 URI rootURI = new URI(root.toString());
616 String scheme = rootURI.getScheme();
617 if (scheme == null) {
618 throw new IOException("Root directory does not have a scheme");
619 }
620 return root;
621 } catch (URISyntaxException e) {
622 IOException io = new IOException("Root directory path is not a valid " +
623 "URI -- check your " + HConstants.HBASE_DIR + " configuration");
624 io.initCause(e);
625 throw io;
626 }
627 }
628
629
630
631
632
633
634
635 public static void waitOnSafeMode(final Configuration conf,
636 final long wait)
637 throws IOException {
638 FileSystem fs = FileSystem.get(conf);
639 if (!(fs instanceof DistributedFileSystem)) return;
640 DistributedFileSystem dfs = (DistributedFileSystem)fs;
641
642 while (isInSafeMode(dfs)) {
643 LOG.info("Waiting for dfs to exit safe mode...");
644 try {
645 Thread.sleep(wait);
646 } catch (InterruptedException e) {
647
648 }
649 }
650 }
651
652
653
654
655
656
657
658
659
660
661
662 public static String getPath(Path p) {
663 return p.toUri().getPath();
664 }
665
666
667
668
669
670
671
672 public static Path getRootDir(final Configuration c) throws IOException {
673 Path p = new Path(c.get(HConstants.HBASE_DIR));
674 FileSystem fs = p.getFileSystem(c);
675 return p.makeQualified(fs);
676 }
677
678 public static void setRootDir(final Configuration c, final Path root) throws IOException {
679 c.set(HConstants.HBASE_DIR, root.toString());
680 }
681
682 public static void setFsDefault(final Configuration c, final Path root) throws IOException {
683 c.set("fs.defaultFS", root.toString());
684 c.set("fs.default.name", root.toString());
685 }
686
687
688
689
690
691
692
693
694
695 public static boolean rootRegionExists(FileSystem fs, Path rootdir)
696 throws IOException {
697 Path rootRegionDir =
698 HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
699 return fs.exists(rootRegionDir);
700 }
701
702
703
704
705
706
707
708
709
710 static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
711 final FileSystem fs, FileStatus status, long start, long length)
712 throws IOException {
713 HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
714 BlockLocation [] blockLocations =
715 fs.getFileBlockLocations(status, start, length);
716 for(BlockLocation bl : blockLocations) {
717 String [] hosts = bl.getHosts();
718 long len = bl.getLength();
719 blocksDistribution.addHostsAndBlockWeight(hosts, len);
720 }
721
722 return blocksDistribution;
723 }
724
725
726
727
728
729
730
731
732
733
734
735
  /**
   * Runs through the HBase rootdir and checks every store has at most one
   * file in it -- that is, everything has been major compacted.
   *
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return true when no family directory holds more than one file
   * @throws IOException e
   */
  public static boolean isMajorCompacted(final FileSystem fs,
      final Path hbaseRootDir)
  throws IOException {
    // Presumes every directory under hbase.rootdir is a table (non-table
    // siblings like the log dir are skipped below).
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      Path d = tableDir.getPath();
      // Skip the WAL directory that lives alongside table dirs.
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        // Skip the working directory used during compactions.
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it's a region; look inside for family directories.
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          // A major-compacted family has at most one store file.
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
                " files.");
            return false;
          }
        }
      }
    }
    return true;
  }
772
773
774
775
776
777
778
779
780
781
782 public static int getTotalTableFragmentation(final HMaster master)
783 throws IOException {
784 Map<String, Integer> map = getTableFragmentation(master);
785 return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
786 }
787
788
789
790
791
792
793
794
795
796
797 public static Map<String, Integer> getTableFragmentation(
798 final HMaster master)
799 throws IOException {
800 Path path = getRootDir(master.getConfiguration());
801
802 FileSystem fs = path.getFileSystem(master.getConfiguration());
803 return getTableFragmentation(fs, path);
804 }
805
806
807
808
809
810
811
812
813
814
815
  /**
   * Runs through the HBase rootdir and computes, per table, the percentage of
   * column families that have more than one store file (i.e. are fragmented).
   * An overall figure is stored under the key "-TOTAL-".
   *
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return map of table name to fragmentation percentage (0-100)
   * @throws IOException e
   */
  public static Map<String, Integer> getTableFragmentation(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException {
    Map<String, Integer> frags = new HashMap<String, Integer>();
    int cfCountTotal = 0;  // total column families across all tables
    int cfFragTotal = 0;   // total families with more than one file
    DirFilter df = new DirFilter(fs);
    // Presumes every directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (FileStatus tableDir : tableDirs) {
      Path d = tableDir.getPath();
      // Skip the WAL directory that lives alongside table dirs.
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      int cfCount = 0;
      int cfFrag = 0;
      FileStatus[] regionDirs = fs.listStatus(d, df);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        // Skip the working directory used during compactions.
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it's a region; look inside for family directories.
        FileStatus[] familyDirs = fs.listStatus(dd, df);
        for (FileStatus familyDir : familyDirs) {
          cfCount++;
          cfCountTotal++;
          Path family = familyDir.getPath();
          // A family with more than one file counts as fragmented.
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            cfFrag++;
            cfFragTotal++;
          }
        }
      }
      // NOTE(review): a table with zero families divides 0/0 -> NaN;
      // Math.round(NaN) is 0, so such tables report 0% -- confirm intended.
      frags.put(d.getName(), Math.round((float) cfFrag / cfCount * 100));
    }
    // Overall percentage across all tables, under the sentinel key.
    frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
    return frags;
  }
863
864
865
866
867
868
869
870
871 public static boolean isPre020FileLayout(final FileSystem fs,
872 final Path hbaseRootDir)
873 throws IOException {
874 Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
875 "70236052"), "info"), "mapfiles");
876 return fs.exists(mapfiles);
877 }
878
879
880
881
882
883
884
885
886
887
888
889
890 public static boolean isMajorCompactedPre020(final FileSystem fs,
891 final Path hbaseRootDir)
892 throws IOException {
893
894 FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
895 for (FileStatus tableDir : tableDirs) {
896
897
898
899
900 Path d = tableDir.getPath();
901 if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
902 continue;
903 }
904 FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
905 for (FileStatus regionDir : regionDirs) {
906 Path dd = regionDir.getPath();
907 if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
908 continue;
909 }
910
911 FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
912 for (FileStatus familyDir : familyDirs) {
913 Path family = familyDir.getPath();
914 FileStatus[] infoAndMapfile = fs.listStatus(family);
915
916 if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) {
917 LOG.debug(family.toString() +
918 " has more than just info and mapfile: " + infoAndMapfile.length);
919 return false;
920 }
921
922 for (int ll = 0; ll < 2; ll++) {
923 if (infoAndMapfile[ll].getPath().getName().equals("info") ||
924 infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
925 continue;
926 LOG.debug("Unexpected directory name: " +
927 infoAndMapfile[ll].getPath());
928 return false;
929 }
930
931
932 FileStatus[] familyStatus =
933 fs.listStatus(new Path(family, "mapfiles"));
934 if (familyStatus.length > 1) {
935 LOG.debug(family.toString() + " has " + familyStatus.length +
936 " files.");
937 return false;
938 }
939 }
940 }
941 }
942 return true;
943 }
944
945
946
947
948 static class FileFilter implements PathFilter {
949 private final FileSystem fs;
950
951 public FileFilter(final FileSystem fs) {
952 this.fs = fs;
953 }
954
955 @Override
956 public boolean accept(Path p) {
957 try {
958 return fs.isFile(p);
959 } catch (IOException e) {
960 LOG.debug("unable to verify if path=" + p + " is a regular file", e);
961 return false;
962 }
963 }
964 }
965
966
967
968
969 public static class DirFilter implements PathFilter {
970 private final FileSystem fs;
971
972 public DirFilter(final FileSystem fs) {
973 this.fs = fs;
974 }
975
976 @Override
977 public boolean accept(Path p) {
978 boolean isValid = false;
979 try {
980 if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p)) {
981 isValid = false;
982 } else {
983 isValid = this.fs.getFileStatus(p).isDir();
984 }
985 } catch (IOException e) {
986 e.printStackTrace();
987 }
988 return isValid;
989 }
990 }
991
992
993
994
995
996
997
998
  /**
   * Heuristically determines whether a durable sync/flush is available:
   * {@code dfs.support.append} must be set and either
   * {@code SequenceFile.Writer#syncFs()} (0.20-era Hadoop) or
   * {@code FSDataOutputStream#hflush()} (0.21+ Hadoop) must exist, checked
   * via reflection.
   *
   * @param conf configuration to consult for {@code dfs.support.append}
   * @return true when a sync mechanism appears to be available
   */
  public static boolean isAppendSupported(final Configuration conf) {
    boolean append = conf.getBoolean("dfs.support.append", false);
    if (append) {
      try {
        // Probe for the 0.20-branch syncFs method.
        SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
        append = true;
      } catch (SecurityException e) {
        // NOTE(review): a SecurityException leaves append == true even though
        // the method could not be verified -- presumably intentional
        // best-effort; confirm.
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    if (!append) {
      // Probe for the 0.21+ hflush method instead.
      try {
        FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
        append = true;
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    return append;
  }
1024
1025
1026
1027
1028
1029
1030 public static boolean isHDFS(final Configuration conf) throws IOException {
1031 FileSystem fs = FileSystem.get(conf);
1032 String scheme = fs.getUri().getScheme();
1033 return scheme.equalsIgnoreCase("hdfs");
1034 }
1035
1036
1037
1038
1039
1040
1041
1042
1043
  /**
   * Recovers the lease on the given file so it can safely be re-opened;
   * the mechanism is filesystem-specific and supplied by subclasses.
   *
   * @param fs filesystem holding the file
   * @param p path of the file whose lease should be recovered
   * @param conf configuration to use for the recovery
   * @throws IOException when lease recovery fails
   */
  public abstract void recoverFileLease(final FileSystem fs, final Path p,
      Configuration conf) throws IOException;
1046
1047
1048
1049
1050
1051
1052
1053
1054 public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
1055 throws IOException {
1056
1057 FileStatus [] dirs = fs.listStatus(rootdir, new DirFilter(fs));
1058 List<Path> tabledirs = new ArrayList<Path>(dirs.length);
1059 for (FileStatus dir: dirs) {
1060 Path p = dir.getPath();
1061 String tableName = p.getName();
1062 if (!HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName)) {
1063 tabledirs.add(p);
1064 }
1065 }
1066 return tabledirs;
1067 }
1068
1069 public static Path getTablePath(Path rootdir, byte [] tableName) {
1070 return getTablePath(rootdir, Bytes.toString(tableName));
1071 }
1072
1073 public static Path getTablePath(Path rootdir, final String tableName) {
1074 return new Path(rootdir, tableName);
1075 }
1076
1077
1078
1079
1080 public static class RegionDirFilter implements PathFilter {
1081
1082 final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
1083 final FileSystem fs;
1084
1085 public RegionDirFilter(FileSystem fs) {
1086 this.fs = fs;
1087 }
1088
1089 @Override
1090 public boolean accept(Path rd) {
1091 if (!regionDirPattern.matcher(rd.getName()).matches()) {
1092 return false;
1093 }
1094
1095 try {
1096 return fs.getFileStatus(rd).isDir();
1097 } catch (IOException ioe) {
1098
1099 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1100 return false;
1101 }
1102 }
1103 }
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113 public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
1114
1115 FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
1116 List<Path> regionDirs = new ArrayList<Path>(rds.length);
1117 for (FileStatus rdfs: rds) {
1118 Path rdPath = rdfs.getPath();
1119 regionDirs.add(rdPath);
1120 }
1121 return regionDirs;
1122 }
1123
1124
1125
1126
1127
  /**
   * A {@link PathFilter} accepting directories whose names are legal column
   * family names (per {@link HColumnDescriptor#isLegalFamilyName(byte[])}).
   */
  public static class FamilyDirFilter implements PathFilter {
    final FileSystem fs;

    public FamilyDirFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path rd) {
      try {
        // Throws IllegalArgumentException when the name is not a legal family.
        HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
      } catch (IllegalArgumentException iae) {
        // Not a valid family name, so exclude the path.
        return false;
      }

      try {
        // Must also actually be a directory.
        return fs.getFileStatus(rd).isDir();
      } catch (IOException ioe) {
        // Path may have vanished or the fs may be unreachable; skip it.
        LOG.warn("Skipping file " + rd +" due to IOException", ioe);
        return false;
      }
    }
  }
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163 public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
1164
1165 FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
1166 List<Path> familyDirs = new ArrayList<Path>(fds.length);
1167 for (FileStatus fdfs: fds) {
1168 Path fdPath = fdfs.getPath();
1169 familyDirs.add(fdPath);
1170 }
1171 return familyDirs;
1172 }
1173
1174
1175
1176
1177 public static class HFileFilter implements PathFilter {
1178
1179 final public static Pattern hfilePattern = Pattern.compile("^([0-9a-f]+)$");
1180
1181 final FileSystem fs;
1182
1183 public HFileFilter(FileSystem fs) {
1184 this.fs = fs;
1185 }
1186
1187 @Override
1188 public boolean accept(Path rd) {
1189 if (!hfilePattern.matcher(rd.getName()).matches()) {
1190 return false;
1191 }
1192
1193 try {
1194
1195 return !fs.getFileStatus(rd).isDir();
1196 } catch (IOException ioe) {
1197
1198 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1199 return false;
1200 }
1201 }
1202 }
1203
1204
1205
1206
1207
1208
1209 public static FileSystem getCurrentFileSystem(Configuration conf)
1210 throws IOException {
1211 return getRootDir(conf).getFileSystem(conf);
1212 }
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
  /**
   * Walks every table/region/family directory under {@code hbaseRootDir} and
   * returns a map from store-file name to its full {@link Path}.
   *
   * NOTE(review): the map is keyed by bare file name only, so two store files
   * with the same name in different families/regions would overwrite each
   * other -- presumably names are unique in practice; confirm.
   *
   * @param fs filesystem
   * @param hbaseRootDir hbase root directory
   * @return map of store-file name to path
   * @throws IOException e
   */
  public static Map<String, Path> getTableStoreFilePathMap(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException {
    Map<String, Path> map = new HashMap<String, Path>();

    // Walk the table directories under the root; DirFilter narrows the
    // listing to directories.
    DirFilter df = new DirFilter(fs);
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (FileStatus tableDir : tableDirs) {
      // Skip non-table siblings such as the log directory.
      Path d = tableDir.getPath();
      if (HConstants.HBASE_NON_TABLE_DIRS.contains(d.getName())) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, df);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        // Skip the working directory used during compactions.
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it's a region; look inside for family directories.
        FileStatus[] familyDirs = fs.listStatus(dd, df);
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          // Every entry directly under a family directory is treated as a
          // store file and indexed by its name.
          FileStatus[] familyStatus = fs.listStatus(family);
          for (FileStatus sfStatus : familyStatus) {
            Path sf = sfStatus.getPath();
            map.put( sf.getName(), sf);
          }

        }
      }
    }
    return map;
  }
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279 public static FileStatus [] listStatus(final FileSystem fs,
1280 final Path dir, final PathFilter filter) throws IOException {
1281 FileStatus [] status = null;
1282 try {
1283 status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
1284 } catch (FileNotFoundException fnfe) {
1285
1286 LOG.debug(dir + " doesn't exist");
1287 }
1288 if (status == null || status.length < 1) return null;
1289 return status;
1290 }
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300 public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
1301 return listStatus(fs, dir, null);
1302 }
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313 public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
1314 throws IOException {
1315 return fs.delete(path, recursive);
1316 }
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328 public static void checkAccess(User user, FileStatus file,
1329 FsAction action) throws AccessControlException {
1330
1331 String username = user.getShortName();
1332 if (username.equals(file.getOwner())) {
1333 if (file.getPermission().getUserAction().implies(action)) {
1334 return;
1335 }
1336 } else if (contains(user.getGroupNames(), file.getGroup())) {
1337 if (file.getPermission().getGroupAction().implies(action)) {
1338 return;
1339 }
1340 } else if (file.getPermission().getOtherAction().implies(action)) {
1341 return;
1342 }
1343 throw new AccessControlException("Permission denied:" + " action=" + action
1344 + " path=" + file.getPath() + " user=" + username);
1345 }
1346
1347 private static boolean contains(String[] groups, String user) {
1348 for (String group : groups) {
1349 if (group.equals(user)) {
1350 return true;
1351 }
1352 }
1353 return false;
1354 }
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364 public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
1365 return fs.exists(path);
1366 }
1367
1368
1369
1370
1371
1372
1373
1374
1375 public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
1376 throws IOException {
1377 LOG.debug("Current file system:");
1378 logFSTree(LOG, fs, root, "|-");
1379 }
1380
1381
1382
1383
1384
1385 private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
1386 throws IOException {
1387 FileStatus[] files = FSUtils.listStatus(fs, root, null);
1388 if (files == null) return;
1389
1390 for (FileStatus file : files) {
1391 if (file.isDir()) {
1392 LOG.debug(prefix + file.getPath().getName() + "/");
1393 logFSTree(LOG, fs, file.getPath(), prefix + "---");
1394 } else {
1395 LOG.debug(prefix + file.getPath().getName());
1396 }
1397 }
1398 }
1399 }