package org.apache.hadoop.hbase.util;

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Method;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseFileSystem;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;

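/**
 * Utility methods for interacting with the underlying file system.
 */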
public abstract class FSUtils {
  private static final Log LOG = LogFactory.getLog(FSUtils.class);

  private static final String FULL_RWX_PERMISSIONS = "777";

  protected FSUtils() {
    super();
  }

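  /**
   * Returns the scheme-specific FSUtils implementation, looked up via the
   * "hbase.fsutil." + scheme + ".impl" configuration key and defaulting to
   * FSHDFSUtils.
   */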
  public static FSUtils getInstance(FileSystem fs, Configuration conf) {
    String scheme = fs.getUri().getScheme();
    if (scheme == null) {
      LOG.warn("Could not find scheme for uri " +
          fs.getUri() + ", default to hdfs");
      scheme = "hdfs";
    }
    Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
        scheme + ".impl", FSHDFSUtils.class);
    FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
    return fsUtils;
  }

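  /**
   * Delete a directory if it exists.
   * @return true if the directory existed and was deleted
   */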
  public static boolean deleteDirectory(final FileSystem fs, final Path dir)
      throws IOException {
    return fs.exists(dir) && fs.delete(dir, true);
  }

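  /**
   * Check whether a directory exists; create it if it does not.
   * @return the existing or newly created directory path
   */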
  public Path checkdir(final FileSystem fs, final Path dir) throws IOException {
    if (!fs.exists(dir)) {
      HBaseFileSystem.makeDirOnFileSystem(fs, dir);
    }
    return dir;
  }

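  /**
   * Create the specified file with the given permissions, overwriting any
   * existing file.
   */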
  public static FSDataOutputStream create(FileSystem fs, Path path,
      FsPermission perm) throws IOException {
    return create(fs, path, perm, true);
  }

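  /**
   * Create the specified file with the given permissions. Whether an existing
   * file is replaced is controlled by the overwrite flag.
   */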
  public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm,
      boolean overwrite) throws IOException {
    LOG.debug("Creating file=" + path + " with permission=" + perm);
    return HBaseFileSystem.createPathWithPermsOnFileSystem(fs, path, perm, overwrite);
  }

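  /**
   * Get the file permissions specified in the configuration if data file
   * umasks are enabled; otherwise return the filesystem default.
   */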
  public static FsPermission getFilePermissions(final FileSystem fs,
      final Configuration conf, final String permissionConfKey) {
    boolean enablePermissions = conf.getBoolean(
        HConstants.ENABLE_DATA_FILE_UMASK, false);

    if (enablePermissions) {
      try {
        FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);

        String mask = conf.get(permissionConfKey);
        if (mask == null)
          return FsPermission.getDefault();

        FsPermission umask = new FsPermission(mask);
        return perm.applyUMask(umask);
      } catch (IllegalArgumentException e) {
        LOG.warn(
            "Incorrect umask configured: "
                + conf.get(permissionConfKey)
                + ", using default file permissions.", e);
        return FsPermission.getDefault();
      }
    }
    return FsPermission.getDefault();
  }

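  /**
   * Checks that the specified file system is available, closing it and
   * throwing an IOException if it is not.
   */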
  public static void checkFileSystemAvailable(final FileSystem fs)
      throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    IOException exception = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      if (dfs.exists(new Path("/"))) {
        return;
      }
    } catch (IOException e) {
      exception = RemoteExceptionHandler.checkIOException(e);
    }
    try {
      fs.close();
    } catch (Exception e) {
      LOG.error("file system close failed: ", e);
    }
    IOException io = new IOException("File system is not available");
    io.initCause(exception);
    throw io;
  }

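  /**
   * Check whether dfs is in safe mode. Uses reflection to prefer the
   * two-argument setSafeMode(SafeModeAction, boolean) where the running
   * Hadoop version provides it, falling back to the single-argument variant
   * otherwise.
   */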
  private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
    boolean inSafeMode = false;
    try {
      Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
      inSafeMode = (Boolean) m.invoke(dfs,
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
    } catch (Exception e) {
      if (e instanceof IOException) throw (IOException) e;

      inSafeMode = dfs.setSafeMode(
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
    }
    return inSafeMode;
  }

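  /**
   * Check whether dfs is in safe mode.
   * @throws IOException if the filesystem is in safe mode
   */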
  public static void checkDfsSafeMode(final Configuration conf)
      throws IOException {
    boolean isInSafeMode = false;
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem)fs;
      isInSafeMode = isInSafeMode(dfs);
    }
    if (isInSafeMode) {
      throw new IOException("File system is in safe mode and cannot be written to right now");
    }
  }

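  /**
   * Returns the value of the hbase.version file in the given root directory,
   * or null if the file does not exist.
   */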
  public static String getVersion(FileSystem fs, Path rootdir)
      throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    String version = null;
    if (fs.exists(versionFile)) {
      FSDataInputStream s =
          fs.open(versionFile);
      try {
        version = DataInputStream.readUTF(s);
      } catch (EOFException eof) {
        LOG.warn("Version file was empty, odd, will try to set it.");
      } finally {
        s.close();
      }
    }
    return version;
  }

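  /**
   * Verifies the current version of the file system, using the default number
   * of version-file write attempts and no wait between them.
   */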
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message) throws IOException {
    checkVersion(fs, rootdir, message, 0,
        HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
  }

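  /**
   * Verifies the current version of the file system. Writes the version file
   * if it is missing and no root region exists yet; otherwise throws
   * FileSystemVersionException when the on-disk version does not match
   * HConstants.FILE_SYSTEM_VERSION.
   */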
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message, int wait, int retries) throws IOException {
    String version = getVersion(fs, rootdir);

    if (version == null) {
      if (!rootRegionExists(fs, rootdir)) {
        FSUtils.setVersion(fs, rootdir, wait, retries);
        return;
      }
    } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0)
      return;

    String msg = "HBase file layout needs to be upgraded."
        + " You have version " + version
        + " and I want version " + HConstants.FILE_SYSTEM_VERSION
        + ". Is your hbase.rootdir valid? If so, you may need to run "
        + "'hbase hbck -fixVersionFile'.";
    if (message) {
      System.out.println("WARNING! " + msg);
    }
    throw new FileSystemVersionException(msg);
  }

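  /**
   * Sets the file system version to the current FILE_SYSTEM_VERSION.
   */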
  public static void setVersion(FileSystem fs, Path rootdir)
      throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
        HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
  }

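  /**
   * Sets the file system version to the current FILE_SYSTEM_VERSION, retrying
   * on failure with the given wait between attempts.
   */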
  public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
      throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
  }

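  /**
   * Returns the default block size for the given path, using the path-aware
   * getDefaultBlockSize(Path) via reflection when the running Hadoop version
   * provides it, and falling back to FileSystem#getDefaultBlockSize()
   * otherwise.
   */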
  public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
    Method m = null;
    Class<? extends FileSystem> cls = fs.getClass();
    try {
      m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
    } catch (NoSuchMethodException e) {
      LOG.info("FileSystem doesn't support getDefaultBlockSize");
    } catch (SecurityException e) {
      LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
      m = null;
    }
    if (m == null) {
      return fs.getDefaultBlockSize();
    } else {
      try {
        Object ret = m.invoke(fs, path);
        return ((Long)ret).longValue();
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
  }

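  /**
   * Returns the default replication for the given path, using the path-aware
   * getDefaultReplication(Path) via reflection when available, and falling
   * back to FileSystem#getDefaultReplication() otherwise.
   */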
  public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
    Method m = null;
    Class<? extends FileSystem> cls = fs.getClass();
    try {
      m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
    } catch (NoSuchMethodException e) {
      LOG.info("FileSystem doesn't support getDefaultReplication");
    } catch (SecurityException e) {
      LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
      m = null;
    }
    if (m == null) {
      return fs.getDefaultReplication();
    } else {
      try {
        Object ret = m.invoke(fs, path);
        return ((Number)ret).shortValue();
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
  }

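  /**
   * Returns the default buffer size to use during writes, taken from
   * "io.file.buffer.size" and defaulting to 4096.
   */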
  public static int getDefaultBufferSize(final FileSystem fs) {
    return fs.getConf().getInt("io.file.buffer.size", 4096);
  }

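  /**
   * Sets the file system version to the given value, retrying up to
   * the given number of times with the given wait (in milliseconds)
   * between attempts.
   */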
  public static void setVersion(FileSystem fs, Path rootdir, String version,
      int wait, int retries) throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    while (true) {
      try {
        FSDataOutputStream s = fs.create(versionFile);
        s.writeUTF(version);
        LOG.debug("Created version file at " + rootdir.toString() +
            " with version=" + version);
        s.close();
        return;
      } catch (IOException e) {
        if (retries > 0) {
          LOG.warn("Unable to create version file at " + rootdir.toString() +
              ", retrying: " + e.getMessage());
          fs.delete(versionFile, false);
          try {
            if (wait > 0) {
              Thread.sleep(wait);
            }
          } catch (InterruptedException ex) {
            // Ignore; loop around and retry the write.
          }
          retries--;
        } else {
          throw e;
        }
      }
    }
  }

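  /**
   * Checks that a cluster ID file exists in the HBase root directory,
   * retrying while the wait is positive if the check fails with an
   * IOException.
   */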
  public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
      int wait) throws IOException {
    while (true) {
      try {
        Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        return fs.exists(filePath);
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
              ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException ie) {
            Thread.interrupted();
            break;
          }
        } else {
          throw ioe;
        }
      }
    }
    return false;
  }

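  /**
   * Returns the unique cluster ID stored for this HBase instance, or null if
   * the cluster ID file is missing or empty.
   */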
  public static String getClusterId(FileSystem fs, Path rootdir)
      throws IOException {
    Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
    String clusterId = null;
    if (fs.exists(idPath)) {
      FSDataInputStream in = fs.open(idPath);
      try {
        clusterId = in.readUTF();
      } catch (EOFException eof) {
        LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
      } finally {
        in.close();
      }
    } else {
      LOG.warn("Cluster ID file does not exist at " + idPath.toString());
    }
    return clusterId;
  }

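  /**
   * Writes a new unique identifier for this cluster to the cluster ID file in
   * the HBase root directory, retrying while the wait is positive.
   */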
  public static void setClusterId(FileSystem fs, Path rootdir, String clusterId,
      int wait) throws IOException {
    while (true) {
      try {
        Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        FSDataOutputStream s = fs.create(filePath);
        s.writeUTF(clusterId);
        s.close();
        if (LOG.isDebugEnabled()) {
          LOG.debug("Created cluster ID file at " + filePath.toString() +
              " with ID: " + clusterId);
        }
        return;
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
              ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException ie) {
            Thread.interrupted();
            break;
          }
        } else {
          throw ioe;
        }
      }
    }
  }

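  /**
   * Verifies that the root directory path is a valid URI with a scheme.
   * @throws IOException if it is not a valid URI with a scheme
   */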
  public static Path validateRootPath(Path root) throws IOException {
    try {
      URI rootURI = new URI(root.toString());
      String scheme = rootURI.getScheme();
      if (scheme == null) {
        throw new IOException("Root directory does not have a scheme");
      }
      return root;
    } catch (URISyntaxException e) {
      IOException io = new IOException("Root directory path is not a valid " +
          "URI -- check your " + HConstants.HBASE_DIR + " configuration");
      io.initCause(e);
      throw io;
    }
  }

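  /**
   * If the filesystem is DFS and it is in safe mode, wait until safe mode
   * clears, sleeping the given number of milliseconds between checks.
   */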
  public static void waitOnSafeMode(final Configuration conf,
      final long wait)
      throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) return;
    DistributedFileSystem dfs = (DistributedFileSystem)fs;

    while (isInSafeMode(dfs)) {
      LOG.info("Waiting for dfs to exit safe mode...");
      try {
        Thread.sleep(wait);
      } catch (InterruptedException e) {
      }
    }
  }

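  /**
   * Return the 'path' component of a Path. In Hadoop a Path is a URI, and
   * this strips the scheme and authority; e.g.
   * hdfs://example.org:9000/hbase/TestTable becomes /hbase/TestTable.
   */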
  public static String getPath(Path p) {
    return p.toUri().getPath();
  }

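  /**
   * Get the root data directory as configured by hbase.rootdir, qualified
   * against its filesystem.
   */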
  public static Path getRootDir(final Configuration c) throws IOException {
    Path p = new Path(c.get(HConstants.HBASE_DIR));
    FileSystem fs = p.getFileSystem(c);
    return p.makeQualified(fs);
  }

  public static void setRootDir(final Configuration c, final Path root) throws IOException {
    c.set(HConstants.HBASE_DIR, root.toString());
  }

  public static void setFsDefault(final Configuration c, final Path root) throws IOException {
    c.set("fs.defaultFS", root.toString());
    c.set("fs.default.name", root.toString());
  }

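  /**
   * Checks whether the root (-ROOT-) region directory exists under the given
   * rootdir.
   */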
  public static boolean rootRegionExists(FileSystem fs, Path rootdir)
      throws IOException {
    Path rootRegionDir =
        HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
    return fs.exists(rootRegionDir);
  }

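  /**
   * Compute the HDFS block distribution of a given file, or a portion of the
   * file starting at the given offset for the given length.
   */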
  static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
      final FileSystem fs, FileStatus status, long start, long length)
      throws IOException {
    HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
    BlockLocation [] blockLocations =
        fs.getFileBlockLocations(status, start, length);
    for (BlockLocation bl : blockLocations) {
      String [] hosts = bl.getHosts();
      long len = bl.getLength();
      blocksDistribution.addHostsAndBlockWeight(hosts, len);
    }

    return blocksDistribution;
  }

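  /**
   * Runs through the HBase rootdir and checks that every store has only one
   * file in it -- that is, that the stores have been major compacted.
   */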
  public static boolean isMajorCompacted(final FileSystem fs,
      final Path hbaseRootDir)
      throws IOException {
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
                " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

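  /**
   * Returns the overall fragmentation percentage, i.e. the value stored under
   * the "-TOTAL-" key by getTableFragmentation, or -1 if nothing was found.
   */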
  public static int getTotalTableFragmentation(final HMaster master)
      throws IOException {
    Map<String, Integer> map = getTableFragmentation(master);
    return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
  }

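  /**
   * Runs through the HBase rootdir of the given master and reports, per table,
   * how many stores have more than one file in them.
   */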
  public static Map<String, Integer> getTableFragmentation(
      final HMaster master)
      throws IOException {
    Path path = getRootDir(master.getConfiguration());
    FileSystem fs = path.getFileSystem(master.getConfiguration());
    return getTableFragmentation(fs, path);
  }

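  /**
   * Runs through the HBase rootdir and computes, per table, the percentage of
   * stores that contain more than one file. The per-table percentage is
   * stored under the table name and the overall percentage under "-TOTAL-".
   */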
  public static Map<String, Integer> getTableFragmentation(
      final FileSystem fs, final Path hbaseRootDir)
      throws IOException {
    Map<String, Integer> frags = new HashMap<String, Integer>();
    int cfCountTotal = 0;
    int cfFragTotal = 0;
    DirFilter df = new DirFilter(fs);
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (FileStatus tableDir : tableDirs) {
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      int cfCount = 0;
      int cfFrag = 0;
      FileStatus[] regionDirs = fs.listStatus(d, df);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        FileStatus[] familyDirs = fs.listStatus(dd, df);
        for (FileStatus familyDir : familyDirs) {
          cfCount++;
          cfCountTotal++;
          Path family = familyDir.getPath();
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            cfFrag++;
            cfFragTotal++;
          }
        }
      }
      frags.put(d.getName(), Math.round((float) cfFrag / cfCount * 100));
    }
    frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
    return frags;
  }

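  /**
   * Checks for the pre-0.20 layout by looking for a 'mapfiles' directory
   * under the -ROOT- region directory.
   */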
  public static boolean isPre020FileLayout(final FileSystem fs,
      final Path hbaseRootDir)
      throws IOException {
    Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
        "70236052"), "info"), "mapfiles");
    return fs.exists(mapfiles);
  }

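  /**
   * Runs through the HBase rootdir and checks that every store has only one
   * file in it, using the pre-0.20 filesystem layout (info and mapfiles
   * subdirectories per family).
   */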
  public static boolean isMajorCompactedPre020(final FileSystem fs,
      final Path hbaseRootDir)
      throws IOException {
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          FileStatus[] infoAndMapfile = fs.listStatus(family);
          // A family directory should contain exactly the info and mapfiles
          // subdirectories; skip empty family dirs rather than indexing past
          // the end of the listing below.
          if (infoAndMapfile.length == 0) {
            continue;
          }
          if (infoAndMapfile.length != 2) {
            LOG.debug(family.toString() +
                " has more than just info and mapfile: " + infoAndMapfile.length);
            return false;
          }
          // Make sure the directories are named info or mapfiles.
          for (int ll = 0; ll < 2; ll++) {
            if (infoAndMapfile[ll].getPath().getName().equals("info") ||
                infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
              continue;
            LOG.debug("Unexpected directory name: " +
                infoAndMapfile[ll].getPath());
            return false;
          }
          // Check that there is only one file in mapfiles.
          FileStatus[] familyStatus =
              fs.listStatus(new Path(family, "mapfiles"));
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
                " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

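  /**
   * A {@link PathFilter} that accepts only regular files.
   */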
  static class FileFilter implements PathFilter {
    private final FileSystem fs;

    public FileFilter(final FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path p) {
      try {
        return fs.isFile(p);
      } catch (IOException e) {
        LOG.debug("unable to verify if path=" + p + " is a regular file", e);
        return false;
      }
    }
  }

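  /**
   * A {@link PathFilter} that accepts directories, excluding the known
   * non-user-table directory names.
   */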
  public static class DirFilter implements PathFilter {
    private final FileSystem fs;

    public DirFilter(final FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path p) {
      boolean isValid = false;
      try {
        // HBASE_NON_USER_TABLE_DIRS holds directory names, so compare against
        // the name; comparing the Path object itself would never match.
        if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p.getName())) {
          isValid = false;
        } else {
          isValid = this.fs.getFileStatus(p).isDir();
        }
      } catch (IOException e) {
        LOG.warn("Unable to verify if " + p + " is a valid directory; " +
            "treating it as not valid and continuing.", e);
      }
      return isValid;
    }
  }

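  /**
   * Heuristic for whether it is safe to open a file for append: checks the
   * dfs.support.append flag and the presence of the syncFs()/hflush()
   * methods.
   */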
  public static boolean isAppendSupported(final Configuration conf) {
    boolean append = conf.getBoolean("dfs.support.append", false);
    if (append) {
      try {
        SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
        append = true;
      } catch (SecurityException e) {
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    if (!append) {
      try {
        FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
        append = true;
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    return append;
  }

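  /**
   * @return true if the configured filesystem scheme is hdfs
   */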
  public static boolean isHDFS(final Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    String scheme = fs.getUri().getScheme();
    return scheme.equalsIgnoreCase("hdfs");
  }

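  /**
   * Recover the lease on a file. Used when the file may have been left open
   * by another process.
   */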
  public abstract void recoverFileLease(final FileSystem fs, final Path p,
      Configuration conf) throws IOException;

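  /**
   * @return all table directories under the HBase root dir, excluding the
   * known non-user-table directories
   */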
  public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
      throws IOException {
    FileStatus [] dirs = fs.listStatus(rootdir, new DirFilter(fs));
    List<Path> tabledirs = new ArrayList<Path>(dirs.length);
    for (FileStatus dir: dirs) {
      Path p = dir.getPath();
      String tableName = p.getName();
      if (!HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName)) {
        tabledirs.add(p);
      }
    }
    return tabledirs;
  }

  public static Path getTablePath(Path rootdir, byte [] tableName) {
    return getTablePath(rootdir, Bytes.toString(tableName));
  }

  public static Path getTablePath(Path rootdir, final String tableName) {
    return new Path(rootdir, tableName);
  }

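  /**
   * A {@link PathFilter} that accepts region directories: directories whose
   * names match the hex-encoded region name pattern.
   */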
  public static class RegionDirFilter implements PathFilter {
    final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
    final FileSystem fs;

    public RegionDirFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path rd) {
      if (!regionDirPattern.matcher(rd.getName()).matches()) {
        return false;
      }

      try {
        return fs.getFileStatus(rd).isDir();
      } catch (IOException ioe) {
        LOG.warn("Skipping file " + rd + " due to IOException", ioe);
        return false;
      }
    }
  }

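  /**
   * Given a particular table dir, return all the region directories inside
   * it.
   */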
  public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
    FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
    List<Path> regionDirs = new ArrayList<Path>(rds.length);
    for (FileStatus rdfs: rds) {
      Path rdPath = rdfs.getPath();
      regionDirs.add(rdPath);
    }
    return regionDirs;
  }

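  /**
   * A {@link PathFilter} that accepts directories whose names are legal
   * column family names.
   */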
  public static class FamilyDirFilter implements PathFilter {
    final FileSystem fs;

    public FamilyDirFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path rd) {
      try {
        HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
      } catch (IllegalArgumentException iae) {
        return false;
      }

      try {
        return fs.getFileStatus(rd).isDir();
      } catch (IOException ioe) {
        LOG.warn("Skipping file " + rd + " due to IOException", ioe);
        return false;
      }
    }
  }

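  /**
   * Given a particular region dir, return all the family directories inside
   * it.
   */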
  public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
    FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
    List<Path> familyDirs = new ArrayList<Path>(fds.length);
    for (FileStatus fdfs: fds) {
      Path fdPath = fdfs.getPath();
      familyDirs.add(fdPath);
    }
    return familyDirs;
  }

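  /**
   * A {@link PathFilter} that accepts hfiles: regular files whose names are
   * hex strings.
   */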
  public static class HFileFilter implements PathFilter {
    final public static Pattern hfilePattern = Pattern.compile("^([0-9a-f]+)$");

    final FileSystem fs;

    public HFileFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    public boolean accept(Path rd) {
      if (!hfilePattern.matcher(rd.getName()).matches()) {
        return false;
      }

      try {
        return !fs.getFileStatus(rd).isDir();
      } catch (IOException ioe) {
        LOG.warn("Skipping file " + rd + " due to IOException", ioe);
        return false;
      }
    }
  }

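  /**
   * @return the filesystem of the HBase root directory
   */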
  public static FileSystem getCurrentFileSystem(Configuration conf)
      throws IOException {
    return getRootDir(conf).getFileSystem(conf);
  }

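  /**
   * Runs through the HBase rootdir and builds a reverse lookup map from store
   * file name to its full Path.
   */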
  public static Map<String, Path> getTableStoreFilePathMap(
      final FileSystem fs, final Path hbaseRootDir)
      throws IOException {
    Map<String, Path> map = new HashMap<String, Path>();

    DirFilter df = new DirFilter(fs);
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (FileStatus tableDir : tableDirs) {
      Path d = tableDir.getPath();
      if (HConstants.HBASE_NON_TABLE_DIRS.contains(d.getName())) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, df);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        FileStatus[] familyDirs = fs.listStatus(dd, df);
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          FileStatus[] familyStatus = fs.listStatus(family);
          for (FileStatus sfStatus : familyStatus) {
            Path sf = sfStatus.getPath();
            map.put(sf.getName(), sf);
          }
        }
      }
    }
    return map;
  }

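  /**
   * Calls fs.listStatus() with the given filter and treats
   * FileNotFoundException as non-fatal.
   * @return null if the directory is empty or does not exist
   */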
  public static FileStatus [] listStatus(final FileSystem fs,
      final Path dir, final PathFilter filter) throws IOException {
    FileStatus [] status = null;
    try {
      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
    } catch (FileNotFoundException fnfe) {
      LOG.debug(dir + " doesn't exist");
    }
    if (status == null || status.length < 1) return null;
    return status;
  }

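  /**
   * Calls fs.listStatus() without a filter, treating FileNotFoundException as
   * non-fatal.
   */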
  public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
    return listStatus(fs, dir, null);
  }

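  /**
   * Calls fs.delete() and returns the value returned by the filesystem.
   */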
  public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
      throws IOException {
    return fs.delete(path, recursive);
  }

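  /**
   * Throws an AccessControlException if the given action is not permitted for
   * the user on the file, checking owner, group, and other permissions.
   */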
  public static void checkAccess(User user, FileStatus file,
      FsAction action) throws AccessControlException {
    String username = user.getShortName();
    if (username.equals(file.getOwner())) {
      if (file.getPermission().getUserAction().implies(action)) {
        return;
      }
    } else if (contains(user.getGroupNames(), file.getGroup())) {
      if (file.getPermission().getGroupAction().implies(action)) {
        return;
      }
    } else if (file.getPermission().getOtherAction().implies(action)) {
      return;
    }
    throw new AccessControlException("Permission denied:" + " action=" + action
        + " path=" + file.getPath() + " user=" + username);
  }

  private static boolean contains(String[] groups, String user) {
    for (String group : groups) {
      if (group.equals(user)) {
        return true;
      }
    }
    return false;
  }

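  /**
   * Calls fs.exists() to check whether the specified path exists.
   */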
  public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
    return fs.exists(path);
  }

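  /**
   * Log the current state of the filesystem from the given root directory.
   */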
  public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
      throws IOException {
    LOG.debug("Current file system:");
    logFSTree(LOG, fs, root, "|-");
  }

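  /**
   * Recursive helper to log the state of the filesystem.
   */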
  private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
      throws IOException {
    FileStatus[] files = FSUtils.listStatus(fs, root, null);
    if (files == null) return;

    for (FileStatus file : files) {
      if (file.isDir()) {
        LOG.debug(prefix + file.getPath().getName() + "/");
        logFSTree(LOG, fs, file.getPath(), prefix + "---");
      } else {
        LOG.debug(prefix + file.getPath().getName());
      }
    }
  }
}