1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.util;
20
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
39
40 import org.apache.commons.logging.Log;
41 import org.apache.commons.logging.LogFactory;
42 import org.apache.hadoop.classification.InterfaceAudience;
43 import org.apache.hadoop.classification.InterfaceStability;
44 import org.apache.hadoop.conf.Configuration;
45 import org.apache.hadoop.fs.BlockLocation;
46 import org.apache.hadoop.fs.FSDataInputStream;
47 import org.apache.hadoop.fs.FSDataOutputStream;
48 import org.apache.hadoop.fs.FileStatus;
49 import org.apache.hadoop.fs.FileSystem;
50 import org.apache.hadoop.fs.Path;
51 import org.apache.hadoop.fs.PathFilter;
52 import org.apache.hadoop.fs.permission.FsAction;
53 import org.apache.hadoop.fs.permission.FsPermission;
54 import org.apache.hadoop.hbase.ClusterId;
55 import org.apache.hadoop.hbase.TableName;
56 import org.apache.hadoop.hbase.HColumnDescriptor;
57 import org.apache.hadoop.hbase.HConstants;
58 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
59 import org.apache.hadoop.hbase.HRegionInfo;
60 import org.apache.hadoop.hbase.RemoteExceptionHandler;
61 import org.apache.hadoop.hbase.exceptions.DeserializationException;
62 import org.apache.hadoop.hbase.fs.HFileSystem;
63 import org.apache.hadoop.hbase.master.HMaster;
64 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
65 import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
66 import org.apache.hadoop.hbase.regionserver.HRegion;
67 import org.apache.hadoop.hdfs.DistributedFileSystem;
68 import org.apache.hadoop.hdfs.protocol.FSConstants;
69 import org.apache.hadoop.io.IOUtils;
70 import org.apache.hadoop.io.SequenceFile;
71 import org.apache.hadoop.security.AccessControlException;
72 import org.apache.hadoop.security.UserGroupInformation;
73 import org.apache.hadoop.util.Progressable;
74 import org.apache.hadoop.util.ReflectionUtils;
75 import org.apache.hadoop.util.StringUtils;
76
77 import com.google.common.primitives.Ints;
78 import com.google.protobuf.InvalidProtocolBufferException;
79
80
81
82
83 @InterfaceAudience.Public
84 @InterfaceStability.Evolving
85 public abstract class FSUtils {
86 private static final Log LOG = LogFactory.getLog(FSUtils.class);
87
88
89 private static final String FULL_RWX_PERMISSIONS = "777";
90
91
92 public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");
93
94 protected FSUtils() {
95 super();
96 }
97
98
99
100
101
102
103
104
105 public static boolean isStartingWithPath(final Path rootPath, final String path) {
106 String uriRootPath = rootPath.toUri().getPath();
107 String tailUriPath = (new Path(path)).toUri().getPath();
108 return tailUriPath.startsWith(uriRootPath);
109 }
110
111
112
113
114
115
116
117
118
119 public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
120 return isMatchingTail(pathToSearch, new Path(pathTail));
121 }
122
123
124
125
126
127
128
129
130
131 public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
132 if (pathToSearch.depth() != pathTail.depth()) return false;
133 Path tailPath = pathTail;
134 String tailName;
135 Path toSearch = pathToSearch;
136 String toSearchName;
137 boolean result = false;
138 do {
139 tailName = tailPath.getName();
140 if (tailName == null || tailName.length() <= 0) {
141 result = true;
142 break;
143 }
144 toSearchName = toSearch.getName();
145 if (toSearchName == null || toSearchName.length() <= 0) break;
146
147 tailPath = tailPath.getParent();
148 toSearch = toSearch.getParent();
149 } while(tailName.equals(toSearchName));
150 return result;
151 }
152
153 public static FSUtils getInstance(FileSystem fs, Configuration conf) {
154 String scheme = fs.getUri().getScheme();
155 if (scheme == null) {
156 LOG.warn("Could not find scheme for uri " +
157 fs.getUri() + ", default to hdfs");
158 scheme = "hdfs";
159 }
160 Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
161 scheme + ".impl", FSHDFSUtils.class);
162 FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
163 return fsUtils;
164 }
165
166
167
168
169
170
171
172
173 public static boolean deleteDirectory(final FileSystem fs, final Path dir)
174 throws IOException {
175 return fs.exists(dir) && fs.delete(dir, true);
176 }
177
178
179
180
181
182
183
184
185
186
187
188
189 public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
190 Method m = null;
191 Class<? extends FileSystem> cls = fs.getClass();
192 try {
193 m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
194 } catch (NoSuchMethodException e) {
195 LOG.info("FileSystem doesn't support getDefaultBlockSize");
196 } catch (SecurityException e) {
197 LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
198 m = null;
199 }
200 if (m == null) {
201 return fs.getDefaultBlockSize();
202 } else {
203 try {
204 Object ret = m.invoke(fs, path);
205 return ((Long)ret).longValue();
206 } catch (Exception e) {
207 throw new IOException(e);
208 }
209 }
210 }
211
212
213
214
215
216
217
218
219
220
221
222
223 public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
224 Method m = null;
225 Class<? extends FileSystem> cls = fs.getClass();
226 try {
227 m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
228 } catch (NoSuchMethodException e) {
229 LOG.info("FileSystem doesn't support getDefaultReplication");
230 } catch (SecurityException e) {
231 LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
232 m = null;
233 }
234 if (m == null) {
235 return fs.getDefaultReplication();
236 } else {
237 try {
238 Object ret = m.invoke(fs, path);
239 return ((Number)ret).shortValue();
240 } catch (Exception e) {
241 throw new IOException(e);
242 }
243 }
244 }
245
246
247
248
249
250
251
252
253
254
255
256 public static int getDefaultBufferSize(final FileSystem fs) {
257 return fs.getConf().getInt("io.file.buffer.size", 4096);
258 }
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278 public static FSDataOutputStream create(FileSystem fs, Path path,
279 FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException {
280 if (fs instanceof HFileSystem) {
281 FileSystem backingFs = ((HFileSystem)fs).getBackingFs();
282 if (backingFs instanceof DistributedFileSystem) {
283
284
285 try {
286 return (FSDataOutputStream) (DistributedFileSystem.class
287 .getDeclaredMethod("create", Path.class, FsPermission.class,
288 boolean.class, int.class, short.class, long.class,
289 Progressable.class, InetSocketAddress[].class)
290 .invoke(backingFs, path, FsPermission.getDefault(), true,
291 getDefaultBufferSize(backingFs),
292 getDefaultReplication(backingFs, path),
293 getDefaultBlockSize(backingFs, path),
294 null, favoredNodes));
295 } catch (InvocationTargetException ite) {
296
297 throw new IOException(ite.getCause());
298 } catch (NoSuchMethodException e) {
299 LOG.debug("DFS Client does not support most favored nodes create; using default create");
300 if (LOG.isTraceEnabled()) LOG.trace("Ignoring; use default create", e);
301 } catch (IllegalArgumentException e) {
302 LOG.debug("Ignoring (most likely Reflection related exception) " + e);
303 } catch (SecurityException e) {
304 LOG.debug("Ignoring (most likely Reflection related exception) " + e);
305 } catch (IllegalAccessException e) {
306 LOG.debug("Ignoring (most likely Reflection related exception) " + e);
307 }
308 }
309 }
310 return create(fs, path, perm, true);
311 }
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330 public static FSDataOutputStream create(FileSystem fs, Path path,
331 FsPermission perm, boolean overwrite) throws IOException {
332 if (LOG.isTraceEnabled()) {
333 LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
334 }
335 return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
336 getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
337 }
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352 public static FsPermission getFilePermissions(final FileSystem fs,
353 final Configuration conf, final String permssionConfKey) {
354 boolean enablePermissions = conf.getBoolean(
355 HConstants.ENABLE_DATA_FILE_UMASK, false);
356
357 if (enablePermissions) {
358 try {
359 FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
360
361 String mask = conf.get(permssionConfKey);
362 if (mask == null)
363 return FsPermission.getDefault();
364
365 FsPermission umask = new FsPermission(mask);
366 return perm.applyUMask(umask);
367 } catch (IllegalArgumentException e) {
368 LOG.warn(
369 "Incorrect umask attempted to be created: "
370 + conf.get(permssionConfKey)
371 + ", using default file permissions.", e);
372 return FsPermission.getDefault();
373 }
374 }
375 return FsPermission.getDefault();
376 }
377
378
379
380
381
382
383
384 public static void checkFileSystemAvailable(final FileSystem fs)
385 throws IOException {
386 if (!(fs instanceof DistributedFileSystem)) {
387 return;
388 }
389 IOException exception = null;
390 DistributedFileSystem dfs = (DistributedFileSystem) fs;
391 try {
392 if (dfs.exists(new Path("/"))) {
393 return;
394 }
395 } catch (IOException e) {
396 exception = RemoteExceptionHandler.checkIOException(e);
397 }
398 try {
399 fs.close();
400 } catch (Exception e) {
401 LOG.error("file system close failed: ", e);
402 }
403 IOException io = new IOException("File system is not available");
404 io.initCause(exception);
405 throw io;
406 }
407
408
409
410
411
412
413
414
415
416 private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
417 boolean inSafeMode = false;
418 try {
419 Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
420 org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
421 inSafeMode = (Boolean) m.invoke(dfs,
422 org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
423 } catch (Exception e) {
424 if (e instanceof IOException) throw (IOException) e;
425
426
427 inSafeMode = dfs.setSafeMode(
428 org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
429 }
430 return inSafeMode;
431 }
432
433
434
435
436
437
438 public static void checkDfsSafeMode(final Configuration conf)
439 throws IOException {
440 boolean isInSafeMode = false;
441 FileSystem fs = FileSystem.get(conf);
442 if (fs instanceof DistributedFileSystem) {
443 DistributedFileSystem dfs = (DistributedFileSystem)fs;
444 isInSafeMode = isInSafeMode(dfs);
445 }
446 if (isInSafeMode) {
447 throw new IOException("File system is in safemode, it can't be written now");
448 }
449 }
450
451
452
453
454
455
456
457
458
459
460 public static String getVersion(FileSystem fs, Path rootdir)
461 throws IOException, DeserializationException {
462 Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
463 FileStatus[] status = null;
464 try {
465
466
467 status = fs.listStatus(versionFile);
468 } catch (FileNotFoundException fnfe) {
469 return null;
470 }
471 if (status == null || status.length == 0) return null;
472 String version = null;
473 byte [] content = new byte [(int)status[0].getLen()];
474 FSDataInputStream s = fs.open(versionFile);
475 try {
476 IOUtils.readFully(s, content, 0, content.length);
477 if (ProtobufUtil.isPBMagicPrefix(content)) {
478 version = parseVersionFrom(content);
479 } else {
480
481 InputStream is = new ByteArrayInputStream(content);
482 DataInputStream dis = new DataInputStream(is);
483 try {
484 version = dis.readUTF();
485 } finally {
486 dis.close();
487 }
488
489 LOG.info("Updating the hbase.version file format with version=" + version);
490 setVersion(fs, rootdir, version, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
491 }
492 } catch (EOFException eof) {
493 LOG.warn("Version file was empty, odd, will try to set it.");
494 } finally {
495 s.close();
496 }
497 return version;
498 }
499
500
501
502
503
504
505
506 static String parseVersionFrom(final byte [] bytes)
507 throws DeserializationException {
508 ProtobufUtil.expectPBMagicPrefix(bytes);
509 int pblen = ProtobufUtil.lengthOfPBMagic();
510 FSProtos.HBaseVersionFileContent.Builder builder =
511 FSProtos.HBaseVersionFileContent.newBuilder();
512 FSProtos.HBaseVersionFileContent fileContent;
513 try {
514 fileContent = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
515 return fileContent.getVersion();
516 } catch (InvalidProtocolBufferException e) {
517
518 throw new DeserializationException(e);
519 }
520 }
521
522
523
524
525
526
527 static byte [] toVersionByteArray(final String version) {
528 FSProtos.HBaseVersionFileContent.Builder builder =
529 FSProtos.HBaseVersionFileContent.newBuilder();
530 return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray());
531 }
532
533
534
535
536
537
538
539
540
541
542
543 public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
544 throws IOException, DeserializationException {
545 checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
546 }
547
548
549
550
551
552
553
554
555
556
557
558
559
560 public static void checkVersion(FileSystem fs, Path rootdir,
561 boolean message, int wait, int retries)
562 throws IOException, DeserializationException {
563 String version = getVersion(fs, rootdir);
564 if (version == null) {
565 if (!metaRegionExists(fs, rootdir)) {
566
567
568 setVersion(fs, rootdir, wait, retries);
569 return;
570 }
571 } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return;
572
573
574
575 String msg = "HBase file layout needs to be upgraded."
576 + " You have version " + version
577 + " and I want version " + HConstants.FILE_SYSTEM_VERSION
578 + ". Is your hbase.rootdir valid? If so, you may need to run "
579 + "'hbase hbck -fixVersionFile'.";
580 if (message) {
581 System.out.println("WARNING! " + msg);
582 }
583 throw new FileSystemVersionException(msg);
584 }
585
586
587
588
589
590
591
592
593 public static void setVersion(FileSystem fs, Path rootdir)
594 throws IOException {
595 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
596 HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
597 }
598
599
600
601
602
603
604
605
606
607
608 public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
609 throws IOException {
610 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
611 }
612
613
614
615
616
617
618
619
620
621
622
623
624 public static void setVersion(FileSystem fs, Path rootdir, String version,
625 int wait, int retries) throws IOException {
626 Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
627 while (true) {
628 try {
629 FSDataOutputStream s = fs.create(versionFile);
630 s.write(toVersionByteArray(version));
631 s.close();
632 LOG.debug("Created version file at " + rootdir.toString() + " with version=" + version);
633 return;
634 } catch (IOException e) {
635 if (retries > 0) {
636 LOG.warn("Unable to create version file at " + rootdir.toString() + ", retrying", e);
637 fs.delete(versionFile, false);
638 try {
639 if (wait > 0) {
640 Thread.sleep(wait);
641 }
642 } catch (InterruptedException ex) {
643
644 }
645 retries--;
646 } else {
647 throw e;
648 }
649 }
650 }
651 }
652
653
654
655
656
657
658
659
660
661 public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
662 int wait) throws IOException {
663 while (true) {
664 try {
665 Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
666 return fs.exists(filePath);
667 } catch (IOException ioe) {
668 if (wait > 0) {
669 LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
670 ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
671 try {
672 Thread.sleep(wait);
673 } catch (InterruptedException ie) {
674 Thread.interrupted();
675 break;
676 }
677 } else {
678 throw ioe;
679 }
680 }
681 }
682 return false;
683 }
684
685
686
687
688
689
690
691
692 public static ClusterId getClusterId(FileSystem fs, Path rootdir)
693 throws IOException {
694 Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
695 ClusterId clusterId = null;
696 FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null;
697 if (status != null) {
698 int len = Ints.checkedCast(status.getLen());
699 byte [] content = new byte[len];
700 FSDataInputStream in = fs.open(idPath);
701 try {
702 in.readFully(content);
703 } catch (EOFException eof) {
704 LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
705 } finally{
706 in.close();
707 }
708 try {
709 clusterId = ClusterId.parseFrom(content);
710 } catch (DeserializationException e) {
711 throw new IOException("content=" + Bytes.toString(content), e);
712 }
713
714 if (!ProtobufUtil.isPBMagicPrefix(content)) {
715 String cid = new String();
716 in = fs.open(idPath);
717 try {
718 cid = in.readUTF();
719 clusterId = new ClusterId(cid);
720 } catch (EOFException eof) {
721 LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
722 } finally {
723 in.close();
724 }
725 rewriteAsPb(fs, rootdir, idPath, clusterId);
726 }
727 return clusterId;
728 } else {
729 LOG.warn("Cluster ID file does not exist at " + idPath.toString());
730 }
731 return clusterId;
732 }
733
734
735
736
737
738 private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
739 final ClusterId cid)
740 throws IOException {
741
742
743 Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
744 if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
745 setClusterId(fs, rootdir, cid, 100);
746 if (!fs.delete(movedAsideName, false)) {
747 throw new IOException("Failed delete of " + movedAsideName);
748 }
749 LOG.debug("Rewrote the hbase.id file as pb");
750 }
751
752
753
754
755
756
757
758
759
760
761 public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
762 int wait) throws IOException {
763 while (true) {
764 try {
765 Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
766 FSDataOutputStream s = fs.create(filePath);
767 try {
768 s.write(clusterId.toByteArray());
769 } finally {
770 s.close();
771 }
772 if (LOG.isDebugEnabled()) {
773 LOG.debug("Created cluster ID file at " + filePath.toString() + " with ID: " + clusterId);
774 }
775 return;
776 } catch (IOException ioe) {
777 if (wait > 0) {
778 LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
779 ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
780 try {
781 Thread.sleep(wait);
782 } catch (InterruptedException ie) {
783 Thread.interrupted();
784 break;
785 }
786 } else {
787 throw ioe;
788 }
789 }
790 }
791 }
792
793
794
795
796
797
798
799
800 public static Path validateRootPath(Path root) throws IOException {
801 try {
802 URI rootURI = new URI(root.toString());
803 String scheme = rootURI.getScheme();
804 if (scheme == null) {
805 throw new IOException("Root directory does not have a scheme");
806 }
807 return root;
808 } catch (URISyntaxException e) {
809 IOException io = new IOException("Root directory path is not a valid " +
810 "URI -- check your " + HConstants.HBASE_DIR + " configuration");
811 io.initCause(e);
812 throw io;
813 }
814 }
815
816
817
818
819
820
821
822
823
824 public static String removeRootPath(Path path, final Configuration conf) throws IOException {
825 Path root = FSUtils.getRootDir(conf);
826 String pathStr = path.toString();
827
828 if (!pathStr.startsWith(root.toString())) return pathStr;
829
830 return pathStr.substring(root.toString().length() + 1);
831 }
832
833
834
835
836
837
838
839 public static void waitOnSafeMode(final Configuration conf,
840 final long wait)
841 throws IOException {
842 FileSystem fs = FileSystem.get(conf);
843 if (!(fs instanceof DistributedFileSystem)) return;
844 DistributedFileSystem dfs = (DistributedFileSystem)fs;
845
846 while (isInSafeMode(dfs)) {
847 LOG.info("Waiting for dfs to exit safe mode...");
848 try {
849 Thread.sleep(wait);
850 } catch (InterruptedException e) {
851
852 }
853 }
854 }
855
856
857
858
859
860
861
862
863
864
865
866 public static String getPath(Path p) {
867 return p.toUri().getPath();
868 }
869
870
871
872
873
874
875
876 public static Path getRootDir(final Configuration c) throws IOException {
877 Path p = new Path(c.get(HConstants.HBASE_DIR));
878 FileSystem fs = p.getFileSystem(c);
879 return p.makeQualified(fs);
880 }
881
882 public static void setRootDir(final Configuration c, final Path root) throws IOException {
883 c.set(HConstants.HBASE_DIR, root.toString());
884 }
885
886 public static void setFsDefault(final Configuration c, final Path root) throws IOException {
887 c.set("fs.defaultFS", root.toString());
888 c.set("fs.default.name", root.toString());
889 }
890
891
892
893
894
895
896
897
898
899 @SuppressWarnings("deprecation")
900 public static boolean metaRegionExists(FileSystem fs, Path rootdir)
901 throws IOException {
902 Path metaRegionDir =
903 HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
904 return fs.exists(metaRegionDir);
905 }
906
907
908
909
910
911
912
913
914
915 static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
916 final FileSystem fs, FileStatus status, long start, long length)
917 throws IOException {
918 HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
919 BlockLocation [] blockLocations =
920 fs.getFileBlockLocations(status, start, length);
921 for(BlockLocation bl : blockLocations) {
922 String [] hosts = bl.getHosts();
923 long len = bl.getLength();
924 blocksDistribution.addHostsAndBlockWeight(hosts, len);
925 }
926
927 return blocksDistribution;
928 }
929
930
931
932
933
934
935
936
937
938
939
940
941 public static boolean isMajorCompacted(final FileSystem fs,
942 final Path hbaseRootDir)
943 throws IOException {
944 List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
945 for (Path d : tableDirs) {
946 FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
947 for (FileStatus regionDir : regionDirs) {
948 Path dd = regionDir.getPath();
949 if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
950 continue;
951 }
952
953 FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
954 for (FileStatus familyDir : familyDirs) {
955 Path family = familyDir.getPath();
956
957 FileStatus[] familyStatus = fs.listStatus(family);
958 if (familyStatus.length > 1) {
959 LOG.debug(family.toString() + " has " + familyStatus.length +
960 " files.");
961 return false;
962 }
963 }
964 }
965 }
966 return true;
967 }
968
969
970
971
972
973
974
975
976
977
978 public static int getTotalTableFragmentation(final HMaster master)
979 throws IOException {
980 Map<String, Integer> map = getTableFragmentation(master);
981 return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
982 }
983
984
985
986
987
988
989
990
991
992
993
994 public static Map<String, Integer> getTableFragmentation(
995 final HMaster master)
996 throws IOException {
997 Path path = getRootDir(master.getConfiguration());
998
999 FileSystem fs = path.getFileSystem(master.getConfiguration());
1000 return getTableFragmentation(fs, path);
1001 }
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013 public static Map<String, Integer> getTableFragmentation(
1014 final FileSystem fs, final Path hbaseRootDir)
1015 throws IOException {
1016 Map<String, Integer> frags = new HashMap<String, Integer>();
1017 int cfCountTotal = 0;
1018 int cfFragTotal = 0;
1019 DirFilter df = new DirFilter(fs);
1020 List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
1021 for (Path d : tableDirs) {
1022 int cfCount = 0;
1023 int cfFrag = 0;
1024 FileStatus[] regionDirs = fs.listStatus(d, df);
1025 for (FileStatus regionDir : regionDirs) {
1026 Path dd = regionDir.getPath();
1027 if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
1028 continue;
1029 }
1030
1031 FileStatus[] familyDirs = fs.listStatus(dd, df);
1032 for (FileStatus familyDir : familyDirs) {
1033 cfCount++;
1034 cfCountTotal++;
1035 Path family = familyDir.getPath();
1036
1037 FileStatus[] familyStatus = fs.listStatus(family);
1038 if (familyStatus.length > 1) {
1039 cfFrag++;
1040 cfFragTotal++;
1041 }
1042 }
1043 }
1044
1045 frags.put(FSUtils.getTableName(d).getNameAsString(),
1046 Math.round((float) cfFrag / cfCount * 100));
1047 }
1048
1049 frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
1050 return frags;
1051 }
1052
1053
1054
1055
1056
1057
1058
1059
1060 public static boolean isPre020FileLayout(final FileSystem fs,
1061 final Path hbaseRootDir)
1062 throws IOException {
1063 Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
1064 "70236052"), "info"), "mapfiles");
1065 return fs.exists(mapfiles);
1066 }
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079 public static boolean isMajorCompactedPre020(final FileSystem fs,
1080 final Path hbaseRootDir)
1081 throws IOException {
1082
1083 List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
1084 for (Path d: tableDirs) {
1085
1086
1087
1088
1089 if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
1090 continue;
1091 }
1092 FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
1093 for (FileStatus regionDir : regionDirs) {
1094 Path dd = regionDir.getPath();
1095 if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
1096 continue;
1097 }
1098
1099 FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
1100 for (FileStatus familyDir : familyDirs) {
1101 Path family = familyDir.getPath();
1102 FileStatus[] infoAndMapfile = fs.listStatus(family);
1103
1104 if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) {
1105 LOG.debug(family.toString() +
1106 " has more than just info and mapfile: " + infoAndMapfile.length);
1107 return false;
1108 }
1109
1110 for (int ll = 0; ll < 2; ll++) {
1111 if (infoAndMapfile[ll].getPath().getName().equals("info") ||
1112 infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
1113 continue;
1114 LOG.debug("Unexpected directory name: " +
1115 infoAndMapfile[ll].getPath());
1116 return false;
1117 }
1118
1119
1120 FileStatus[] familyStatus =
1121 fs.listStatus(new Path(family, "mapfiles"));
1122 if (familyStatus.length > 1) {
1123 LOG.debug(family.toString() + " has " + familyStatus.length +
1124 " files.");
1125 return false;
1126 }
1127 }
1128 }
1129 }
1130 return true;
1131 }
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141 public static Path getTableDir(Path rootdir, final TableName tableName) {
1142 return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
1143 tableName.getQualifierAsString());
1144 }
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154 public static TableName getTableName(Path tablePath) {
1155 return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
1156 }
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166 public static Path getNamespaceDir(Path rootdir, final String namespace) {
1167 return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1168 new Path(namespace)));
1169 }
1170
1171
1172
1173
1174 static class FileFilter implements PathFilter {
1175 private final FileSystem fs;
1176
1177 public FileFilter(final FileSystem fs) {
1178 this.fs = fs;
1179 }
1180
1181 @Override
1182 public boolean accept(Path p) {
1183 try {
1184 return fs.isFile(p);
1185 } catch (IOException e) {
1186 LOG.debug("unable to verify if path=" + p + " is a regular file", e);
1187 return false;
1188 }
1189 }
1190 }
1191
1192
1193
1194
1195 public static class BlackListDirFilter implements PathFilter {
1196 private final FileSystem fs;
1197 private List<String> blacklist;
1198
1199
1200
1201
1202
1203
1204
1205 @SuppressWarnings("unchecked")
1206 public BlackListDirFilter(final FileSystem fs, final List<String> directoryNameBlackList) {
1207 this.fs = fs;
1208 blacklist =
1209 (List<String>) (directoryNameBlackList == null ? Collections.emptyList()
1210 : directoryNameBlackList);
1211 }
1212
1213 @Override
1214 public boolean accept(Path p) {
1215 boolean isValid = false;
1216 try {
1217 if (blacklist.contains(p.getName().toString())) {
1218 isValid = false;
1219 } else {
1220 isValid = fs.getFileStatus(p).isDir();
1221 }
1222 } catch (IOException e) {
1223 LOG.warn("An error occurred while verifying if [" + p.toString()
1224 + "] is a valid directory. Returning 'not valid' and continuing.", e);
1225 }
1226 return isValid;
1227 }
1228 }
1229
1230
1231
1232
1233 public static class DirFilter extends BlackListDirFilter {
1234
1235 public DirFilter(FileSystem fs) {
1236 super(fs, null);
1237 }
1238 }
1239
1240
1241
1242
1243
1244 public static class UserTableDirFilter extends BlackListDirFilter {
1245
1246 public UserTableDirFilter(FileSystem fs) {
1247 super(fs, HConstants.HBASE_NON_TABLE_DIRS);
1248 }
1249 }
1250
1251
1252
1253
1254
1255
1256
1257
1258 public static boolean isAppendSupported(final Configuration conf) {
1259 boolean append = conf.getBoolean("dfs.support.append", false);
1260 if (append) {
1261 try {
1262
1263
1264
1265 SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
1266 append = true;
1267 } catch (SecurityException e) {
1268 } catch (NoSuchMethodException e) {
1269 append = false;
1270 }
1271 }
1272 if (!append) {
1273
1274 try {
1275 FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
1276 append = true;
1277 } catch (NoSuchMethodException e) {
1278 append = false;
1279 }
1280 }
1281 return append;
1282 }
1283
1284
1285
1286
1287
1288
1289 public static boolean isHDFS(final Configuration conf) throws IOException {
1290 FileSystem fs = FileSystem.get(conf);
1291 String scheme = fs.getUri().getScheme();
1292 return scheme.equalsIgnoreCase("hdfs");
1293 }
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303 public abstract void recoverFileLease(final FileSystem fs, final Path p,
1304 Configuration conf, CancelableProgressable reporter) throws IOException;
1305
1306 public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
1307 throws IOException {
1308 List<Path> tableDirs = new LinkedList<Path>();
1309
1310 for(FileStatus status :
1311 fs.globStatus(new Path(rootdir,
1312 new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
1313 tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
1314 }
1315 return tableDirs;
1316 }
1317
1318
1319
1320
1321
1322
1323
1324
1325 public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir)
1326 throws IOException {
1327
1328 FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs));
1329 List<Path> tabledirs = new ArrayList<Path>(dirs.length);
1330 for (FileStatus dir: dirs) {
1331 tabledirs.add(dir.getPath());
1332 }
1333 return tabledirs;
1334 }
1335
1336
1337
1338
1339
1340
1341 public static boolean isRecoveredEdits(Path path) {
1342 return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
1343 }
1344
1345
1346
1347
1348 public static class RegionDirFilter implements PathFilter {
1349
1350 final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
1351 final FileSystem fs;
1352
1353 public RegionDirFilter(FileSystem fs) {
1354 this.fs = fs;
1355 }
1356
1357 @Override
1358 public boolean accept(Path rd) {
1359 if (!regionDirPattern.matcher(rd.getName()).matches()) {
1360 return false;
1361 }
1362
1363 try {
1364 return fs.getFileStatus(rd).isDir();
1365 } catch (IOException ioe) {
1366
1367 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1368 return false;
1369 }
1370 }
1371 }
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381 public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
1382
1383 FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
1384 List<Path> regionDirs = new ArrayList<Path>(rds.length);
1385 for (FileStatus rdfs: rds) {
1386 Path rdPath = rdfs.getPath();
1387 regionDirs.add(rdPath);
1388 }
1389 return regionDirs;
1390 }
1391
1392
1393
1394
1395
1396 public static class FamilyDirFilter implements PathFilter {
1397 final FileSystem fs;
1398
1399 public FamilyDirFilter(FileSystem fs) {
1400 this.fs = fs;
1401 }
1402
1403 @Override
1404 public boolean accept(Path rd) {
1405 try {
1406
1407 HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
1408 } catch (IllegalArgumentException iae) {
1409
1410 return false;
1411 }
1412
1413 try {
1414 return fs.getFileStatus(rd).isDir();
1415 } catch (IOException ioe) {
1416
1417 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1418 return false;
1419 }
1420 }
1421 }
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431 public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
1432
1433 FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
1434 List<Path> familyDirs = new ArrayList<Path>(fds.length);
1435 for (FileStatus fdfs: fds) {
1436 Path fdPath = fdfs.getPath();
1437 familyDirs.add(fdPath);
1438 }
1439 return familyDirs;
1440 }
1441
1442
1443
1444
1445 public static class HFileFilter implements PathFilter {
1446
1447 final public static Pattern hfilePattern = Pattern.compile("^([0-9a-f]+)$");
1448
1449 final FileSystem fs;
1450
1451 public HFileFilter(FileSystem fs) {
1452 this.fs = fs;
1453 }
1454
1455 @Override
1456 public boolean accept(Path rd) {
1457 if (!hfilePattern.matcher(rd.getName()).matches()) {
1458 return false;
1459 }
1460
1461 try {
1462
1463 return !fs.getFileStatus(rd).isDir();
1464 } catch (IOException ioe) {
1465
1466 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1467 return false;
1468 }
1469 }
1470 }
1471
1472
1473
1474
1475
1476
1477 public static FileSystem getCurrentFileSystem(Configuration conf)
1478 throws IOException {
1479 return getRootDir(conf).getFileSystem(conf);
1480 }
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498 public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
1499 final FileSystem fs, final Path hbaseRootDir, TableName tableName)
1500 throws IOException {
1501 if (map == null) {
1502 map = new HashMap<String, Path>();
1503 }
1504
1505
1506 Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
1507
1508
1509 PathFilter df = new BlackListDirFilter(fs, HConstants.HBASE_NON_TABLE_DIRS);
1510 FileStatus[] regionDirs = fs.listStatus(tableDir);
1511 for (FileStatus regionDir : regionDirs) {
1512 Path dd = regionDir.getPath();
1513 if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
1514 continue;
1515 }
1516
1517 FileStatus[] familyDirs = fs.listStatus(dd, df);
1518 for (FileStatus familyDir : familyDirs) {
1519 Path family = familyDir.getPath();
1520
1521
1522 FileStatus[] familyStatus = fs.listStatus(family);
1523 for (FileStatus sfStatus : familyStatus) {
1524 Path sf = sfStatus.getPath();
1525 map.put( sf.getName(), sf);
1526 }
1527 }
1528 }
1529 return map;
1530 }
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546 public static Map<String, Path> getTableStoreFilePathMap(
1547 final FileSystem fs, final Path hbaseRootDir)
1548 throws IOException {
1549 Map<String, Path> map = new HashMap<String, Path>();
1550
1551
1552
1553
1554
1555 for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
1556 getTableStoreFilePathMap(map, fs, hbaseRootDir,
1557 FSUtils.getTableName(tableDir));
1558 }
1559 return map;
1560 }
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573 public static FileStatus [] listStatus(final FileSystem fs,
1574 final Path dir, final PathFilter filter) throws IOException {
1575 FileStatus [] status = null;
1576 try {
1577 status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
1578 } catch (FileNotFoundException fnfe) {
1579
1580 if (LOG.isTraceEnabled()) {
1581 LOG.trace(dir + " doesn't exist");
1582 }
1583 }
1584 if (status == null || status.length < 1) return null;
1585 return status;
1586 }
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596 public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
1597 return listStatus(fs, dir, null);
1598 }
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609 public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
1610 throws IOException {
1611 return fs.delete(path, recursive);
1612 }
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622 public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
1623 return fs.exists(path);
1624 }
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636 public static void checkAccess(UserGroupInformation ugi, FileStatus file,
1637 FsAction action) throws AccessControlException {
1638 if (ugi.getShortUserName().equals(file.getOwner())) {
1639 if (file.getPermission().getUserAction().implies(action)) {
1640 return;
1641 }
1642 } else if (contains(ugi.getGroupNames(), file.getGroup())) {
1643 if (file.getPermission().getGroupAction().implies(action)) {
1644 return;
1645 }
1646 } else if (file.getPermission().getOtherAction().implies(action)) {
1647 return;
1648 }
1649 throw new AccessControlException("Permission denied:" + " action=" + action
1650 + " path=" + file.getPath() + " user=" + ugi.getShortUserName());
1651 }
1652
1653 private static boolean contains(String[] groups, String user) {
1654 for (String group : groups) {
1655 if (group.equals(user)) {
1656 return true;
1657 }
1658 }
1659 return false;
1660 }
1661
1662
1663
1664
1665
1666
1667
1668
1669 public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
1670 throws IOException {
1671 LOG.debug("Current file system:");
1672 logFSTree(LOG, fs, root, "|-");
1673 }
1674
1675
1676
1677
1678
1679
1680 private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
1681 throws IOException {
1682 FileStatus[] files = FSUtils.listStatus(fs, root, null);
1683 if (files == null) return;
1684
1685 for (FileStatus file : files) {
1686 if (file.isDir()) {
1687 LOG.debug(prefix + file.getPath().getName() + "/");
1688 logFSTree(LOG, fs, file.getPath(), prefix + "---");
1689 } else {
1690 LOG.debug(prefix + file.getPath().getName());
1691 }
1692 }
1693 }
1694
1695 public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
1696 throws IOException {
1697
1698 fs.setTimes(src, EnvironmentEdgeManager.currentTimeMillis(), -1);
1699 return fs.rename(src, dest);
1700 }
1701 }