1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.util;
20
21 import java.io.ByteArrayInputStream;
22 import java.io.DataInputStream;
23 import java.io.EOFException;
24 import java.io.FileNotFoundException;
25 import java.io.IOException;
26 import java.io.InputStream;
27 import java.io.InterruptedIOException;
28 import java.lang.reflect.InvocationTargetException;
29 import java.lang.reflect.Method;
30 import java.net.InetSocketAddress;
31 import java.net.URI;
32 import java.net.URISyntaxException;
33 import java.util.ArrayList;
34 import java.util.Collections;
35 import java.util.HashMap;
36 import java.util.LinkedList;
37 import java.util.List;
38 import java.util.Map;
39 import java.util.concurrent.ArrayBlockingQueue;
40 import java.util.concurrent.ConcurrentHashMap;
41 import java.util.concurrent.ThreadPoolExecutor;
42 import java.util.concurrent.TimeUnit;
43 import java.util.regex.Pattern;
44
45 import org.apache.commons.logging.Log;
46 import org.apache.commons.logging.LogFactory;
47 import org.apache.hadoop.classification.InterfaceAudience;
48 import org.apache.hadoop.conf.Configuration;
49 import org.apache.hadoop.fs.BlockLocation;
50 import org.apache.hadoop.fs.FSDataInputStream;
51 import org.apache.hadoop.fs.FSDataOutputStream;
52 import org.apache.hadoop.fs.FileStatus;
53 import org.apache.hadoop.fs.FileSystem;
54 import org.apache.hadoop.fs.Path;
55 import org.apache.hadoop.fs.PathFilter;
56 import org.apache.hadoop.fs.permission.FsAction;
57 import org.apache.hadoop.fs.permission.FsPermission;
58 import org.apache.hadoop.hbase.ClusterId;
59 import org.apache.hadoop.hbase.HColumnDescriptor;
60 import org.apache.hadoop.hbase.HConstants;
61 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
62 import org.apache.hadoop.hbase.HRegionInfo;
63 import org.apache.hadoop.hbase.RemoteExceptionHandler;
64 import org.apache.hadoop.hbase.TableName;
65 import org.apache.hadoop.hbase.exceptions.DeserializationException;
66 import org.apache.hadoop.hbase.fs.HFileSystem;
67 import org.apache.hadoop.hbase.master.HMaster;
68 import org.apache.hadoop.hbase.master.RegionPlacementMaintainer;
69 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
70 import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
71 import org.apache.hadoop.hbase.regionserver.HRegion;
72 import org.apache.hadoop.hdfs.DistributedFileSystem;
73 import org.apache.hadoop.hdfs.protocol.FSConstants;
74 import org.apache.hadoop.io.IOUtils;
75 import org.apache.hadoop.io.SequenceFile;
76 import org.apache.hadoop.security.AccessControlException;
77 import org.apache.hadoop.security.UserGroupInformation;
78 import org.apache.hadoop.util.Progressable;
79 import org.apache.hadoop.util.ReflectionUtils;
80 import org.apache.hadoop.util.StringUtils;
81
82 import com.google.common.primitives.Ints;
83 import com.google.protobuf.InvalidProtocolBufferException;
84
85
86
87
88 @InterfaceAudience.Private
89 public abstract class FSUtils {
90 private static final Log LOG = LogFactory.getLog(FSUtils.class);
91
92
93 private static final String FULL_RWX_PERMISSIONS = "777";
94 private static final String THREAD_POOLSIZE = "hbase.client.localityCheck.threadPoolSize";
95 private static final int DEFAULT_THREAD_POOLSIZE = 2;
96
97
98 public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");
99
100 protected FSUtils() {
101 super();
102 }
103
104
105
106
107
108
109
110
111 public static boolean isStartingWithPath(final Path rootPath, final String path) {
112 String uriRootPath = rootPath.toUri().getPath();
113 String tailUriPath = (new Path(path)).toUri().getPath();
114 return tailUriPath.startsWith(uriRootPath);
115 }
116
117
118
119
120
121
122
123
124
125 public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
126 return isMatchingTail(pathToSearch, new Path(pathTail));
127 }
128
129
130
131
132
133
134
135
136
137 public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
138 if (pathToSearch.depth() != pathTail.depth()) return false;
139 Path tailPath = pathTail;
140 String tailName;
141 Path toSearch = pathToSearch;
142 String toSearchName;
143 boolean result = false;
144 do {
145 tailName = tailPath.getName();
146 if (tailName == null || tailName.length() <= 0) {
147 result = true;
148 break;
149 }
150 toSearchName = toSearch.getName();
151 if (toSearchName == null || toSearchName.length() <= 0) break;
152
153 tailPath = tailPath.getParent();
154 toSearch = toSearch.getParent();
155 } while(tailName.equals(toSearchName));
156 return result;
157 }
158
159 public static FSUtils getInstance(FileSystem fs, Configuration conf) {
160 String scheme = fs.getUri().getScheme();
161 if (scheme == null) {
162 LOG.warn("Could not find scheme for uri " +
163 fs.getUri() + ", default to hdfs");
164 scheme = "hdfs";
165 }
166 Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
167 scheme + ".impl", FSHDFSUtils.class);
168 FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
169 return fsUtils;
170 }
171
172
173
174
175
176
177
178
179 public static boolean deleteDirectory(final FileSystem fs, final Path dir)
180 throws IOException {
181 return fs.exists(dir) && fs.delete(dir, true);
182 }
183
184
185
186
187
188
189
190
191
192
193
194
195 public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
196 Method m = null;
197 Class<? extends FileSystem> cls = fs.getClass();
198 try {
199 m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
200 } catch (NoSuchMethodException e) {
201 LOG.info("FileSystem doesn't support getDefaultBlockSize");
202 } catch (SecurityException e) {
203 LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
204 m = null;
205 }
206 if (m == null) {
207 return fs.getDefaultBlockSize();
208 } else {
209 try {
210 Object ret = m.invoke(fs, path);
211 return ((Long)ret).longValue();
212 } catch (Exception e) {
213 throw new IOException(e);
214 }
215 }
216 }
217
218
219
220
221
222
223
224
225
226
227
228
229 public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
230 Method m = null;
231 Class<? extends FileSystem> cls = fs.getClass();
232 try {
233 m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
234 } catch (NoSuchMethodException e) {
235 LOG.info("FileSystem doesn't support getDefaultReplication");
236 } catch (SecurityException e) {
237 LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
238 m = null;
239 }
240 if (m == null) {
241 return fs.getDefaultReplication();
242 } else {
243 try {
244 Object ret = m.invoke(fs, path);
245 return ((Number)ret).shortValue();
246 } catch (Exception e) {
247 throw new IOException(e);
248 }
249 }
250 }
251
252
253
254
255
256
257
258
259
260
261
262 public static int getDefaultBufferSize(final FileSystem fs) {
263 return fs.getConf().getInt("io.file.buffer.size", 4096);
264 }
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
/**
 * Creates an output stream at {@code path}, attempting to place replicas on
 * {@code favoredNodes} when the backing filesystem is a DistributedFileSystem.
 * The favored-nodes create overload is invoked reflectively because it is not
 * present in all DFS client versions; on any reflection failure this falls back
 * to the plain {@link #create(FileSystem, Path, FsPermission, boolean)}.
 *
 * @param fs filesystem (favored nodes only honored for HFileSystem over DFS)
 * @param path file to create
 * @param perm permissions used by the fallback create path
 * @param favoredNodes preferred datanode addresses for block placement
 * @return an open output stream for the new file
 * @throws IOException if the underlying create fails
 */
public static FSDataOutputStream create(FileSystem fs, Path path,
    FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException {
  if (fs instanceof HFileSystem) {
    FileSystem backingFs = ((HFileSystem)fs).getBackingFs();
    if (backingFs instanceof DistributedFileSystem) {
      // Try the DFS create overload that accepts favored nodes; resolved via
      // reflection since older clients lack it.
      try {
        return (FSDataOutputStream) (DistributedFileSystem.class
            .getDeclaredMethod("create", Path.class, FsPermission.class,
                boolean.class, int.class, short.class, long.class,
                Progressable.class, InetSocketAddress[].class)
            .invoke(backingFs, path, FsPermission.getDefault(), true,
                getDefaultBufferSize(backingFs),
                getDefaultReplication(backingFs, path),
                getDefaultBlockSize(backingFs, path),
                null, favoredNodes));
      } catch (InvocationTargetException ite) {
        // The invoked create itself failed; surface its real cause.
        throw new IOException(ite.getCause());
      } catch (NoSuchMethodException e) {
        LOG.debug("DFS Client does not support most favored nodes create; using default create");
        if (LOG.isTraceEnabled()) LOG.trace("Ignoring; use default create", e);
      } catch (IllegalArgumentException e) {
        LOG.debug("Ignoring (most likely Reflection related exception) " + e);
      } catch (SecurityException e) {
        LOG.debug("Ignoring (most likely Reflection related exception) " + e);
      } catch (IllegalAccessException e) {
        LOG.debug("Ignoring (most likely Reflection related exception) " + e);
      }
    }
  }
  // Fallback: standard create without favored-node placement.
  return create(fs, path, perm, true);
}
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336 public static FSDataOutputStream create(FileSystem fs, Path path,
337 FsPermission perm, boolean overwrite) throws IOException {
338 if (LOG.isTraceEnabled()) {
339 LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
340 }
341 return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
342 getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
343 }
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358 public static FsPermission getFilePermissions(final FileSystem fs,
359 final Configuration conf, final String permssionConfKey) {
360 boolean enablePermissions = conf.getBoolean(
361 HConstants.ENABLE_DATA_FILE_UMASK, false);
362
363 if (enablePermissions) {
364 try {
365 FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
366
367 String mask = conf.get(permssionConfKey);
368 if (mask == null)
369 return FsPermission.getDefault();
370
371 FsPermission umask = new FsPermission(mask);
372 return perm.applyUMask(umask);
373 } catch (IllegalArgumentException e) {
374 LOG.warn(
375 "Incorrect umask attempted to be created: "
376 + conf.get(permssionConfKey)
377 + ", using default file permissions.", e);
378 return FsPermission.getDefault();
379 }
380 }
381 return FsPermission.getDefault();
382 }
383
384
385
386
387
388
389
390 public static void checkFileSystemAvailable(final FileSystem fs)
391 throws IOException {
392 if (!(fs instanceof DistributedFileSystem)) {
393 return;
394 }
395 IOException exception = null;
396 DistributedFileSystem dfs = (DistributedFileSystem) fs;
397 try {
398 if (dfs.exists(new Path("/"))) {
399 return;
400 }
401 } catch (IOException e) {
402 exception = RemoteExceptionHandler.checkIOException(e);
403 }
404 try {
405 fs.close();
406 } catch (Exception e) {
407 LOG.error("file system close failed: ", e);
408 }
409 IOException io = new IOException("File system is not available");
410 io.initCause(exception);
411 throw io;
412 }
413
414
415
416
417
418
419
420
421
/**
 * Reports whether HDFS is in safe mode. Prefers the two-argument
 * {@code setSafeMode(SafeModeAction, boolean)} overload (invoked reflectively;
 * the second argument is "checkOnly" in clients that support it — TODO confirm
 * against the target Hadoop version), falling back to the one-argument call on
 * clients that lack it.
 *
 * @param dfs the DFS client to query
 * @return true if the namenode is in safe mode
 * @throws IOException if the safe-mode query fails
 */
private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
  boolean inSafeMode = false;
  try {
    Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
        org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
    inSafeMode = (Boolean) m.invoke(dfs,
        org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
  } catch (Exception e) {
    // Re-throw genuine I/O failures from the invocation.
    if (e instanceof IOException) throw (IOException) e;

    // Reflection failed (method absent on this client): use the legacy
    // single-argument overload instead.
    inSafeMode = dfs.setSafeMode(
        org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
  }
  return inSafeMode;
}
438
439
440
441
442
443
444 public static void checkDfsSafeMode(final Configuration conf)
445 throws IOException {
446 boolean isInSafeMode = false;
447 FileSystem fs = FileSystem.get(conf);
448 if (fs instanceof DistributedFileSystem) {
449 DistributedFileSystem dfs = (DistributedFileSystem)fs;
450 isInSafeMode = isInSafeMode(dfs);
451 }
452 if (isInSafeMode) {
453 throw new IOException("File system is in safemode, it can't be written now");
454 }
455 }
456
457
458
459
460
461
462
463
464
465
/**
 * Reads the hbase.version file under {@code rootdir}. Handles both the
 * protobuf format and the legacy UTF format; legacy files are rewritten as
 * protobuf as a side effect. Returns null when the version file is absent.
 *
 * @param fs filesystem holding the root directory
 * @param rootdir HBase root directory
 * @return the version string, or null if no version file exists (or it was empty)
 * @throws IOException on read failure
 * @throws DeserializationException if the protobuf content is corrupt
 */
public static String getVersion(FileSystem fs, Path rootdir)
    throws IOException, DeserializationException {
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  FileStatus[] status = null;
  try {
    // listStatus on the file itself: gives us existence check plus length in one call.
    status = fs.listStatus(versionFile);
  } catch (FileNotFoundException fnfe) {
    return null;
  }
  if (status == null || status.length == 0) return null;
  String version = null;
  // Size the buffer from the file's reported length.
  byte [] content = new byte [(int)status[0].getLen()];
  FSDataInputStream s = fs.open(versionFile);
  try {
    IOUtils.readFully(s, content, 0, content.length);
    if (ProtobufUtil.isPBMagicPrefix(content)) {
      // Current format: protobuf with magic prefix.
      version = parseVersionFrom(content);
    } else {
      // Legacy format: a single UTF string.
      InputStream is = new ByteArrayInputStream(content);
      DataInputStream dis = new DataInputStream(is);
      try {
        version = dis.readUTF();
      } finally {
        dis.close();
      }
      // Migrate the legacy file to the protobuf format in place.
      LOG.info("Updating the hbase.version file format with version=" + version);
      setVersion(fs, rootdir, version, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
    }
  } catch (EOFException eof) {
    // Zero-length file: treat as missing (version stays null).
    LOG.warn("Version file was empty, odd, will try to set it.");
  } finally {
    s.close();
  }
  return version;
}
505
506
507
508
509
510
511
512 static String parseVersionFrom(final byte [] bytes)
513 throws DeserializationException {
514 ProtobufUtil.expectPBMagicPrefix(bytes);
515 int pblen = ProtobufUtil.lengthOfPBMagic();
516 FSProtos.HBaseVersionFileContent.Builder builder =
517 FSProtos.HBaseVersionFileContent.newBuilder();
518 FSProtos.HBaseVersionFileContent fileContent;
519 try {
520 fileContent = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
521 return fileContent.getVersion();
522 } catch (InvalidProtocolBufferException e) {
523
524 throw new DeserializationException(e);
525 }
526 }
527
528
529
530
531
532
533 static byte [] toVersionByteArray(final String version) {
534 FSProtos.HBaseVersionFileContent.Builder builder =
535 FSProtos.HBaseVersionFileContent.newBuilder();
536 return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray());
537 }
538
539
540
541
542
543
544
545
546
547
548
549 public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
550 throws IOException, DeserializationException {
551 checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
552 }
553
554
555
556
557
558
559
560
561
562
563
564
565
/**
 * Verifies that the on-disk file layout version matches
 * {@link HConstants#FILE_SYSTEM_VERSION}. If no version file exists and no
 * meta region exists either (a fresh install), the version file is created.
 * Otherwise any mismatch — including a missing version file on an existing
 * install — raises {@link FileSystemVersionException}.
 *
 * @param fs filesystem holding the root directory
 * @param rootdir HBase root directory
 * @param message whether to also print a warning to stdout on mismatch
 * @param wait milliseconds to sleep between version-file write attempts
 * @param retries number of write attempts when creating the version file
 * @throws IOException on filesystem error
 * @throws DeserializationException if the version file is corrupt
 * @throws FileSystemVersionException if the layout version does not match
 */
public static void checkVersion(FileSystem fs, Path rootdir,
    boolean message, int wait, int retries)
    throws IOException, DeserializationException {
  String version = getVersion(fs, rootdir);
  if (version == null) {
    if (!metaRegionExists(fs, rootdir)) {
      // Fresh deploy: no version file and no meta region. Stamp the current
      // version and we're done.
      setVersion(fs, rootdir, wait, retries);
      return;
    }
    // else: meta exists but version file is missing — fall through to the
    // mismatch error below.
  } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return;

  // Version absent-on-existing-install or different from what we require.
  String msg = "HBase file layout needs to be upgraded."
      + " You have version " + version
      + " and I want version " + HConstants.FILE_SYSTEM_VERSION
      + ". Is your hbase.rootdir valid? If so, you may need to run "
      + "'hbase hbck -fixVersionFile'.";
  if (message) {
    System.out.println("WARNING! " + msg);
  }
  throw new FileSystemVersionException(msg);
}
591
592
593
594
595
596
597
598
599 public static void setVersion(FileSystem fs, Path rootdir)
600 throws IOException {
601 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
602 HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
603 }
604
605
606
607
608
609
610
611
612
613
614 public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
615 throws IOException {
616 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
617 }
618
619
620
621
622
623
624
625
626
627
628
629
630 public static void setVersion(FileSystem fs, Path rootdir, String version,
631 int wait, int retries) throws IOException {
632 Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
633 while (true) {
634 try {
635 FSDataOutputStream s = fs.create(versionFile);
636 s.write(toVersionByteArray(version));
637 s.close();
638 LOG.debug("Created version file at " + rootdir.toString() + " with version=" + version);
639 return;
640 } catch (IOException e) {
641 if (retries > 0) {
642 LOG.warn("Unable to create version file at " + rootdir.toString() + ", retrying", e);
643 fs.delete(versionFile, false);
644 try {
645 if (wait > 0) {
646 Thread.sleep(wait);
647 }
648 } catch (InterruptedException ex) {
649
650 }
651 retries--;
652 } else {
653 throw e;
654 }
655 }
656 }
657 }
658
659
660
661
662
663
664
665
666
667 public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
668 int wait) throws IOException {
669 while (true) {
670 try {
671 Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
672 return fs.exists(filePath);
673 } catch (IOException ioe) {
674 if (wait > 0) {
675 LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
676 ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
677 try {
678 Thread.sleep(wait);
679 } catch (InterruptedException ie) {
680 throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
681 }
682 } else {
683 throw ioe;
684 }
685 }
686 }
687 }
688
689
690
691
692
693
694
695
/**
 * Reads the cluster ID file under {@code rootdir}. Content is first parsed via
 * {@link ClusterId#parseFrom(byte[])}; if the bytes lack the PB magic prefix
 * the file is additionally re-read as a legacy UTF string and rewritten in the
 * protobuf format. Returns null when the file does not exist.
 *
 * @param fs filesystem holding the root directory
 * @param rootdir HBase root directory
 * @return the cluster ID, or null if the ID file is absent
 * @throws IOException on read failure or unparseable content
 */
public static ClusterId getClusterId(FileSystem fs, Path rootdir)
    throws IOException {
  Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
  ClusterId clusterId = null;
  FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null;
  if (status != null) {
    int len = Ints.checkedCast(status.getLen());
    byte [] content = new byte[len];
    FSDataInputStream in = fs.open(idPath);
    try {
      in.readFully(content);
    } catch (EOFException eof) {
      LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
    } finally{
      in.close();
    }
    try {
      clusterId = ClusterId.parseFrom(content);
    } catch (DeserializationException e) {
      throw new IOException("content=" + Bytes.toString(content), e);
    }
    // Legacy (pre-protobuf) file: re-read as a UTF string and migrate the
    // on-disk file to the protobuf format.
    if (!ProtobufUtil.isPBMagicPrefix(content)) {
      String cid = null;
      in = fs.open(idPath);
      try {
        cid = in.readUTF();
        clusterId = new ClusterId(cid);
      } catch (EOFException eof) {
        LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
      } finally {
        in.close();
      }
      rewriteAsPb(fs, rootdir, idPath, clusterId);
    }
    return clusterId;
  } else {
    LOG.warn("Cluster ID file does not exist at " + idPath.toString());
  }
  return clusterId;
}
737
738
739
740
741
742 private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
743 final ClusterId cid)
744 throws IOException {
745
746
747 Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
748 if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
749 setClusterId(fs, rootdir, cid, 100);
750 if (!fs.delete(movedAsideName, false)) {
751 throw new IOException("Failed delete of " + movedAsideName);
752 }
753 LOG.debug("Rewrote the hbase.id file as pb");
754 }
755
756
757
758
759
760
761
762
763
764
765 public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
766 int wait) throws IOException {
767 while (true) {
768 try {
769 Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
770 FSDataOutputStream s = fs.create(filePath);
771 try {
772 s.write(clusterId.toByteArray());
773 } finally {
774 s.close();
775 }
776 if (LOG.isDebugEnabled()) {
777 LOG.debug("Created cluster ID file at " + filePath.toString() + " with ID: " + clusterId);
778 }
779 return;
780 } catch (IOException ioe) {
781 if (wait > 0) {
782 LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
783 ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
784 try {
785 Thread.sleep(wait);
786 } catch (InterruptedException ie) {
787 Thread.currentThread().interrupt();
788 break;
789 }
790 } else {
791 throw ioe;
792 }
793 }
794 }
795 }
796
797
798
799
800
801
802
803
804 public static Path validateRootPath(Path root) throws IOException {
805 try {
806 URI rootURI = new URI(root.toString());
807 String scheme = rootURI.getScheme();
808 if (scheme == null) {
809 throw new IOException("Root directory does not have a scheme");
810 }
811 return root;
812 } catch (URISyntaxException e) {
813 IOException io = new IOException("Root directory path is not a valid " +
814 "URI -- check your " + HConstants.HBASE_DIR + " configuration");
815 io.initCause(e);
816 throw io;
817 }
818 }
819
820
821
822
823
824
825
826
827
828 public static String removeRootPath(Path path, final Configuration conf) throws IOException {
829 Path root = FSUtils.getRootDir(conf);
830 String pathStr = path.toString();
831
832 if (!pathStr.startsWith(root.toString())) return pathStr;
833
834 return pathStr.substring(root.toString().length() + 1);
835 }
836
837
838
839
840
841
842
843 public static void waitOnSafeMode(final Configuration conf,
844 final long wait)
845 throws IOException {
846 FileSystem fs = FileSystem.get(conf);
847 if (!(fs instanceof DistributedFileSystem)) return;
848 DistributedFileSystem dfs = (DistributedFileSystem)fs;
849
850 while (isInSafeMode(dfs)) {
851 LOG.info("Waiting for dfs to exit safe mode...");
852 try {
853 Thread.sleep(wait);
854 } catch (InterruptedException e) {
855
856 }
857 }
858 }
859
860
861
862
863
864
865
866
867
868
869
870 public static String getPath(Path p) {
871 return p.toUri().getPath();
872 }
873
874
875
876
877
878
879
880 public static Path getRootDir(final Configuration c) throws IOException {
881 Path p = new Path(c.get(HConstants.HBASE_DIR));
882 FileSystem fs = p.getFileSystem(c);
883 return p.makeQualified(fs);
884 }
885
886 public static void setRootDir(final Configuration c, final Path root) throws IOException {
887 c.set(HConstants.HBASE_DIR, root.toString());
888 }
889
890 public static void setFsDefault(final Configuration c, final Path root) throws IOException {
891 c.set("fs.defaultFS", root.toString());
892 c.set("fs.default.name", root.toString());
893 }
894
895
896
897
898
899
900
901
902
903 @SuppressWarnings("deprecation")
904 public static boolean metaRegionExists(FileSystem fs, Path rootdir)
905 throws IOException {
906 Path metaRegionDir =
907 HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
908 return fs.exists(metaRegionDir);
909 }
910
911
912
913
914
915
916
917
918
919 static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
920 final FileSystem fs, FileStatus status, long start, long length)
921 throws IOException {
922 HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
923 BlockLocation [] blockLocations =
924 fs.getFileBlockLocations(status, start, length);
925 for(BlockLocation bl : blockLocations) {
926 String [] hosts = bl.getHosts();
927 long len = bl.getLength();
928 blocksDistribution.addHostsAndBlockWeight(hosts, len);
929 }
930
931 return blocksDistribution;
932 }
933
934
935
936
937
938
939
940
941
942
943
944
945 public static boolean isMajorCompacted(final FileSystem fs,
946 final Path hbaseRootDir)
947 throws IOException {
948 List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
949 for (Path d : tableDirs) {
950 FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
951 for (FileStatus regionDir : regionDirs) {
952 Path dd = regionDir.getPath();
953 if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
954 continue;
955 }
956
957 FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
958 for (FileStatus familyDir : familyDirs) {
959 Path family = familyDir.getPath();
960
961 FileStatus[] familyStatus = fs.listStatus(family);
962 if (familyStatus.length > 1) {
963 LOG.debug(family.toString() + " has " + familyStatus.length +
964 " files.");
965 return false;
966 }
967 }
968 }
969 }
970 return true;
971 }
972
973
974
975
976
977
978
979
980
981
982 public static int getTotalTableFragmentation(final HMaster master)
983 throws IOException {
984 Map<String, Integer> map = getTableFragmentation(master);
985 return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
986 }
987
988
989
990
991
992
993
994
995
996
997
998 public static Map<String, Integer> getTableFragmentation(
999 final HMaster master)
1000 throws IOException {
1001 Path path = getRootDir(master.getConfiguration());
1002
1003 FileSystem fs = path.getFileSystem(master.getConfiguration());
1004 return getTableFragmentation(fs, path);
1005 }
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
/**
 * Computes, per table, the percentage of column families that hold more than
 * one store file ("fragmented"), plus an aggregate under the key "-TOTAL-".
 * Compaction working directories are excluded.
 *
 * @param fs filesystem holding the root directory
 * @param hbaseRootDir HBase root directory
 * @return map of table name (and "-TOTAL-") to fragmentation percentage
 * @throws IOException on filesystem error
 */
public static Map<String, Integer> getTableFragmentation(
    final FileSystem fs, final Path hbaseRootDir)
    throws IOException {
  Map<String, Integer> frags = new HashMap<String, Integer>();
  int cfCountTotal = 0;   // families seen across all tables
  int cfFragTotal = 0;    // families with >1 file across all tables
  DirFilter df = new DirFilter(fs);
  List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
  for (Path d : tableDirs) {
    int cfCount = 0;   // families seen in this table
    int cfFrag = 0;    // families with >1 file in this table
    FileStatus[] regionDirs = fs.listStatus(d, df);
    for (FileStatus regionDir : regionDirs) {
      Path dd = regionDir.getPath();
      if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
        // Skip transient compaction output directories.
        continue;
      }
      FileStatus[] familyDirs = fs.listStatus(dd, df);
      for (FileStatus familyDir : familyDirs) {
        cfCount++;
        cfCountTotal++;
        Path family = familyDir.getPath();
        FileStatus[] familyStatus = fs.listStatus(family);
        if (familyStatus.length > 1) {
          // More than one store file: this family is not fully compacted.
          cfFrag++;
          cfFragTotal++;
        }
      }
    }
    // NOTE: a table with zero families yields 0/0 = NaN; Math.round(NaN) is 0,
    // so such tables report 0% rather than failing.
    frags.put(FSUtils.getTableName(d).getNameAsString(),
        Math.round((float) cfFrag / cfCount * 100));
  }
  frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
  return frags;
}
1056
1057
1058
1059
1060
1061
1062
1063
1064 public static boolean isPre020FileLayout(final FileSystem fs,
1065 final Path hbaseRootDir)
1066 throws IOException {
1067 Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
1068 "70236052"), "info"), "mapfiles");
1069 return fs.exists(mapfiles);
1070 }
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083 public static boolean isMajorCompactedPre020(final FileSystem fs,
1084 final Path hbaseRootDir)
1085 throws IOException {
1086
1087 List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
1088 for (Path d: tableDirs) {
1089
1090
1091
1092
1093 if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
1094 continue;
1095 }
1096 FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
1097 for (FileStatus regionDir : regionDirs) {
1098 Path dd = regionDir.getPath();
1099 if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
1100 continue;
1101 }
1102
1103 FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
1104 for (FileStatus familyDir : familyDirs) {
1105 Path family = familyDir.getPath();
1106 FileStatus[] infoAndMapfile = fs.listStatus(family);
1107
1108 if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) {
1109 LOG.debug(family.toString() +
1110 " has more than just info and mapfile: " + infoAndMapfile.length);
1111 return false;
1112 }
1113
1114 for (int ll = 0; ll < 2; ll++) {
1115 if (infoAndMapfile[ll].getPath().getName().equals("info") ||
1116 infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
1117 continue;
1118 LOG.debug("Unexpected directory name: " +
1119 infoAndMapfile[ll].getPath());
1120 return false;
1121 }
1122
1123
1124 FileStatus[] familyStatus =
1125 fs.listStatus(new Path(family, "mapfiles"));
1126 if (familyStatus.length > 1) {
1127 LOG.debug(family.toString() + " has " + familyStatus.length +
1128 " files.");
1129 return false;
1130 }
1131 }
1132 }
1133 }
1134 return true;
1135 }
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145 public static Path getTableDir(Path rootdir, final TableName tableName) {
1146 return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
1147 tableName.getQualifierAsString());
1148 }
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158 public static TableName getTableName(Path tablePath) {
1159 return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
1160 }
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170 public static Path getNamespaceDir(Path rootdir, final String namespace) {
1171 return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1172 new Path(namespace)));
1173 }
1174
1175
1176
1177
1178 static class FileFilter implements PathFilter {
1179 private final FileSystem fs;
1180
1181 public FileFilter(final FileSystem fs) {
1182 this.fs = fs;
1183 }
1184
1185 @Override
1186 public boolean accept(Path p) {
1187 try {
1188 return fs.isFile(p);
1189 } catch (IOException e) {
1190 LOG.debug("unable to verify if path=" + p + " is a regular file", e);
1191 return false;
1192 }
1193 }
1194 }
1195
1196
1197
1198
1199 public static class BlackListDirFilter implements PathFilter {
1200 private final FileSystem fs;
1201 private List<String> blacklist;
1202
1203
1204
1205
1206
1207
1208
1209 @SuppressWarnings("unchecked")
1210 public BlackListDirFilter(final FileSystem fs, final List<String> directoryNameBlackList) {
1211 this.fs = fs;
1212 blacklist =
1213 (List<String>) (directoryNameBlackList == null ? Collections.emptyList()
1214 : directoryNameBlackList);
1215 }
1216
1217 @Override
1218 public boolean accept(Path p) {
1219 boolean isValid = false;
1220 try {
1221 if (blacklist.contains(p.getName().toString())) {
1222 isValid = false;
1223 } else {
1224 isValid = fs.getFileStatus(p).isDir();
1225 }
1226 } catch (IOException e) {
1227 LOG.warn("An error occurred while verifying if [" + p.toString()
1228 + "] is a valid directory. Returning 'not valid' and continuing.", e);
1229 }
1230 return isValid;
1231 }
1232 }
1233
1234
1235
1236
1237 public static class DirFilter extends BlackListDirFilter {
1238
1239 public DirFilter(FileSystem fs) {
1240 super(fs, null);
1241 }
1242 }
1243
1244
1245
1246
1247
1248 public static class UserTableDirFilter extends BlackListDirFilter {
1249
1250 public UserTableDirFilter(FileSystem fs) {
1251 super(fs, HConstants.HBASE_NON_TABLE_DIRS);
1252 }
1253 }
1254
1255
1256
1257
1258
1259
1260
1261
1262 public static boolean isAppendSupported(final Configuration conf) {
1263 boolean append = conf.getBoolean("dfs.support.append", false);
1264 if (append) {
1265 try {
1266
1267
1268
1269 SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
1270 append = true;
1271 } catch (SecurityException e) {
1272 } catch (NoSuchMethodException e) {
1273 append = false;
1274 }
1275 }
1276 if (!append) {
1277
1278 try {
1279 FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
1280 append = true;
1281 } catch (NoSuchMethodException e) {
1282 append = false;
1283 }
1284 }
1285 return append;
1286 }
1287
1288
1289
1290
1291
1292
1293 public static boolean isHDFS(final Configuration conf) throws IOException {
1294 FileSystem fs = FileSystem.get(conf);
1295 String scheme = fs.getUri().getScheme();
1296 return scheme.equalsIgnoreCase("hdfs");
1297 }
1298
1299
1300
1301
1302
1303
1304
1305
1306
/**
 * Recovers the lease on file {@code p} so it can be safely reopened;
 * implementation is filesystem-specific (see the concrete FSUtils subclass).
 *
 * @param fs filesystem holding the file
 * @param p file whose lease should be recovered
 * @param conf configuration for recovery behavior
 * @param reporter progress/cancellation callback
 * @throws IOException if recovery fails
 */
public abstract void recoverFileLease(final FileSystem fs, final Path p,
    Configuration conf, CancelableProgressable reporter) throws IOException;
1309
1310 public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
1311 throws IOException {
1312 List<Path> tableDirs = new LinkedList<Path>();
1313
1314 for(FileStatus status :
1315 fs.globStatus(new Path(rootdir,
1316 new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
1317 tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
1318 }
1319 return tableDirs;
1320 }
1321
1322
1323
1324
1325
1326
1327
1328
1329 public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir)
1330 throws IOException {
1331
1332 FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs));
1333 List<Path> tabledirs = new ArrayList<Path>(dirs.length);
1334 for (FileStatus dir: dirs) {
1335 tabledirs.add(dir.getPath());
1336 }
1337 return tabledirs;
1338 }
1339
1340
1341
1342
1343
1344
/**
 * Checks whether the given path lies under a recovered-edits directory,
 * i.e. whether the RECOVERED_EDITS_DIR marker appears anywhere in its
 * string form.
 *
 * @param path path to test
 * @return true if the path string contains the recovered-edits marker
 */
public static boolean isRecoveredEdits(Path path) {
  return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
}
1348
1349
1350
1351
1352 public static class RegionDirFilter implements PathFilter {
1353
1354 final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
1355 final FileSystem fs;
1356
1357 public RegionDirFilter(FileSystem fs) {
1358 this.fs = fs;
1359 }
1360
1361 @Override
1362 public boolean accept(Path rd) {
1363 if (!regionDirPattern.matcher(rd.getName()).matches()) {
1364 return false;
1365 }
1366
1367 try {
1368 return fs.getFileStatus(rd).isDir();
1369 } catch (IOException ioe) {
1370
1371 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1372 return false;
1373 }
1374 }
1375 }
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385 public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
1386
1387 FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
1388 List<Path> regionDirs = new ArrayList<Path>(rds.length);
1389 for (FileStatus rdfs: rds) {
1390 Path rdPath = rdfs.getPath();
1391 regionDirs.add(rdPath);
1392 }
1393 return regionDirs;
1394 }
1395
1396
1397
1398
1399
1400 public static class FamilyDirFilter implements PathFilter {
1401 final FileSystem fs;
1402
1403 public FamilyDirFilter(FileSystem fs) {
1404 this.fs = fs;
1405 }
1406
1407 @Override
1408 public boolean accept(Path rd) {
1409 try {
1410
1411 HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
1412 } catch (IllegalArgumentException iae) {
1413
1414 return false;
1415 }
1416
1417 try {
1418 return fs.getFileStatus(rd).isDir();
1419 } catch (IOException ioe) {
1420
1421 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1422 return false;
1423 }
1424 }
1425 }
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435 public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
1436
1437 FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
1438 List<Path> familyDirs = new ArrayList<Path>(fds.length);
1439 for (FileStatus fdfs: fds) {
1440 Path fdPath = fdfs.getPath();
1441 familyDirs.add(fdPath);
1442 }
1443 return familyDirs;
1444 }
1445
1446
1447
1448
1449 public static class HFileFilter implements PathFilter {
1450
1451 final public static Pattern hfilePattern = Pattern.compile("^([0-9a-f]+)$");
1452
1453 final FileSystem fs;
1454
1455 public HFileFilter(FileSystem fs) {
1456 this.fs = fs;
1457 }
1458
1459 @Override
1460 public boolean accept(Path rd) {
1461 if (!hfilePattern.matcher(rd.getName()).matches()) {
1462 return false;
1463 }
1464
1465 try {
1466
1467 return !fs.getFileStatus(rd).isDir();
1468 } catch (IOException ioe) {
1469
1470 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1471 return false;
1472 }
1473 }
1474 }
1475
1476
1477
1478
1479
1480
/**
 * Returns the FileSystem instance that backs the configured HBase root
 * directory (which may differ from the default FileSystem for this conf).
 *
 * @param conf configuration used to resolve the root directory
 * @return the FileSystem of the HBase root directory
 * @throws IOException if the FileSystem cannot be obtained
 */
public static FileSystem getCurrentFileSystem(Configuration conf)
throws IOException {
  return getRootDir(conf).getFileSystem(conf);
}
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502 public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
1503 final FileSystem fs, final Path hbaseRootDir, TableName tableName)
1504 throws IOException {
1505 if (map == null) {
1506 map = new HashMap<String, Path>();
1507 }
1508
1509
1510 Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
1511
1512
1513 PathFilter df = new BlackListDirFilter(fs, HConstants.HBASE_NON_TABLE_DIRS);
1514 FileStatus[] regionDirs = fs.listStatus(tableDir);
1515 for (FileStatus regionDir : regionDirs) {
1516 Path dd = regionDir.getPath();
1517 if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
1518 continue;
1519 }
1520
1521 FileStatus[] familyDirs = fs.listStatus(dd, df);
1522 for (FileStatus familyDir : familyDirs) {
1523 Path family = familyDir.getPath();
1524
1525
1526 FileStatus[] familyStatus = fs.listStatus(family);
1527 for (FileStatus sfStatus : familyStatus) {
1528 Path sf = sfStatus.getPath();
1529 map.put( sf.getName(), sf);
1530 }
1531 }
1532 }
1533 return map;
1534 }
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550 public static Map<String, Path> getTableStoreFilePathMap(
1551 final FileSystem fs, final Path hbaseRootDir)
1552 throws IOException {
1553 Map<String, Path> map = new HashMap<String, Path>();
1554
1555
1556
1557
1558
1559 for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
1560 getTableStoreFilePathMap(map, fs, hbaseRootDir,
1561 FSUtils.getTableName(tableDir));
1562 }
1563 return map;
1564 }
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577 public static FileStatus [] listStatus(final FileSystem fs,
1578 final Path dir, final PathFilter filter) throws IOException {
1579 FileStatus [] status = null;
1580 try {
1581 status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
1582 } catch (FileNotFoundException fnfe) {
1583
1584 if (LOG.isTraceEnabled()) {
1585 LOG.trace(dir + " doesn't exist");
1586 }
1587 }
1588 if (status == null || status.length < 1) return null;
1589 return status;
1590 }
1591
1592
1593
1594
1595
1596
1597
1598
1599
/**
 * Unfiltered variant of {@code listStatus(fs, dir, filter)}: lists a
 * directory, returning null (not an empty array) when the directory is
 * missing or empty.
 *
 * @param fs the FileSystem to query
 * @param dir directory to list
 * @return the non-empty listing, or null if missing/empty
 * @throws IOException on filesystem errors other than a missing dir
 */
public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
  return listStatus(fs, dir, null);
}
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
/**
 * Thin wrapper over {@link FileSystem#delete(Path, boolean)}.
 *
 * @param fs the FileSystem to use
 * @param path path to delete
 * @param recursive true to delete directories and their contents
 * @return true if the delete succeeded
 * @throws IOException on filesystem error
 */
public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
throws IOException {
  return fs.delete(path, recursive);
}
1617
1618
1619
1620
1621
1622
1623
1624
1625
/**
 * Thin wrapper over {@link FileSystem#exists(Path)}.
 *
 * @param fs the FileSystem to use
 * @param path path to check
 * @return true if the path exists
 * @throws IOException on filesystem error
 */
public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
  return fs.exists(path);
}
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640 public static void checkAccess(UserGroupInformation ugi, FileStatus file,
1641 FsAction action) throws AccessControlException {
1642 if (ugi.getShortUserName().equals(file.getOwner())) {
1643 if (file.getPermission().getUserAction().implies(action)) {
1644 return;
1645 }
1646 } else if (contains(ugi.getGroupNames(), file.getGroup())) {
1647 if (file.getPermission().getGroupAction().implies(action)) {
1648 return;
1649 }
1650 } else if (file.getPermission().getOtherAction().implies(action)) {
1651 return;
1652 }
1653 throw new AccessControlException("Permission denied:" + " action=" + action
1654 + " path=" + file.getPath() + " user=" + ugi.getShortUserName());
1655 }
1656
1657 private static boolean contains(String[] groups, String user) {
1658 for (String group : groups) {
1659 if (group.equals(user)) {
1660 return true;
1661 }
1662 }
1663 return false;
1664 }
1665
1666
1667
1668
1669
1670
1671
1672
/**
 * Logs, at DEBUG level, a tree view of the filesystem rooted at
 * {@code root}, one line per file/directory.
 *
 * @param fs the FileSystem to walk
 * @param root directory to start from
 * @param LOG log to write the tree to
 * @throws IOException on filesystem error
 */
public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
throws IOException {
  LOG.debug("Current file system:");
  logFSTree(LOG, fs, root, "|-");
}
1678
1679
1680
1681
1682
1683
1684 private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
1685 throws IOException {
1686 FileStatus[] files = FSUtils.listStatus(fs, root, null);
1687 if (files == null) return;
1688
1689 for (FileStatus file : files) {
1690 if (file.isDir()) {
1691 LOG.debug(prefix + file.getPath().getName() + "/");
1692 logFSTree(LOG, fs, file.getPath(), prefix + "---");
1693 } else {
1694 LOG.debug(prefix + file.getPath().getName());
1695 }
1696 }
1697 }
1698
1699 public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
1700 throws IOException {
1701
1702 fs.setTimes(src, EnvironmentEdgeManager.currentTimeMillis(), -1);
1703 return fs.rename(src, dest);
1704 }
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
/**
 * Computes, for every region, the HDFS-locality degree of each host:
 * an outer map keyed by encoded region name, with an inner map from
 * hostname to locality fraction. Uses the thread-pool size configured
 * under THREAD_POOLSIZE (falling back to DEFAULT_THREAD_POOLSIZE) and
 * scans all tables (no table filter).
 *
 * @param conf the configuration to use
 * @return mapping of region encoded name to (hostname -> locality degree)
 * @throws IOException in case of file system errors or interrupts
 */
public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
    final Configuration conf) throws IOException {
  return getRegionDegreeLocalityMappingFromFS(
      conf, null,
      conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE));

}
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743 public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
1744 final Configuration conf, final String desiredTable, int threadPoolSize)
1745 throws IOException {
1746 Map<String, Map<String, Float>> regionDegreeLocalityMapping =
1747 new ConcurrentHashMap<String, Map<String, Float>>();
1748 getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null,
1749 regionDegreeLocalityMapping);
1750 return regionDegreeLocalityMapping;
1751 }
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773 private static void getRegionLocalityMappingFromFS(
1774 final Configuration conf, final String desiredTable,
1775 int threadPoolSize,
1776 Map<String, String> regionToBestLocalityRSMapping,
1777 Map<String, Map<String, Float>> regionDegreeLocalityMapping)
1778 throws IOException {
1779 FileSystem fs = FileSystem.get(conf);
1780 Path rootPath = FSUtils.getRootDir(conf);
1781 long startTime = EnvironmentEdgeManager.currentTimeMillis();
1782 Path queryPath;
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883 public static void setupShortCircuitRead(final Configuration conf) {
1884
1885 boolean shortCircuitSkipChecksum =
1886 conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
1887 boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
1888 if (shortCircuitSkipChecksum) {
1889 LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
1890 "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
1891 "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
1892 assert !shortCircuitSkipChecksum;
1893 }
1894 checkShortCircuitReadBufferSize(conf);
1895 }
1896
1897
1898
1899
1900
1901 public static void checkShortCircuitReadBufferSize(final Configuration conf) {
1902 final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
1903 final int notSet = -1;
1904
1905 final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
1906 int size = conf.getInt(dfsKey, notSet);
1907
1908 if (size != notSet) return;
1909
1910 int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
1911 conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
1912 }
1913 }