1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.util;
20
21 import java.io.ByteArrayInputStream;
22 import java.io.DataInputStream;
23 import java.io.EOFException;
24 import java.io.FileNotFoundException;
25 import java.io.IOException;
26 import java.io.InputStream;
27 import java.io.InterruptedIOException;
28 import java.lang.reflect.InvocationTargetException;
29 import java.lang.reflect.Method;
30 import java.net.InetSocketAddress;
31 import java.net.URI;
32 import java.net.URISyntaxException;
33 import java.util.ArrayList;
34 import java.util.Collections;
35 import java.util.HashMap;
36 import java.util.LinkedList;
37 import java.util.List;
38 import java.util.Map;
39 import java.util.concurrent.ArrayBlockingQueue;
40 import java.util.concurrent.ConcurrentHashMap;
41 import java.util.concurrent.ThreadPoolExecutor;
42 import java.util.concurrent.TimeUnit;
43 import java.util.regex.Pattern;
44
45 import org.apache.commons.logging.Log;
46 import org.apache.commons.logging.LogFactory;
47 import org.apache.hadoop.classification.InterfaceAudience;
48 import org.apache.hadoop.conf.Configuration;
49 import org.apache.hadoop.fs.BlockLocation;
50 import org.apache.hadoop.fs.FSDataInputStream;
51 import org.apache.hadoop.fs.FSDataOutputStream;
52 import org.apache.hadoop.fs.FileStatus;
53 import org.apache.hadoop.fs.FileSystem;
54 import org.apache.hadoop.fs.Path;
55 import org.apache.hadoop.fs.PathFilter;
56 import org.apache.hadoop.fs.permission.FsAction;
57 import org.apache.hadoop.fs.permission.FsPermission;
58 import org.apache.hadoop.hbase.ClusterId;
59 import org.apache.hadoop.hbase.HColumnDescriptor;
60 import org.apache.hadoop.hbase.HConstants;
61 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
62 import org.apache.hadoop.hbase.HRegionInfo;
63 import org.apache.hadoop.hbase.RemoteExceptionHandler;
64 import org.apache.hadoop.hbase.TableName;
65 import org.apache.hadoop.hbase.exceptions.DeserializationException;
66 import org.apache.hadoop.hbase.fs.HFileSystem;
67 import org.apache.hadoop.hbase.master.HMaster;
68 import org.apache.hadoop.hbase.master.RegionPlacementMaintainer;
69 import org.apache.hadoop.hbase.security.AccessDeniedException;
70 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
71 import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
72 import org.apache.hadoop.hbase.regionserver.HRegion;
73 import org.apache.hadoop.hdfs.DistributedFileSystem;
74 import org.apache.hadoop.hdfs.protocol.FSConstants;
75 import org.apache.hadoop.io.IOUtils;
76 import org.apache.hadoop.io.SequenceFile;
77 import org.apache.hadoop.security.UserGroupInformation;
78 import org.apache.hadoop.util.Progressable;
79 import org.apache.hadoop.util.ReflectionUtils;
80 import org.apache.hadoop.util.StringUtils;
81
82 import com.google.common.primitives.Ints;
83 import com.google.protobuf.InvalidProtocolBufferException;
84
85
86
87
88 @InterfaceAudience.Private
89 public abstract class FSUtils {
private static final Log LOG = LogFactory.getLog(FSUtils.class);

/** Full access permissions (octal "777"); starting point before a umask is applied. */
public static final String FULL_RWX_PERMISSIONS = "777";
/** Config key for the locality-check thread pool size. */
private static final String THREAD_POOLSIZE = "hbase.client.localityCheck.threadPoolSize";
/** Default locality-check thread pool size when the config key is unset. */
private static final int DEFAULT_THREAD_POOLSIZE = 2;

/** True when running on a Windows platform (derived from the os.name system property). */
public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");
99
/** Not instantiable directly; obtain a concrete instance via {@code getInstance(fs, conf)}. */
protected FSUtils() {
  super();
}
103
104
105
106
107
108
109
110
111 public static boolean isStartingWithPath(final Path rootPath, final String path) {
112 String uriRootPath = rootPath.toUri().getPath();
113 String tailUriPath = (new Path(path)).toUri().getPath();
114 return tailUriPath.startsWith(uriRootPath);
115 }
116
117
118
119
120
121
122
123
124
125 public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
126 return isMatchingTail(pathToSearch, new Path(pathTail));
127 }
128
129
130
131
132
133
134
135
136
/**
 * Compares the trailing components of two paths, walking from the leaf toward the
 * root until the tail is exhausted. Requires both paths to have the same depth.
 * @param pathToSearch path to examine
 * @param pathTail candidate tail
 * @return true when every name component of {@code pathTail}, from leaf upward,
 *   matches the corresponding component of {@code pathToSearch}
 */
public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
  // Depths must agree; otherwise the component-by-component walk below is meaningless.
  if (pathToSearch.depth() != pathTail.depth()) return false;
  Path tailPath = pathTail;
  String tailName;
  Path toSearch = pathToSearch;
  String toSearchName;
  boolean result = false;
  do {
    tailName = tailPath.getName();
    // Ran out of tail components without a mismatch: the tail matches.
    if (tailName == null || tailName.length() <= 0) {
      result = true;
      break;
    }
    toSearchName = toSearch.getName();
    // Searched path exhausted before the tail: no match.
    if (toSearchName == null || toSearchName.length() <= 0) break;
    // Move both cursors one level up; the loop condition re-checks equality.
    tailPath = tailPath.getParent();
    toSearch = toSearch.getParent();
  } while(tailName.equals(toSearchName));
  return result;
}
158
159 public static FSUtils getInstance(FileSystem fs, Configuration conf) {
160 String scheme = fs.getUri().getScheme();
161 if (scheme == null) {
162 LOG.warn("Could not find scheme for uri " +
163 fs.getUri() + ", default to hdfs");
164 scheme = "hdfs";
165 }
166 Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
167 scheme + ".impl", FSHDFSUtils.class);
168 FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
169 return fsUtils;
170 }
171
172
173
174
175
176
177
178
179 public static boolean deleteDirectory(final FileSystem fs, final Path dir)
180 throws IOException {
181 return fs.exists(dir) && fs.delete(dir, true);
182 }
183
184
185
186
187
188
189
190
191
192
193
194
/**
 * Returns the default block size for {@code path}, using the per-path
 * {@code getDefaultBlockSize(Path)} overload via reflection when the running
 * Hadoop version provides it, and falling back to the deprecated no-arg form otherwise.
 * @param fs filesystem to query
 * @param path path whose default block size is wanted
 * @return default block size in bytes
 * @throws IOException if the reflective invocation fails
 */
public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
  Method m = null;
  Class<? extends FileSystem> cls = fs.getClass();
  try {
    // Probe for the newer Path-aware overload; absent on older Hadoop.
    m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
  } catch (NoSuchMethodException e) {
    LOG.info("FileSystem doesn't support getDefaultBlockSize");
  } catch (SecurityException e) {
    LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
    m = null;
  }
  if (m == null) {
    // Fall back to the version-independent no-arg method.
    return fs.getDefaultBlockSize();
  } else {
    try {
      Object ret = m.invoke(fs, path);
      return ((Long)ret).longValue();
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
}
217
218
219
220
221
222
223
224
225
226
227
228
/**
 * Returns the default replication for {@code path}, preferring the per-path
 * {@code getDefaultReplication(Path)} overload via reflection when available,
 * and falling back to the deprecated no-arg form otherwise.
 * @param fs filesystem to query
 * @param path path whose default replication is wanted
 * @return default replication factor
 * @throws IOException if the reflective invocation fails
 */
public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
  Method m = null;
  Class<? extends FileSystem> cls = fs.getClass();
  try {
    // Probe for the newer Path-aware overload; absent on older Hadoop.
    m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
  } catch (NoSuchMethodException e) {
    LOG.info("FileSystem doesn't support getDefaultReplication");
  } catch (SecurityException e) {
    LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
    m = null;
  }
  if (m == null) {
    // Fall back to the version-independent no-arg method.
    return fs.getDefaultReplication();
  } else {
    try {
      Object ret = m.invoke(fs, path);
      return ((Number)ret).shortValue();
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
}
251
252
253
254
255
256
257
258
259
260
261
262 public static int getDefaultBufferSize(final FileSystem fs) {
263 return fs.getConf().getInt("io.file.buffer.size", 4096);
264 }
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
/**
 * Creates {@code path} with permission {@code perm}, hinting block placement at
 * {@code favoredNodes} when the backing DFS client exposes the favored-nodes
 * {@code create} overload (probed via reflection). Falls back to the plain
 * {@code create(fs, path, perm, true)} when the overload is unavailable.
 * @param fs filesystem (favored nodes only apply when it wraps a DistributedFileSystem)
 * @param path file to create (overwritten if present)
 * @param perm permissions for the new file
 * @param favoredNodes preferred datanodes for block placement; may be ignored
 * @return output stream for the new file
 * @throws IOException on create failure (reflective target exceptions are unwrapped)
 */
public static FSDataOutputStream create(FileSystem fs, Path path,
    FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException {
  if (fs instanceof HFileSystem) {
    FileSystem backingFs = ((HFileSystem)fs).getBackingFs();
    if (backingFs instanceof DistributedFileSystem) {
      try {
        // Reflectively call the favored-nodes overload; not all Hadoop versions have it.
        return (FSDataOutputStream) (DistributedFileSystem.class
            .getDeclaredMethod("create", Path.class, FsPermission.class,
                boolean.class, int.class, short.class, long.class,
                Progressable.class, InetSocketAddress[].class)
            .invoke(backingFs, path, perm, true,
                getDefaultBufferSize(backingFs),
                getDefaultReplication(backingFs, path),
                getDefaultBlockSize(backingFs, path),
                null, favoredNodes));
      } catch (InvocationTargetException ite) {
        // Unwrap: the cause is the real filesystem failure.
        throw new IOException(ite.getCause());
      } catch (NoSuchMethodException e) {
        LOG.debug("DFS Client does not support most favored nodes create; using default create");
        if (LOG.isTraceEnabled()) LOG.trace("Ignoring; use default create", e);
      } catch (IllegalArgumentException e) {
        LOG.debug("Ignoring (most likely Reflection related exception) " + e);
      } catch (SecurityException e) {
        LOG.debug("Ignoring (most likely Reflection related exception) " + e);
      } catch (IllegalAccessException e) {
        LOG.debug("Ignoring (most likely Reflection related exception) " + e);
      }
    }
  }
  // Reflection unavailable or failed benignly: standard create without placement hints.
  return create(fs, path, perm, true);
}
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336 public static FSDataOutputStream create(FileSystem fs, Path path,
337 FsPermission perm, boolean overwrite) throws IOException {
338 if (LOG.isTraceEnabled()) {
339 LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
340 }
341 return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
342 getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
343 }
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358 public static FsPermission getFilePermissions(final FileSystem fs,
359 final Configuration conf, final String permssionConfKey) {
360 boolean enablePermissions = conf.getBoolean(
361 HConstants.ENABLE_DATA_FILE_UMASK, false);
362
363 if (enablePermissions) {
364 try {
365 FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
366
367 String mask = conf.get(permssionConfKey);
368 if (mask == null)
369 return getFileDefault();
370
371 FsPermission umask = new FsPermission(mask);
372 return perm.applyUMask(umask);
373 } catch (IllegalArgumentException e) {
374 LOG.warn(
375 "Incorrect umask attempted to be created: "
376 + conf.get(permssionConfKey)
377 + ", using default file permissions.", e);
378 return getFileDefault();
379 }
380 }
381 return getFileDefault();
382 }
383
384
385
386
387
388
389
390
391
392
393 public static FsPermission getFileDefault() {
394 return new FsPermission((short)00666);
395 }
396
397
398
399
400
401
402
/**
 * Verifies that a DistributedFileSystem is reachable; non-DFS filesystems are
 * assumed available. On failure the filesystem is closed and an IOException is
 * thrown with the underlying error as its cause.
 * @param fs filesystem to probe
 * @throws IOException if the DFS cannot be reached
 */
public static void checkFileSystemAvailable(final FileSystem fs)
    throws IOException {
  // Only DFS availability can be meaningfully probed here.
  if (!(fs instanceof DistributedFileSystem)) {
    return;
  }
  IOException exception = null;
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  try {
    // Probing the root is a cheap round trip to the namenode.
    if (dfs.exists(new Path("/"))) {
      return;
    }
  } catch (IOException e) {
    exception = RemoteExceptionHandler.checkIOException(e);
  }
  // Unreachable: close the handle (best effort) before reporting.
  try {
    fs.close();
  } catch (Exception e) {
    LOG.error("file system close failed: ", e);
  }
  IOException io = new IOException("File system is not available");
  // May be null when exists() simply returned false; initCause(null) is permitted.
  io.initCause(exception);
  throw io;
}
426
427
428
429
430
431
432
433
434
/**
 * Queries whether the DFS namenode is in safe mode. Prefers the two-argument
 * {@code setSafeMode(action, isChecked)} overload via reflection (avoids a
 * failover-triggering call on HA clusters per the boolean flag); falls back to
 * the single-argument form on older Hadoop.
 * @param dfs filesystem to query
 * @return true if the namenode reports safe mode
 * @throws IOException if the query itself fails
 */
private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
  boolean inSafeMode = false;
  try {
    Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
        org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
    inSafeMode = (Boolean) m.invoke(dfs,
        org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
  } catch (Exception e) {
    // Rethrow genuine I/O failures; anything else means the overload is missing.
    if (e instanceof IOException) throw (IOException) e;

    // Older Hadoop: use the single-argument form.
    inSafeMode = dfs.setSafeMode(
        org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
  }
  return inSafeMode;
}
451
452
453
454
455
456
457 public static void checkDfsSafeMode(final Configuration conf)
458 throws IOException {
459 boolean isInSafeMode = false;
460 FileSystem fs = FileSystem.get(conf);
461 if (fs instanceof DistributedFileSystem) {
462 DistributedFileSystem dfs = (DistributedFileSystem)fs;
463 isInSafeMode = isInSafeMode(dfs);
464 }
465 if (isInSafeMode) {
466 throw new IOException("File system is in safemode, it can't be written now");
467 }
468 }
469
470
471
472
473
474
475
476
477
478
/**
 * Reads the hbase.version file under {@code rootdir}, handling both the protobuf
 * format (pb-magic prefixed) and the legacy plain writeUTF format.
 * @param fs filesystem holding the version file
 * @param rootdir hbase root directory
 * @return version string, or null if the file does not exist (or was empty)
 * @throws IOException on read failure
 * @throws DeserializationException if the pb payload cannot be parsed
 */
public static String getVersion(FileSystem fs, Path rootdir)
    throws IOException, DeserializationException {
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  FileStatus[] status = null;
  try {
    // listStatus gives existence plus length in one call; length sizes the read below.
    status = fs.listStatus(versionFile);
  } catch (FileNotFoundException fnfe) {
    return null;
  }
  if (status == null || status.length == 0) return null;
  String version = null;
  byte [] content = new byte [(int)status[0].getLen()];
  FSDataInputStream s = fs.open(versionFile);
  try {
    IOUtils.readFully(s, content, 0, content.length);
    if (ProtobufUtil.isPBMagicPrefix(content)) {
      version = parseVersionFrom(content);
    } else {
      // Legacy pre-pb layout: version was written with DataOutput.writeUTF.
      InputStream is = new ByteArrayInputStream(content);
      DataInputStream dis = new DataInputStream(is);
      try {
        version = dis.readUTF();
      } finally {
        dis.close();
      }
    }
  } catch (EOFException eof) {
    // Empty file: report no version so the caller can rewrite it.
    LOG.warn("Version file was empty, odd, will try to set it.");
  } finally {
    s.close();
  }
  return version;
}
515
516
517
518
519
520
521
/**
 * Parses a version string from a pb-magic-prefixed serialized
 * {@code HBaseVersionFileContent}.
 * @param bytes raw version-file content, expected to begin with the pb magic
 * @return the deserialized version string
 * @throws DeserializationException if the prefix is missing or the pb is invalid
 */
static String parseVersionFrom(final byte [] bytes)
    throws DeserializationException {
  ProtobufUtil.expectPBMagicPrefix(bytes);
  int pblen = ProtobufUtil.lengthOfPBMagic();
  FSProtos.HBaseVersionFileContent.Builder builder =
      FSProtos.HBaseVersionFileContent.newBuilder();
  FSProtos.HBaseVersionFileContent fileContent;
  try {
    // Skip the magic prefix; the rest is the serialized message.
    fileContent = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
    return fileContent.getVersion();
  } catch (InvalidProtocolBufferException e) {
    throw new DeserializationException(e);
  }
}
537
538
539
540
541
542
543 static byte [] toVersionByteArray(final String version) {
544 FSProtos.HBaseVersionFileContent.Builder builder =
545 FSProtos.HBaseVersionFileContent.newBuilder();
546 return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray());
547 }
548
549
550
551
552
553
554
555
556
557
558
/**
 * Verifies the hbase.version file without waiting between retries; delegates to
 * the five-argument overload with wait=0 and the default retry count.
 * @param fs filesystem holding the root directory
 * @param rootdir hbase root directory
 * @param message whether to print a warning to stdout on mismatch
 * @throws IOException on read/write failure
 * @throws DeserializationException if the version file cannot be parsed
 */
public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
    throws IOException, DeserializationException {
  checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
}
563
564
565
566
567
568
569
570
571
572
573
574
575
/**
 * Verifies the hbase.version file matches the current FILE_SYSTEM_VERSION. When no
 * version file exists on a fresh layout (no meta region), one is created. Any other
 * state (missing file with existing meta, or a mismatched version) is fatal.
 * @param fs filesystem holding the root directory
 * @param rootdir hbase root directory
 * @param message whether to print a warning to stdout before throwing
 * @param wait milliseconds to sleep between version-file write retries
 * @param retries number of write attempts
 * @throws IOException on read/write failure
 * @throws DeserializationException if the version file cannot be parsed
 * @throws FileSystemVersionException when the layout version is wrong or unreadable
 */
public static void checkVersion(FileSystem fs, Path rootdir,
    boolean message, int wait, int retries)
    throws IOException, DeserializationException {
  String version = getVersion(fs, rootdir);
  if (version == null) {
    if (!metaRegionExists(fs, rootdir)) {
      // Fresh layout: create the version file and we are done.
      setVersion(fs, rootdir, wait, retries);
      return;
    }
  } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return;

  // Either meta exists without a version file, or the version does not match.
  String msg = "HBase file layout needs to be upgraded."
      + " You have version " + version
      + " and I want version " + HConstants.FILE_SYSTEM_VERSION
      + ". Consult http://hbase.apache.org/book.html for further information about upgrading HBase."
      + " Is your hbase.rootdir valid? If so, you may need to run "
      + "'hbase hbck -fixVersionFile'.";
  if (message) {
    System.out.println("WARNING! " + msg);
  }
  throw new FileSystemVersionException(msg);
}
602
603
604
605
606
607
608
609
/**
 * Writes the current FILE_SYSTEM_VERSION to the version file, with no wait
 * between retries and the default attempt count.
 * @param fs filesystem holding the root directory
 * @param rootdir hbase root directory
 * @throws IOException on write failure after all retries
 */
public static void setVersion(FileSystem fs, Path rootdir)
    throws IOException {
  setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
      HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
}
615
616
617
618
619
620
621
622
623
624
/**
 * Writes the current FILE_SYSTEM_VERSION to the version file with the given
 * retry policy.
 * @param fs filesystem holding the root directory
 * @param rootdir hbase root directory
 * @param wait milliseconds to sleep between retries
 * @param retries number of write attempts
 * @throws IOException on write failure after all retries
 */
public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
    throws IOException {
  setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
}
629
630
631
632
633
634
635
636
637
638
639
640
/**
 * Writes {@code version} to the hbase.version file atomically: the content is
 * written to a temp file under the hbase temp directory and then renamed into
 * place. On IOException the write is retried up to {@code retries} times,
 * sleeping {@code wait} ms between attempts.
 * @param fs filesystem holding the root directory
 * @param rootdir hbase root directory
 * @param version version string to record
 * @param wait milliseconds to sleep between retries
 * @param retries number of attempts before giving up
 * @throws IOException when all attempts fail
 */
public static void setVersion(FileSystem fs, Path rootdir, String version,
    int wait, int retries) throws IOException {
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR +
      HConstants.VERSION_FILE_NAME);
  while (true) {
    try {
      // Write to a temp file first so the final file appears atomically via rename.
      FSDataOutputStream s = fs.create(tempVersionFile);
      try {
        s.write(toVersionByteArray(version));
        s.close();
        s = null;  // null signals the finally block the stream closed cleanly

        if (!fs.rename(tempVersionFile, versionFile)) {
          throw new IOException("Unable to move temp version file to " + versionFile);
        }
      } finally {
        // Only closes when the happy path did not already; close errors here
        // are ignored so they do not mask the original exception.
        try {
          if (s != null) s.close();
        } catch (IOException ignore) { }
      }
      LOG.debug("Created version file at " + rootdir.toString() + " with version=" + version);
      return;
    } catch (IOException e) {
      if (retries > 0) {
        LOG.warn("Unable to create version file at " + rootdir.toString() + ", retrying", e);
        // Remove a possibly half-written destination before retrying.
        fs.delete(versionFile, false);
        try {
          if (wait > 0) {
            Thread.sleep(wait);
          }
        } catch (InterruptedException ex) {
          // NOTE(review): interrupt is swallowed here; retry loop continues regardless.
        }
        retries--;
      } else {
        throw e;
      }
    }
  }
}
689
690
691
692
693
694
695
696
697
/**
 * Checks whether the cluster ID file exists under {@code rootdir}, retrying
 * indefinitely on IOException while {@code wait > 0} (sleeping {@code wait} ms
 * between attempts).
 * @param fs filesystem holding the root directory
 * @param rootdir hbase root directory
 * @param wait ms to sleep between retries; 0 or less means fail immediately
 * @return true if the cluster ID file exists
 * @throws IOException when wait is not positive and the check fails
 * @throws InterruptedIOException when interrupted while sleeping
 */
public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
    int wait) throws IOException {
  while (true) {
    try {
      Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
      return fs.exists(filePath);
    } catch (IOException ioe) {
      if (wait > 0) {
        LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
            ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
        try {
          Thread.sleep(wait);
        } catch (InterruptedException ie) {
          // Convert to an IOException subclass so the declared contract holds.
          throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
        }
      } else {
        throw ioe;
      }
    }
  }
}
719
720
721
722
723
724
725
726
/**
 * Reads the cluster ID file under {@code rootdir}. Handles both the pb format
 * and the legacy writeUTF format; a legacy file is transparently rewritten as pb.
 * @param fs filesystem holding the root directory
 * @param rootdir hbase root directory
 * @return the cluster ID, or null when the file does not exist
 * @throws IOException on read failure or unparseable content
 */
public static ClusterId getClusterId(FileSystem fs, Path rootdir)
    throws IOException {
  Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
  ClusterId clusterId = null;
  FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null;
  if (status != null) {
    int len = Ints.checkedCast(status.getLen());
    byte [] content = new byte[len];
    FSDataInputStream in = fs.open(idPath);
    try {
      in.readFully(content);
    } catch (EOFException eof) {
      LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
    } finally{
      in.close();
    }
    try {
      // parseFrom copes with both pb and legacy content.
      clusterId = ClusterId.parseFrom(content);
    } catch (DeserializationException e) {
      throw new IOException("content=" + Bytes.toString(content), e);
    }
    // Legacy (non-pb) file: reread as UTF and rewrite in the pb format.
    if (!ProtobufUtil.isPBMagicPrefix(content)) {
      String cid = null;
      in = fs.open(idPath);
      try {
        cid = in.readUTF();
        clusterId = new ClusterId(cid);
      } catch (EOFException eof) {
        LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
      } finally {
        in.close();
      }
      rewriteAsPb(fs, rootdir, idPath, clusterId);
    }
    return clusterId;
  } else {
    LOG.warn("Cluster ID file does not exist at " + idPath.toString());
  }
  return clusterId;
}
768
769
770
771
772
/**
 * Rewrites a legacy cluster ID file {@code p} in the pb format: moves the old
 * file aside (timestamp-suffixed), writes the pb version, then deletes the moved
 * copy.
 * @param fs filesystem holding the root directory
 * @param rootdir hbase root directory
 * @param p existing cluster ID file to replace
 * @param cid cluster ID to write
 * @throws IOException if the rename, write, or cleanup delete fails
 */
private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
    final ClusterId cid)
    throws IOException {
  // Move aside first so a concurrent reader never sees a half-written file.
  Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
  if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
  setClusterId(fs, rootdir, cid, 100);
  if (!fs.delete(movedAsideName, false)) {
    throw new IOException("Failed delete of " + movedAsideName);
  }
  LOG.debug("Rewrote the hbase.id file as pb");
}
786
787
788
789
790
791
792
793
794
795
/**
 * Writes {@code clusterId} to the cluster ID file atomically (temp file +
 * rename). Retries forever on IOException while {@code wait > 0}, sleeping
 * between attempts; returns silently if interrupted during the sleep.
 * @param fs filesystem holding the root directory
 * @param rootdir hbase root directory
 * @param clusterId cluster ID to record
 * @param wait ms to sleep between retries; 0 or less means fail immediately
 * @throws IOException when wait is not positive and the write fails
 */
public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
    int wait) throws IOException {
  while (true) {
    try {
      Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
      Path tempIdFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY +
          Path.SEPARATOR + HConstants.CLUSTER_ID_FILE_NAME);
      // Write to a temp file first so the final file appears atomically via rename.
      FSDataOutputStream s = fs.create(tempIdFile);
      try {
        s.write(clusterId.toByteArray());
        s.close();
        s = null;  // null signals the finally block the stream closed cleanly

        if (!fs.rename(tempIdFile, idFile)) {
          throw new IOException("Unable to move temp version file to " + idFile);
        }
      } finally {
        // Close errors here are ignored so they do not mask the original exception.
        try {
          if (s != null) s.close();
        } catch (IOException ignore) { }
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Created cluster ID file at " + idFile.toString() + " with ID: " + clusterId);
      }
      return;
    } catch (IOException ioe) {
      if (wait > 0) {
        LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
            ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
        try {
          Thread.sleep(wait);
        } catch (InterruptedException ie) {
          // Restore the interrupt flag and give up rather than looping forever.
          Thread.currentThread().interrupt();
          break;
        }
      } else {
        throw ioe;
      }
    }
  }
}
840
841
842
843
844
845
846
847
848 public static Path validateRootPath(Path root) throws IOException {
849 try {
850 URI rootURI = new URI(root.toString());
851 String scheme = rootURI.getScheme();
852 if (scheme == null) {
853 throw new IOException("Root directory does not have a scheme");
854 }
855 return root;
856 } catch (URISyntaxException e) {
857 IOException io = new IOException("Root directory path is not a valid " +
858 "URI -- check your " + HConstants.HBASE_DIR + " configuration");
859 io.initCause(e);
860 throw io;
861 }
862 }
863
864
865
866
867
868
869
870
871
872 public static String removeRootPath(Path path, final Configuration conf) throws IOException {
873 Path root = FSUtils.getRootDir(conf);
874 String pathStr = path.toString();
875
876 if (!pathStr.startsWith(root.toString())) return pathStr;
877
878 return pathStr.substring(root.toString().length() + 1);
879 }
880
881
882
883
884
885
886
887 public static void waitOnSafeMode(final Configuration conf,
888 final long wait)
889 throws IOException {
890 FileSystem fs = FileSystem.get(conf);
891 if (!(fs instanceof DistributedFileSystem)) return;
892 DistributedFileSystem dfs = (DistributedFileSystem)fs;
893
894 while (isInSafeMode(dfs)) {
895 LOG.info("Waiting for dfs to exit safe mode...");
896 try {
897 Thread.sleep(wait);
898 } catch (InterruptedException e) {
899
900 }
901 }
902 }
903
904
905
906
907
908
909
910
911
912
913
914 public static String getPath(Path p) {
915 return p.toUri().getPath();
916 }
917
918
919
920
921
922
923
924 public static Path getRootDir(final Configuration c) throws IOException {
925 Path p = new Path(c.get(HConstants.HBASE_DIR));
926 FileSystem fs = p.getFileSystem(c);
927 return p.makeQualified(fs);
928 }
929
/**
 * Sets {@code hbase.rootdir} in the given configuration.
 * @param c configuration to modify
 * @param root new root directory
 * @throws IOException declared for interface symmetry; not thrown here
 */
public static void setRootDir(final Configuration c, final Path root) throws IOException {
  c.set(HConstants.HBASE_DIR, root.toString());
}
933
/**
 * Points the default filesystem at {@code root}, setting both the current
 * ({@code fs.defaultFS}) and legacy ({@code fs.default.name}) keys.
 * @param c configuration to modify
 * @param root new default filesystem URI
 * @throws IOException declared for interface symmetry; not thrown here
 */
public static void setFsDefault(final Configuration c, final Path root) throws IOException {
  c.set("fs.defaultFS", root.toString());
  c.set("fs.default.name", root.toString());
}
938
939
940
941
942
943
944
945
946
947 @SuppressWarnings("deprecation")
948 public static boolean metaRegionExists(FileSystem fs, Path rootdir)
949 throws IOException {
950 Path metaRegionDir =
951 HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
952 return fs.exists(metaRegionDir);
953 }
954
955
956
957
958
959
960
961
962
963 static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
964 final FileSystem fs, FileStatus status, long start, long length)
965 throws IOException {
966 HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
967 BlockLocation [] blockLocations =
968 fs.getFileBlockLocations(status, start, length);
969 for(BlockLocation bl : blockLocations) {
970 String [] hosts = bl.getHosts();
971 long len = bl.getLength();
972 blocksDistribution.addHostsAndBlockWeight(hosts, len);
973 }
974
975 return blocksDistribution;
976 }
977
978
979
980
981
982
983
984
985
986
987
988
/**
 * Reports whether every column family in every region holds at most one store
 * file, i.e. the whole layout looks fully major-compacted.
 * @param fs filesystem holding the root directory
 * @param hbaseRootDir hbase root directory
 * @return true if no family directory holds more than one file
 * @throws IOException on filesystem error
 */
public static boolean isMajorCompacted(final FileSystem fs,
    final Path hbaseRootDir)
    throws IOException {
  List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
  for (Path d : tableDirs) {
    FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
    for (FileStatus regionDir : regionDirs) {
      Path dd = regionDir.getPath();
      // Skip in-progress compaction working directories.
      if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
        continue;
      }
      // Each family directory should hold a single (compacted) store file.
      FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
      for (FileStatus familyDir : familyDirs) {
        Path family = familyDir.getPath();
        FileStatus[] familyStatus = fs.listStatus(family);
        if (familyStatus.length > 1) {
          LOG.debug(family.toString() + " has " + familyStatus.length +
              " files.");
          return false;
        }
      }
    }
  }
  return true;
}
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026 public static int getTotalTableFragmentation(final HMaster master)
1027 throws IOException {
1028 Map<String, Integer> map = getTableFragmentation(master);
1029 return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
1030 }
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042 public static Map<String, Integer> getTableFragmentation(
1043 final HMaster master)
1044 throws IOException {
1045 Path path = getRootDir(master.getConfiguration());
1046
1047 FileSystem fs = path.getFileSystem(master.getConfiguration());
1048 return getTableFragmentation(fs, path);
1049 }
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
/**
 * Computes fragmentation per table: the percentage of column families that hold
 * more than one store file (i.e. are not fully compacted). A "-TOTAL-" entry
 * aggregates across all tables.
 * @param fs filesystem holding the root directory
 * @param hbaseRootDir hbase root directory
 * @return map of table name to fragmentation percent, plus "-TOTAL-"
 * @throws IOException on filesystem error
 */
public static Map<String, Integer> getTableFragmentation(
    final FileSystem fs, final Path hbaseRootDir)
    throws IOException {
  Map<String, Integer> frags = new HashMap<String, Integer>();
  int cfCountTotal = 0;   // families seen across all tables
  int cfFragTotal = 0;    // fragmented families across all tables
  DirFilter df = new DirFilter(fs);
  List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
  for (Path d : tableDirs) {
    int cfCount = 0;
    int cfFrag = 0;
    FileStatus[] regionDirs = fs.listStatus(d, df);
    for (FileStatus regionDir : regionDirs) {
      Path dd = regionDir.getPath();
      // Skip in-progress compaction working directories.
      if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
        continue;
      }
      FileStatus[] familyDirs = fs.listStatus(dd, df);
      for (FileStatus familyDir : familyDirs) {
        cfCount++;
        cfCountTotal++;
        Path family = familyDir.getPath();
        // More than one file in a family means it is not fully compacted.
        FileStatus[] familyStatus = fs.listStatus(family);
        if (familyStatus.length > 1) {
          cfFrag++;
          cfFragTotal++;
        }
      }
    }
    // Percentage of fragmented families for this table.
    frags.put(FSUtils.getTableName(d).getNameAsString(),
        Math.round((float) cfFrag / cfCount * 100));
  }
  // Cluster-wide aggregate under the reserved "-TOTAL-" key.
  frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
  return frags;
}
1100
1101
1102
1103
1104
1105
1106
1107
1108 public static boolean isPre020FileLayout(final FileSystem fs,
1109 final Path hbaseRootDir)
1110 throws IOException {
1111 Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
1112 "70236052"), "info"), "mapfiles");
1113 return fs.exists(mapfiles);
1114 }
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127 public static boolean isMajorCompactedPre020(final FileSystem fs,
1128 final Path hbaseRootDir)
1129 throws IOException {
1130
1131 List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
1132 for (Path d: tableDirs) {
1133
1134
1135
1136
1137 if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
1138 continue;
1139 }
1140 FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
1141 for (FileStatus regionDir : regionDirs) {
1142 Path dd = regionDir.getPath();
1143 if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
1144 continue;
1145 }
1146
1147 FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
1148 for (FileStatus familyDir : familyDirs) {
1149 Path family = familyDir.getPath();
1150 FileStatus[] infoAndMapfile = fs.listStatus(family);
1151
1152 if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) {
1153 LOG.debug(family.toString() +
1154 " has more than just info and mapfile: " + infoAndMapfile.length);
1155 return false;
1156 }
1157
1158 for (int ll = 0; ll < 2; ll++) {
1159 if (infoAndMapfile[ll].getPath().getName().equals("info") ||
1160 infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
1161 continue;
1162 LOG.debug("Unexpected directory name: " +
1163 infoAndMapfile[ll].getPath());
1164 return false;
1165 }
1166
1167
1168 FileStatus[] familyStatus =
1169 fs.listStatus(new Path(family, "mapfiles"));
1170 if (familyStatus.length > 1) {
1171 LOG.debug(family.toString() + " has " + familyStatus.length +
1172 " files.");
1173 return false;
1174 }
1175 }
1176 }
1177 }
1178 return true;
1179 }
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189 public static Path getTableDir(Path rootdir, final TableName tableName) {
1190 return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
1191 tableName.getQualifierAsString());
1192 }
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202 public static TableName getTableName(Path tablePath) {
1203 return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
1204 }
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214 public static Path getNamespaceDir(Path rootdir, final String namespace) {
1215 return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1216 new Path(namespace)));
1217 }
1218
1219
1220
1221
1222 static class FileFilter implements PathFilter {
1223 private final FileSystem fs;
1224
1225 public FileFilter(final FileSystem fs) {
1226 this.fs = fs;
1227 }
1228
1229 @Override
1230 public boolean accept(Path p) {
1231 try {
1232 return fs.isFile(p);
1233 } catch (IOException e) {
1234 LOG.debug("unable to verify if path=" + p + " is a regular file", e);
1235 return false;
1236 }
1237 }
1238 }
1239
1240
1241
1242
1243 public static class BlackListDirFilter implements PathFilter {
1244 private final FileSystem fs;
1245 private List<String> blacklist;
1246
1247
1248
1249
1250
1251
1252
1253 @SuppressWarnings("unchecked")
1254 public BlackListDirFilter(final FileSystem fs, final List<String> directoryNameBlackList) {
1255 this.fs = fs;
1256 blacklist =
1257 (List<String>) (directoryNameBlackList == null ? Collections.emptyList()
1258 : directoryNameBlackList);
1259 }
1260
1261 @Override
1262 public boolean accept(Path p) {
1263 boolean isValid = false;
1264 try {
1265 if (blacklist.contains(p.getName().toString())) {
1266 isValid = false;
1267 } else {
1268 isValid = fs.getFileStatus(p).isDir();
1269 }
1270 } catch (IOException e) {
1271 LOG.warn("An error occurred while verifying if [" + p.toString()
1272 + "] is a valid directory. Returning 'not valid' and continuing.", e);
1273 }
1274 return isValid;
1275 }
1276 }
1277
1278
1279
1280
/**
 * PathFilter accepting all directories (a {@link BlackListDirFilter} with an
 * empty blacklist).
 */
public static class DirFilter extends BlackListDirFilter {

  public DirFilter(FileSystem fs) {
    super(fs, null);
  }
}
1287
1288
1289
1290
1291
/**
 * PathFilter accepting only user-table directories: directories whose names are
 * not in {@code HConstants.HBASE_NON_TABLE_DIRS}.
 */
public static class UserTableDirFilter extends BlackListDirFilter {

  public UserTableDirFilter(FileSystem fs) {
    super(fs, HConstants.HBASE_NON_TABLE_DIRS);
  }
}
1298
1299
1300
1301
1302
1303
1304
1305
/**
 * Heuristically determines whether the underlying HDFS supports durable sync:
 * either {@code dfs.support.append} is enabled AND the old
 * {@code SequenceFile.Writer#syncFs} API exists, or the newer
 * {@code FSDataOutputStream#hflush} API is present.
 * @param conf configuration consulted for dfs.support.append
 * @return true when a sync/flush mechanism is available
 */
public static boolean isAppendSupported(final Configuration conf) {
  boolean append = conf.getBoolean("dfs.support.append", false);
  if (append) {
    try {
      // Old-style sync support (Hadoop branch-0.20-append).
      SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
      append = true;
    } catch (SecurityException e) {
    } catch (NoSuchMethodException e) {
      append = false;
    }
  }
  if (!append) {
    // Newer Hadoop exposes hflush regardless of the append config flag.
    try {
      FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
      append = true;
    } catch (NoSuchMethodException e) {
      append = false;
    }
  }
  return append;
}
1331
1332
1333
1334
1335
1336
1337 public static boolean isHDFS(final Configuration conf) throws IOException {
1338 FileSystem fs = FileSystem.get(conf);
1339 String scheme = fs.getUri().getScheme();
1340 return scheme.equalsIgnoreCase("hdfs");
1341 }
1342
1343
1344
1345
1346
1347
1348
1349
1350
/**
 * Recovers the filesystem lease on {@code p} so it can be safely reopened;
 * implementation is filesystem-specific (see the subclass selected by
 * {@code getInstance}).
 * @param fs filesystem holding the file
 * @param p file whose lease must be recovered
 * @param conf configuration
 * @param reporter progress callback; may be used to report or cancel
 * @throws IOException if recovery fails
 */
public abstract void recoverFileLease(final FileSystem fs, final Path p,
    Configuration conf, CancelableProgressable reporter) throws IOException;
1353
1354 public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
1355 throws IOException {
1356 List<Path> tableDirs = new LinkedList<Path>();
1357
1358 for(FileStatus status :
1359 fs.globStatus(new Path(rootdir,
1360 new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
1361 tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
1362 }
1363 return tableDirs;
1364 }
1365
1366
1367
1368
1369
1370
1371
1372
1373 public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir)
1374 throws IOException {
1375
1376 FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs));
1377 List<Path> tabledirs = new ArrayList<Path>(dirs.length);
1378 for (FileStatus dir: dirs) {
1379 tabledirs.add(dir.getPath());
1380 }
1381 return tabledirs;
1382 }
1383
1384
1385
1386
1387
1388
  /**
   * @param path path to examine
   * @return true if the path's string form contains the recovered-edits
   *         directory name anywhere in it
   */
  public static boolean isRecoveredEdits(Path path) {
    return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
  }
1392
1393
1394
1395
1396 public static class RegionDirFilter implements PathFilter {
1397
1398 final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
1399 final FileSystem fs;
1400
1401 public RegionDirFilter(FileSystem fs) {
1402 this.fs = fs;
1403 }
1404
1405 @Override
1406 public boolean accept(Path rd) {
1407 if (!regionDirPattern.matcher(rd.getName()).matches()) {
1408 return false;
1409 }
1410
1411 try {
1412 return fs.getFileStatus(rd).isDir();
1413 } catch (IOException ioe) {
1414
1415 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1416 return false;
1417 }
1418 }
1419 }
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429 public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
1430
1431 FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
1432 List<Path> regionDirs = new ArrayList<Path>(rds.length);
1433 for (FileStatus rdfs: rds) {
1434 Path rdPath = rdfs.getPath();
1435 regionDirs.add(rdPath);
1436 }
1437 return regionDirs;
1438 }
1439
1440
1441
1442
1443
1444 public static class FamilyDirFilter implements PathFilter {
1445 final FileSystem fs;
1446
1447 public FamilyDirFilter(FileSystem fs) {
1448 this.fs = fs;
1449 }
1450
1451 @Override
1452 public boolean accept(Path rd) {
1453 try {
1454
1455 HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
1456 } catch (IllegalArgumentException iae) {
1457
1458 return false;
1459 }
1460
1461 try {
1462 return fs.getFileStatus(rd).isDir();
1463 } catch (IOException ioe) {
1464
1465 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1466 return false;
1467 }
1468 }
1469 }
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479 public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
1480
1481 FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
1482 List<Path> familyDirs = new ArrayList<Path>(fds.length);
1483 for (FileStatus fdfs: fds) {
1484 Path fdPath = fdfs.getPath();
1485 familyDirs.add(fdPath);
1486 }
1487 return familyDirs;
1488 }
1489
1490
1491
1492
1493 public static class HFileFilter implements PathFilter {
1494
1495 final public static Pattern hfilePattern = Pattern.compile("^([0-9a-f]+)$");
1496
1497 final FileSystem fs;
1498
1499 public HFileFilter(FileSystem fs) {
1500 this.fs = fs;
1501 }
1502
1503 @Override
1504 public boolean accept(Path rd) {
1505 if (!hfilePattern.matcher(rd.getName()).matches()) {
1506 return false;
1507 }
1508
1509 try {
1510
1511 return !fs.getFileStatus(rd).isDir();
1512 } catch (IOException ioe) {
1513
1514 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1515 return false;
1516 }
1517 }
1518 }
1519
1520
1521
1522
1523
1524
  /**
   * @param conf configuration used to resolve the HBase root directory
   * @return the {@link FileSystem} that hosts the HBase root directory
   * @throws IOException if the filesystem cannot be obtained
   */
  public static FileSystem getCurrentFileSystem(Configuration conf)
      throws IOException {
    return getRootDir(conf).getFileSystem(conf);
  }
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
  /**
   * Adds every store file of one table to {@code map}, keyed by store file
   * name, by walking the layout {@code <tableDir>/<region>/<family>/<file>}.
   *
   * @param map map to populate; if null a new {@link HashMap} is created
   * @param fs filesystem to walk
   * @param hbaseRootDir HBase root directory
   * @param tableName table whose store files should be collected
   * @return the populated map (same instance as {@code map} when non-null)
   * @throws IOException if any listing fails
   */
  public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
      final FileSystem fs, final Path hbaseRootDir, TableName tableName)
      throws IOException {
    if (map == null) {
      // Callers may pass null to get a fresh map.
      map = new HashMap<String, Path>();
    }

    // Resolve the table's directory under the root.
    Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);

    // Blacklist filter used below when listing family dirs inside a region.
    PathFilter df = new BlackListDirFilter(fs, HConstants.HBASE_NON_TABLE_DIRS);
    // NOTE(review): this region-level listing is unfiltered apart from the
    // explicit compaction-dir skip below — confirm nothing else can appear here.
    FileStatus[] regionDirs = fs.listStatus(tableDir);
    for (FileStatus regionDir : regionDirs) {
      Path dd = regionDir.getPath();
      if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
        continue;
      }
      // List the column-family directories inside the region.
      FileStatus[] familyDirs = fs.listStatus(dd, df);
      for (FileStatus familyDir : familyDirs) {
        Path family = familyDir.getPath();
        // Every entry in a family directory is treated as a store file and
        // added to the map keyed by its file name.
        FileStatus[] familyStatus = fs.listStatus(family);
        for (FileStatus sfStatus : familyStatus) {
          Path sf = sfStatus.getPath();
          map.put( sf.getName(), sf);
        }
      }
    }
    return map;
  }
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594 public static Map<String, Path> getTableStoreFilePathMap(
1595 final FileSystem fs, final Path hbaseRootDir)
1596 throws IOException {
1597 Map<String, Path> map = new HashMap<String, Path>();
1598
1599
1600
1601
1602
1603 for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
1604 getTableStoreFilePathMap(map, fs, hbaseRootDir,
1605 FSUtils.getTableName(tableDir));
1606 }
1607 return map;
1608 }
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621 public static FileStatus [] listStatus(final FileSystem fs,
1622 final Path dir, final PathFilter filter) throws IOException {
1623 FileStatus [] status = null;
1624 try {
1625 status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
1626 } catch (FileNotFoundException fnfe) {
1627
1628 if (LOG.isTraceEnabled()) {
1629 LOG.trace(dir + " doesn't exist");
1630 }
1631 }
1632 if (status == null || status.length < 1) return null;
1633 return status;
1634 }
1635
1636
1637
1638
1639
1640
1641
1642
1643
  /**
   * Convenience overload of {@link #listStatus(FileSystem, Path, PathFilter)}
   * with no filter.
   *
   * @param fs filesystem to list
   * @param dir directory to list
   * @return the statuses, or null if the dir is absent or empty
   * @throws IOException on listing failures other than a missing directory
   */
  public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
    return listStatus(fs, dir, null);
  }
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
  /**
   * Thin wrapper around {@link FileSystem#delete(Path, boolean)}.
   *
   * @param fs filesystem
   * @param path path to delete
   * @param recursive whether to delete directory contents recursively
   * @return the value returned by the underlying delete call
   * @throws IOException if the delete fails
   */
  public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
      throws IOException {
    return fs.delete(path, recursive);
  }
1661
1662
1663
1664
1665
1666
1667
1668
1669
  /**
   * Thin wrapper around {@link FileSystem#exists(Path)}.
   *
   * @param fs filesystem
   * @param path path to check
   * @return true if the path exists
   * @throws IOException if the existence check fails
   */
  public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
    return fs.exists(path);
  }
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684 public static void checkAccess(UserGroupInformation ugi, FileStatus file,
1685 FsAction action) throws AccessDeniedException {
1686 if (ugi.getShortUserName().equals(file.getOwner())) {
1687 if (file.getPermission().getUserAction().implies(action)) {
1688 return;
1689 }
1690 } else if (contains(ugi.getGroupNames(), file.getGroup())) {
1691 if (file.getPermission().getGroupAction().implies(action)) {
1692 return;
1693 }
1694 } else if (file.getPermission().getOtherAction().implies(action)) {
1695 return;
1696 }
1697 throw new AccessDeniedException("Permission denied:" + " action=" + action
1698 + " path=" + file.getPath() + " user=" + ugi.getShortUserName());
1699 }
1700
1701 private static boolean contains(String[] groups, String user) {
1702 for (String group : groups) {
1703 if (group.equals(user)) {
1704 return true;
1705 }
1706 }
1707 return false;
1708 }
1709
1710
1711
1712
1713
1714
1715
1716
  /**
   * Logs, at DEBUG level, a recursive tree of the filesystem under
   * {@code root}.
   *
   * @param fs filesystem to walk
   * @param root directory to start from
   * @param LOG log to write to
   * @throws IOException if the walk fails
   */
  public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
      throws IOException {
    LOG.debug("Current file system:");
    logFSTree(LOG, fs, root, "|-");
  }
1722
1723
1724
1725
1726
1727
1728 private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
1729 throws IOException {
1730 FileStatus[] files = FSUtils.listStatus(fs, root, null);
1731 if (files == null) return;
1732
1733 for (FileStatus file : files) {
1734 if (file.isDir()) {
1735 LOG.debug(prefix + file.getPath().getName() + "/");
1736 logFSTree(LOG, fs, file.getPath(), prefix + "---");
1737 } else {
1738 LOG.debug(prefix + file.getPath().getName());
1739 }
1740 }
1741 }
1742
  /**
   * Renames {@code src} to {@code dest}, first refreshing the file's
   * modification time so the renamed file carries "now" as its mtime.
   *
   * NOTE(review): the mtime is set before the rename, so if the rename fails
   * the source file's timestamp has already been changed — confirm callers
   * tolerate this.
   *
   * @param fs filesystem
   * @param src source path
   * @param dest destination path
   * @return the value returned by the underlying rename call
   * @throws IOException if setting the time or renaming fails
   */
  public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
      throws IOException {
    // Set the modify time to "now" (access time left unchanged via -1).
    fs.setTimes(src, EnvironmentEdgeManager.currentTimeMillis(), -1);
    return fs.rename(src, dest);
  }
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
  /**
   * Computes HDFS block locality for every region of every table, using the
   * configured thread-pool size ({@code THREAD_POOLSIZE}, falling back to
   * {@code DEFAULT_THREAD_POOLSIZE}).
   *
   * @param conf configuration used to locate the filesystem and pool size
   * @return mapping of region encoded name to (hostname -> locality degree)
   * @throws IOException if the filesystem scan fails
   */
  public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
      final Configuration conf) throws IOException {
    // null desiredTable means "all tables".
    return getRegionDegreeLocalityMappingFromFS(
        conf, null,
        conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE));

  }
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787 public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
1788 final Configuration conf, final String desiredTable, int threadPoolSize)
1789 throws IOException {
1790 Map<String, Map<String, Float>> regionDegreeLocalityMapping =
1791 new ConcurrentHashMap<String, Map<String, Float>>();
1792 getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null,
1793 regionDegreeLocalityMapping);
1794 return regionDegreeLocalityMapping;
1795 }
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817 private static void getRegionLocalityMappingFromFS(
1818 final Configuration conf, final String desiredTable,
1819 int threadPoolSize,
1820 Map<String, String> regionToBestLocalityRSMapping,
1821 Map<String, Map<String, Float>> regionDegreeLocalityMapping)
1822 throws IOException {
1823 FileSystem fs = FileSystem.get(conf);
1824 Path rootPath = FSUtils.getRootDir(conf);
1825 long startTime = EnvironmentEdgeManager.currentTimeMillis();
1826 Path queryPath;
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
  /**
   * Checks the short-circuit read configuration: warns if
   * {@code dfs.client.read.shortcircuit.skip.checksum} is enabled (HBase has
   * its own checksumming; see HBASE-6868) and then validates the
   * short-circuit read buffer size.
   *
   * @param conf configuration to inspect and adjust
   */
  public static void setupShortCircuitRead(final Configuration conf) {
    // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property.
    boolean shortCircuitSkipChecksum =
      conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
    if (shortCircuitSkipChecksum) {
      LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
        "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
        "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
      // This assert always fires when assertions are enabled (-ea), e.g. in
      // tests — presumably intentional, to make the misconfiguration fatal
      // there while only warning in production.
      assert !shortCircuitSkipChecksum;
    }
    checkShortCircuitReadBufferSize(conf);
  }
1940
1941
1942
1943
1944
1945 public static void checkShortCircuitReadBufferSize(final Configuration conf) {
1946 final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
1947 final int notSet = -1;
1948
1949 final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
1950 int size = conf.getInt(dfsKey, notSet);
1951
1952 if (size != notSet) return;
1953
1954 int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
1955 conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
1956 }
1957 }