1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.util;
20
21 import java.io.ByteArrayInputStream;
22 import java.io.DataInputStream;
23 import java.io.EOFException;
24 import java.io.FileNotFoundException;
25 import java.io.IOException;
26 import java.io.InputStream;
27 import java.io.InterruptedIOException;
28 import java.lang.reflect.InvocationTargetException;
29 import java.lang.reflect.Method;
30 import java.net.InetSocketAddress;
31 import java.net.URI;
32 import java.net.URISyntaxException;
33 import java.util.ArrayList;
34 import java.util.Collections;
35 import java.util.HashMap;
36 import java.util.LinkedList;
37 import java.util.List;
38 import java.util.Map;
39 import java.util.concurrent.ArrayBlockingQueue;
40 import java.util.concurrent.ConcurrentHashMap;
41 import java.util.concurrent.ThreadPoolExecutor;
42 import java.util.concurrent.TimeUnit;
43 import java.util.regex.Pattern;
44
45 import org.apache.commons.logging.Log;
46 import org.apache.commons.logging.LogFactory;
47 import org.apache.hadoop.hbase.classification.InterfaceAudience;
48 import org.apache.hadoop.conf.Configuration;
49 import org.apache.hadoop.fs.BlockLocation;
50 import org.apache.hadoop.fs.FSDataInputStream;
51 import org.apache.hadoop.fs.FSDataOutputStream;
52 import org.apache.hadoop.fs.FileStatus;
53 import org.apache.hadoop.fs.FileSystem;
54 import org.apache.hadoop.fs.Path;
55 import org.apache.hadoop.fs.PathFilter;
56 import org.apache.hadoop.fs.permission.FsAction;
57 import org.apache.hadoop.fs.permission.FsPermission;
58 import org.apache.hadoop.hbase.ClusterId;
59 import org.apache.hadoop.hbase.HColumnDescriptor;
60 import org.apache.hadoop.hbase.HConstants;
61 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
62 import org.apache.hadoop.hbase.HRegionInfo;
63 import org.apache.hadoop.hbase.RemoteExceptionHandler;
64 import org.apache.hadoop.hbase.TableName;
65 import org.apache.hadoop.hbase.exceptions.DeserializationException;
66 import org.apache.hadoop.hbase.fs.HFileSystem;
67 import org.apache.hadoop.hbase.master.HMaster;
68 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
69 import org.apache.hadoop.hbase.security.AccessDeniedException;
70 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
71 import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
72 import org.apache.hadoop.hbase.regionserver.HRegion;
73 import org.apache.hadoop.hdfs.DistributedFileSystem;
74 import org.apache.hadoop.io.IOUtils;
75 import org.apache.hadoop.io.SequenceFile;
76 import org.apache.hadoop.security.UserGroupInformation;
77 import org.apache.hadoop.util.Progressable;
78 import org.apache.hadoop.util.ReflectionUtils;
79 import org.apache.hadoop.util.StringUtils;
80
81 import com.google.common.primitives.Ints;
82 import com.google.protobuf.InvalidProtocolBufferException;
83
84
85
86
87 @InterfaceAudience.Private
88 public abstract class FSUtils {
  private static final Log LOG = LogFactory.getLog(FSUtils.class);


  /** Octal permission string granting full rwx access; base a configured umask is applied to. */
  public static final String FULL_RWX_PERMISSIONS = "777";
  /** Configuration key for the locality-check thread pool size. */
  private static final String THREAD_POOLSIZE = "hbase.client.localityCheck.threadPoolSize";
  /** Default locality-check thread pool size. */
  private static final int DEFAULT_THREAD_POOLSIZE = 2;


  /** Set to true on Windows platforms (derived from the os.name system property). */
  public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");

  /** Not instantiable directly; obtain an instance via getInstance(FileSystem, Configuration). */
  protected FSUtils() {
    super();
  }
102
103
104
105
106
107
108
109
110 public static boolean isStartingWithPath(final Path rootPath, final String path) {
111 String uriRootPath = rootPath.toUri().getPath();
112 String tailUriPath = (new Path(path)).toUri().getPath();
113 return tailUriPath.startsWith(uriRootPath);
114 }
115
116
117
118
119
120
121
122
123
124 public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
125 return isMatchingTail(pathToSearch, new Path(pathTail));
126 }
127
128
129
130
131
132
133
134
135
  /**
   * Compares two paths component by component, walking from leaf toward root,
   * and returns true when every name component matches. Paths of differing
   * depth never match (checked up front), so in effect this tests whether the
   * two paths name the same file, ignoring scheme/authority differences.
   *
   * @param pathToSearch path to examine
   * @param pathTail path whose components must all match
   * @return true when the component-wise comparison succeeds
   */
  public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
    if (pathToSearch.depth() != pathTail.depth()) return false;
    Path tailPath = pathTail;
    String tailName;
    Path toSearch = pathToSearch;
    String toSearchName;
    boolean result = false;
    do {
      tailName = tailPath.getName();
      // An empty name means we walked off the top of the tail having matched
      // every component so far: success.
      if (tailName == null || tailName.length() <= 0) {
        result = true;
        break;
      }
      toSearchName = toSearch.getName();
      // Ran out of components on the searched path before the tail was
      // exhausted: cannot match.
      if (toSearchName == null || toSearchName.length() <= 0) break;
      // Step both paths one component toward the root.
      tailPath = tailPath.getParent();
      toSearch = toSearch.getParent();
    } while(tailName.equals(toSearchName));
    return result;
  }
157
158 public static FSUtils getInstance(FileSystem fs, Configuration conf) {
159 String scheme = fs.getUri().getScheme();
160 if (scheme == null) {
161 LOG.warn("Could not find scheme for uri " +
162 fs.getUri() + ", default to hdfs");
163 scheme = "hdfs";
164 }
165 Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
166 scheme + ".impl", FSHDFSUtils.class);
167 FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
168 return fsUtils;
169 }
170
171
172
173
174
175
176
177
178 public static boolean deleteDirectory(final FileSystem fs, final Path dir)
179 throws IOException {
180 return fs.exists(dir) && fs.delete(dir, true);
181 }
182
183
184
185
186
187
188
189
190 public static boolean deleteRegionDir(final Configuration conf, final HRegionInfo hri)
191 throws IOException {
192 Path rootDir = getRootDir(conf);
193 FileSystem fs = rootDir.getFileSystem(conf);
194 return deleteDirectory(fs,
195 new Path(getTableDir(rootDir, hri.getTable()), hri.getEncodedName()));
196 }
197
198
199
200
201
202
203
204
205
206
207
208
209 public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
210 Method m = null;
211 Class<? extends FileSystem> cls = fs.getClass();
212 try {
213 m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
214 } catch (NoSuchMethodException e) {
215 LOG.info("FileSystem doesn't support getDefaultBlockSize");
216 } catch (SecurityException e) {
217 LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
218 m = null;
219 }
220 if (m == null) {
221 return fs.getDefaultBlockSize(path);
222 } else {
223 try {
224 Object ret = m.invoke(fs, path);
225 return ((Long)ret).longValue();
226 } catch (Exception e) {
227 throw new IOException(e);
228 }
229 }
230 }
231
232
233
234
235
236
237
238
239
240
241
242
243 public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
244 Method m = null;
245 Class<? extends FileSystem> cls = fs.getClass();
246 try {
247 m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
248 } catch (NoSuchMethodException e) {
249 LOG.info("FileSystem doesn't support getDefaultReplication");
250 } catch (SecurityException e) {
251 LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
252 m = null;
253 }
254 if (m == null) {
255 return fs.getDefaultReplication(path);
256 } else {
257 try {
258 Object ret = m.invoke(fs, path);
259 return ((Number)ret).shortValue();
260 } catch (Exception e) {
261 throw new IOException(e);
262 }
263 }
264 }
265
266
267
268
269
270
271
272
273
274
275
276 public static int getDefaultBufferSize(final FileSystem fs) {
277 return fs.getConf().getInt("io.file.buffer.size", 4096);
278 }
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
  /**
   * Create the specified file, overwriting any existing file, with default
   * buffer size, replication and block size. When {@code fs} is an
   * {@link HFileSystem} backed by a {@link DistributedFileSystem}, attempts
   * (via reflection, since the overload is not in all Hadoop versions) the DFS
   * create() variant that accepts favored nodes for block placement; on any
   * reflection-related failure it falls back to the plain
   * {@link #create(FileSystem, Path, FsPermission, boolean)}.
   *
   * @param fs target filesystem
   * @param path file to create
   * @param perm permissions for the new file
   * @param favoredNodes preferred datanodes for block placement (DFS only)
   * @return an open output stream to the new file
   * @throws IOException if creation fails
   */
  public static FSDataOutputStream create(FileSystem fs, Path path,
      FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException {
    if (fs instanceof HFileSystem) {
      FileSystem backingFs = ((HFileSystem)fs).getBackingFs();
      if (backingFs instanceof DistributedFileSystem) {
        // Reflectively invoke the favored-nodes create() overload; it does not
        // exist on all supported Hadoop versions.
        try {
          return (FSDataOutputStream) (DistributedFileSystem.class
              .getDeclaredMethod("create", Path.class, FsPermission.class,
                  boolean.class, int.class, short.class, long.class,
                  Progressable.class, InetSocketAddress[].class)
                  .invoke(backingFs, path, perm, true,
                      getDefaultBufferSize(backingFs),
                      getDefaultReplication(backingFs, path),
                      getDefaultBlockSize(backingFs, path),
                      null, favoredNodes));
        } catch (InvocationTargetException ite) {
          // The target method threw: surface its real cause as an IOException.
          throw new IOException(ite.getCause());
        } catch (NoSuchMethodException e) {
          LOG.debug("DFS Client does not support most favored nodes create; using default create");
          if (LOG.isTraceEnabled()) LOG.trace("Ignoring; use default create", e);
        } catch (IllegalArgumentException e) {
          LOG.debug("Ignoring (most likely Reflection related exception) " + e);
        } catch (SecurityException e) {
          LOG.debug("Ignoring (most likely Reflection related exception) " + e);
        } catch (IllegalAccessException e) {
          LOG.debug("Ignoring (most likely Reflection related exception) " + e);
        }
      }
    }
    // Fallback: ordinary create without favored-node placement.
    return create(fs, path, perm, true);
  }
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350 public static FSDataOutputStream create(FileSystem fs, Path path,
351 FsPermission perm, boolean overwrite) throws IOException {
352 if (LOG.isTraceEnabled()) {
353 LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
354 }
355 return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
356 getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
357 }
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372 public static FsPermission getFilePermissions(final FileSystem fs,
373 final Configuration conf, final String permssionConfKey) {
374 boolean enablePermissions = conf.getBoolean(
375 HConstants.ENABLE_DATA_FILE_UMASK, false);
376
377 if (enablePermissions) {
378 try {
379 FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
380
381 String mask = conf.get(permssionConfKey);
382 if (mask == null)
383 return FsPermission.getFileDefault();
384
385 FsPermission umask = new FsPermission(mask);
386 return perm.applyUMask(umask);
387 } catch (IllegalArgumentException e) {
388 LOG.warn(
389 "Incorrect umask attempted to be created: "
390 + conf.get(permssionConfKey)
391 + ", using default file permissions.", e);
392 return FsPermission.getFileDefault();
393 }
394 }
395 return FsPermission.getFileDefault();
396 }
397
398
399
400
401
402
403
404 public static void checkFileSystemAvailable(final FileSystem fs)
405 throws IOException {
406 if (!(fs instanceof DistributedFileSystem)) {
407 return;
408 }
409 IOException exception = null;
410 DistributedFileSystem dfs = (DistributedFileSystem) fs;
411 try {
412 if (dfs.exists(new Path("/"))) {
413 return;
414 }
415 } catch (IOException e) {
416 exception = RemoteExceptionHandler.checkIOException(e);
417 }
418 try {
419 fs.close();
420 } catch (Exception e) {
421 LOG.error("file system close failed: ", e);
422 }
423 IOException io = new IOException("File system is not available");
424 io.initCause(exception);
425 throw io;
426 }
427
428
429
430
431
432
433
434
435
  /**
   * Report whether the DFS namenode is in safe mode. Prefers the two-argument
   * {@code setSafeMode(SafeModeAction, boolean)} (probed via reflection since
   * it only exists on newer Hadoop; the boolean presumably requests the check
   * be performed against the active namenode — TODO confirm) and falls back to
   * the one-argument form on older versions.
   *
   * @param dfs filesystem to query
   * @return true when the namenode is in safe mode
   * @throws IOException if the safe-mode query fails
   */
  private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
    boolean inSafeMode = false;
    try {
      Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
      inSafeMode = (Boolean) m.invoke(dfs,
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
    } catch (Exception e) {
      // An IOException from the invocation is a real failure; anything else
      // (e.g. NoSuchMethodException) means the overload is absent — fall back.
      if (e instanceof IOException) throw (IOException) e;

      inSafeMode = dfs.setSafeMode(
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
    }
    return inSafeMode;
  }
452
453
454
455
456
457
458 public static void checkDfsSafeMode(final Configuration conf)
459 throws IOException {
460 boolean isInSafeMode = false;
461 FileSystem fs = FileSystem.get(conf);
462 if (fs instanceof DistributedFileSystem) {
463 DistributedFileSystem dfs = (DistributedFileSystem)fs;
464 isInSafeMode = isInSafeMode(dfs);
465 }
466 if (isInSafeMode) {
467 throw new IOException("File system is in safemode, it can't be written now");
468 }
469 }
470
471
472
473
474
475
476
477
478
479
  /**
   * Read the version string from the hbase.version file under {@code rootdir}.
   * Supports both the protobuf-serialized format (detected by the PB magic
   * prefix) and the legacy writeUTF format.
   *
   * @param fs filesystem containing the root directory
   * @param rootdir HBase root directory
   * @return the version string, or null when the file is absent or empty
   * @throws IOException on read failure
   * @throws DeserializationException when the PB content cannot be parsed
   */
  public static String getVersion(FileSystem fs, Path rootdir)
      throws IOException, DeserializationException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    FileStatus[] status = null;
    try {
      // Stat the file to learn its length before reading it in full.
      status = fs.listStatus(versionFile);
    } catch (FileNotFoundException fnfe) {
      return null;
    }
    if (status == null || status.length == 0) return null;
    String version = null;
    byte [] content = new byte [(int)status[0].getLen()];
    FSDataInputStream s = fs.open(versionFile);
    try {
      IOUtils.readFully(s, content, 0, content.length);
      if (ProtobufUtil.isPBMagicPrefix(content)) {
        version = parseVersionFrom(content);
      } else {
        // Legacy (pre-protobuf) format: a single writeUTF string.
        InputStream is = new ByteArrayInputStream(content);
        DataInputStream dis = new DataInputStream(is);
        try {
          version = dis.readUTF();
        } finally {
          dis.close();
        }
      }
    } catch (EOFException eof) {
      LOG.warn("Version file was empty, odd, will try to set it.");
    } finally {
      s.close();
    }
    return version;
  }
516
517
518
519
520
521
522
523 static String parseVersionFrom(final byte [] bytes)
524 throws DeserializationException {
525 ProtobufUtil.expectPBMagicPrefix(bytes);
526 int pblen = ProtobufUtil.lengthOfPBMagic();
527 FSProtos.HBaseVersionFileContent.Builder builder =
528 FSProtos.HBaseVersionFileContent.newBuilder();
529 FSProtos.HBaseVersionFileContent fileContent;
530 try {
531 fileContent = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
532 return fileContent.getVersion();
533 } catch (InvalidProtocolBufferException e) {
534
535 throw new DeserializationException(e);
536 }
537 }
538
539
540
541
542
543
544 static byte [] toVersionByteArray(final String version) {
545 FSProtos.HBaseVersionFileContent.Builder builder =
546 FSProtos.HBaseVersionFileContent.newBuilder();
547 return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray());
548 }
549
550
551
552
553
554
555
556
557
558
559
560 public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
561 throws IOException, DeserializationException {
562 checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
563 }
564
565
566
567
568
569
570
571
572
573
574
575
576
  /**
   * Verify the HBase filesystem layout version. A missing version file on a
   * fresh install (no meta region yet) is created; a matching version returns
   * quietly; anything else — a mismatched version, or a missing version file
   * alongside an existing meta region — raises
   * {@link FileSystemVersionException}.
   *
   * @param fs filesystem containing the root directory
   * @param rootdir HBase root directory
   * @param message whether to print a warning to stdout before throwing
   * @param wait milliseconds to sleep between version-file write retries
   * @param retries number of version-file write attempts
   * @throws IOException on read/write failure
   * @throws DeserializationException when the version file cannot be parsed
   * @throws FileSystemVersionException on layout version mismatch
   */
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message, int wait, int retries)
      throws IOException, DeserializationException {
    String version = getVersion(fs, rootdir);
    if (version == null) {
      if (!metaRegionExists(fs, rootdir)) {
        // Fresh install: no version file and no meta region yet, so write the
        // current version and we are done.
        setVersion(fs, rootdir, wait, retries);
        return;
      }
    } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return;

    // Either the version does not match, or the file is missing although meta
    // exists (a broken layout); both cases fall through to the failure below.
    String msg = "HBase file layout needs to be upgraded."
        + " You have version " + version
        + " and I want version " + HConstants.FILE_SYSTEM_VERSION
        + ". Consult http://hbase.apache.org/book.html for further information about upgrading HBase."
        + " Is your hbase.rootdir valid? If so, you may need to run "
        + "'hbase hbck -fixVersionFile'.";
    if (message) {
      System.out.println("WARNING! " + msg);
    }
    throw new FileSystemVersionException(msg);
  }
603
604
605
606
607
608
609
610
611 public static void setVersion(FileSystem fs, Path rootdir)
612 throws IOException {
613 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
614 HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
615 }
616
617
618
619
620
621
622
623
624
625
626 public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
627 throws IOException {
628 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
629 }
630
631
632
633
634
635
636
637
638
639
640
641
  /**
   * Write the given layout version to the hbase.version file. The content is
   * first written to a file under the HBase temp directory and then renamed
   * into place so readers never observe a partially written version file.
   * Failed attempts are retried {@code retries} times with {@code wait} ms
   * between attempts.
   *
   * @param fs filesystem containing the root directory
   * @param rootdir HBase root directory
   * @param version version string to record
   * @param wait milliseconds to sleep between retries
   * @param retries number of attempts before giving up
   * @throws IOException when all attempts fail
   * @throws InterruptedIOException when interrupted while sleeping
   */
  public static void setVersion(FileSystem fs, Path rootdir, String version,
      int wait, int retries) throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR +
        HConstants.VERSION_FILE_NAME);
    while (true) {
      try {
        // Write to a temp file first, then atomically rename into place.
        FSDataOutputStream s = fs.create(tempVersionFile);
        try {
          s.write(toVersionByteArray(version));
          s.close();
          s = null;

          // Move the temp file into its final location.
          if (!fs.rename(tempVersionFile, versionFile)) {
            throw new IOException("Unable to move temp version file to " + versionFile);
          }
        } finally {
          // s is null on the success path (already closed above); only close
          // here when the write or rename failed, and swallow any secondary
          // close failure so the original exception propagates.
          try {
            if (s != null) s.close();
          } catch (IOException ignore) { }
        }
        LOG.info("Created version file at " + rootdir.toString() + " with version=" + version);
        return;
      } catch (IOException e) {
        if (retries > 0) {
          LOG.debug("Unable to create version file at " + rootdir.toString() + ", retrying", e);
          fs.delete(versionFile, false);
          try {
            if (wait > 0) {
              Thread.sleep(wait);
            }
          } catch (InterruptedException ie) {
            throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
          }
          retries--;
        } else {
          throw e;
        }
      }
    }
  }
690
691
692
693
694
695
696
697
698
  /**
   * Check whether the cluster-ID file exists under {@code rootdir}. On an
   * IOException the check is retried after sleeping {@code wait} ms.
   * NOTE(review): {@code wait} is never decremented, so with wait &gt; 0 a
   * persistent IOException retries forever — confirm this unbounded retry is
   * intended.
   *
   * @param fs filesystem containing the root directory
   * @param rootdir HBase root directory
   * @param wait milliseconds to sleep between retries; 0 or less fails fast
   * @return true when the cluster-ID file exists
   * @throws IOException when the check fails and wait is not positive
   * @throws InterruptedIOException when interrupted while sleeping
   */
  public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
      int wait) throws IOException {
    while (true) {
      try {
        Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        return fs.exists(filePath);
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
              ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException e) {
            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
          }
        } else {
          throw ioe;
        }
      }
    }
  }
720
721
722
723
724
725
726
727
  /**
   * Read the cluster ID from the hbase.id file under {@code rootdir}.
   * Content is parsed with {@link ClusterId#parseFrom(byte[])}; when the file
   * is in the legacy (non-protobuf) writeUTF format it is re-read as UTF and
   * rewritten in protobuf form via {@link #rewriteAsPb}.
   * NOTE(review): parseFrom is invoked before the legacy-format check, so it
   * presumably tolerates non-PB content without throwing — confirm, otherwise
   * the legacy branch below is unreachable.
   *
   * @param fs filesystem containing the root directory
   * @param rootdir HBase root directory
   * @return the cluster ID, or null when the file does not exist
   * @throws IOException on read failure or unparseable content
   */
  public static ClusterId getClusterId(FileSystem fs, Path rootdir)
      throws IOException {
    Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
    ClusterId clusterId = null;
    FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null;
    if (status != null) {
      int len = Ints.checkedCast(status.getLen());
      byte [] content = new byte[len];
      FSDataInputStream in = fs.open(idPath);
      try {
        in.readFully(content);
      } catch (EOFException eof) {
        LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
      } finally{
        in.close();
      }
      try {
        clusterId = ClusterId.parseFrom(content);
      } catch (DeserializationException e) {
        throw new IOException("content=" + Bytes.toString(content), e);
      }
      // Legacy (pre-protobuf) file: re-read as writeUTF and upgrade in place.
      if (!ProtobufUtil.isPBMagicPrefix(content)) {
        String cid = null;
        in = fs.open(idPath);
        try {
          cid = in.readUTF();
          clusterId = new ClusterId(cid);
        } catch (EOFException eof) {
          LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
        } finally {
          in.close();
        }
        rewriteAsPb(fs, rootdir, idPath, clusterId);
      }
      return clusterId;
    } else {
      LOG.warn("Cluster ID file does not exist at " + idPath.toString());
    }
    return clusterId;
  }
769
770
771
772
773
774 private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
775 final ClusterId cid)
776 throws IOException {
777
778
779 Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
780 if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
781 setClusterId(fs, rootdir, cid, 100);
782 if (!fs.delete(movedAsideName, false)) {
783 throw new IOException("Failed delete of " + movedAsideName);
784 }
785 LOG.debug("Rewrote the hbase.id file as pb");
786 }
787
788
789
790
791
792
793
794
795
796
  /**
   * Write the cluster ID to the hbase.id file under {@code rootdir}. The
   * content is written to a temp file and renamed into place so readers never
   * observe a partial file. On IOException the write is retried after
   * sleeping {@code wait} ms.
   * NOTE(review): {@code wait} is never decremented, so with wait &gt; 0 a
   * persistent failure retries forever — confirm this unbounded retry is
   * intended.
   *
   * @param fs filesystem containing the root directory
   * @param rootdir HBase root directory
   * @param clusterId ID to record
   * @param wait milliseconds to sleep between retries; 0 or less fails fast
   * @throws IOException when the write fails and wait is not positive
   * @throws InterruptedIOException when interrupted while sleeping
   */
  public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
      int wait) throws IOException {
    while (true) {
      try {
        Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        Path tempIdFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY +
            Path.SEPARATOR + HConstants.CLUSTER_ID_FILE_NAME);
        // Write to a temp file first, then atomically rename into place.
        FSDataOutputStream s = fs.create(tempIdFile);
        try {
          s.write(clusterId.toByteArray());
          s.close();
          s = null;

          // Move the temp file into its final location.
          if (!fs.rename(tempIdFile, idFile)) {
            throw new IOException("Unable to move temp version file to " + idFile);
          }
        } finally {
          // s is null on the success path; only close (swallowing secondary
          // failures) when the write or rename failed.
          try {
            if (s != null) s.close();
          } catch (IOException ignore) { }
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Created cluster ID file at " + idFile.toString() + " with ID: " + clusterId);
        }
        return;
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
              ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException e) {
            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
          }
        } else {
          throw ioe;
        }
      }
    }
  }
840
841
842
843
844
845
846
847
848 public static Path validateRootPath(Path root) throws IOException {
849 try {
850 URI rootURI = new URI(root.toString());
851 String scheme = rootURI.getScheme();
852 if (scheme == null) {
853 throw new IOException("Root directory does not have a scheme");
854 }
855 return root;
856 } catch (URISyntaxException e) {
857 IOException io = new IOException("Root directory path is not a valid " +
858 "URI -- check your " + HConstants.HBASE_DIR + " configuration");
859 io.initCause(e);
860 throw io;
861 }
862 }
863
864
865
866
867
868
869
870
871
872 public static String removeRootPath(Path path, final Configuration conf) throws IOException {
873 Path root = FSUtils.getRootDir(conf);
874 String pathStr = path.toString();
875
876 if (!pathStr.startsWith(root.toString())) return pathStr;
877
878 return pathStr.substring(root.toString().length() + 1);
879 }
880
881
882
883
884
885
886
887 public static void waitOnSafeMode(final Configuration conf,
888 final long wait)
889 throws IOException {
890 FileSystem fs = FileSystem.get(conf);
891 if (!(fs instanceof DistributedFileSystem)) return;
892 DistributedFileSystem dfs = (DistributedFileSystem)fs;
893
894 while (isInSafeMode(dfs)) {
895 LOG.info("Waiting for dfs to exit safe mode...");
896 try {
897 Thread.sleep(wait);
898 } catch (InterruptedException e) {
899 throw (InterruptedIOException)new InterruptedIOException().initCause(e);
900 }
901 }
902 }
903
904
905
906
907
908
909
910
911
912
913
914 public static String getPath(Path p) {
915 return p.toUri().getPath();
916 }
917
918
919
920
921
922
923
924 public static Path getRootDir(final Configuration c) throws IOException {
925 Path p = new Path(c.get(HConstants.HBASE_DIR));
926 FileSystem fs = p.getFileSystem(c);
927 return p.makeQualified(fs);
928 }
929
930 public static void setRootDir(final Configuration c, final Path root) throws IOException {
931 c.set(HConstants.HBASE_DIR, root.toString());
932 }
933
934 public static void setFsDefault(final Configuration c, final Path root) throws IOException {
935 c.set("fs.defaultFS", root.toString());
936 }
937
938
939
940
941
942
943
944
945
946 @SuppressWarnings("deprecation")
947 public static boolean metaRegionExists(FileSystem fs, Path rootdir)
948 throws IOException {
949 Path metaRegionDir =
950 HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
951 return fs.exists(metaRegionDir);
952 }
953
954
955
956
957
958
959
960
961
962 static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
963 final FileSystem fs, FileStatus status, long start, long length)
964 throws IOException {
965 HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
966 BlockLocation [] blockLocations =
967 fs.getFileBlockLocations(status, start, length);
968 for(BlockLocation bl : blockLocations) {
969 String [] hosts = bl.getHosts();
970 long len = bl.getLength();
971 blocksDistribution.addHostsAndBlockWeight(hosts, len);
972 }
973
974 return blocksDistribution;
975 }
976
977
978
979
980
981
982
983
984
985
986
987
988 public static boolean isMajorCompacted(final FileSystem fs,
989 final Path hbaseRootDir)
990 throws IOException {
991 List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
992 PathFilter regionFilter = new RegionDirFilter(fs);
993 PathFilter familyFilter = new FamilyDirFilter(fs);
994 for (Path d : tableDirs) {
995 FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
996 for (FileStatus regionDir : regionDirs) {
997 Path dd = regionDir.getPath();
998
999 FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
1000 for (FileStatus familyDir : familyDirs) {
1001 Path family = familyDir.getPath();
1002
1003 FileStatus[] familyStatus = fs.listStatus(family);
1004 if (familyStatus.length > 1) {
1005 LOG.debug(family.toString() + " has " + familyStatus.length +
1006 " files.");
1007 return false;
1008 }
1009 }
1010 }
1011 }
1012 return true;
1013 }
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024 public static int getTotalTableFragmentation(final HMaster master)
1025 throws IOException {
1026 Map<String, Integer> map = getTableFragmentation(master);
1027 return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
1028 }
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040 public static Map<String, Integer> getTableFragmentation(
1041 final HMaster master)
1042 throws IOException {
1043 Path path = getRootDir(master.getConfiguration());
1044
1045 FileSystem fs = path.getFileSystem(master.getConfiguration());
1046 return getTableFragmentation(fs, path);
1047 }
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059 public static Map<String, Integer> getTableFragmentation(
1060 final FileSystem fs, final Path hbaseRootDir)
1061 throws IOException {
1062 Map<String, Integer> frags = new HashMap<String, Integer>();
1063 int cfCountTotal = 0;
1064 int cfFragTotal = 0;
1065 PathFilter regionFilter = new RegionDirFilter(fs);
1066 PathFilter familyFilter = new FamilyDirFilter(fs);
1067 List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
1068 for (Path d : tableDirs) {
1069 int cfCount = 0;
1070 int cfFrag = 0;
1071 FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
1072 for (FileStatus regionDir : regionDirs) {
1073 Path dd = regionDir.getPath();
1074
1075 FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
1076 for (FileStatus familyDir : familyDirs) {
1077 cfCount++;
1078 cfCountTotal++;
1079 Path family = familyDir.getPath();
1080
1081 FileStatus[] familyStatus = fs.listStatus(family);
1082 if (familyStatus.length > 1) {
1083 cfFrag++;
1084 cfFragTotal++;
1085 }
1086 }
1087 }
1088
1089 frags.put(FSUtils.getTableName(d).getNameAsString(),
1090 cfCount == 0? 0: Math.round((float) cfFrag / cfCount * 100));
1091 }
1092
1093 frags.put("-TOTAL-",
1094 cfCountTotal == 0? 0: Math.round((float) cfFragTotal / cfCountTotal * 100));
1095 return frags;
1096 }
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106 public static Path getTableDir(Path rootdir, final TableName tableName) {
1107 return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
1108 tableName.getQualifierAsString());
1109 }
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119 public static TableName getTableName(Path tablePath) {
1120 return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
1121 }
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131 public static Path getNamespaceDir(Path rootdir, final String namespace) {
1132 return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1133 new Path(namespace)));
1134 }
1135
1136
1137
1138
1139 static class FileFilter implements PathFilter {
1140 private final FileSystem fs;
1141
1142 public FileFilter(final FileSystem fs) {
1143 this.fs = fs;
1144 }
1145
1146 @Override
1147 public boolean accept(Path p) {
1148 try {
1149 return fs.isFile(p);
1150 } catch (IOException e) {
1151 LOG.debug("unable to verify if path=" + p + " is a regular file", e);
1152 return false;
1153 }
1154 }
1155 }
1156
1157
1158
1159
1160 public static class BlackListDirFilter implements PathFilter {
1161 private final FileSystem fs;
1162 private List<String> blacklist;
1163
1164
1165
1166
1167
1168
1169
1170 @SuppressWarnings("unchecked")
1171 public BlackListDirFilter(final FileSystem fs, final List<String> directoryNameBlackList) {
1172 this.fs = fs;
1173 blacklist =
1174 (List<String>) (directoryNameBlackList == null ? Collections.emptyList()
1175 : directoryNameBlackList);
1176 }
1177
1178 @Override
1179 public boolean accept(Path p) {
1180 boolean isValid = false;
1181 try {
1182 if (isValidName(p.getName())) {
1183 isValid = fs.getFileStatus(p).isDirectory();
1184 } else {
1185 isValid = false;
1186 }
1187 } catch (IOException e) {
1188 LOG.warn("An error occurred while verifying if [" + p.toString()
1189 + "] is a valid directory. Returning 'not valid' and continuing.", e);
1190 }
1191 return isValid;
1192 }
1193
1194 protected boolean isValidName(final String name) {
1195 return !blacklist.contains(name);
1196 }
1197 }
1198
1199
1200
1201
  /**
   * A {@link PathFilter} that accepts all directories (an empty-blacklist
   * {@link BlackListDirFilter}).
   */
  public static class DirFilter extends BlackListDirFilter {

    public DirFilter(FileSystem fs) {
      super(fs, null);
    }
  }
1208
1209
1210
1211
1212
1213 public static class UserTableDirFilter extends BlackListDirFilter {
1214 public UserTableDirFilter(FileSystem fs) {
1215 super(fs, HConstants.HBASE_NON_TABLE_DIRS);
1216 }
1217
1218 protected boolean isValidName(final String name) {
1219 if (!super.isValidName(name))
1220 return false;
1221
1222 try {
1223 TableName.isLegalTableQualifierName(Bytes.toBytes(name));
1224 } catch (IllegalArgumentException e) {
1225 LOG.info("INVALID NAME " + name);
1226 return false;
1227 }
1228 return true;
1229 }
1230 }
1231
1232
1233
1234
1235
1236
1237
1238
  /**
   * Heuristically determine whether the underlying filesystem supports
   * append/sync: either {@code dfs.support.append} is enabled and
   * SequenceFile.Writer exposes syncFs() (older Hadoop), or
   * FSDataOutputStream exposes hflush() (newer Hadoop).
   *
   * @param conf configuration to consult
   * @return true when an append/sync mechanism is available
   */
  public static boolean isAppendSupported(final Configuration conf) {
    boolean append = conf.getBoolean("dfs.support.append", false);
    if (append) {
      try {
        // Probe for the older sync mechanism; presence confirms support.
        SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
        append = true;
      } catch (SecurityException e) {
        // NOTE(review): deliberately(?) swallowed — append stays true when the
        // probe itself is not permitted. Confirm this is the intended default.
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    if (!append) {
      // Newer Hadoop: hflush() replaces syncFs().
      try {
        FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
        append = true;
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    return append;
  }
1264
1265
1266
1267
1268
1269
1270 public static boolean isHDFS(final Configuration conf) throws IOException {
1271 FileSystem fs = FileSystem.get(conf);
1272 String scheme = fs.getUri().getScheme();
1273 return scheme.equalsIgnoreCase("hdfs");
1274 }
1275
1276
1277
1278
1279
1280
1281
1282
1283
  /**
   * Recover the lease on the given file so it can be safely opened for
   * reading/appending; implementation is filesystem-specific (see subclasses
   * such as FSHDFSUtils).
   *
   * @param fs filesystem holding the file
   * @param p file whose lease should be recovered
   * @param conf configuration for the recovery
   * @param reporter progress callback; may be used to cancel the wait
   * @throws IOException when recovery fails
   */
  public abstract void recoverFileLease(final FileSystem fs, final Path p,
      Configuration conf, CancelableProgressable reporter) throws IOException;
1286
1287 public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
1288 throws IOException {
1289 List<Path> tableDirs = new LinkedList<Path>();
1290
1291 for(FileStatus status :
1292 fs.globStatus(new Path(rootdir,
1293 new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
1294 tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
1295 }
1296 return tableDirs;
1297 }
1298
1299
1300
1301
1302
1303
1304
1305
1306 public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir)
1307 throws IOException {
1308
1309 FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs));
1310 List<Path> tabledirs = new ArrayList<Path>(dirs.length);
1311 for (FileStatus dir: dirs) {
1312 tabledirs.add(dir.getPath());
1313 }
1314 return tabledirs;
1315 }
1316
1317
1318
1319
1320
1321
1322 public static boolean isRecoveredEdits(Path path) {
1323 return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
1324 }
1325
1326
1327
1328
1329 public static class RegionDirFilter implements PathFilter {
1330
1331 final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
1332 final FileSystem fs;
1333
1334 public RegionDirFilter(FileSystem fs) {
1335 this.fs = fs;
1336 }
1337
1338 @Override
1339 public boolean accept(Path rd) {
1340 if (!regionDirPattern.matcher(rd.getName()).matches()) {
1341 return false;
1342 }
1343
1344 try {
1345 return fs.getFileStatus(rd).isDirectory();
1346 } catch (IOException ioe) {
1347
1348 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1349 return false;
1350 }
1351 }
1352 }
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362 public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
1363
1364 FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
1365 List<Path> regionDirs = new ArrayList<Path>(rds.length);
1366 for (FileStatus rdfs: rds) {
1367 Path rdPath = rdfs.getPath();
1368 regionDirs.add(rdPath);
1369 }
1370 return regionDirs;
1371 }
1372
1373
1374
1375
1376
1377 public static class FamilyDirFilter implements PathFilter {
1378 final FileSystem fs;
1379
1380 public FamilyDirFilter(FileSystem fs) {
1381 this.fs = fs;
1382 }
1383
1384 @Override
1385 public boolean accept(Path rd) {
1386 try {
1387
1388 HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
1389 } catch (IllegalArgumentException iae) {
1390
1391 return false;
1392 }
1393
1394 try {
1395 return fs.getFileStatus(rd).isDirectory();
1396 } catch (IOException ioe) {
1397
1398 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1399 return false;
1400 }
1401 }
1402 }
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412 public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
1413
1414 FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
1415 List<Path> familyDirs = new ArrayList<Path>(fds.length);
1416 for (FileStatus fdfs: fds) {
1417 Path fdPath = fdfs.getPath();
1418 familyDirs.add(fdPath);
1419 }
1420 return familyDirs;
1421 }
1422
1423 public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException {
1424 FileStatus[] fds = fs.listStatus(familyDir, new ReferenceFileFilter(fs));
1425 List<Path> referenceFiles = new ArrayList<Path>(fds.length);
1426 for (FileStatus fdfs: fds) {
1427 Path fdPath = fdfs.getPath();
1428 referenceFiles.add(fdPath);
1429 }
1430 return referenceFiles;
1431 }
1432
1433
1434
1435
1436 public static class HFileFilter implements PathFilter {
1437 final FileSystem fs;
1438
1439 public HFileFilter(FileSystem fs) {
1440 this.fs = fs;
1441 }
1442
1443 @Override
1444 public boolean accept(Path rd) {
1445 try {
1446
1447 return !fs.getFileStatus(rd).isDirectory() && StoreFileInfo.isHFile(rd);
1448 } catch (IOException ioe) {
1449
1450 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1451 return false;
1452 }
1453 }
1454 }
1455
1456 public static class ReferenceFileFilter implements PathFilter {
1457
1458 private final FileSystem fs;
1459
1460 public ReferenceFileFilter(FileSystem fs) {
1461 this.fs = fs;
1462 }
1463
1464 @Override
1465 public boolean accept(Path rd) {
1466 try {
1467
1468 return !fs.getFileStatus(rd).isDirectory() && StoreFileInfo.isReference(rd);
1469 } catch (IOException ioe) {
1470
1471 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1472 return false;
1473 }
1474 }
1475 }
1476
1477
1478
1479
1480
1481
1482
1483 public static FileSystem getCurrentFileSystem(Configuration conf)
1484 throws IOException {
1485 return getRootDir(conf).getFileSystem(conf);
1486 }
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504 public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
1505 final FileSystem fs, final Path hbaseRootDir, TableName tableName)
1506 throws IOException {
1507 if (map == null) {
1508 map = new HashMap<String, Path>();
1509 }
1510
1511
1512 Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
1513
1514
1515 PathFilter familyFilter = new FamilyDirFilter(fs);
1516 FileStatus[] regionDirs = fs.listStatus(tableDir, new RegionDirFilter(fs));
1517 for (FileStatus regionDir : regionDirs) {
1518 Path dd = regionDir.getPath();
1519
1520 FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
1521 for (FileStatus familyDir : familyDirs) {
1522 Path family = familyDir.getPath();
1523 if (family.getName().equals(HConstants.RECOVERED_EDITS_DIR)) {
1524 continue;
1525 }
1526
1527
1528 FileStatus[] familyStatus = fs.listStatus(family);
1529 for (FileStatus sfStatus : familyStatus) {
1530 Path sf = sfStatus.getPath();
1531 map.put( sf.getName(), sf);
1532 }
1533 }
1534 }
1535 return map;
1536 }
1537
1538 public static int getRegionReferenceFileCount(final FileSystem fs, final Path p) {
1539 int result = 0;
1540 try {
1541 for (Path familyDir:getFamilyDirs(fs, p)){
1542 result += getReferenceFilePaths(fs, familyDir).size();
1543 }
1544 } catch (IOException e) {
1545 LOG.warn("Error Counting reference files.", e);
1546 }
1547 return result;
1548 }
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564 public static Map<String, Path> getTableStoreFilePathMap(
1565 final FileSystem fs, final Path hbaseRootDir)
1566 throws IOException {
1567 Map<String, Path> map = new HashMap<String, Path>();
1568
1569
1570
1571
1572
1573 for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
1574 getTableStoreFilePathMap(map, fs, hbaseRootDir,
1575 FSUtils.getTableName(tableDir));
1576 }
1577 return map;
1578 }
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591 public static FileStatus [] listStatus(final FileSystem fs,
1592 final Path dir, final PathFilter filter) throws IOException {
1593 FileStatus [] status = null;
1594 try {
1595 status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
1596 } catch (FileNotFoundException fnfe) {
1597
1598 if (LOG.isTraceEnabled()) {
1599 LOG.trace(dir + " doesn't exist");
1600 }
1601 }
1602 if (status == null || status.length < 1) return null;
1603 return status;
1604 }
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614 public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
1615 return listStatus(fs, dir, null);
1616 }
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627 public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
1628 throws IOException {
1629 return fs.delete(path, recursive);
1630 }
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640 public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
1641 return fs.exists(path);
1642 }
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654 public static void checkAccess(UserGroupInformation ugi, FileStatus file,
1655 FsAction action) throws AccessDeniedException {
1656 if (ugi.getShortUserName().equals(file.getOwner())) {
1657 if (file.getPermission().getUserAction().implies(action)) {
1658 return;
1659 }
1660 } else if (contains(ugi.getGroupNames(), file.getGroup())) {
1661 if (file.getPermission().getGroupAction().implies(action)) {
1662 return;
1663 }
1664 } else if (file.getPermission().getOtherAction().implies(action)) {
1665 return;
1666 }
1667 throw new AccessDeniedException("Permission denied:" + " action=" + action
1668 + " path=" + file.getPath() + " user=" + ugi.getShortUserName());
1669 }
1670
1671 private static boolean contains(String[] groups, String user) {
1672 for (String group : groups) {
1673 if (group.equals(user)) {
1674 return true;
1675 }
1676 }
1677 return false;
1678 }
1679
1680
1681
1682
1683
1684
1685
1686
1687 public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
1688 throws IOException {
1689 LOG.debug("Current file system:");
1690 logFSTree(LOG, fs, root, "|-");
1691 }
1692
1693
1694
1695
1696
1697
1698 private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
1699 throws IOException {
1700 FileStatus[] files = FSUtils.listStatus(fs, root, null);
1701 if (files == null) return;
1702
1703 for (FileStatus file : files) {
1704 if (file.isDirectory()) {
1705 LOG.debug(prefix + file.getPath().getName() + "/");
1706 logFSTree(LOG, fs, file.getPath(), prefix + "---");
1707 } else {
1708 LOG.debug(prefix + file.getPath().getName());
1709 }
1710 }
1711 }
1712
/**
 * Renames {@code src} to {@code dest}, first stamping the source with the
 * current time so the destination carries a fresh modification time.
 * NOTE(review): the time is set BEFORE the rename; if the rename then fails,
 * the source's mtime has still been updated — presumably acceptable to
 * callers, but confirm before reordering.
 *
 * @param fs file system to operate on
 * @param src path to rename
 * @param dest destination path
 * @return true if the rename succeeded
 * @throws IOException on file system failure
 */
public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
throws IOException {
// Per FileSystem#setTimes, a value of -1 means "do not change" — here the
// access time is left untouched while the modification time is refreshed.
fs.setTimes(src, EnvironmentEdgeManager.currentTime(), -1);
return fs.rename(src, dest);
}
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733 public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
1734 final Configuration conf) throws IOException {
1735 return getRegionDegreeLocalityMappingFromFS(
1736 conf, null,
1737 conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE));
1738
1739 }
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757 public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
1758 final Configuration conf, final String desiredTable, int threadPoolSize)
1759 throws IOException {
1760 Map<String, Map<String, Float>> regionDegreeLocalityMapping =
1761 new ConcurrentHashMap<String, Map<String, Float>>();
1762 getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null,
1763 regionDegreeLocalityMapping);
1764 return regionDegreeLocalityMapping;
1765 }
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787 private static void getRegionLocalityMappingFromFS(
1788 final Configuration conf, final String desiredTable,
1789 int threadPoolSize,
1790 Map<String, String> regionToBestLocalityRSMapping,
1791 Map<String, Map<String, Float>> regionDegreeLocalityMapping)
1792 throws IOException {
1793 FileSystem fs = FileSystem.get(conf);
1794 Path rootPath = FSUtils.getRootDir(conf);
1795 long startTime = EnvironmentEdgeManager.currentTime();
1796 Path queryPath;
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
/**
 * Sanity-checks short-circuit read configuration. Warns — and deliberately
 * trips an assertion when assertions are enabled (e.g. in tests) — if
 * checksum skipping is turned on, then validates the read buffer size.
 *
 * @param conf configuration to inspect
 */
public static void setupShortCircuitRead(final Configuration conf) {
// Whether the operator told DFS to skip checksums on short-circuit reads.
boolean shortCircuitSkipChecksum =
conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
if (shortCircuitSkipChecksum) {
LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
"be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
"it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
// Intentional: always fires inside this branch so -ea runs fail fast on
// an invalid configuration instead of only logging.
assert !shortCircuitSkipChecksum;
}
checkShortCircuitReadBufferSize(conf);
}
1910
1911
1912
1913
1914
1915 public static void checkShortCircuitReadBufferSize(final Configuration conf) {
1916 final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
1917 final int notSet = -1;
1918
1919 final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
1920 int size = conf.getInt(dfsKey, notSet);
1921
1922 if (size != notSet) return;
1923
1924 int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
1925 conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
1926 }
1927 }