1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.util;
20
21 import java.io.ByteArrayInputStream;
22 import java.io.DataInputStream;
23 import java.io.EOFException;
24 import java.io.FileNotFoundException;
25 import java.io.IOException;
26 import java.io.InputStream;
27 import java.io.InterruptedIOException;
28 import java.lang.reflect.InvocationTargetException;
29 import java.lang.reflect.Method;
30 import java.net.InetSocketAddress;
31 import java.net.URI;
32 import java.net.URISyntaxException;
33 import java.util.ArrayList;
34 import java.util.Collections;
35 import java.util.HashMap;
36 import java.util.LinkedList;
37 import java.util.List;
38 import java.util.Map;
39 import java.util.concurrent.ArrayBlockingQueue;
40 import java.util.concurrent.ConcurrentHashMap;
41 import java.util.concurrent.ThreadPoolExecutor;
42 import java.util.concurrent.TimeUnit;
43 import java.util.regex.Pattern;
44
45 import org.apache.commons.logging.Log;
46 import org.apache.commons.logging.LogFactory;
47 import org.apache.hadoop.hbase.classification.InterfaceAudience;
48 import org.apache.hadoop.conf.Configuration;
49 import org.apache.hadoop.fs.BlockLocation;
50 import org.apache.hadoop.fs.FSDataInputStream;
51 import org.apache.hadoop.fs.FSDataOutputStream;
52 import org.apache.hadoop.fs.FileStatus;
53 import org.apache.hadoop.fs.FileSystem;
54 import org.apache.hadoop.fs.Path;
55 import org.apache.hadoop.fs.PathFilter;
56 import org.apache.hadoop.fs.permission.FsAction;
57 import org.apache.hadoop.fs.permission.FsPermission;
58 import org.apache.hadoop.hbase.ClusterId;
59 import org.apache.hadoop.hbase.HColumnDescriptor;
60 import org.apache.hadoop.hbase.HConstants;
61 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
62 import org.apache.hadoop.hbase.HRegionInfo;
63 import org.apache.hadoop.hbase.RemoteExceptionHandler;
64 import org.apache.hadoop.hbase.TableName;
65 import org.apache.hadoop.hbase.exceptions.DeserializationException;
66 import org.apache.hadoop.hbase.fs.HFileSystem;
67 import org.apache.hadoop.hbase.master.HMaster;
68 import org.apache.hadoop.hbase.master.RegionPlacementMaintainer;
69 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
70 import org.apache.hadoop.hbase.security.AccessDeniedException;
71 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
72 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
73 import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
74 import org.apache.hadoop.hbase.regionserver.HRegion;
75 import org.apache.hadoop.hdfs.DistributedFileSystem;
76 import org.apache.hadoop.hdfs.protocol.FSConstants;
77 import org.apache.hadoop.io.IOUtils;
78 import org.apache.hadoop.io.SequenceFile;
79 import org.apache.hadoop.security.UserGroupInformation;
80 import org.apache.hadoop.util.Progressable;
81 import org.apache.hadoop.util.ReflectionUtils;
82 import org.apache.hadoop.util.StringUtils;
83
84 import com.google.common.primitives.Ints;
85 import com.google.protobuf.InvalidProtocolBufferException;
86
87
88
89
90 @InterfaceAudience.Private
91 public abstract class FSUtils {
92 private static final Log LOG = LogFactory.getLog(FSUtils.class);
93
94
95 public static final String FULL_RWX_PERMISSIONS = "777";
96 private static final String THREAD_POOLSIZE = "hbase.client.localityCheck.threadPoolSize";
97 private static final int DEFAULT_THREAD_POOLSIZE = 2;
98
99
100 public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");
101
/**
 * Protected constructor; obtain concrete instances via
 * {@link #getInstance(FileSystem, Configuration)}.
 */
protected FSUtils() {
super();
}
105
106
107
108
109
110
111
112
113 public static boolean isStartingWithPath(final Path rootPath, final String path) {
114 String uriRootPath = rootPath.toUri().getPath();
115 String tailUriPath = (new Path(path)).toUri().getPath();
116 return tailUriPath.startsWith(uriRootPath);
117 }
118
119
120
121
122
123
124
125
126
127 public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
128 return isMatchingTail(pathToSearch, new Path(pathTail));
129 }
130
131
132
133
134
135
136
137
138
/**
 * Returns true if {@code pathToSearch} ends with {@code pathTail}, comparing
 * path components name-by-name from the leaf upward. Requires the two paths
 * to have equal depth; a root-relative tail therefore only matches a path of
 * the same length.
 *
 * @param pathToSearch full path to examine
 * @param pathTail trailing fragment to match against
 * @return true when every component of the tail equals the corresponding
 *   trailing component of {@code pathToSearch}
 */
public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
// Different depths can never match component-for-component.
if (pathToSearch.depth() != pathTail.depth()) return false;
Path tailPath = pathTail;
String tailName;
Path toSearch = pathToSearch;
String toSearchName;
boolean result = false;
do {
tailName = tailPath.getName();
// Ran out of tail components without a mismatch: the tail matches.
if (tailName == null || tailName.length() <= 0) {
result = true;
break;
}
toSearchName = toSearch.getName();
// Searched path exhausted before the tail: no match.
if (toSearchName == null || toSearchName.length() <= 0) break;
// Walk both paths one component up toward the root.
tailPath = tailPath.getParent();
toSearch = toSearch.getParent();
} while(tailName.equals(toSearchName));
return result;
}
160
161 public static FSUtils getInstance(FileSystem fs, Configuration conf) {
162 String scheme = fs.getUri().getScheme();
163 if (scheme == null) {
164 LOG.warn("Could not find scheme for uri " +
165 fs.getUri() + ", default to hdfs");
166 scheme = "hdfs";
167 }
168 Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
169 scheme + ".impl", FSHDFSUtils.class);
170 FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
171 return fsUtils;
172 }
173
174
175
176
177
178
179
180
181 public static boolean deleteDirectory(final FileSystem fs, final Path dir)
182 throws IOException {
183 return fs.exists(dir) && fs.delete(dir, true);
184 }
185
186
187
188
189
190
191
192
193 public static boolean deleteRegionDir(final Configuration conf, final HRegionInfo hri)
194 throws IOException {
195 Path rootDir = getRootDir(conf);
196 FileSystem fs = rootDir.getFileSystem(conf);
197 return deleteDirectory(fs,
198 new Path(getTableDir(rootDir, hri.getTable()), hri.getEncodedName()));
199 }
200
201
202
203
204
205
206
207
208
209
210
211
212 public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
213 Method m = null;
214 Class<? extends FileSystem> cls = fs.getClass();
215 try {
216 m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
217 } catch (NoSuchMethodException e) {
218 LOG.info("FileSystem doesn't support getDefaultBlockSize");
219 } catch (SecurityException e) {
220 LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
221 m = null;
222 }
223 if (m == null) {
224 return fs.getDefaultBlockSize();
225 } else {
226 try {
227 Object ret = m.invoke(fs, path);
228 return ((Long)ret).longValue();
229 } catch (Exception e) {
230 throw new IOException(e);
231 }
232 }
233 }
234
235
236
237
238
239
240
241
242
243
244
245
/**
 * Return the default replication for the given path. Newer Hadoop exposes a
 * per-path {@code getDefaultReplication(Path)}; look it up reflectively and
 * fall back to the legacy no-argument variant when absent.
 *
 * @param fs filesystem object
 * @param path path the replication may depend on (per-path defaults)
 * @return the default replication factor for the path's filesystem
 * @throws IOException if the reflective invocation fails
 */
public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
Method m = null;
Class<? extends FileSystem> cls = fs.getClass();
try {
m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
} catch (NoSuchMethodException e) {
LOG.info("FileSystem doesn't support getDefaultReplication");
} catch (SecurityException e) {
LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
m = null;
}
if (m == null) {
// Legacy Hadoop: no per-path overload available.
return fs.getDefaultReplication();
} else {
try {
Object ret = m.invoke(fs, path);
// Number cast tolerates whichever boxed numeric type reflection returns.
return ((Number)ret).shortValue();
} catch (Exception e) {
throw new IOException(e);
}
}
}
268
269
270
271
272
273
274
275
276
277
278
279 public static int getDefaultBufferSize(final FileSystem fs) {
280 return fs.getConf().getInt("io.file.buffer.size", 4096);
281 }
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
/**
 * Create a file on the filesystem, requesting placement on the given favored
 * nodes when the backing filesystem is HDFS and the client supports it.
 * Uses reflection to reach the DistributedFileSystem#create overload that
 * takes favored nodes; on any reflection-related failure (except an actual
 * invocation failure) it falls back to the plain overwrite-create path.
 *
 * @param fs filesystem (favored nodes only honored for HFileSystem-over-DFS)
 * @param path file to create
 * @param perm permissions for the new file
 * @param favoredNodes preferred datanodes for block placement; may be ignored
 * @return an output stream to the newly created file
 * @throws IOException on create failure, or wrapping the reflective call's cause
 */
public static FSDataOutputStream create(FileSystem fs, Path path,
FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException {
if (fs instanceof HFileSystem) {
FileSystem backingFs = ((HFileSystem)fs).getBackingFs();
if (backingFs instanceof DistributedFileSystem) {
// Try the favored-nodes create overload reflectively; it is not present
// on all Hadoop versions.
try {
return (FSDataOutputStream) (DistributedFileSystem.class
.getDeclaredMethod("create", Path.class, FsPermission.class,
boolean.class, int.class, short.class, long.class,
Progressable.class, InetSocketAddress[].class)
.invoke(backingFs, path, perm, true,
getDefaultBufferSize(backingFs),
getDefaultReplication(backingFs, path),
getDefaultBlockSize(backingFs, path),
null, favoredNodes));
} catch (InvocationTargetException ite) {
// The method exists and ran but failed: propagate the real cause.
throw new IOException(ite.getCause());
} catch (NoSuchMethodException e) {
LOG.debug("DFS Client does not support most favored nodes create; using default create");
if (LOG.isTraceEnabled()) LOG.trace("Ignoring; use default create", e);
} catch (IllegalArgumentException e) {
LOG.debug("Ignoring (most likely Reflection related exception) " + e);
} catch (SecurityException e) {
LOG.debug("Ignoring (most likely Reflection related exception) " + e);
} catch (IllegalAccessException e) {
LOG.debug("Ignoring (most likely Reflection related exception) " + e);
}
}
}
// Fallback: standard create with overwrite, no favored nodes.
return create(fs, path, perm, true);
}
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353 public static FSDataOutputStream create(FileSystem fs, Path path,
354 FsPermission perm, boolean overwrite) throws IOException {
355 if (LOG.isTraceEnabled()) {
356 LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
357 }
358 return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
359 getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
360 }
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375 public static FsPermission getFilePermissions(final FileSystem fs,
376 final Configuration conf, final String permssionConfKey) {
377 boolean enablePermissions = conf.getBoolean(
378 HConstants.ENABLE_DATA_FILE_UMASK, false);
379
380 if (enablePermissions) {
381 try {
382 FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
383
384 String mask = conf.get(permssionConfKey);
385 if (mask == null)
386 return getFileDefault();
387
388 FsPermission umask = new FsPermission(mask);
389 return perm.applyUMask(umask);
390 } catch (IllegalArgumentException e) {
391 LOG.warn(
392 "Incorrect umask attempted to be created: "
393 + conf.get(permssionConfKey)
394 + ", using default file permissions.", e);
395 return getFileDefault();
396 }
397 }
398 return getFileDefault();
399 }
400
401
402
403
404
405
406
407
408
409
410 public static FsPermission getFileDefault() {
411 return new FsPermission((short)00666);
412 }
413
414
415
416
417
418
419
/**
 * Verify the filesystem is reachable. Non-DFS filesystems are assumed
 * available. For HDFS, probes the root path; on failure the filesystem is
 * closed (best effort) and an IOException is thrown with the probe failure
 * as its cause (cause may be null if the probe returned false without
 * throwing).
 *
 * @param fs filesystem to check
 * @throws IOException when the filesystem is not available
 */
public static void checkFileSystemAvailable(final FileSystem fs)
throws IOException {
if (!(fs instanceof DistributedFileSystem)) {
return;
}
IOException exception = null;
DistributedFileSystem dfs = (DistributedFileSystem) fs;
try {
// Probe the namenode: existence of "/" proves we can talk to it.
if (dfs.exists(new Path("/"))) {
return;
}
} catch (IOException e) {
exception = RemoteExceptionHandler.checkIOException(e);
}
try {
// Best-effort close so we don't leak the broken connection.
fs.close();
} catch (Exception e) {
LOG.error("file system close failed: ", e);
}
IOException io = new IOException("File system is not available");
io.initCause(exception);
throw io;
}
443
444
445
446
447
448
449
450
451
/**
 * Check whether the DFS is in safe mode. Prefers the two-argument
 * {@code setSafeMode(SAFEMODE_GET, true)} overload (queried reflectively;
 * not present on all Hadoop versions) and falls back to the one-argument
 * variant when reflection fails for a non-IO reason.
 *
 * @param dfs the distributed filesystem to query
 * @return true if the namenode reports safe mode
 * @throws IOException if the underlying query fails
 */
private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
boolean inSafeMode = false;
try {
Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
inSafeMode = (Boolean) m.invoke(dfs,
org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
} catch (Exception e) {
// Real IO failures propagate; reflection failures fall back below.
if (e instanceof IOException) throw (IOException) e;
// Older Hadoop: only the single-argument overload exists.
inSafeMode = dfs.setSafeMode(
org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
}
return inSafeMode;
}
468
469
470
471
472
473
474 public static void checkDfsSafeMode(final Configuration conf)
475 throws IOException {
476 boolean isInSafeMode = false;
477 FileSystem fs = FileSystem.get(conf);
478 if (fs instanceof DistributedFileSystem) {
479 DistributedFileSystem dfs = (DistributedFileSystem)fs;
480 isInSafeMode = isInSafeMode(dfs);
481 }
482 if (isInSafeMode) {
483 throw new IOException("File system is in safemode, it can't be written now");
484 }
485 }
486
487
488
489
490
491
492
493
494
495
/**
 * Read the HBase filesystem-layout version from the version file under
 * {@code rootdir}. Understands both the protobuf format (PB magic prefix)
 * and the legacy writeUTF format. Returns null when the file is missing,
 * and logs (returning null) when it is empty.
 *
 * @param fs filesystem
 * @param rootdir HBase root directory containing the version file
 * @return the version string, or null if no version file exists
 * @throws IOException on read failure
 * @throws DeserializationException if the PB content cannot be parsed
 */
public static String getVersion(FileSystem fs, Path rootdir)
throws IOException, DeserializationException {
Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
FileStatus[] status = null;
try {
// listStatus on the file itself: gives existence + length in one call.
status = fs.listStatus(versionFile);
} catch (FileNotFoundException fnfe) {
return null;
}
if (status == null || status.length == 0) return null;
String version = null;
byte [] content = new byte [(int)status[0].getLen()];
FSDataInputStream s = fs.open(versionFile);
try {
IOUtils.readFully(s, content, 0, content.length);
if (ProtobufUtil.isPBMagicPrefix(content)) {
version = parseVersionFrom(content);
} else {
// Legacy format: a single writeUTF'd string.
InputStream is = new ByteArrayInputStream(content);
DataInputStream dis = new DataInputStream(is);
try {
version = dis.readUTF();
} finally {
dis.close();
}
}
} catch (EOFException eof) {
LOG.warn("Version file was empty, odd, will try to set it.");
} finally {
s.close();
}
return version;
}
532
533
534
535
536
537
538
/**
 * Parse a version string out of PB-magic-prefixed version-file bytes.
 *
 * @param bytes version file content, expected to start with the PB magic
 * @return the parsed version string
 * @throws DeserializationException if the prefix is wrong or the protobuf
 *   cannot be parsed
 */
static String parseVersionFrom(final byte [] bytes)
throws DeserializationException {
ProtobufUtil.expectPBMagicPrefix(bytes);
int pblen = ProtobufUtil.lengthOfPBMagic();
FSProtos.HBaseVersionFileContent.Builder builder =
FSProtos.HBaseVersionFileContent.newBuilder();
FSProtos.HBaseVersionFileContent fileContent;
try {
// Skip the magic prefix, then parse the remainder as the PB message.
fileContent = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
return fileContent.getVersion();
} catch (InvalidProtocolBufferException e) {
// Surface parse failures as a deserialization problem for callers.
throw new DeserializationException(e);
}
}
554
555
556
557
558
559
560 static byte [] toVersionByteArray(final String version) {
561 FSProtos.HBaseVersionFileContent.Builder builder =
562 FSProtos.HBaseVersionFileContent.newBuilder();
563 return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray());
564 }
565
566
567
568
569
570
571
572
573
574
575
576 public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
577 throws IOException, DeserializationException {
578 checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
579 }
580
581
582
583
584
585
586
587
588
589
590
591
592
/**
 * Verify the filesystem-layout version matches
 * {@link HConstants#FILE_SYSTEM_VERSION}. When no version file exists and
 * meta has not yet been created, assumes a fresh install and writes the
 * current version. Otherwise any missing or mismatched version is a fatal
 * {@link FileSystemVersionException}.
 *
 * @param fs filesystem
 * @param rootdir HBase root directory
 * @param message whether to also print a warning to stdout on mismatch
 * @param wait milliseconds to sleep between version-file write attempts
 * @param retries number of write attempts when creating the version file
 * @throws IOException on read/write failure
 * @throws DeserializationException if the version file cannot be parsed
 */
public static void checkVersion(FileSystem fs, Path rootdir,
boolean message, int wait, int retries)
throws IOException, DeserializationException {
String version = getVersion(fs, rootdir);
if (version == null) {
if (!metaRegionExists(fs, rootdir)) {
// No version file and no meta region: treat as a new install and
// stamp the current version.
setVersion(fs, rootdir, wait, retries);
return;
}
} else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return;
// Version file missing on a non-empty install, or version mismatch:
// refuse to proceed.
String msg = "HBase file layout needs to be upgraded."
+ " You have version " + version
+ " and I want version " + HConstants.FILE_SYSTEM_VERSION
+ ". Consult http://hbase.apache.org/book.html for further information about upgrading HBase."
+ " Is your hbase.rootdir valid? If so, you may need to run "
+ "'hbase hbck -fixVersionFile'.";
if (message) {
System.out.println("WARNING! " + msg);
}
throw new FileSystemVersionException(msg);
}
619
620
621
622
623
624
625
626
627 public static void setVersion(FileSystem fs, Path rootdir)
628 throws IOException {
629 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
630 HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
631 }
632
633
634
635
636
637
638
639
640
641
642 public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
643 throws IOException {
644 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
645 }
646
647
648
649
650
651
652
653
654
655
656
657
658 public static void setVersion(FileSystem fs, Path rootdir, String version,
659 int wait, int retries) throws IOException {
660 Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
661 Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR +
662 HConstants.VERSION_FILE_NAME);
663 while (true) {
664 try {
665
666 FSDataOutputStream s = fs.create(tempVersionFile);
667 try {
668 s.write(toVersionByteArray(version));
669 s.close();
670 s = null;
671
672
673 if (!fs.rename(tempVersionFile, versionFile)) {
674 throw new IOException("Unable to move temp version file to " + versionFile);
675 }
676 } finally {
677
678
679
680
681
682 try {
683 if (s != null) s.close();
684 } catch (IOException ignore) { }
685 }
686 LOG.info("Created version file at " + rootdir.toString() + " with version=" + version);
687 return;
688 } catch (IOException e) {
689 if (retries > 0) {
690 LOG.debug("Unable to create version file at " + rootdir.toString() + ", retrying", e);
691 fs.delete(versionFile, false);
692 try {
693 if (wait > 0) {
694 Thread.sleep(wait);
695 }
696 } catch (InterruptedException ex) {
697
698 }
699 retries--;
700 } else {
701 throw e;
702 }
703 }
704 }
705 }
706
707
708
709
710
711
712
713
714
/**
 * Check whether the cluster ID file exists under {@code rootdir}, retrying
 * indefinitely with {@code wait} ms pauses while the check itself fails
 * with an IOException. With wait &lt;= 0 the first failure propagates.
 *
 * @param fs filesystem
 * @param rootdir HBase root directory
 * @param wait milliseconds to sleep between retries of a failing check
 * @return true if the cluster ID file exists
 * @throws IOException if the check fails and wait is non-positive
 * @throws InterruptedIOException if interrupted while sleeping
 */
public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
int wait) throws IOException {
while (true) {
try {
Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
return fs.exists(filePath);
} catch (IOException ioe) {
if (wait > 0) {
LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
try {
Thread.sleep(wait);
} catch (InterruptedException ie) {
// Convert to an IOException subtype so callers' IOException handling
// still sees the interruption.
throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
}
} else {
throw ioe;
}
}
}
}
736
737
738
739
740
741
742
743
/**
 * Read the cluster ID from the hbase.id file under {@code rootdir}.
 * Handles both the protobuf format and the legacy writeUTF format; a legacy
 * file is re-read as UTF and rewritten in protobuf form on the way out.
 * Returns null (with a warning) when the file does not exist.
 *
 * @param fs filesystem
 * @param rootdir HBase root directory
 * @return the cluster ID, or null if the file is missing
 * @throws IOException on read failure or unparseable content
 */
public static ClusterId getClusterId(FileSystem fs, Path rootdir)
throws IOException {
Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
ClusterId clusterId = null;
FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null;
if (status != null) {
int len = Ints.checkedCast(status.getLen());
byte [] content = new byte[len];
FSDataInputStream in = fs.open(idPath);
try {
in.readFully(content);
} catch (EOFException eof) {
LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
} finally{
in.close();
}
try {
// NOTE(review): parseFrom is applied to the raw bytes before the legacy
// check below — presumably it tolerates non-PB content; verify against
// ClusterId.parseFrom.
clusterId = ClusterId.parseFrom(content);
} catch (DeserializationException e) {
throw new IOException("content=" + Bytes.toString(content), e);
}
// Legacy (pre-protobuf) file: re-read as writeUTF and rewrite as PB.
if (!ProtobufUtil.isPBMagicPrefix(content)) {
String cid = null;
in = fs.open(idPath);
try {
cid = in.readUTF();
clusterId = new ClusterId(cid);
} catch (EOFException eof) {
LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
} finally {
in.close();
}
rewriteAsPb(fs, rootdir, idPath, clusterId);
}
return clusterId;
} else {
LOG.warn("Cluster ID file does not exist at " + idPath.toString());
}
return clusterId;
}
785
786
787
788
789
/**
 * Rewrite a legacy-format cluster ID file in protobuf form: move the old
 * file aside (timestamped name), write the new file via
 * {@link #setClusterId(FileSystem, Path, ClusterId, int)}, then delete the
 * moved-aside original.
 *
 * @param fs filesystem
 * @param rootdir HBase root directory
 * @param p path of the existing (legacy) cluster ID file
 * @param cid cluster ID to write back in PB format
 * @throws IOException if the rename or delete fails
 */
private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
final ClusterId cid)
throws IOException {
// Move the legacy file aside first so the rewrite can't clobber it on failure.
Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
setClusterId(fs, rootdir, cid, 100);
if (!fs.delete(movedAsideName, false)) {
throw new IOException("Failed delete of " + movedAsideName);
}
LOG.debug("Rewrote the hbase.id file as pb");
}
803
804
805
806
807
808
809
810
811
812
/**
 * Write the cluster ID file under {@code rootdir}, retrying indefinitely
 * with {@code wait} ms pauses on IOException (first failure propagates when
 * wait &lt;= 0). Written to a temp file and renamed into place. On
 * interruption during the pause the method restores the interrupt flag and
 * returns without writing.
 *
 * @param fs filesystem
 * @param rootdir HBase root directory
 * @param clusterId cluster ID to record
 * @param wait milliseconds to sleep between retries
 * @throws IOException if the write fails and wait is non-positive
 */
public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
int wait) throws IOException {
while (true) {
try {
Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
Path tempIdFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY +
Path.SEPARATOR + HConstants.CLUSTER_ID_FILE_NAME);
// Write to a temp file, then rename into place atomically.
FSDataOutputStream s = fs.create(tempIdFile);
try {
s.write(clusterId.toByteArray());
s.close();
s = null;
if (!fs.rename(tempIdFile, idFile)) {
throw new IOException("Unable to move temp version file to " + idFile);
}
} finally {
// Close quietly so a close failure doesn't mask the primary outcome.
try {
if (s != null) s.close();
} catch (IOException ignore) { }
}
if (LOG.isDebugEnabled()) {
LOG.debug("Created cluster ID file at " + idFile.toString() + " with ID: " + clusterId);
}
return;
} catch (IOException ioe) {
if (wait > 0) {
LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
try {
Thread.sleep(wait);
} catch (InterruptedException ie) {
// Restore the interrupt flag and give up rather than loop forever.
Thread.currentThread().interrupt();
break;
}
} else {
throw ioe;
}
}
}
}
857
858
859
860
861
862
863
864
865 public static Path validateRootPath(Path root) throws IOException {
866 try {
867 URI rootURI = new URI(root.toString());
868 String scheme = rootURI.getScheme();
869 if (scheme == null) {
870 throw new IOException("Root directory does not have a scheme");
871 }
872 return root;
873 } catch (URISyntaxException e) {
874 IOException io = new IOException("Root directory path is not a valid " +
875 "URI -- check your " + HConstants.HBASE_DIR + " configuration");
876 io.initCause(e);
877 throw io;
878 }
879 }
880
881
882
883
884
885
886
887
888
889 public static String removeRootPath(Path path, final Configuration conf) throws IOException {
890 Path root = FSUtils.getRootDir(conf);
891 String pathStr = path.toString();
892
893 if (!pathStr.startsWith(root.toString())) return pathStr;
894
895 return pathStr.substring(root.toString().length() + 1);
896 }
897
898
899
900
901
902
903
904 public static void waitOnSafeMode(final Configuration conf,
905 final long wait)
906 throws IOException {
907 FileSystem fs = FileSystem.get(conf);
908 if (!(fs instanceof DistributedFileSystem)) return;
909 DistributedFileSystem dfs = (DistributedFileSystem)fs;
910
911 while (isInSafeMode(dfs)) {
912 LOG.info("Waiting for dfs to exit safe mode...");
913 try {
914 Thread.sleep(wait);
915 } catch (InterruptedException e) {
916
917 }
918 }
919 }
920
921
922
923
924
925
926
927
928
929
930
931 public static String getPath(Path p) {
932 return p.toUri().getPath();
933 }
934
935
936
937
938
939
940
941 public static Path getRootDir(final Configuration c) throws IOException {
942 Path p = new Path(c.get(HConstants.HBASE_DIR));
943 FileSystem fs = p.getFileSystem(c);
944 return p.makeQualified(fs);
945 }
946
947 public static void setRootDir(final Configuration c, final Path root) throws IOException {
948 c.set(HConstants.HBASE_DIR, root.toString());
949 }
950
951 public static void setFsDefault(final Configuration c, final Path root) throws IOException {
952 c.set("fs.defaultFS", root.toString());
953 c.set("fs.default.name", root.toString());
954 }
955
956
957
958
959
960
961
962
963
/**
 * Check whether the hbase:meta region directory exists under the root dir,
 * i.e. whether this root has ever hosted a cluster.
 *
 * @param fs filesystem
 * @param rootdir HBase root directory
 * @return true if the meta region directory exists
 * @throws IOException on filesystem error
 */
@SuppressWarnings("deprecation")
public static boolean metaRegionExists(FileSystem fs, Path rootdir)
throws IOException {
Path metaRegionDir =
HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
return fs.exists(metaRegionDir);
}
971
972
973
974
975
976
977
978
979
980 static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
981 final FileSystem fs, FileStatus status, long start, long length)
982 throws IOException {
983 HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
984 BlockLocation [] blockLocations =
985 fs.getFileBlockLocations(status, start, length);
986 for(BlockLocation bl : blockLocations) {
987 String [] hosts = bl.getHosts();
988 long len = bl.getLength();
989 blocksDistribution.addHostsAndBlockWeight(hosts, len);
990 }
991
992 return blocksDistribution;
993 }
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
/**
 * Returns true when every column-family directory of every region of every
 * table under the root dir contains at most one entry — used as a proxy for
 * "everything is major-compacted" (a single store file per family).
 *
 * @param fs filesystem
 * @param hbaseRootDir HBase root directory
 * @return true if no family directory has more than one entry
 * @throws IOException on filesystem error
 */
public static boolean isMajorCompacted(final FileSystem fs,
final Path hbaseRootDir)
throws IOException {
List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
PathFilter regionFilter = new RegionDirFilter(fs);
PathFilter familyFilter = new FamilyDirFilter(fs);
for (Path d : tableDirs) {
FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
for (FileStatus regionDir : regionDirs) {
Path dd = regionDir.getPath();
FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
for (FileStatus familyDir : familyDirs) {
Path family = familyDir.getPath();
// More than one entry in a family dir means not fully compacted.
FileStatus[] familyStatus = fs.listStatus(family);
if (familyStatus.length > 1) {
LOG.debug(family.toString() + " has " + familyStatus.length +
" files.");
return false;
}
}
}
}
return true;
}
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042 public static int getTotalTableFragmentation(final HMaster master)
1043 throws IOException {
1044 Map<String, Integer> map = getTableFragmentation(master);
1045 return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
1046 }
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058 public static Map<String, Integer> getTableFragmentation(
1059 final HMaster master)
1060 throws IOException {
1061 Path path = getRootDir(master.getConfiguration());
1062
1063 FileSystem fs = path.getFileSystem(master.getConfiguration());
1064 return getTableFragmentation(fs, path);
1065 }
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
/**
 * Compute fragmentation per table: the percentage of column-family
 * directories holding more than one entry (i.e. not fully compacted). Also
 * records a cluster-wide figure under the key "-TOTAL-".
 *
 * @param fs filesystem
 * @param hbaseRootDir HBase root directory
 * @return map of table name (plus "-TOTAL-") to fragmentation percentage
 * @throws IOException on filesystem error
 */
public static Map<String, Integer> getTableFragmentation(
final FileSystem fs, final Path hbaseRootDir)
throws IOException {
Map<String, Integer> frags = new HashMap<String, Integer>();
int cfCountTotal = 0;
int cfFragTotal = 0;
PathFilter regionFilter = new RegionDirFilter(fs);
PathFilter familyFilter = new FamilyDirFilter(fs);
List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
for (Path d : tableDirs) {
int cfCount = 0;
int cfFrag = 0;
FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
for (FileStatus regionDir : regionDirs) {
Path dd = regionDir.getPath();
FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
for (FileStatus familyDir : familyDirs) {
cfCount++;
cfCountTotal++;
Path family = familyDir.getPath();
// A family dir with more than one entry counts as fragmented.
FileStatus[] familyStatus = fs.listStatus(family);
if (familyStatus.length > 1) {
cfFrag++;
cfFragTotal++;
}
}
}
// Note: a table with zero families yields 0f/0f == NaN, which
// Math.round maps to 0.
frags.put(FSUtils.getTableName(d).getNameAsString(),
Math.round((float) cfFrag / cfCount * 100));
}
frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
return frags;
}
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123 public static Path getTableDir(Path rootdir, final TableName tableName) {
1124 return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
1125 tableName.getQualifierAsString());
1126 }
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136 public static TableName getTableName(Path tablePath) {
1137 return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
1138 }
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148 public static Path getNamespaceDir(Path rootdir, final String namespace) {
1149 return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1150 new Path(namespace)));
1151 }
1152
1153
1154
1155
1156 static class FileFilter implements PathFilter {
1157 private final FileSystem fs;
1158
1159 public FileFilter(final FileSystem fs) {
1160 this.fs = fs;
1161 }
1162
1163 @Override
1164 public boolean accept(Path p) {
1165 try {
1166 return fs.isFile(p);
1167 } catch (IOException e) {
1168 LOG.debug("unable to verify if path=" + p + " is a regular file", e);
1169 return false;
1170 }
1171 }
1172 }
1173
1174
1175
1176
/**
 * A {@link PathFilter} that accepts directories whose names are not on a
 * blacklist. IOExceptions during the directory check are logged and the
 * path is rejected.
 */
public static class BlackListDirFilter implements PathFilter {
private final FileSystem fs;
private List<String> blacklist;

/**
 * @param fs filesystem used to test whether a path is a directory
 * @param directoryNameBlackList names to reject; null means reject nothing
 */
@SuppressWarnings("unchecked")
public BlackListDirFilter(final FileSystem fs, final List<String> directoryNameBlackList) {
this.fs = fs;
blacklist =
(List<String>) (directoryNameBlackList == null ? Collections.emptyList()
: directoryNameBlackList);
}

@Override
public boolean accept(Path p) {
boolean isValid = false;
try {
if (isValidName(p.getName())) {
// Name passes the blacklist: accept only if it is a directory.
isValid = fs.getFileStatus(p).isDir();
} else {
isValid = false;
}
} catch (IOException e) {
LOG.warn("An error occurred while verifying if [" + p.toString()
+ "] is a valid directory. Returning 'not valid' and continuing.", e);
}
return isValid;
}

// Subclasses may tighten the name check further (see UserTableDirFilter).
protected boolean isValidName(final String name) {
return !blacklist.contains(name);
}
}
1215
1216
1217
1218
/**
 * A {@link PathFilter} that accepts all directories (a
 * {@link BlackListDirFilter} with an empty blacklist).
 */
public static class DirFilter extends BlackListDirFilter {

public DirFilter(FileSystem fs) {
super(fs, null);
}
}
1225
1226
1227
1228
1229
1230 public static class UserTableDirFilter extends BlackListDirFilter {
1231 public UserTableDirFilter(FileSystem fs) {
1232 super(fs, HConstants.HBASE_NON_TABLE_DIRS);
1233 }
1234
1235 protected boolean isValidName(final String name) {
1236 if (!super.isValidName(name))
1237 return false;
1238
1239 try {
1240 TableName.isLegalTableQualifierName(Bytes.toBytes(name));
1241 } catch (IllegalArgumentException e) {
1242 LOG.info("INVALID NAME " + name);
1243 return false;
1244 }
1245 return true;
1246 }
1247 }
1248
1249
1250
1251
1252
1253
1254
1255
/**
 * Heuristically determine whether the underlying DFS supports durable sync:
 * either "dfs.support.append" is set and SequenceFile.Writer has syncFs()
 * (hadoop 0.20-append era), or FSDataOutputStream exposes hflush()
 * (hadoop 0.21+).
 *
 * @param conf configuration to consult
 * @return true when a sync/flush mechanism is available
 */
public static boolean isAppendSupported(final Configuration conf) {
boolean append = conf.getBoolean("dfs.support.append", false);
if (append) {
try {
// Confirm the branch actually has the syncFs method before trusting
// the config flag.
SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
append = true;
} catch (SecurityException e) {
} catch (NoSuchMethodException e) {
append = false;
}
}
if (!append) {
// Newer Hadoop: hflush supersedes syncFs.
try {
FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
append = true;
} catch (NoSuchMethodException e) {
append = false;
}
}
return append;
}
1281
1282
1283
1284
1285
1286
1287 public static boolean isHDFS(final Configuration conf) throws IOException {
1288 FileSystem fs = FileSystem.get(conf);
1289 String scheme = fs.getUri().getScheme();
1290 return scheme.equalsIgnoreCase("hdfs");
1291 }
1292
1293
1294
1295
1296
1297
1298
1299
1300
/**
 * Recover the lease on the given file so it can be safely opened for
 * writing/reading by this process. Implementation is filesystem-specific
 * (see the concrete FSUtils subclasses).
 *
 * @param fs filesystem holding the file
 * @param p file whose lease should be recovered
 * @param conf configuration
 * @param reporter progress callback; may be used to cancel the recovery
 * @throws IOException if recovery fails
 */
public abstract void recoverFileLease(final FileSystem fs, final Path p,
Configuration conf, CancelableProgressable reporter) throws IOException;
1303
/**
 * List all table directories under the root dir, across every namespace
 * (globs rootdir/&lt;base-ns-dir&gt;/* and collects the table dirs inside
 * each namespace directory).
 *
 * @param fs filesystem
 * @param rootdir HBase root directory
 * @return paths of all table directories
 * @throws IOException on filesystem error
 */
public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
throws IOException {
List<Path> tableDirs = new LinkedList<Path>();
// Each glob hit is a namespace directory; gather the table dirs within.
for(FileStatus status :
fs.globStatus(new Path(rootdir,
new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
}
return tableDirs;
}
1315
1316
1317
1318
1319
1320
1321
1322
1323 public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir)
1324 throws IOException {
1325
1326 FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs));
1327 List<Path> tabledirs = new ArrayList<Path>(dirs.length);
1328 for (FileStatus dir: dirs) {
1329 tabledirs.add(dir.getPath());
1330 }
1331 return tabledirs;
1332 }
1333
1334
1335
1336
1337
1338
1339 public static boolean isRecoveredEdits(Path path) {
1340 return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
1341 }
1342
1343
1344
1345
/**
 * A {@link PathFilter} that accepts region directories: the name must be a
 * lowercase-hex encoded region name, and the path must be a directory.
 * IOExceptions during the directory check are logged and the path rejected.
 */
public static class RegionDirFilter implements PathFilter {
// Region directory names are hex-encoded (lowercase) region name hashes.
final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
final FileSystem fs;

public RegionDirFilter(FileSystem fs) {
this.fs = fs;
}

@Override
public boolean accept(Path rd) {
if (!regionDirPattern.matcher(rd.getName()).matches()) {
return false;
}
try {
return fs.getFileStatus(rd).isDir();
} catch (IOException ioe) {
// On error, err on the side of excluding the path.
LOG.warn("Skipping file " + rd +" due to IOException", ioe);
return false;
}
}
}
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379 public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
1380
1381 FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
1382 List<Path> regionDirs = new ArrayList<Path>(rds.length);
1383 for (FileStatus rdfs: rds) {
1384 Path rdPath = rdfs.getPath();
1385 regionDirs.add(rdPath);
1386 }
1387 return regionDirs;
1388 }
1389
1390
1391
1392
1393
1394 public static class FamilyDirFilter implements PathFilter {
1395 final FileSystem fs;
1396
1397 public FamilyDirFilter(FileSystem fs) {
1398 this.fs = fs;
1399 }
1400
1401 @Override
1402 public boolean accept(Path rd) {
1403 try {
1404
1405 HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
1406 } catch (IllegalArgumentException iae) {
1407
1408 return false;
1409 }
1410
1411 try {
1412 return fs.getFileStatus(rd).isDir();
1413 } catch (IOException ioe) {
1414
1415 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1416 return false;
1417 }
1418 }
1419 }
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429 public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
1430
1431 FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
1432 List<Path> familyDirs = new ArrayList<Path>(fds.length);
1433 for (FileStatus fdfs: fds) {
1434 Path fdPath = fdfs.getPath();
1435 familyDirs.add(fdPath);
1436 }
1437 return familyDirs;
1438 }
1439
1440 public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException {
1441 FileStatus[] fds = fs.listStatus(familyDir, new ReferenceFileFilter(fs));
1442 List<Path> referenceFiles = new ArrayList<Path>(fds.length);
1443 for (FileStatus fdfs: fds) {
1444 Path fdPath = fdfs.getPath();
1445 referenceFiles.add(fdPath);
1446 }
1447 return referenceFiles;
1448 }
1449
1450
1451
1452
1453 public static class HFileFilter implements PathFilter {
1454 final FileSystem fs;
1455
1456 public HFileFilter(FileSystem fs) {
1457 this.fs = fs;
1458 }
1459
1460 @Override
1461 public boolean accept(Path rd) {
1462 try {
1463
1464 return !fs.getFileStatus(rd).isDir() && StoreFileInfo.isHFile(rd);
1465 } catch (IOException ioe) {
1466
1467 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1468 return false;
1469 }
1470 }
1471 }
1472
1473 public static class ReferenceFileFilter implements PathFilter {
1474
1475 private final FileSystem fs;
1476
1477 public ReferenceFileFilter(FileSystem fs) {
1478 this.fs = fs;
1479 }
1480
1481 @Override
1482 public boolean accept(Path rd) {
1483 try {
1484
1485 return !fs.getFileStatus(rd).isDir() && StoreFileInfo.isReference(rd);
1486 } catch (IOException ioe) {
1487
1488 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1489 return false;
1490 }
1491 }
1492 }
1493
1494
1495
1496
1497
1498
1499
/**
 * Returns the filesystem hosting the HBase root directory.
 * @param conf configuration used to resolve the root dir and the filesystem
 * @return the FileSystem of the HBase root directory
 * @throws IOException if the filesystem cannot be obtained
 */
public static FileSystem getCurrentFileSystem(Configuration conf)
    throws IOException {
  return getRootDir(conf).getFileSystem(conf);
}
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
/**
 * Builds (or extends) a reverse-lookup map from store file name to its full Path
 * for one table, without progress reporting.
 * Delegates to the five-argument overload with a null ErrorReporter.
 * @param map map to add entries to; a new map is created by the delegate if null
 * @param fs filesystem to scan
 * @param hbaseRootDir HBase root directory
 * @param tableName table whose store files are mapped
 * @return the populated map
 * @throws IOException if scanning the filesystem fails
 */
public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
  final FileSystem fs, final Path hbaseRootDir, TableName tableName)
throws IOException {
  return getTableStoreFilePathMap(map, fs, hbaseRootDir, tableName, null);
}
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543 public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
1544 final FileSystem fs, final Path hbaseRootDir, TableName tableName, ErrorReporter errors)
1545 throws IOException {
1546 if (map == null) {
1547 map = new HashMap<String, Path>();
1548 }
1549
1550
1551 Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
1552
1553
1554 PathFilter familyFilter = new FamilyDirFilter(fs);
1555 FileStatus[] regionDirs = fs.listStatus(tableDir, new RegionDirFilter(fs));
1556 for (FileStatus regionDir : regionDirs) {
1557 if (null != errors) {
1558 errors.progress();
1559 }
1560 Path dd = regionDir.getPath();
1561
1562 FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
1563 for (FileStatus familyDir : familyDirs) {
1564 if (null != errors) {
1565 errors.progress();
1566 }
1567 Path family = familyDir.getPath();
1568
1569
1570 FileStatus[] familyStatus = fs.listStatus(family);
1571 for (FileStatus sfStatus : familyStatus) {
1572 if (null != errors) {
1573 errors.progress();
1574 }
1575 Path sf = sfStatus.getPath();
1576 map.put( sf.getName(), sf);
1577 }
1578 }
1579 }
1580 return map;
1581 }
1582
1583 public static int getRegionReferenceFileCount(final FileSystem fs, final Path p) {
1584 int result = 0;
1585 try {
1586 for (Path familyDir:getFamilyDirs(fs, p)){
1587 result += getReferenceFilePaths(fs, familyDir).size();
1588 }
1589 } catch (IOException e) {
1590 LOG.warn("Error Counting reference files.", e);
1591 }
1592 return result;
1593 }
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
/**
 * Builds a reverse-lookup map from store file name to its full Path for every
 * table under the root, without progress reporting.
 * Delegates to the three-argument overload with a null ErrorReporter.
 * @param fs filesystem to scan
 * @param hbaseRootDir HBase root directory
 * @return map from store file name to full path
 * @throws IOException if scanning the filesystem fails
 */
public static Map<String, Path> getTableStoreFilePathMap(
  final FileSystem fs, final Path hbaseRootDir)
throws IOException {
  return getTableStoreFilePathMap(fs, hbaseRootDir, null);
}
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628 public static Map<String, Path> getTableStoreFilePathMap(
1629 final FileSystem fs, final Path hbaseRootDir, ErrorReporter errors)
1630 throws IOException {
1631 Map<String, Path> map = new HashMap<String, Path>();
1632
1633
1634
1635
1636
1637 for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
1638 getTableStoreFilePathMap(map, fs, hbaseRootDir,
1639 FSUtils.getTableName(tableDir), errors);
1640 }
1641 return map;
1642 }
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655 public static FileStatus [] listStatus(final FileSystem fs,
1656 final Path dir, final PathFilter filter) throws IOException {
1657 FileStatus [] status = null;
1658 try {
1659 status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
1660 } catch (FileNotFoundException fnfe) {
1661
1662 if (LOG.isTraceEnabled()) {
1663 LOG.trace(dir + " doesn't exist");
1664 }
1665 }
1666 if (status == null || status.length < 1) return null;
1667 return status;
1668 }
1669
1670
1671
1672
1673
1674
1675
1676
1677
/**
 * Lists a directory without a filter; returns null when the directory is
 * missing or empty. Delegates to {@link #listStatus(FileSystem, Path, PathFilter)}.
 * @param fs filesystem to list
 * @param dir directory to list
 * @return the listing, or null when missing/empty
 * @throws IOException on listing errors other than FileNotFoundException
 */
public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
  return listStatus(fs, dir, null);
}
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
/**
 * Deletes a path, optionally recursively. Thin wrapper over
 * {@code FileSystem#delete} kept as a single call site for FS deletions.
 * @param fs filesystem holding the path
 * @param path path to delete
 * @param recursive whether to delete directory contents recursively
 * @return the result of {@code fs.delete}
 * @throws IOException if the deletion fails
 */
public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
    throws IOException {
  return fs.delete(path, recursive);
}
1695
1696
1697
1698
1699
1700
1701
1702
1703
/**
 * Checks whether a path exists. Thin wrapper over {@code FileSystem#exists}.
 * @param fs filesystem holding the path
 * @param path path to check
 * @return true if the path exists
 * @throws IOException if the check fails
 */
public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
  return fs.exists(path);
}
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718 public static void checkAccess(UserGroupInformation ugi, FileStatus file,
1719 FsAction action) throws AccessDeniedException {
1720 if (ugi.getShortUserName().equals(file.getOwner())) {
1721 if (file.getPermission().getUserAction().implies(action)) {
1722 return;
1723 }
1724 } else if (contains(ugi.getGroupNames(), file.getGroup())) {
1725 if (file.getPermission().getGroupAction().implies(action)) {
1726 return;
1727 }
1728 } else if (file.getPermission().getOtherAction().implies(action)) {
1729 return;
1730 }
1731 throw new AccessDeniedException("Permission denied:" + " action=" + action
1732 + " path=" + file.getPath() + " user=" + ugi.getShortUserName());
1733 }
1734
1735 private static boolean contains(String[] groups, String user) {
1736 for (String group : groups) {
1737 if (group.equals(user)) {
1738 return true;
1739 }
1740 }
1741 return false;
1742 }
1743
1744
1745
1746
1747
1748
1749
1750
/**
 * Logs the filesystem tree rooted at {@code root} at DEBUG level, one line per
 * entry, using an ASCII branch prefix.
 * @param fs filesystem to walk
 * @param root directory to start from
 * @param LOG logger to write the tree to
 * @throws IOException if listing any directory fails
 */
public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
    throws IOException {
  LOG.debug("Current file system:");
  logFSTree(LOG, fs, root, "|-");
}
1756
1757
1758
1759
1760
1761
1762 private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
1763 throws IOException {
1764 FileStatus[] files = FSUtils.listStatus(fs, root, null);
1765 if (files == null) return;
1766
1767 for (FileStatus file : files) {
1768 if (file.isDir()) {
1769 LOG.debug(prefix + file.getPath().getName() + "/");
1770 logFSTree(LOG, fs, file.getPath(), prefix + "---");
1771 } else {
1772 LOG.debug(prefix + file.getPath().getName());
1773 }
1774 }
1775 }
1776
/**
 * Stamps the source's modification time with the current time and then renames
 * it to the destination.
 * NOTE(review): the mtime is updated before the rename, so it is changed even
 * when the rename subsequently fails or returns false.
 * @param fs filesystem holding both paths
 * @param src path to rename; its mtime is set first (atime arg of -1 is
 *        presumably "leave unchanged" per FileSystem#setTimes — confirm)
 * @param dest target path
 * @return the result of {@code fs.rename}
 * @throws IOException if setting times or renaming fails
 */
public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
    throws IOException {
  fs.setTimes(src, EnvironmentEdgeManager.currentTimeMillis(), -1);
  return fs.rename(src, dest);
}
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
/**
 * Computes, for every region, the degree of data locality per host, scanning
 * all tables with the configured thread pool size.
 * Delegates to the three-argument overload with no table restriction.
 * @param conf configuration; also supplies the thread pool size via THREAD_POOLSIZE
 * @return map of region encoded name to (host -> locality degree)
 * @throws IOException if the filesystem scan fails
 */
public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
    final Configuration conf) throws IOException {
  return getRegionDegreeLocalityMappingFromFS(
      conf, null,
      conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE));

}
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821 public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
1822 final Configuration conf, final String desiredTable, int threadPoolSize)
1823 throws IOException {
1824 Map<String, Map<String, Float>> regionDegreeLocalityMapping =
1825 new ConcurrentHashMap<String, Map<String, Float>>();
1826 getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null,
1827 regionDegreeLocalityMapping);
1828 return regionDegreeLocalityMapping;
1829 }
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851 private static void getRegionLocalityMappingFromFS(
1852 final Configuration conf, final String desiredTable,
1853 int threadPoolSize,
1854 Map<String, String> regionToBestLocalityRSMapping,
1855 Map<String, Map<String, Float>> regionDegreeLocalityMapping)
1856 throws IOException {
1857 FileSystem fs = FileSystem.get(conf);
1858 Path rootPath = FSUtils.getRootDir(conf);
1859 long startTime = EnvironmentEdgeManager.currentTimeMillis();
1860 Path queryPath;
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961 public static void setupShortCircuitRead(final Configuration conf) {
1962
1963 boolean shortCircuitSkipChecksum =
1964 conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
1965 boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
1966 if (shortCircuitSkipChecksum) {
1967 LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
1968 "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
1969 "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
1970 assert !shortCircuitSkipChecksum;
1971 }
1972 checkShortCircuitReadBufferSize(conf);
1973 }
1974
1975
1976
1977
1978
1979 public static void checkShortCircuitReadBufferSize(final Configuration conf) {
1980 final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
1981 final int notSet = -1;
1982
1983 final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
1984 int size = conf.getInt(dfsKey, notSet);
1985
1986 if (size != notSet) return;
1987
1988 int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
1989 conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
1990 }
1991 }