1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.util;
20
21 import java.io.ByteArrayInputStream;
22 import java.io.DataInputStream;
23 import java.io.EOFException;
24 import java.io.FileNotFoundException;
25 import java.io.IOException;
26 import java.io.InputStream;
27 import java.io.InterruptedIOException;
28 import java.lang.reflect.InvocationTargetException;
29 import java.lang.reflect.Method;
30 import java.net.InetSocketAddress;
31 import java.net.URI;
32 import java.net.URISyntaxException;
33 import java.util.ArrayList;
34 import java.util.Collections;
35 import java.util.HashMap;
36 import java.util.LinkedList;
37 import java.util.List;
38 import java.util.Map;
39 import java.util.concurrent.ArrayBlockingQueue;
40 import java.util.concurrent.ConcurrentHashMap;
41 import java.util.concurrent.ThreadPoolExecutor;
42 import java.util.concurrent.TimeUnit;
43 import java.util.regex.Pattern;
44
45 import org.apache.commons.logging.Log;
46 import org.apache.commons.logging.LogFactory;
47 import org.apache.hadoop.hbase.classification.InterfaceAudience;
48 import org.apache.hadoop.conf.Configuration;
49 import org.apache.hadoop.fs.BlockLocation;
50 import org.apache.hadoop.fs.FSDataInputStream;
51 import org.apache.hadoop.fs.FSDataOutputStream;
52 import org.apache.hadoop.fs.FileStatus;
53 import org.apache.hadoop.fs.FileSystem;
54 import org.apache.hadoop.fs.Path;
55 import org.apache.hadoop.fs.PathFilter;
56 import org.apache.hadoop.fs.permission.FsAction;
57 import org.apache.hadoop.fs.permission.FsPermission;
58 import org.apache.hadoop.hbase.ClusterId;
59 import org.apache.hadoop.hbase.HColumnDescriptor;
60 import org.apache.hadoop.hbase.HConstants;
61 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
62 import org.apache.hadoop.hbase.HRegionInfo;
63 import org.apache.hadoop.hbase.RemoteExceptionHandler;
64 import org.apache.hadoop.hbase.TableName;
65 import org.apache.hadoop.hbase.exceptions.DeserializationException;
66 import org.apache.hadoop.hbase.fs.HFileSystem;
67 import org.apache.hadoop.hbase.master.HMaster;
68 import org.apache.hadoop.hbase.master.RegionPlacementMaintainer;
69 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
70 import org.apache.hadoop.hbase.security.AccessDeniedException;
71 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
72 import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
73 import org.apache.hadoop.hbase.regionserver.HRegion;
74 import org.apache.hadoop.hdfs.DistributedFileSystem;
75 import org.apache.hadoop.hdfs.protocol.FSConstants;
76 import org.apache.hadoop.io.IOUtils;
77 import org.apache.hadoop.io.SequenceFile;
78 import org.apache.hadoop.security.UserGroupInformation;
79 import org.apache.hadoop.util.Progressable;
80 import org.apache.hadoop.util.ReflectionUtils;
81 import org.apache.hadoop.util.StringUtils;
82
83 import com.google.common.primitives.Ints;
84 import com.google.protobuf.InvalidProtocolBufferException;
85
86
87
88
89 @InterfaceAudience.Private
90 public abstract class FSUtils {
private static final Log LOG = LogFactory.getLog(FSUtils.class);

/** Full access permissions (octal "777"); starting point before a umask is applied. */
public static final String FULL_RWX_PERMISSIONS = "777";
// Config key for the size of the thread pool used by the locality check.
private static final String THREAD_POOLSIZE = "hbase.client.localityCheck.threadPoolSize";
// Default pool size when the key above is unset.
private static final int DEFAULT_THREAD_POOLSIZE = 2;

/** True when running on a Windows platform (per the os.name system property). */
public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");
100
/** Not directly instantiable; obtain an implementation via {@link #getInstance(FileSystem, Configuration)}. */
protected FSUtils() {
  super();
}
104
105
106
107
108
109
110
111
112 public static boolean isStartingWithPath(final Path rootPath, final String path) {
113 String uriRootPath = rootPath.toUri().getPath();
114 String tailUriPath = (new Path(path)).toUri().getPath();
115 return tailUriPath.startsWith(uriRootPath);
116 }
117
118
119
120
121
122
123
124
125
/**
 * Convenience overload: wraps the tail string in a {@link Path} and delegates to
 * {@link #isMatchingTail(Path, Path)}.
 * @param pathToSearch path whose tail components are compared
 * @param pathTail the tail to match against
 * @return true if {@code pathTail} matches the tail of {@code pathToSearch}
 */
public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
  return isMatchingTail(pathToSearch, new Path(pathTail));
}
129
130
131
132
133
134
135
136
137
/**
 * Walks both paths from leaf to root, comparing name components, and succeeds when the
 * tail path is exhausted (empty name) before a mismatch occurs.
 * @param pathToSearch path whose tail components are compared
 * @param pathTail the tail to match against
 * @return true if every component of {@code pathTail} equals the corresponding trailing
 *   component of {@code pathToSearch}
 */
public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
  // Different depths can never match component-for-component.
  if (pathToSearch.depth() != pathTail.depth()) return false;
  Path tailPath = pathTail;
  String tailName;
  Path toSearch = pathToSearch;
  String toSearchName;
  boolean result = false;
  do {
    tailName = tailPath.getName();
    // Ran out of tail components without a mismatch: the tail matches.
    if (tailName == null || tailName.length() <= 0) {
      result = true;
      break;
    }
    toSearchName = toSearch.getName();
    // Searched path exhausted before the tail: no match.
    if (toSearchName == null || toSearchName.length() <= 0) break;
    // Move up one level on both paths.
    tailPath = tailPath.getParent();
    toSearch = toSearch.getParent();
  } while(tailName.equals(toSearchName));
  return result;
}
159
160 public static FSUtils getInstance(FileSystem fs, Configuration conf) {
161 String scheme = fs.getUri().getScheme();
162 if (scheme == null) {
163 LOG.warn("Could not find scheme for uri " +
164 fs.getUri() + ", default to hdfs");
165 scheme = "hdfs";
166 }
167 Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
168 scheme + ".impl", FSHDFSUtils.class);
169 FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
170 return fsUtils;
171 }
172
173
174
175
176
177
178
179
180 public static boolean deleteDirectory(final FileSystem fs, final Path dir)
181 throws IOException {
182 return fs.exists(dir) && fs.delete(dir, true);
183 }
184
185
186
187
188
189
190
191
192
193
194
195
/**
 * Returns the default block size for the given path, using the per-path
 * {@code getDefaultBlockSize(Path)} overload when the Hadoop version provides it
 * (looked up reflectively), else falling back to the path-less variant.
 * @param fs filesystem to query
 * @param path path whose default block size is wanted
 * @return the default block size in bytes
 * @throws IOException if the reflective invocation fails
 */
public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
  Method m = null;
  Class<? extends FileSystem> cls = fs.getClass();
  try {
    // Newer Hadoop exposes a per-path overload; probe for it reflectively.
    m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
  } catch (NoSuchMethodException e) {
    LOG.info("FileSystem doesn't support getDefaultBlockSize");
  } catch (SecurityException e) {
    LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
    m = null;
  }
  if (m == null) {
    // Fall back to the deprecated path-less variant.
    return fs.getDefaultBlockSize();
  } else {
    try {
      Object ret = m.invoke(fs, path);
      return ((Long)ret).longValue();
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
}
218
219
220
221
222
223
224
225
226
227
228
229
/**
 * Returns the default replication for the given path, preferring the per-path
 * {@code getDefaultReplication(Path)} overload when present (looked up reflectively),
 * else falling back to the path-less variant.
 * @param fs filesystem to query
 * @param path path whose default replication is wanted
 * @return default replication factor
 * @throws IOException if the reflective invocation fails
 */
public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
  Method m = null;
  Class<? extends FileSystem> cls = fs.getClass();
  try {
    // Probe for the per-path overload added in newer Hadoop versions.
    m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
  } catch (NoSuchMethodException e) {
    LOG.info("FileSystem doesn't support getDefaultReplication");
  } catch (SecurityException e) {
    LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
    m = null;
  }
  if (m == null) {
    return fs.getDefaultReplication();
  } else {
    try {
      Object ret = m.invoke(fs, path);
      // Return type differs across Hadoop versions; Number covers both.
      return ((Number)ret).shortValue();
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
}
252
253
254
255
256
257
258
259
260
261
262
263 public static int getDefaultBufferSize(final FileSystem fs) {
264 return fs.getConf().getInt("io.file.buffer.size", 4096);
265 }
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
/**
 * Creates a file, attempting to place its blocks on the given favored nodes when the
 * backing filesystem is HDFS and the client supports the favored-nodes create overload
 * (invoked reflectively). Falls back to a plain create otherwise.
 * @param fs filesystem (may wrap HDFS via HFileSystem)
 * @param path file to create
 * @param perm permissions for the new file
 * @param favoredNodes preferred datanodes for block placement
 * @return an output stream for the new file
 * @throws IOException if creation fails
 */
public static FSDataOutputStream create(FileSystem fs, Path path,
  FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException {
  if (fs instanceof HFileSystem) {
    FileSystem backingFs = ((HFileSystem)fs).getBackingFs();
    if (backingFs instanceof DistributedFileSystem) {
      // Try the DFS create overload that accepts favored nodes; it is not present in
      // all Hadoop versions, hence the reflective lookup.
      try {
        return (FSDataOutputStream) (DistributedFileSystem.class
          .getDeclaredMethod("create", Path.class, FsPermission.class,
            boolean.class, int.class, short.class, long.class,
            Progressable.class, InetSocketAddress[].class)
            .invoke(backingFs, path, perm, true,
              getDefaultBufferSize(backingFs),
              getDefaultReplication(backingFs, path),
              getDefaultBlockSize(backingFs, path),
              null, favoredNodes));
      } catch (InvocationTargetException ite) {
        // The underlying create actually failed; unwrap and rethrow its cause.
        throw new IOException(ite.getCause());
      } catch (NoSuchMethodException e) {
        LOG.debug("DFS Client does not support most favored nodes create; using default create");
        if (LOG.isTraceEnabled()) LOG.trace("Ignoring; use default create", e);
      } catch (IllegalArgumentException e) {
        LOG.debug("Ignoring (most likely Reflection related exception) " + e);
      } catch (SecurityException e) {
        LOG.debug("Ignoring (most likely Reflection related exception) " + e);
      } catch (IllegalAccessException e) {
        LOG.debug("Ignoring (most likely Reflection related exception) " + e);
      }
    }
  }
  // Fallback: standard create with overwrite enabled.
  return create(fs, path, perm, true);
}
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
/**
 * Creates a file with the given permissions, using this class's defaults for
 * buffer size, replication and block size.
 * @param fs filesystem to create on
 * @param path file to create
 * @param perm permissions for the new file
 * @param overwrite whether an existing file may be overwritten
 * @return an output stream for the new file
 * @throws IOException if creation fails
 */
public static FSDataOutputStream create(FileSystem fs, Path path,
  FsPermission perm, boolean overwrite) throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
  }
  return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
    getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
}
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359 public static FsPermission getFilePermissions(final FileSystem fs,
360 final Configuration conf, final String permssionConfKey) {
361 boolean enablePermissions = conf.getBoolean(
362 HConstants.ENABLE_DATA_FILE_UMASK, false);
363
364 if (enablePermissions) {
365 try {
366 FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
367
368 String mask = conf.get(permssionConfKey);
369 if (mask == null)
370 return getFileDefault();
371
372 FsPermission umask = new FsPermission(mask);
373 return perm.applyUMask(umask);
374 } catch (IllegalArgumentException e) {
375 LOG.warn(
376 "Incorrect umask attempted to be created: "
377 + conf.get(permssionConfKey)
378 + ", using default file permissions.", e);
379 return getFileDefault();
380 }
381 }
382 return getFileDefault();
383 }
384
385
386
387
388
389
390
391
392
393
394 public static FsPermission getFileDefault() {
395 return new FsPermission((short)00666);
396 }
397
398
399
400
401
402
403
/**
 * Verifies that an HDFS filesystem is reachable; non-HDFS filesystems are assumed
 * available. On failure the filesystem is closed and an IOException is thrown with
 * the underlying cause attached.
 * @param fs filesystem to probe
 * @throws IOException if the filesystem is not available
 */
public static void checkFileSystemAvailable(final FileSystem fs)
throws IOException {
  // Only DFS availability can be meaningfully probed here.
  if (!(fs instanceof DistributedFileSystem)) {
    return;
  }
  IOException exception = null;
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  try {
    // Existence check on root doubles as a liveness probe of the namenode.
    if (dfs.exists(new Path("/"))) {
      return;
    }
  } catch (IOException e) {
    exception = RemoteExceptionHandler.checkIOException(e);
  }
  // The FS is unusable; close it best-effort before reporting the failure.
  try {
    fs.close();
  } catch (Exception e) {
    LOG.error("file system close failed: ", e);
  }
  IOException io = new IOException("File system is not available");
  io.initCause(exception);
  throw io;
}
427
428
429
430
431
432
433
434
435
/**
 * Checks whether the DFS is in safe mode. Prefers the two-argument
 * {@code setSafeMode(action, checkOnly)} overload (looked up reflectively, not present
 * in all Hadoop versions); falls back to the one-argument call.
 * @param dfs DFS to query
 * @return true if the namenode is in safe mode
 * @throws IOException on communication failure
 */
private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
  boolean inSafeMode = false;
  try {
    Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
      org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
    inSafeMode = (Boolean) m.invoke(dfs,
      org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
  } catch (Exception e) {
    // Genuine IO failures propagate; reflection failures fall back below.
    if (e instanceof IOException) throw (IOException) e;
    // Older Hadoop: only the single-argument overload exists.
    inSafeMode = dfs.setSafeMode(
      org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
  }
  return inSafeMode;
}
452
453
454
455
456
457
458 public static void checkDfsSafeMode(final Configuration conf)
459 throws IOException {
460 boolean isInSafeMode = false;
461 FileSystem fs = FileSystem.get(conf);
462 if (fs instanceof DistributedFileSystem) {
463 DistributedFileSystem dfs = (DistributedFileSystem)fs;
464 isInSafeMode = isInSafeMode(dfs);
465 }
466 if (isInSafeMode) {
467 throw new IOException("File system is in safemode, it can't be written now");
468 }
469 }
470
471
472
473
474
475
476
477
478
479
/**
 * Reads the HBase filesystem-layout version from the version file under the root dir.
 * Understands both the protobuf-prefixed format and the legacy raw-UTF format.
 * @param fs filesystem to read from
 * @param rootdir HBase root directory
 * @return the version string, or null if the version file is missing or empty
 * @throws IOException on read failure
 * @throws DeserializationException if the pb content cannot be parsed
 */
public static String getVersion(FileSystem fs, Path rootdir)
throws IOException, DeserializationException {
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  FileStatus[] status = null;
  try {
    // listStatus (rather than getFileStatus) so a missing file surfaces uniformly.
    status = fs.listStatus(versionFile);
  } catch (FileNotFoundException fnfe) {
    return null;
  }
  if (status == null || status.length == 0) return null;
  String version = null;
  byte [] content = new byte [(int)status[0].getLen()];
  FSDataInputStream s = fs.open(versionFile);
  try {
    IOUtils.readFully(s, content, 0, content.length);
    if (ProtobufUtil.isPBMagicPrefix(content)) {
      // Current format: pb-magic-prefixed HBaseVersionFileContent.
      version = parseVersionFrom(content);
    } else {
      // Legacy format: a single writeUTF'd string.
      InputStream is = new ByteArrayInputStream(content);
      DataInputStream dis = new DataInputStream(is);
      try {
        version = dis.readUTF();
      } finally {
        dis.close();
      }
    }
  } catch (EOFException eof) {
    LOG.warn("Version file was empty, odd, will try to set it.");
  } finally {
    s.close();
  }
  return version;
}
516
517
518
519
520
521
522
/**
 * Parses the version string out of a pb-magic-prefixed serialized
 * {@code HBaseVersionFileContent}.
 * @param bytes raw version-file content (must carry the pb magic prefix)
 * @return the version string
 * @throws DeserializationException if the prefix is absent or the pb is malformed
 */
static String parseVersionFrom(final byte [] bytes)
throws DeserializationException {
  ProtobufUtil.expectPBMagicPrefix(bytes);
  int pblen = ProtobufUtil.lengthOfPBMagic();
  FSProtos.HBaseVersionFileContent.Builder builder =
    FSProtos.HBaseVersionFileContent.newBuilder();
  FSProtos.HBaseVersionFileContent fileContent;
  try {
    // Skip past the magic prefix before handing the bytes to protobuf.
    fileContent = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
    return fileContent.getVersion();
  } catch (InvalidProtocolBufferException e) {
    // Convert to a checked deserialization failure for the caller.
    throw new DeserializationException(e);
  }
}
538
539
540
541
542
543
544 static byte [] toVersionByteArray(final String version) {
545 FSProtos.HBaseVersionFileContent.Builder builder =
546 FSProtos.HBaseVersionFileContent.newBuilder();
547 return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray());
548 }
549
550
551
552
553
554
555
556
557
558
559
/**
 * Verifies the filesystem-layout version using default wait (0) and retry counts.
 * @param fs filesystem to check
 * @param rootdir HBase root directory
 * @param message if true, print a warning to stdout before throwing
 * @throws IOException on read/write failure
 * @throws DeserializationException if the version file cannot be parsed
 */
public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
throws IOException, DeserializationException {
  checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
}
564
565
566
567
568
569
570
571
572
573
574
575
576
/**
 * Verifies the filesystem-layout version. If no version file exists and there is no
 * meta region yet (fresh install), a current version file is created. A mismatched
 * version raises {@link FileSystemVersionException}.
 * @param fs filesystem to check
 * @param rootdir HBase root directory
 * @param message if true, print a warning to stdout before throwing
 * @param wait milliseconds to sleep between version-file write attempts
 * @param retries number of write attempts
 * @throws IOException on read/write failure
 * @throws DeserializationException if the version file cannot be parsed
 */
public static void checkVersion(FileSystem fs, Path rootdir,
  boolean message, int wait, int retries)
throws IOException, DeserializationException {
  String version = getVersion(fs, rootdir);
  if (version == null) {
    if (!metaRegionExists(fs, rootdir)) {
      // rootDir is empty (no version file, no meta): fresh install, stamp it.
      setVersion(fs, rootdir, wait, retries);
      return;
    }
  } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return;

  // Either a missing version file with existing data, or a version mismatch:
  // the layout needs migration and we refuse to proceed.
  String msg = "HBase file layout needs to be upgraded."
    + " You have version " + version
    + " and I want version " + HConstants.FILE_SYSTEM_VERSION
    + ". Consult http://hbase.apache.org/book.html for further information about upgrading HBase."
    + " Is your hbase.rootdir valid? If so, you may need to run "
    + "'hbase hbck -fixVersionFile'.";
  if (message) {
    System.out.println("WARNING! " + msg);
  }
  throw new FileSystemVersionException(msg);
}
603
604
605
606
607
608
609
610
/**
 * Writes the current layout version file with no wait and default retries.
 * @param fs filesystem to write to
 * @param rootdir HBase root directory
 * @throws IOException on write failure
 */
public static void setVersion(FileSystem fs, Path rootdir)
throws IOException {
  setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
    HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
}
616
617
618
619
620
621
622
623
624
625
/**
 * Writes the current layout version file with the given wait/retry policy.
 * @param fs filesystem to write to
 * @param rootdir HBase root directory
 * @param wait milliseconds to sleep between attempts
 * @param retries number of attempts
 * @throws IOException on write failure
 */
public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
throws IOException {
  setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
}
630
631
632
633
634
635
636
637
638
639
640
641
642 public static void setVersion(FileSystem fs, Path rootdir, String version,
643 int wait, int retries) throws IOException {
644 Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
645 Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR +
646 HConstants.VERSION_FILE_NAME);
647 while (true) {
648 try {
649
650 FSDataOutputStream s = fs.create(tempVersionFile);
651 try {
652 s.write(toVersionByteArray(version));
653 s.close();
654 s = null;
655
656
657 if (!fs.rename(tempVersionFile, versionFile)) {
658 throw new IOException("Unable to move temp version file to " + versionFile);
659 }
660 } finally {
661
662
663
664
665
666 try {
667 if (s != null) s.close();
668 } catch (IOException ignore) { }
669 }
670 LOG.info("Created version file at " + rootdir.toString() + " with version=" + version);
671 return;
672 } catch (IOException e) {
673 if (retries > 0) {
674 LOG.debug("Unable to create version file at " + rootdir.toString() + ", retrying", e);
675 fs.delete(versionFile, false);
676 try {
677 if (wait > 0) {
678 Thread.sleep(wait);
679 }
680 } catch (InterruptedException ex) {
681
682 }
683 retries--;
684 } else {
685 throw e;
686 }
687 }
688 }
689 }
690
691
692
693
694
695
696
697
698
/**
 * Checks whether the cluster ID file exists under the root dir, retrying on
 * IOException every {@code wait} ms. Note: with {@code wait > 0} this retries
 * indefinitely until the check succeeds or the thread is interrupted.
 * @param fs filesystem to check
 * @param rootdir HBase root directory
 * @param wait milliseconds between retries; 0 or less means fail immediately
 * @return true if the cluster ID file exists
 * @throws IOException if the check fails and wait is not positive
 */
public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
  int wait) throws IOException {
  while (true) {
    try {
      Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
      return fs.exists(filePath);
    } catch (IOException ioe) {
      if (wait > 0) {
        LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
          ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
        try {
          Thread.sleep(wait);
        } catch (InterruptedException ie) {
          // Propagate interruption as an IO-compatible exception.
          throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
        }
      } else {
        throw ioe;
      }
    }
  }
}
720
721
722
723
724
725
726
727
/**
 * Reads the cluster ID from the hbase.id file. Handles both the pb format and the
 * legacy writeUTF format; legacy files are transparently rewritten as pb.
 * @param fs filesystem to read from
 * @param rootdir HBase root directory
 * @return the cluster ID, or null if the file does not exist
 * @throws IOException on read failure or unparseable content
 */
public static ClusterId getClusterId(FileSystem fs, Path rootdir)
throws IOException {
  Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
  ClusterId clusterId = null;
  FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null;
  if (status != null) {
    int len = Ints.checkedCast(status.getLen());
    byte [] content = new byte[len];
    FSDataInputStream in = fs.open(idPath);
    try {
      in.readFully(content);
    } catch (EOFException eof) {
      LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
    } finally{
      in.close();
    }
    try {
      clusterId = ClusterId.parseFrom(content);
    } catch (DeserializationException e) {
      throw new IOException("content=" + Bytes.toString(content), e);
    }
    // Legacy (pre-pb) file: re-read via readUTF and rewrite the file as pb.
    if (!ProtobufUtil.isPBMagicPrefix(content)) {
      String cid = null;
      in = fs.open(idPath);
      try {
        cid = in.readUTF();
        clusterId = new ClusterId(cid);
      } catch (EOFException eof) {
        LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
      } finally {
        in.close();
      }
      rewriteAsPb(fs, rootdir, idPath, clusterId);
    }
    return clusterId;
  } else {
    LOG.warn("Cluster ID file does not exist at " + idPath.toString());
  }
  return clusterId;
}
769
770
771
772
773
/**
 * Rewrites a legacy cluster-ID file in pb format: moves the old file aside
 * (suffixing the current time), writes the pb version, then deletes the old file.
 * @param fs filesystem to operate on
 * @param rootdir HBase root directory
 * @param p path of the legacy hbase.id file
 * @param cid cluster ID to write back
 * @throws IOException if the rename, write, or delete fails
 */
private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
  final ClusterId cid)
throws IOException {
  // Move the legacy file aside first so a failed rewrite never loses the original.
  Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
  if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
  setClusterId(fs, rootdir, cid, 100);
  if (!fs.delete(movedAsideName, false)) {
    throw new IOException("Failed delete of " + movedAsideName);
  }
  LOG.debug("Rewrote the hbase.id file as pb");
}
787
788
789
790
791
792
793
794
795
796
/**
 * Writes the cluster ID file atomically via a temp-file-then-rename, retrying on
 * IOException every {@code wait} ms.
 * @param fs filesystem to write to
 * @param rootdir HBase root directory
 * @param clusterId ID to record
 * @param wait milliseconds between retries; 0 or less means fail immediately
 * @throws IOException if writing fails and wait is not positive
 */
public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
  int wait) throws IOException {
  while (true) {
    try {
      Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
      Path tempIdFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY +
        Path.SEPARATOR + HConstants.CLUSTER_ID_FILE_NAME);
      // Write to temp first; the rename below makes the publish atomic on HDFS.
      FSDataOutputStream s = fs.create(tempIdFile);
      try {
        s.write(clusterId.toByteArray());
        s.close();
        s = null;
        if (!fs.rename(tempIdFile, idFile)) {
          throw new IOException("Unable to move temp version file to " + idFile);
        }
      } finally {
        // Close quietly on the error path so the primary IOException propagates.
        try {
          if (s != null) s.close();
        } catch (IOException ignore) { }
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Created cluster ID file at " + idFile.toString() + " with ID: " + clusterId);
      }
      return;
    } catch (IOException ioe) {
      if (wait > 0) {
        LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
          ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
        try {
          Thread.sleep(wait);
        } catch (InterruptedException ie) {
          // NOTE(review): interrupt aborts the retry loop and returns without
          // writing or rethrowing the pending IOException — callers cannot tell
          // the write did not happen. Verify whether callers tolerate this.
          Thread.currentThread().interrupt();
          break;
        }
      } else {
        throw ioe;
      }
    }
  }
}
841
842
843
844
845
846
847
848
849 public static Path validateRootPath(Path root) throws IOException {
850 try {
851 URI rootURI = new URI(root.toString());
852 String scheme = rootURI.getScheme();
853 if (scheme == null) {
854 throw new IOException("Root directory does not have a scheme");
855 }
856 return root;
857 } catch (URISyntaxException e) {
858 IOException io = new IOException("Root directory path is not a valid " +
859 "URI -- check your " + HConstants.HBASE_DIR + " configuration");
860 io.initCause(e);
861 throw io;
862 }
863 }
864
865
866
867
868
869
870
871
872
873 public static String removeRootPath(Path path, final Configuration conf) throws IOException {
874 Path root = FSUtils.getRootDir(conf);
875 String pathStr = path.toString();
876
877 if (!pathStr.startsWith(root.toString())) return pathStr;
878
879 return pathStr.substring(root.toString().length() + 1);
880 }
881
882
883
884
885
886
887
888 public static void waitOnSafeMode(final Configuration conf,
889 final long wait)
890 throws IOException {
891 FileSystem fs = FileSystem.get(conf);
892 if (!(fs instanceof DistributedFileSystem)) return;
893 DistributedFileSystem dfs = (DistributedFileSystem)fs;
894
895 while (isInSafeMode(dfs)) {
896 LOG.info("Waiting for dfs to exit safe mode...");
897 try {
898 Thread.sleep(wait);
899 } catch (InterruptedException e) {
900
901 }
902 }
903 }
904
905
906
907
908
909
910
911
912
913
914
915 public static String getPath(Path p) {
916 return p.toUri().getPath();
917 }
918
919
920
921
922
923
924
/**
 * Returns the HBase root directory from configuration, fully qualified with the
 * filesystem's scheme and authority.
 * @param c configuration holding {@link HConstants#HBASE_DIR}
 * @return the qualified root directory path
 * @throws IOException if the filesystem cannot be resolved
 */
public static Path getRootDir(final Configuration c) throws IOException {
  Path p = new Path(c.get(HConstants.HBASE_DIR));
  FileSystem fs = p.getFileSystem(c);
  return p.makeQualified(fs);
}
930
/** Sets {@link HConstants#HBASE_DIR} in the configuration to the given root path. */
public static void setRootDir(final Configuration c, final Path root) throws IOException {
  c.set(HConstants.HBASE_DIR, root.toString());
}
934
/**
 * Points the default filesystem at the given path, setting both the current
 * ("fs.defaultFS") and legacy ("fs.default.name") config keys.
 */
public static void setFsDefault(final Configuration c, final Path root) throws IOException {
  c.set("fs.defaultFS", root.toString());
  c.set("fs.default.name", root.toString());
}
939
940
941
942
943
944
945
946
947
/**
 * Checks whether the hbase:meta region directory exists under the root dir —
 * used to distinguish a fresh install from an existing deployment.
 * @param fs filesystem to check
 * @param rootdir HBase root directory
 * @return true if the meta region directory exists
 * @throws IOException on filesystem error
 */
@SuppressWarnings("deprecation")
public static boolean metaRegionExists(FileSystem fs, Path rootdir)
throws IOException {
  Path metaRegionDir =
    HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
  return fs.exists(metaRegionDir);
}
955
956
957
958
959
960
961
962
963
964 static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
965 final FileSystem fs, FileStatus status, long start, long length)
966 throws IOException {
967 HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
968 BlockLocation [] blockLocations =
969 fs.getFileBlockLocations(status, start, length);
970 for(BlockLocation bl : blockLocations) {
971 String [] hosts = bl.getHosts();
972 long len = bl.getLength();
973 blocksDistribution.addHostsAndBlockWeight(hosts, len);
974 }
975
976 return blocksDistribution;
977 }
978
979
980
981
982
983
984
985
986
987
988
989
/**
 * Returns true when every column-family directory in every region of every table
 * contains at most one store file, i.e. everything is fully (major) compacted.
 * @param fs filesystem to scan
 * @param hbaseRootDir HBase root directory
 * @return true if no family directory holds more than one file
 * @throws IOException on filesystem error
 */
public static boolean isMajorCompacted(final FileSystem fs,
  final Path hbaseRootDir)
throws IOException {
  List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
  PathFilter regionFilter = new RegionDirFilter(fs);
  PathFilter familyFilter = new FamilyDirFilter(fs);
  for (Path d : tableDirs) {
    FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
    for (FileStatus regionDir : regionDirs) {
      Path dd = regionDir.getPath();
      // Descend into each column-family dir of this region.
      FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
      for (FileStatus familyDir : familyDirs) {
        Path family = familyDir.getPath();
        // More than one store file means this family is not fully compacted.
        FileStatus[] familyStatus = fs.listStatus(family);
        if (familyStatus.length > 1) {
          LOG.debug(family.toString() + " has " + familyStatus.length +
            " files.");
          return false;
        }
      }
    }
  }
  return true;
}
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026 public static int getTotalTableFragmentation(final HMaster master)
1027 throws IOException {
1028 Map<String, Integer> map = getTableFragmentation(master);
1029 return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
1030 }
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042 public static Map<String, Integer> getTableFragmentation(
1043 final HMaster master)
1044 throws IOException {
1045 Path path = getRootDir(master.getConfiguration());
1046
1047 FileSystem fs = path.getFileSystem(master.getConfiguration());
1048 return getTableFragmentation(fs, path);
1049 }
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
/**
 * Computes fragmentation per table: the percentage of column-family directories
 * holding more than one store file. Also records an overall "-TOTAL-" entry.
 * NOTE(review): a table with zero family dirs yields Math.round(0f/0) == 0 here
 * (NaN rounds to 0) — confirm callers treat 0 as "not fragmented".
 * @param fs filesystem to scan
 * @param hbaseRootDir HBase root directory
 * @return map of table name to fragmentation percentage, plus "-TOTAL-"
 * @throws IOException on filesystem error
 */
public static Map<String, Integer> getTableFragmentation(
  final FileSystem fs, final Path hbaseRootDir)
throws IOException {
  Map<String, Integer> frags = new HashMap<String, Integer>();
  int cfCountTotal = 0;
  int cfFragTotal = 0;
  PathFilter regionFilter = new RegionDirFilter(fs);
  PathFilter familyFilter = new FamilyDirFilter(fs);
  List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
  for (Path d : tableDirs) {
    int cfCount = 0;
    int cfFrag = 0;
    FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
    for (FileStatus regionDir : regionDirs) {
      Path dd = regionDir.getPath();
      // Walk each column-family dir and count how many hold >1 store file.
      FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
      for (FileStatus familyDir : familyDirs) {
        cfCount++;
        cfCountTotal++;
        Path family = familyDir.getPath();
        FileStatus[] familyStatus = fs.listStatus(family);
        if (familyStatus.length > 1) {
          cfFrag++;
          cfFragTotal++;
        }
      }
    }
    // Per-table percentage of fragmented families.
    frags.put(FSUtils.getTableName(d).getNameAsString(),
      Math.round((float) cfFrag / cfCount * 100));
  }
  // Cluster-wide percentage across all tables.
  frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
  return frags;
}
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107 public static Path getTableDir(Path rootdir, final TableName tableName) {
1108 return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
1109 tableName.getQualifierAsString());
1110 }
1111
1112
1113
1114
1115
1116
1117
1118
1119
/**
 * Derives the {@link TableName} from a table directory path: the parent directory
 * name is the namespace and the leaf name is the qualifier.
 * @param tablePath path of form .../data/&lt;namespace&gt;/&lt;qualifier&gt;
 * @return the corresponding table name
 */
public static TableName getTableName(Path tablePath) {
  return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
}
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132 public static Path getNamespaceDir(Path rootdir, final String namespace) {
1133 return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1134 new Path(namespace)));
1135 }
1136
1137
1138
1139
1140 static class FileFilter implements PathFilter {
1141 private final FileSystem fs;
1142
1143 public FileFilter(final FileSystem fs) {
1144 this.fs = fs;
1145 }
1146
1147 @Override
1148 public boolean accept(Path p) {
1149 try {
1150 return fs.isFile(p);
1151 } catch (IOException e) {
1152 LOG.debug("unable to verify if path=" + p + " is a regular file", e);
1153 return false;
1154 }
1155 }
1156 }
1157
1158
1159
1160
/**
 * A {@link PathFilter} accepting directories whose names are not on a blacklist.
 * Subclasses can refine the name check by overriding {@link #isValidName(String)}.
 */
public static class BlackListDirFilter implements PathFilter {
  private final FileSystem fs;
  // Directory names to reject; never null (empty when no blacklist supplied).
  private List<String> blacklist;

  /**
   * @param fs filesystem used to stat candidate paths
   * @param directoryNameBlackList names to reject; null means no blacklist
   */
  @SuppressWarnings("unchecked")
  public BlackListDirFilter(final FileSystem fs, final List<String> directoryNameBlackList) {
    this.fs = fs;
    blacklist =
      (List<String>) (directoryNameBlackList == null ? Collections.emptyList()
        : directoryNameBlackList);
  }

  @Override
  public boolean accept(Path p) {
    boolean isValid = false;
    try {
      if (isValidName(p.getName())) {
        // Name passed; also require the path to actually be a directory.
        isValid = fs.getFileStatus(p).isDir();
      } else {
        isValid = false;
      }
    } catch (IOException e) {
      // Can't stat — err on the side of rejection.
      LOG.warn("An error occurred while verifying if [" + p.toString()
        + "] is a valid directory. Returning 'not valid' and continuing.", e);
    }
    return isValid;
  }

  /** @return true when the name is not blacklisted. Subclasses may add checks. */
  protected boolean isValidName(final String name) {
    return !blacklist.contains(name);
  }
}
1199
1200
1201
1202
/** A {@link PathFilter} accepting all directories (no blacklist). */
public static class DirFilter extends BlackListDirFilter {

  public DirFilter(FileSystem fs) {
    super(fs, null);
  }
}
1209
1210
1211
1212
1213
1214 public static class UserTableDirFilter extends BlackListDirFilter {
1215 public UserTableDirFilter(FileSystem fs) {
1216 super(fs, HConstants.HBASE_NON_TABLE_DIRS);
1217 }
1218
1219 protected boolean isValidName(final String name) {
1220 if (!super.isValidName(name))
1221 return false;
1222
1223 try {
1224 TableName.isLegalTableQualifierName(Bytes.toBytes(name));
1225 } catch (IllegalArgumentException e) {
1226 LOG.info("INVALID NAME " + name);
1227 return false;
1228 }
1229 return true;
1230 }
1231 }
1232
1233
1234
1235
1236
1237
1238
1239
/**
 * Heuristically determines whether the underlying Hadoop supports append/sync:
 * either "dfs.support.append" is enabled and SequenceFile.Writer has syncFs()
 * (hadoop 0.20 branch), or FSDataOutputStream has hflush() (hadoop 0.21+).
 * @param conf configuration to consult
 * @return true if append/sync appears to be supported
 */
public static boolean isAppendSupported(final Configuration conf) {
  boolean append = conf.getBoolean("dfs.support.append", false);
  if (append) {
    try {
      // Confirm the sync API actually exists in this Hadoop build.
      SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
      append = true;
    } catch (SecurityException e) {
      // Deliberately ignored: lack of reflective access leaves 'append' as-is.
    } catch (NoSuchMethodException e) {
      append = false;
    }
  }
  if (!append) {
    // Newer Hadoop: hflush() replaces syncFs().
    try {
      FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
      append = true;
    } catch (NoSuchMethodException e) {
      append = false;
    }
  }
  return append;
}
1265
1266
1267
1268
1269
1270
1271 public static boolean isHDFS(final Configuration conf) throws IOException {
1272 FileSystem fs = FileSystem.get(conf);
1273 String scheme = fs.getUri().getScheme();
1274 return scheme.equalsIgnoreCase("hdfs");
1275 }
1276
1277
1278
1279
1280
1281
1282
1283
1284
/**
 * Recovers the lease on the given file so it can be safely read/rewritten.
 * Filesystem-specific; the HDFS implementation is selected via {@link #getInstance}.
 * @param fs filesystem holding the file
 * @param p file whose lease should be recovered
 * @param conf configuration
 * @param reporter progress callback — assumes null is tolerated; TODO confirm in implementations
 * @throws IOException if lease recovery fails
 */
public abstract void recoverFileLease(final FileSystem fs, final Path p,
  Configuration conf, CancelableProgressable reporter) throws IOException;
1287
/**
 * Lists all table directories under the root dir, across every namespace
 * (globs {@code rootdir/data/*} and collects table dirs within each namespace).
 * @param fs filesystem to scan
 * @param rootdir HBase root directory
 * @return paths of all table directories
 * @throws IOException on filesystem error
 */
public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
throws IOException {
  List<Path> tableDirs = new LinkedList<Path>();
  // One glob entry per namespace directory under data/.
  for(FileStatus status :
    fs.globStatus(new Path(rootdir,
      new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
    tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
  }
  return tableDirs;
}
1299
1300
1301
1302
1303
1304
1305
1306
1307 public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir)
1308 throws IOException {
1309
1310 FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs));
1311 List<Path> tabledirs = new ArrayList<Path>(dirs.length);
1312 for (FileStatus dir: dirs) {
1313 tabledirs.add(dir.getPath());
1314 }
1315 return tabledirs;
1316 }
1317
1318
1319
1320
1321
1322
1323 public static boolean isRecoveredEdits(Path path) {
1324 return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
1325 }
1326
1327
1328
1329
/**
 * A {@link PathFilter} accepting region directories: names matching the
 * hex-encoded-region-name pattern that are actual directories.
 */
public static class RegionDirFilter implements PathFilter {
  // Region dir names are lowercase-hex MD5 strings (possibly empty match excluded by use).
  final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
  final FileSystem fs;

  public RegionDirFilter(FileSystem fs) {
    this.fs = fs;
  }

  @Override
  public boolean accept(Path rd) {
    if (!regionDirPattern.matcher(rd.getName()).matches()) {
      return false;
    }
    try {
      // Name pattern alone is not enough: must also be a directory.
      return fs.getFileStatus(rd).isDir();
    } catch (IOException ioe) {
      // Can't stat — skip the entry rather than fail the whole scan.
      LOG.warn("Skipping file " + rd +" due to IOException", ioe);
      return false;
    }
  }
}
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363 public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
1364
1365 FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
1366 List<Path> regionDirs = new ArrayList<Path>(rds.length);
1367 for (FileStatus rdfs: rds) {
1368 Path rdPath = rdfs.getPath();
1369 regionDirs.add(rdPath);
1370 }
1371 return regionDirs;
1372 }
1373
1374
1375
1376
1377
1378 public static class FamilyDirFilter implements PathFilter {
1379 final FileSystem fs;
1380
1381 public FamilyDirFilter(FileSystem fs) {
1382 this.fs = fs;
1383 }
1384
1385 @Override
1386 public boolean accept(Path rd) {
1387 try {
1388
1389 HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
1390 } catch (IllegalArgumentException iae) {
1391
1392 return false;
1393 }
1394
1395 try {
1396 return fs.getFileStatus(rd).isDir();
1397 } catch (IOException ioe) {
1398
1399 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1400 return false;
1401 }
1402 }
1403 }
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413 public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
1414
1415 FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
1416 List<Path> familyDirs = new ArrayList<Path>(fds.length);
1417 for (FileStatus fdfs: fds) {
1418 Path fdPath = fdfs.getPath();
1419 familyDirs.add(fdPath);
1420 }
1421 return familyDirs;
1422 }
1423
1424 public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException {
1425 FileStatus[] fds = fs.listStatus(familyDir, new ReferenceFileFilter(fs));
1426 List<Path> referenceFiles = new ArrayList<Path>(fds.length);
1427 for (FileStatus fdfs: fds) {
1428 Path fdPath = fdfs.getPath();
1429 referenceFiles.add(fdPath);
1430 }
1431 return referenceFiles;
1432 }
1433
1434
1435
1436
1437 public static class HFileFilter implements PathFilter {
1438 final FileSystem fs;
1439
1440 public HFileFilter(FileSystem fs) {
1441 this.fs = fs;
1442 }
1443
1444 @Override
1445 public boolean accept(Path rd) {
1446 try {
1447
1448 return !fs.getFileStatus(rd).isDir() && StoreFileInfo.isHFile(rd);
1449 } catch (IOException ioe) {
1450
1451 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1452 return false;
1453 }
1454 }
1455 }
1456
1457 public static class ReferenceFileFilter implements PathFilter {
1458
1459 private final FileSystem fs;
1460
1461 public ReferenceFileFilter(FileSystem fs) {
1462 this.fs = fs;
1463 }
1464
1465 @Override
1466 public boolean accept(Path rd) {
1467 try {
1468
1469 return !fs.getFileStatus(rd).isDir() && StoreFileInfo.isReference(rd);
1470 } catch (IOException ioe) {
1471
1472 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1473 return false;
1474 }
1475 }
1476 }
1477
1478
1479
1480
1481
1482
1483
1484 public static FileSystem getCurrentFileSystem(Configuration conf)
1485 throws IOException {
1486 return getRootDir(conf).getFileSystem(conf);
1487 }
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505 public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
1506 final FileSystem fs, final Path hbaseRootDir, TableName tableName)
1507 throws IOException {
1508 if (map == null) {
1509 map = new HashMap<String, Path>();
1510 }
1511
1512
1513 Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
1514
1515
1516 PathFilter familyFilter = new FamilyDirFilter(fs);
1517 FileStatus[] regionDirs = fs.listStatus(tableDir, new RegionDirFilter(fs));
1518 for (FileStatus regionDir : regionDirs) {
1519 Path dd = regionDir.getPath();
1520
1521 FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
1522 for (FileStatus familyDir : familyDirs) {
1523 Path family = familyDir.getPath();
1524
1525
1526 FileStatus[] familyStatus = fs.listStatus(family);
1527 for (FileStatus sfStatus : familyStatus) {
1528 Path sf = sfStatus.getPath();
1529 map.put( sf.getName(), sf);
1530 }
1531 }
1532 }
1533 return map;
1534 }
1535
1536 public static int getRegionReferenceFileCount(final FileSystem fs, final Path p) {
1537 int result = 0;
1538 try {
1539 for (Path familyDir:getFamilyDirs(fs, p)){
1540 result += getReferenceFilePaths(fs, familyDir).size();
1541 }
1542 } catch (IOException e) {
1543 LOG.warn("Error Counting reference files.", e);
1544 }
1545 return result;
1546 }
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562 public static Map<String, Path> getTableStoreFilePathMap(
1563 final FileSystem fs, final Path hbaseRootDir)
1564 throws IOException {
1565 Map<String, Path> map = new HashMap<String, Path>();
1566
1567
1568
1569
1570
1571 for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
1572 getTableStoreFilePathMap(map, fs, hbaseRootDir,
1573 FSUtils.getTableName(tableDir));
1574 }
1575 return map;
1576 }
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589 public static FileStatus [] listStatus(final FileSystem fs,
1590 final Path dir, final PathFilter filter) throws IOException {
1591 FileStatus [] status = null;
1592 try {
1593 status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
1594 } catch (FileNotFoundException fnfe) {
1595
1596 if (LOG.isTraceEnabled()) {
1597 LOG.trace(dir + " doesn't exist");
1598 }
1599 }
1600 if (status == null || status.length < 1) return null;
1601 return status;
1602 }
1603
1604
1605
1606
1607
1608
1609
1610
1611
/**
 * Convenience overload of {@link #listStatus(FileSystem, Path, PathFilter)}
 * with no filter: returns the non-empty listing of {@code dir}, or null when
 * the directory is absent or empty.
 *
 * @param fs filesystem to list against
 * @param dir directory to list
 * @return the non-empty listing, or null
 * @throws IOException on filesystem errors other than a missing directory
 */
public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
  return listStatus(fs, dir, null);
}
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
/**
 * Deletes a path, thin wrapper over {@link FileSystem#delete(Path, boolean)}.
 *
 * @param fs filesystem to delete from
 * @param path path to delete
 * @param recursive true to delete directory contents recursively
 * @return true if the delete succeeded
 * @throws IOException on filesystem errors
 */
public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
throws IOException {
  return fs.delete(path, recursive);
}
1629
1630
1631
1632
1633
1634
1635
1636
1637
/**
 * Tells whether a path exists, thin wrapper over {@link FileSystem#exists(Path)}.
 *
 * @param fs filesystem to check
 * @param path path to test
 * @return true if the path exists
 * @throws IOException on filesystem errors
 */
public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
  return fs.exists(path);
}
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652 public static void checkAccess(UserGroupInformation ugi, FileStatus file,
1653 FsAction action) throws AccessDeniedException {
1654 if (ugi.getShortUserName().equals(file.getOwner())) {
1655 if (file.getPermission().getUserAction().implies(action)) {
1656 return;
1657 }
1658 } else if (contains(ugi.getGroupNames(), file.getGroup())) {
1659 if (file.getPermission().getGroupAction().implies(action)) {
1660 return;
1661 }
1662 } else if (file.getPermission().getOtherAction().implies(action)) {
1663 return;
1664 }
1665 throw new AccessDeniedException("Permission denied:" + " action=" + action
1666 + " path=" + file.getPath() + " user=" + ugi.getShortUserName());
1667 }
1668
1669 private static boolean contains(String[] groups, String user) {
1670 for (String group : groups) {
1671 if (group.equals(user)) {
1672 return true;
1673 }
1674 }
1675 return false;
1676 }
1677
1678
1679
1680
1681
1682
1683
1684
/**
 * Logs the filesystem tree rooted at {@code root} at DEBUG level, one line
 * per entry, via {@link #logFSTree}.
 *
 * @param fs filesystem to walk
 * @param root directory to start from
 * @param LOG logger to write the tree to
 * @throws IOException on filesystem errors
 */
public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
throws IOException {
  LOG.debug("Current file system:");
  logFSTree(LOG, fs, root, "|-");
}
1690
1691
1692
1693
1694
1695
1696 private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
1697 throws IOException {
1698 FileStatus[] files = FSUtils.listStatus(fs, root, null);
1699 if (files == null) return;
1700
1701 for (FileStatus file : files) {
1702 if (file.isDir()) {
1703 LOG.debug(prefix + file.getPath().getName() + "/");
1704 logFSTree(LOG, fs, file.getPath(), prefix + "---");
1705 } else {
1706 LOG.debug(prefix + file.getPath().getName());
1707 }
1708 }
1709 }
1710
1711 public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
1712 throws IOException {
1713
1714 fs.setTimes(src, EnvironmentEdgeManager.currentTimeMillis(), -1);
1715 return fs.rename(src, dest);
1716 }
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731 public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
1732 final Configuration conf) throws IOException {
1733 return getRegionDegreeLocalityMappingFromFS(
1734 conf, null,
1735 conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE));
1736
1737 }
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755 public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
1756 final Configuration conf, final String desiredTable, int threadPoolSize)
1757 throws IOException {
1758 Map<String, Map<String, Float>> regionDegreeLocalityMapping =
1759 new ConcurrentHashMap<String, Map<String, Float>>();
1760 getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null,
1761 regionDegreeLocalityMapping);
1762 return regionDegreeLocalityMapping;
1763 }
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785 private static void getRegionLocalityMappingFromFS(
1786 final Configuration conf, final String desiredTable,
1787 int threadPoolSize,
1788 Map<String, String> regionToBestLocalityRSMapping,
1789 Map<String, Map<String, Float>> regionDegreeLocalityMapping)
1790 throws IOException {
1791 FileSystem fs = FileSystem.get(conf);
1792 Path rootPath = FSUtils.getRootDir(conf);
1793 long startTime = EnvironmentEdgeManager.currentTimeMillis();
1794 Path queryPath;
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895 public static void setupShortCircuitRead(final Configuration conf) {
1896
1897 boolean shortCircuitSkipChecksum =
1898 conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
1899 boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
1900 if (shortCircuitSkipChecksum) {
1901 LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
1902 "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
1903 "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
1904 assert !shortCircuitSkipChecksum;
1905 }
1906 checkShortCircuitReadBufferSize(conf);
1907 }
1908
1909
1910
1911
1912
1913 public static void checkShortCircuitReadBufferSize(final Configuration conf) {
1914 final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
1915 final int notSet = -1;
1916
1917 final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
1918 int size = conf.getInt(dfsKey, notSet);
1919
1920 if (size != notSet) return;
1921
1922 int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
1923 conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
1924 }
1925 }