/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSHDFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;

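/**
 * View to an on-disk Region.
 * Provides the set of methods necessary to interact with the on-disk region data.
 */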
@InterfaceAudience.Private
public class HRegionFileSystem {
  public static final Log LOG = LogFactory.getLog(HRegionFileSystem.class);

  /** Name of the region info file that resides just under the region directory. */
  public final static String REGION_INFO_FILE = ".regioninfo";

  /** Temporary subdirectory of the region directory used for merges. */
  public static final String REGION_MERGES_DIR = ".merges";

  /** Temporary subdirectory of the region directory used for splits. */
  public static final String REGION_SPLITS_DIR = ".splits";

  /** Temporary subdirectory of the region directory used for temporary files. */
  private static final String REGION_TEMP_DIR = ".tmp";

  private final HRegionInfo regionInfo;
  private final Configuration conf;
  private final Path tableDir;
  private final FileSystem fs;

  // Number of retries and base sleep (in ms) used when an HDFS operation throws an
  // IOException but may still have succeeded or may succeed on a later attempt.
  private final int hdfsClientRetriesNumber;
  private final int baseSleepBeforeRetries;
  private static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10;
  private static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000;

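  /**
   * Create a view to the on-disk region.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} that contains the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region
   */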
  HRegionFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir,
      final HRegionInfo regionInfo) {
    this.fs = fs;
    this.conf = conf;
    this.tableDir = tableDir;
    this.regionInfo = regionInfo;
    this.hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
      DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
    this.baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
      DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
  }

  /** @return the underlying {@link FileSystem} */
  public FileSystem getFileSystem() {
    return this.fs;
  }

  /** @return the {@link HRegionInfo} that describes this on-disk region view */
  public HRegionInfo getRegionInfo() {
    return this.regionInfo;
  }

  /** @return {@link Path} to the table directory that contains this region */
  public Path getTableDir() {
    return this.tableDir;
  }

  /** @return {@link Path} to the region directory */
  public Path getRegionDir() {
    return new Path(this.tableDir, this.regionInfo.getEncodedName());
  }

  // ===========================================================================
  //  Temp Helpers
  // ===========================================================================
  /** @return {@link Path} to the region's temp directory, used for file creations */
  Path getTempDir() {
    return new Path(getRegionDir(), REGION_TEMP_DIR);
  }

  /**
   * Clean up any temp detritus that may have been left around from previous operation attempts.
   */
  void cleanupTempDir() throws IOException {
    deleteDir(getTempDir());
  }

  // ===========================================================================
  //  Store/StoreFile Helpers
  // ===========================================================================
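  /**
   * Returns the directory path of the specified family.
   * @param familyName Column Family Name
   * @return {@link Path} to the directory of the specified family
   */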
  Path getStoreDir(final String familyName) {
    return new Path(this.getRegionDir(), familyName);
  }

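  /**
   * Create the store directory for the specified family name, if it does not exist already.
   * @param familyName Column Family Name
   * @return {@link Path} to the directory of the specified family
   * @throws IOException if the directory creation fails
   */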
  Path createStoreDir(final String familyName) throws IOException {
    Path storeDir = getStoreDir(familyName);
    if (!fs.exists(storeDir) && !createDir(storeDir)) {
      throw new IOException("Failed creating " + storeDir);
    }
    return storeDir;
  }

  /**
   * Returns the store files available for the family.
   * @param familyName Column Family Name
   * @return a set of {@link StoreFileInfo} for the specified family, or null if none present
   */
  public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
    return getStoreFiles(Bytes.toString(familyName));
  }

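  /**
   * Returns the store files available for the family.
   * This method performs the filtering based on the valid store files.
   * @param familyName Column Family Name
   * @return a set of {@link StoreFileInfo} for the specified family, or null if none present
   */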
  public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
    Path familyDir = getStoreDir(familyName);
    FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
    if (files == null) return null;

    ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
    for (FileStatus status: files) {
      if (!StoreFileInfo.isValid(status)) continue;

      storeFiles.add(new StoreFileInfo(this.conf, this.fs, status));
    }
    return storeFiles;
  }

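  /**
   * Returns true if the specified family has reference files.
   * @param familyName Column Family Name
   * @return true if the family contains reference files
   * @throws IOException
   */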
  public boolean hasReferences(final String familyName) throws IOException {
    FileStatus[] files = FSUtils.listStatus(fs, getStoreDir(familyName),
        new PathFilter() {
          public boolean accept(Path path) {
            return StoreFileInfo.isReference(path);
          }
        }
    );
    return files != null && files.length > 0;
  }

  /**
   * Check whether the region has any reference files.
   * @param htd table descriptor of the region
   * @return true if any of the region's families contains a reference file
   */
  public boolean hasReferences(final HTableDescriptor htd) throws IOException {
    for (HColumnDescriptor family : htd.getFamilies()) {
      if (hasReferences(family.getNameAsString())) {
        return true;
      }
    }
    return false;
  }

  /**
   * @return the set of families present on disk, or null if the region directory is empty
   * @throws IOException
   */
  public Collection<String> getFamilies() throws IOException {
    FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
    if (fds == null) return null;

    ArrayList<String> families = new ArrayList<String>(fds.length);
    for (FileStatus status: fds) {
      families.add(status.getPath().getName());
    }

    return families;
  }

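  /**
   * Remove the region family from disk, archiving the store files.
   * @param familyName Column Family Name
   * @throws IOException if an error occurs during the archiving
   */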
  public void deleteFamily(final String familyName) throws IOException {
    // archive family store files
    HFileArchiver.archiveFamily(fs, conf, regionInfo, tableDir, Bytes.toBytes(familyName));

    // delete the family folder
    Path familyDir = getStoreDir(familyName);
    if (fs.exists(familyDir) && !deleteDir(familyDir)) {
      throw new IOException("Could not delete family " + familyName
          + " from FileSystem for region " + regionInfo.getRegionNameAsString() + "("
          + regionInfo.getEncodedName() + ")");
    }
  }

  /** Generate a unique file name, used by createTempName() and commitStoreFile() */
  private static String generateUniqueName(final String suffix) {
    String name = UUID.randomUUID().toString().replaceAll("-", "");
    if (suffix != null) name += suffix;
    return name;
  }

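  /**
   * Generate a unique temporary Path. Used in conjunction with commitStoreFile()
   * to get a safer file creation. A typical usage sketch (the write step is illustrative):
   * <pre>
   *   Path file = regionFs.createTempName();
   *   // ... write the new store file contents to 'file' ...
   *   regionFs.commitStoreFile("family", file);
   * </pre>
   * @return Unique {@link Path} of the temporary file
   */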
  public Path createTempName() {
    return createTempName(null);
  }

  /**
   * Generate a unique temporary Path with the specified suffix.
   * Used in conjunction with commitStoreFile() to get a safer file creation.
   * @param suffix extra information to append to the generated name, may be null
   * @return Unique {@link Path} of the temporary file
   */
  public Path createTempName(final String suffix) {
    return new Path(getTempDir(), generateUniqueName(suffix));
  }

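  /**
   * Move the file from a build/temp location to the main family store directory.
   * @param familyName Family that will gain the file
   * @param buildPath {@link Path} to the file to commit
   * @return the new {@link Path} of the committed file
   * @throws IOException
   */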
  public Path commitStoreFile(final String familyName, final Path buildPath) throws IOException {
    return commitStoreFile(familyName, buildPath, -1, false);
  }

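  /**
   * Move the file from a build/temp location to the main family store directory.
   * @param familyName Family that will gain the file
   * @param buildPath {@link Path} to the file to commit
   * @param seqNum Sequence Number to append to the file name (less than 0 to avoid it)
   * @param generateNewName false if you want to keep the buildPath name
   * @return the new {@link Path} of the committed file
   * @throws IOException
   */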
  private Path commitStoreFile(final String familyName, final Path buildPath,
      final long seqNum, final boolean generateNewName) throws IOException {
    Path storeDir = getStoreDir(familyName);
    if (!fs.exists(storeDir) && !createDir(storeDir)) {
      throw new IOException("Failed creating " + storeDir);
    }

    String name = buildPath.getName();
    if (generateNewName) {
      name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_");
    }
    Path dstPath = new Path(storeDir, name);
    if (!fs.exists(buildPath)) {
      throw new FileNotFoundException(buildPath.toString());
    }
    LOG.debug("Committing store file " + buildPath + " as " + dstPath);

    if (!rename(buildPath, dstPath)) {
      throw new IOException("Failed rename of " + buildPath + " to " + dstPath);
    }
    return dstPath;
  }

  /**
   * Moves multiple store files to the relative region's family store directory.
   * @param storeFiles list of store files divided by family
   * @throws IOException
   */
  void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles) throws IOException {
    for (Map.Entry<byte[], List<StoreFile>> es: storeFiles.entrySet()) {
      String familyName = Bytes.toString(es.getKey());
      for (StoreFile sf: es.getValue()) {
        commitStoreFile(familyName, sf.getPath());
      }
    }
  }

  /**
   * Archives the specified store file from the specified family.
   * @param familyName Family that contains the store file
   * @param filePath {@link Path} to the store file to remove
   * @throws IOException if the archiving fails
   */
  public void removeStoreFile(final String familyName, final Path filePath)
      throws IOException {
    HFileArchiver.archiveStoreFile(this.conf, this.fs, this.regionInfo,
        this.tableDir, Bytes.toBytes(familyName), filePath);
  }

  /**
   * Archives the specified store files from the specified family.
   * @param familyName Family that contains the store files
   * @param storeFiles set of store files to remove
   * @throws IOException if the archiving fails
   */
  public void removeStoreFiles(final String familyName, final Collection<StoreFile> storeFiles)
      throws IOException {
    HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfo,
        this.tableDir, Bytes.toBytes(familyName), storeFiles);
  }

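  /**
   * Bulk load: Add a specified store file to the specified family.
   * If the source file is on a different file system than the destination store,
   * the file is copied to the region's temp directory before being committed.
   * @param familyName Family that will gain the file
   * @param srcPath {@link Path} to the file to import
   * @param seqNum sequence id appended to the committed file name
   * @return the new {@link Path} of the committed file
   * @throws IOException
   */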
  Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
      throws IOException {
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;

    // If the source is not on the same HDFS as the destination store, stage the file
    // in the region's temp dir on the destination filesystem before committing it.
    if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
      LOG.info("Bulk-load file " + srcPath + " is on a different filesystem than " +
          "the destination store. Copying file over to destination filesystem.");
      Path tmpPath = createTempName();
      FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
      LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
      srcPath = tmpPath;
    }

    return commitStoreFile(familyName, srcPath, seqNum, true);
  }

  // ===========================================================================
  //  Splits Helpers
  // ===========================================================================
  /** @return {@link Path} to the temp directory used during split operations */
  Path getSplitsDir() {
    return new Path(getRegionDir(), REGION_SPLITS_DIR);
  }

  Path getSplitsDir(final HRegionInfo hri) {
    return new Path(getSplitsDir(), hri.getEncodedName());
  }

  /**
   * Clean up any split detritus that may have been left around from previous split attempts.
   */
  void cleanupSplitsDir() throws IOException {
    deleteDir(getSplitsDir());
  }

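  /**
   * Clean up any daughter region directories left under the table directory by a
   * previously failed split, then remove the splits directory itself.
   * @throws IOException
   */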
  void cleanupAnySplitDetritus() throws IOException {
    Path splitdir = this.getSplitsDir();
    if (!fs.exists(splitdir)) return;
    // The splitdir may hold the encoded names of daughter regions we tried to make.
    // If entries are still under the splitdir, the split did not complete; delete any
    // half-made daughter dirs that were created under the table dir, then remove the
    // splitdir itself.
    FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
    if (daughters != null) {
      for (FileStatus daughter: daughters) {
        Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
        if (fs.exists(daughterDir) && !deleteDir(daughterDir)) {
          throw new IOException("Failed delete of " + daughterDir);
        }
      }
    }
    cleanupSplitsDir();
    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
  }

  /**
   * Remove the daughter region directory from the filesystem.
   * @param regionInfo daughter {@link HRegionInfo}
   * @throws IOException
   */
  void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
    if (this.fs.exists(regionDir) && !deleteDir(regionDir)) {
      throw new IOException("Failed delete of " + regionDir);
    }
  }

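  /**
   * Commit a daughter region, moving it from the split temporary directory
   * to the proper location in the filesystem.
   * @param regionInfo daughter {@link HRegionInfo}
   * @return {@link Path} to the committed daughter region directory
   * @throws IOException
   */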
  Path commitDaughterRegion(final HRegionInfo regionInfo) throws IOException {
    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
    Path daughterTmpDir = this.getSplitsDir(regionInfo);
    if (fs.exists(daughterTmpDir)) {
      // Write the region info file for the daughter region
      Path regionInfoFile = new Path(daughterTmpDir, REGION_INFO_FILE);
      byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);

      // Move the daughter temp dir to the table dir
      if (!rename(daughterTmpDir, regionDir)) {
        throw new IOException("Unable to rename " + daughterTmpDir + " to " + regionDir);
      }
    }
    return regionDir;
  }

  /**
   * Create the region splits directory, deleting any pre-existing one first.
   * @throws IOException if the cleanup or the creation fails
   */
  void createSplitsDir() throws IOException {
    Path splitdir = getSplitsDir();
    if (fs.exists(splitdir)) {
      LOG.info("The " + splitdir + " directory exists. Hence deleting it to recreate it");
      if (!deleteDir(splitdir)) {
        throw new IOException("Failed deletion of " + splitdir + " before creating it again.");
      }
    }
    if (!createDir(splitdir)) {
      throw new IOException("Failed create of " + splitdir);
    }
  }

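  /**
   * Write out a split reference. Package local so it does not leak out of the regionserver.
   * @param hri {@link HRegionInfo} of the destination (daughter) region
   * @param familyName Column Family Name
   * @param f StoreFile to split
   * @param splitRow Split Row
   * @param top true if we are referring to the top half of the hfile
   * @return Path to the created reference, or null if the split row is outside the file's range
   * @throws IOException
   */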
  Path splitStoreFile(final HRegionInfo hri, final String familyName,
      final StoreFile f, final byte[] splitRow, final boolean top) throws IOException {
    // Check whether the split row lies in the range of the store file.
    // If it is outside of the range, no reference is needed and we return null.
    if (top) {
      // Check if the split row is larger than the last key of the file.
      KeyValue splitKey = KeyValue.createFirstOnRow(splitRow);
      byte[] lastKey = f.createReader().getLastKey();
      // If lastKey is null, the store file is empty.
      if (lastKey == null) return null;
      if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
          splitKey.getKeyOffset(), splitKey.getKeyLength(), lastKey, 0, lastKey.length) > 0) {
        return null;
      }
    } else {
      // Check if the split row is smaller than the first key of the file.
      KeyValue splitKey = KeyValue.createLastOnRow(splitRow);
      byte[] firstKey = f.createReader().getFirstKey();
      // If firstKey is null, the store file is empty.
      if (firstKey == null) return null;
      if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
          splitKey.getKeyOffset(), splitKey.getKeyLength(), firstKey, 0, firstKey.length) < 0) {
        return null;
      }
    }

    f.getReader().close(true);

    Path splitDir = new Path(getSplitsDir(hri), familyName);
    // A reference to the top or bottom half of the store file.
    Reference r = top ? Reference.createTopReference(splitRow)
        : Reference.createBottomReference(splitRow);
    // The reference file keeps the store file name and appends the encoded name of the
    // parent region (the region being split) as a dot-separated suffix.
    String parentRegionName = regionInfo.getEncodedName();
    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
    return r.write(fs, p);
  }

  // ===========================================================================
  //  Merge Helpers
  // ===========================================================================
  /** @return {@link Path} to the temp directory used during merge operations */
  Path getMergesDir() {
    return new Path(getRegionDir(), REGION_MERGES_DIR);
  }

  Path getMergesDir(final HRegionInfo hri) {
    return new Path(getMergesDir(), hri.getEncodedName());
  }

  /**
   * Clean up any merge detritus that may have been left around from previous merge attempts.
   */
  void cleanupMergesDir() throws IOException {
    deleteDir(getMergesDir());
  }

  /**
   * Remove the merged region directory from the filesystem.
   * @param mergedRegion {@link HRegionInfo} of the merged region
   * @throws IOException
   */
  void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException {
    Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName());
    if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
      throw new IOException("Failed delete of " + regionDir);
    }
  }

  /**
   * Create the region merges directory, deleting any pre-existing one first.
   * @throws IOException if the cleanup or the creation fails
   */
  void createMergesDir() throws IOException {
    Path mergesdir = getMergesDir();
    if (fs.exists(mergesdir)) {
      LOG.info("The " + mergesdir + " directory exists. Hence deleting it to recreate it");
      if (!fs.delete(mergesdir, true)) {
        throw new IOException("Failed deletion of " + mergesdir + " before creating it again.");
      }
    }
    if (!fs.mkdirs(mergesdir)) {
      throw new IOException("Failed create of " + mergesdir);
    }
  }

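  /**
   * Write out a merge reference under the given merges directory.
   * Package local so it does not leak out of the regionserver.
   * @param mergedRegion {@link HRegionInfo} of the merged region
   * @param familyName Column Family Name
   * @param f StoreFile to create the reference for
   * @param mergedDir merges directory under which the reference is written
   * @return Path to the created reference
   * @throws IOException
   */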
  Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName,
      final StoreFile f, final Path mergedDir)
      throws IOException {
    Path referenceDir = new Path(new Path(mergedDir,
        mergedRegion.getEncodedName()), familyName);
    // A whole reference to the store file (a top reference starting at this region's start key).
    Reference r = Reference.createTopReference(regionInfo.getStartKey());
    // The reference file keeps the store file name and appends the encoded name of this
    // merging region as a dot-separated suffix.
    String mergingRegionName = regionInfo.getEncodedName();
    Path p = new Path(referenceDir, f.getPath().getName() + "."
        + mergingRegionName);
    return r.write(fs, p);
  }

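  /**
   * Commit a merged region, moving it from the merges temporary directory
   * to the proper location in the filesystem.
   * @param mergedRegionInfo merged region {@link HRegionInfo}
   * @throws IOException
   */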
  void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
    Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName());
    Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo);
    // Move the merged region temp dir to its expected location under the table dir
    if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) {
      if (!fs.rename(mergedRegionTmpDir, regionDir)) {
        throw new IOException("Unable to rename " + mergedRegionTmpDir + " to "
            + regionDir);
      }
    }
  }

  // ===========================================================================
  //  Create/Open/Delete Helpers
  // ===========================================================================
  /**
   * Log the current state of the region directory.
   * @param LOG log to output information
   * @throws IOException if an unexpected exception occurs
   */
  void logFileSystemState(final Log LOG) throws IOException {
    FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
  }

  /**
   * @param hri {@link HRegionInfo} to serialize
   * @return content of the .regioninfo file we write out under a region directory
   * @throws IOException
   */
  private static byte[] getRegionInfoFileContent(final HRegionInfo hri) throws IOException {
    return hri.toDelimitedByteArray();
  }

  /**
   * Create an {@link HRegionInfo} from the serialized version on-disk.
   * @param fs {@link FileSystem} that contains the region info file
   * @param regionDir {@link Path} to the region directory that contains the info file
   * @return an {@link HRegionInfo} instance read from the .regioninfo file
   * @throws IOException if an error occurs while opening or reading the file
   */
  public static HRegionInfo loadRegionInfoFileContent(final FileSystem fs, final Path regionDir)
      throws IOException {
    FSDataInputStream in = fs.open(new Path(regionDir, REGION_INFO_FILE));
    try {
      return HRegionInfo.parseFrom(in);
    } finally {
      in.close();
    }
  }

  /**
   * Write the .regioninfo file on-disk.
   */
  private static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs,
      final Path regionInfoFile, final byte[] content) throws IOException {
    // First get the permissions to use for the file
    FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
    // Then write out the region info content
    FSDataOutputStream out = FSUtils.create(fs, regionInfoFile, perms, null);
    try {
      out.write(content);
    } finally {
      out.close();
    }
  }

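  /**
   * Write out an info file under the stored region directory. Useful recovering mangled regions.
   * If the .regioninfo file already exists on disk with the expected length, this is a fast exit.
   * @throws IOException
   */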
  void checkRegionInfoOnFilesystem() throws IOException {
    // Compose the content of the file so we can compare it to the length in the filesystem.
    // If the lengths differ, rewrite the .regioninfo file.
    byte[] content = getRegionInfoFileContent(regionInfo);
    try {
      Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);

      FileStatus status = fs.getFileStatus(regionInfoFile);
      if (status != null && status.getLen() == content.length) {
        // Assume the content is good and move on.
        return;
      }

      LOG.info("Rewriting .regioninfo file at: " + regionInfoFile);
      if (!fs.delete(regionInfoFile, false)) {
        throw new IOException("Unable to remove existing " + regionInfoFile);
      }
    } catch (FileNotFoundException e) {
      LOG.warn(REGION_INFO_FILE + " file not found for region: " + regionInfo.getEncodedName());
    }

    // Write out the region info, using the temp dir for a safer file creation
    writeRegionInfoOnFilesystem(content, true);
  }

  /**
   * Write out an info file under the region directory. Useful recovering mangled regions.
   * @param useTempDir indicate whether or not to use the region .tmp dir for a safer file creation
   */
  private void writeRegionInfoOnFilesystem(boolean useTempDir) throws IOException {
    byte[] content = getRegionInfoFileContent(regionInfo);
    writeRegionInfoOnFilesystem(content, useTempDir);
  }

  /**
   * Write out an info file under the region directory. Useful recovering mangled regions.
   * @param regionInfoContent serialized content of the {@link HRegionInfo}
   * @param useTempDir indicate whether or not to use the region .tmp dir for a safer file creation
   */
  private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent,
      final boolean useTempDir) throws IOException {
    Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
    if (useTempDir) {
      // Create the file in the .tmp dir and then move it into place, so that a crash
      // between create and close does not leave a half-written .regioninfo in place.
      Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);

      // If a previous attempt left a file in the .tmp dir, delete it before recreating it.
      if (FSUtils.isExists(fs, tmpPath)) {
        FSUtils.delete(fs, tmpPath, true);
      }

      // Write the region info content to the temporary file
      writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);

      // Move the created file to its final location
      if (fs.exists(tmpPath) && !rename(tmpPath, regionInfoFile)) {
        throw new IOException("Unable to rename " + tmpPath + " to " + regionInfoFile);
      }
    } else {
      // Write the region info content directly to the final location
      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
    }
  }

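  /**
   * Create a new Region on the file system.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} on which to create the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region to be added
   * @return the created {@link HRegionFileSystem}
   * @throws IOException if the region creation fails due to a FileSystem exception
   */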
  public static HRegionFileSystem createRegionOnFileSystem(final Configuration conf,
      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (fs.exists(regionDir)) {
      LOG.warn("Trying to create a region that already exists on disk: " + regionDir);
      throw new IOException("The specified region already exists on disk: " + regionDir);
    }

    // Create the region directory
    if (!createDirOnFileSystem(fs, conf, regionDir)) {
      LOG.warn("Unable to create the region directory: " + regionDir);
      throw new IOException("Unable to create region directory: " + regionDir);
    }

    // Write the .regioninfo file; the region was just created, so no temp dir is needed
    regionFs.writeRegionInfoOnFilesystem(false);
    return regionFs;
  }

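  /**
   * Open a Region from the file system.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} from which to open the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region to be opened
   * @param readOnly true if you don't want to edit the region data
   * @return the opened {@link HRegionFileSystem}
   * @throws IOException if the region does not exist or a FileSystem exception occurs
   */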
  public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, boolean readOnly)
      throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (!fs.exists(regionDir)) {
      LOG.warn("Trying to open a region that does not exist on disk: " + regionDir);
      throw new IOException("The specified region does not exist on disk: " + regionDir);
    }

    if (!readOnly) {
      // Cleanup temporary directories
      regionFs.cleanupTempDir();
      regionFs.cleanupSplitsDir();
      regionFs.cleanupMergesDir();

      // If missing or stale, rewrite the .regioninfo file (useful to recover mangled regions)
      regionFs.checkRegionInfoOnFilesystem();
    }

    return regionFs;
  }

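  /**
   * Remove the region from the table directory, archiving the region's hfiles.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} from which to remove the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region to be removed
   * @throws IOException if the request cannot be completed
   */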
  public static void deleteRegionFromFileSystem(final Configuration conf,
      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (!fs.exists(regionDir)) {
      LOG.warn("Trying to delete a region that does not exist on disk: " + regionDir);
      return;
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("DELETING region " + regionDir);
    }

    // Archive the region's hfiles
    Path rootDir = FSUtils.getRootDir(conf);
    HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);

    // Delete the (now empty) region dir
    if (!fs.delete(regionDir, true)) {
      LOG.warn("Failed delete of " + regionDir);
    }
  }

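  /**
   * Creates a directory, retrying on IOException.
   * Assumes the caller has already checked for this directory's existence.
   * @param dir the directory to create
   * @return the result of fs.mkdirs(). If the underlying fs throws an IOException,
   *         it checks whether the directory exists and returns true if it does.
   * @throws IOException if the retries are exhausted
   */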
  boolean createDir(Path dir) throws IOException {
    int i = 0;
    IOException lastIOE = null;
    do {
      try {
        return fs.mkdirs(dir);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (fs.exists(dir)) return true; // directory is present
        sleepBeforeRetry("Create Directory", i + 1);
      }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in createDir", lastIOE);
  }

  /**
   * Renames a directory, retrying on IOException.
   * Assumes the caller has already checked for the source directory's existence.
   * @param srcpath source path
   * @param dstPath destination path
   * @return true if the rename is successful
   * @throws IOException if the retries are exhausted
   */
  boolean rename(Path srcpath, Path dstPath) throws IOException {
    IOException lastIOE = null;
    int i = 0;
    do {
      try {
        return fs.rename(srcpath, dstPath);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (!fs.exists(srcpath) && fs.exists(dstPath)) return true; // successful move
        // retry the rename after some time
        sleepBeforeRetry("Rename Directory", i + 1);
      }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in rename", lastIOE);
  }

  /**
   * Deletes a directory, retrying on IOException.
   * Assumes the caller has already checked for this directory's existence.
   * @param dir the directory to delete
   * @return true if the directory is deleted
   * @throws IOException if the retries are exhausted
   */
  boolean deleteDir(Path dir) throws IOException {
    IOException lastIOE = null;
    int i = 0;
    do {
      try {
        return fs.delete(dir, true);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (!fs.exists(dir)) return true;
        // retry the delete after some time
        sleepBeforeRetry("Delete Directory", i + 1);
      }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in DeleteDir", lastIOE);
  }

  /**
   * Sleeping logic; handles the interrupt exception.
   */
  private void sleepBeforeRetry(String msg, int sleepMultiplier) {
    sleepBeforeRetry(msg, sleepMultiplier, baseSleepBeforeRetries, hdfsClientRetriesNumber);
  }

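  /**
   * Creates a directory for a given filesystem and configuration object, retrying on IOException.
   * Assumes the caller has already checked for this directory's existence.
   * @param fs the {@link FileSystem} on which to create the directory
   * @param conf the {@link Configuration} used to read the retry settings
   * @param dir the directory to create
   * @return the result of fs.mkdirs(). If the underlying fs throws an IOException,
   *         it checks whether the directory exists and returns true if it does.
   * @throws IOException if the retries are exhausted
   */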
  private static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir)
      throws IOException {
    int i = 0;
    IOException lastIOE = null;
    int hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
      DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
    int baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
      DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
    do {
      try {
        return fs.mkdirs(dir);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (fs.exists(dir)) return true; // directory is present
        sleepBeforeRetry("Create Directory", i + 1, baseSleepBeforeRetries, hdfsClientRetriesNumber);
      }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in createDir", lastIOE);
  }

  /**
   * Sleeping logic for static methods; handles the interrupt exception.
   * Keeps a static version to avoid re-reading the retry settings from the configuration.
   */
  private static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSleepBeforeRetries,
      int hdfsClientRetriesNumber) {
    if (sleepMultiplier > hdfsClientRetriesNumber) {
      LOG.debug(msg + ", retries exhausted");
      return;
    }
    LOG.debug(msg + ", sleeping " + baseSleepBeforeRetries + " times " + sleepMultiplier);
    Threads.sleep((long)baseSleepBeforeRetries * sleepMultiplier);
  }
}