package org.apache.hadoop.hbase.regionserver;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;

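/**
 * View of a region on the filesystem: knows the layout of the region directory
 * (one store directory per column family, the .regioninfo file, and the temporary
 * .tmp, .splits and .merges subdirectories) and performs the filesystem operations
 * needed to create, open, split, merge and delete a region.
 */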
@InterfaceAudience.Private
public class HRegionFileSystem {
  public static final Log LOG = LogFactory.getLog(HRegionFileSystem.class);

  /** Name of the region info file that resides under the region directory. */
  public final static String REGION_INFO_FILE = ".regioninfo";

  /** Temporary subdirectory of the region directory used for merges. */
  public static final String REGION_MERGES_DIR = ".merges";

  /** Temporary subdirectory of the region directory used for splits. */
  public static final String REGION_SPLITS_DIR = ".splits";

  /** Temporary subdirectory of the region directory used for temporary files. */
  private static final String REGION_TEMP_DIR = ".tmp";

  private final HRegionInfo regionInfo;
  private final Configuration conf;
  private final Path tableDir;
  private final FileSystem fs;

  // Number of retries and base sleep interval (in milliseconds) used when a
  // filesystem operation (mkdir, rename, delete) fails; see createDir(),
  // rename() and deleteDir() below.
  private final int hdfsClientRetriesNumber;
  private final int baseSleepBeforeRetries;
  private static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10;
  private static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000;

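  /**
   * Create a view over the given region's directory under {@code tableDir}.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} that contains the region
   * @param tableDir {@link Path} to where the table is stored
   * @param regionInfo {@link HRegionInfo} for the region
   */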
  HRegionFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir,
      final HRegionInfo regionInfo) {
    this.fs = fs;
    this.conf = conf;
    this.tableDir = tableDir;
    this.regionInfo = regionInfo;
    this.hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
        DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
    this.baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
        DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
  }

  /** @return the underlying {@link FileSystem} */
  public FileSystem getFileSystem() {
    return this.fs;
  }

  /** @return the {@link HRegionInfo} that describes this on-disk region view */
  public HRegionInfo getRegionInfo() {
    return this.regionInfo;
  }

  /** @return {@link Path} to the table directory that contains this region */
  public Path getTableDir() {
    return this.tableDir;
  }

  /** @return {@link Path} to the region directory */
  public Path getRegionDir() {
    return new Path(this.tableDir, this.regionInfo.getEncodedName());
  }

  // ===========================================================================
  //  Temp Helpers
  // ===========================================================================
  /** @return {@link Path} to the region's temp directory, used for file creations */
  Path getTempDir() {
    return new Path(getRegionDir(), REGION_TEMP_DIR);
  }

  /**
   * Clean up any temp detritus that may have been left around from previous operation attempts.
   */
  void cleanupTempDir() throws IOException {
    deleteDir(getTempDir());
  }

  // ===========================================================================
  //  Store/StoreFile Helpers
  // ===========================================================================
  /**
   * Returns the directory path of the specified family.
   * @param familyName Column Family Name
   * @return {@link Path} to the directory of the specified family
   */
  Path getStoreDir(final String familyName) {
    return new Path(this.getRegionDir(), familyName);
  }

  /**
   * Create the store directory for the specified family name, if it does not already exist.
   * @param familyName Column Family Name
   * @return {@link Path} to the directory of the specified family
   * @throws IOException if the directory creation fails
   */
  Path createStoreDir(final String familyName) throws IOException {
    Path storeDir = getStoreDir(familyName);
    if (!fs.exists(storeDir) && !createDir(storeDir)) {
      throw new IOException("Failed creating " + storeDir);
    }
    return storeDir;
  }

  /**
   * @return the valid store files for the specified family, or null if the
   *         family directory does not exist
   */
  public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
    return getStoreFiles(Bytes.toString(familyName));
  }

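  /**
   * Returns the store files available for the family, filtering out entries
   * that are not valid store files.
   * @param familyName Column Family Name
   * @return a collection of {@link StoreFileInfo} for the specified family, or
   *         null if the family directory does not exist
   * @throws IOException
   */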
  public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
    Path familyDir = getStoreDir(familyName);
    FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
    if (files == null) return null;

    ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
    for (FileStatus status: files) {
      if (!StoreFileInfo.isValid(status)) continue;

      storeFiles.add(new StoreFileInfo(this.conf, this.fs, status));
    }
    return storeFiles;
  }

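  /**
   * Returns true if the specified family has reference files.
   * @param familyName Column Family Name
   * @return true if the family contains reference files
   * @throws IOException
   */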
  public boolean hasReferences(final String familyName) throws IOException {
    FileStatus[] files = FSUtils.listStatus(fs, getStoreDir(familyName),
        new PathFilter() {
          public boolean accept(Path path) {
            return StoreFileInfo.isReference(path);
          }
        }
    );
    return files != null && files.length > 0;
  }

  /**
   * Returns true if any family of the given table descriptor has reference files.
   * @param htd table descriptor of the region
   * @return true if any of the families has reference files
   * @throws IOException
   */
  public boolean hasReferences(final HTableDescriptor htd) throws IOException {
    for (HColumnDescriptor family : htd.getFamilies()) {
      if (hasReferences(family.getNameAsString())) {
        return true;
      }
    }
    return false;
  }

  /**
   * @return the set of families present on disk, or null if the region directory is empty
   * @throws IOException
   */
  public Collection<String> getFamilies() throws IOException {
    FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
    if (fds == null) return null;

    ArrayList<String> families = new ArrayList<String>(fds.length);
    for (FileStatus status: fds) {
      families.add(status.getPath().getName());
    }

    return families;
  }

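  /**
   * Remove the region family from disk, archiving the store files.
   * @param familyName Column Family Name
   * @throws IOException if an error occurs during the archiving
   */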
  public void deleteFamily(final String familyName) throws IOException {
    // Archive the family store files
    HFileArchiver.archiveFamily(fs, conf, regionInfo, tableDir, Bytes.toBytes(familyName));

    // Delete the family directory
    Path familyDir = getStoreDir(familyName);
    if (fs.exists(familyDir) && !deleteDir(familyDir)) {
      throw new IOException("Could not delete family " + familyName
          + " from FileSystem for region " + regionInfo.getRegionNameAsString() + "("
          + regionInfo.getEncodedName() + ")");
    }
  }

  /**
   * Generate a unique file name, used by createTempName() and commitStoreFile().
   * @param suffix extra information to append to the generated name
   * @return Unique file name
   */
  private static String generateUniqueName(final String suffix) {
    String name = UUID.randomUUID().toString().replaceAll("-", "");
    if (suffix != null) name += suffix;
    return name;
  }

  /**
   * Generate a unique temporary Path inside the region's temp directory.
   * @return Unique {@link Path} of the temporary file
   */
  public Path createTempName() {
    return createTempName(null);
  }

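  /**
   * Generate a unique temporary Path. Used in conjunction with commitStoreFile()
   * for a safer file creation: the new file is first written under the region's
   * .tmp directory and then moved into the family store directory. For example:
   * <pre>
   *   Path file = regionFs.createTempName();
   *   // write the new store file to "file"
   *   regionFs.commitStoreFile("family", file);
   * </pre>
   * @param suffix extra information to append to the generated name
   * @return Unique {@link Path} of the temporary file
   */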
  public Path createTempName(final String suffix) {
    return new Path(getTempDir(), generateUniqueName(suffix));
  }

  /**
   * Move the file from a build/temp location to the main family store directory.
   * @param familyName Family that will gain the file
   * @param buildPath {@link Path} to the file to commit
   * @return The new {@link Path} of the committed file
   * @throws IOException
   */
  public Path commitStoreFile(final String familyName, final Path buildPath) throws IOException {
    return commitStoreFile(familyName, buildPath, -1, false);
  }

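  /**
   * Move the file from a build/temp location to the main family store directory.
   * @param familyName Family that will gain the file
   * @param buildPath {@link Path} to the file to commit
   * @param seqNum Sequence Number to append to the file name (less than 0 if no sequence number)
   * @param generateNewName false if you want to keep the buildPath name
   * @return The new {@link Path} of the committed file
   * @throws IOException
   */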
  private Path commitStoreFile(final String familyName, final Path buildPath,
      final long seqNum, final boolean generateNewName) throws IOException {
    Path storeDir = getStoreDir(familyName);
    if (!fs.exists(storeDir) && !createDir(storeDir)) {
      throw new IOException("Failed creating " + storeDir);
    }

    String name = buildPath.getName();
    if (generateNewName) {
      name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_");
    }
    Path dstPath = new Path(storeDir, name);
    if (!fs.exists(buildPath)) {
      throw new FileNotFoundException(buildPath.toString());
    }
    LOG.debug("Committing store file " + buildPath + " as " + dstPath);
    if (!rename(buildPath, dstPath)) {
      throw new IOException("Failed rename of " + buildPath + " to " + dstPath);
    }
    return dstPath;
  }

  /**
   * Moves multiple store files to the relative region's family store directory.
   * @param storeFiles list of store files divided by family
   * @throws IOException
   */
  void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles) throws IOException {
    for (Map.Entry<byte[], List<StoreFile>> es: storeFiles.entrySet()) {
      String familyName = Bytes.toString(es.getKey());
      for (StoreFile sf: es.getValue()) {
        commitStoreFile(familyName, sf.getPath());
      }
    }
  }

  /**
   * Archives the specified store file from the specified family.
   * @param familyName Family that contains the store file
   * @param filePath {@link Path} to the store file to remove
   * @throws IOException if the archiving fails
   */
  public void removeStoreFile(final String familyName, final Path filePath)
      throws IOException {
    HFileArchiver.archiveStoreFile(this.conf, this.fs, this.regionInfo,
        this.tableDir, Bytes.toBytes(familyName), filePath);
  }

  /**
   * Archives the specified store files from the specified family.
   * @param familyName Family that contains the store files
   * @param storeFiles set of store files to remove
   * @throws IOException if the archiving fails
   */
  public void removeStoreFiles(final String familyName, final Collection<StoreFile> storeFiles)
      throws IOException {
    HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfo,
        this.tableDir, Bytes.toBytes(familyName), storeFiles);
  }

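  /**
   * Bulk load: add a specified store file to the specified family. If the source
   * file is on a different filesystem than the destination store, it is first
   * copied into the region's temp directory; it is then committed (renamed)
   * into the family store directory under a new unique name.
   * @param familyName Family that will gain the file
   * @param srcPath {@link Path} to the file to import
   * @param seqNum Bulk Load sequence number
   * @return The destination {@link Path} of the bulk loaded file
   * @throws IOException
   */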
  Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
      throws IOException {
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;

    // If the source file is on a different filesystem than the destination
    // store, copy it into this region's temp directory first so the final
    // commit is a rename within the destination filesystem.
    if (!srcFs.getUri().equals(desFs.getUri())) {
      LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
          "the destination store. Copying file over to destination filesystem.");
      Path tmpPath = createTempName();
      FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
      LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
      srcPath = tmpPath;
    }

    return commitStoreFile(familyName, srcPath, seqNum, true);
  }

  // ===========================================================================
  //  Splits Helpers
  // ===========================================================================
  /** @return {@link Path} to the temp directory used during split operations */
  Path getSplitsDir() {
    return new Path(getRegionDir(), REGION_SPLITS_DIR);
  }

  Path getSplitsDir(final HRegionInfo hri) {
    return new Path(getSplitsDir(), hri.getEncodedName());
  }

  /**
   * Clean up any split detritus that may have been left around from previous split attempts.
   */
  void cleanupSplitsDir() throws IOException {
    deleteDir(getSplitsDir());
  }

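  /**
   * Clean up any split detritus that may have been left around from previous
   * split attempts: remove any daughter region directories created from this
   * region's .splits directory and then the .splits directory itself.
   * @throws IOException
   */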
  void cleanupAnySplitDetritus() throws IOException {
    Path splitdir = this.getSplitsDir();
    if (!fs.exists(splitdir)) return;
    // The split dir may contain subdirectories named after the daughter regions
    // we tried to create. Remove any matching daughter region directories that
    // were already moved under the table dir, then remove the split dir itself.
    FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
    if (daughters != null) {
      for (FileStatus daughter: daughters) {
        Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
        if (fs.exists(daughterDir) && !deleteDir(daughterDir)) {
          throw new IOException("Failed delete of " + daughterDir);
        }
      }
    }
    cleanupSplitsDir();
    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
  }

  /**
   * Remove the daughter region's directory from the table directory.
   * @param regionInfo daughter {@link HRegionInfo}
   * @throws IOException
   */
  void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
    if (this.fs.exists(regionDir) && !deleteDir(regionDir)) {
      throw new IOException("Failed delete of " + regionDir);
    }
  }

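  /**
   * Commit a daughter region, moving it from the split temporary directory
   * to the proper location in the filesystem after writing its .regioninfo file.
   * @param regionInfo daughter {@link HRegionInfo}
   * @return {@link Path} to the committed daughter region directory
   * @throws IOException
   */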
  Path commitDaughterRegion(final HRegionInfo regionInfo) throws IOException {
    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
    Path daughterTmpDir = this.getSplitsDir(regionInfo);
    if (fs.exists(daughterTmpDir)) {
      // Write out the region info so the daughter directory is self-describing
      Path regionInfoFile = new Path(daughterTmpDir, REGION_INFO_FILE);
      byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);

      // Move the daughter temp dir to its final place under the table dir
      if (!rename(daughterTmpDir, regionDir)) {
        throw new IOException("Unable to rename " + daughterTmpDir + " to " + regionDir);
      }
    }
    return regionDir;
  }

  /**
   * Create the region splits directory, removing any pre-existing one.
   * @throws IOException if the directory cannot be deleted or created
   */
  void createSplitsDir() throws IOException {
    Path splitdir = getSplitsDir();
    if (fs.exists(splitdir)) {
      LOG.info("The " + splitdir + " directory exists. Hence deleting it to recreate it");
      if (!deleteDir(splitdir)) {
        throw new IOException("Failed deletion of " + splitdir + " before creating it again.");
      }
    }
    if (!createDir(splitdir)) {
      throw new IOException("Failed create of " + splitdir);
    }
  }

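  /**
   * Write out a split reference. Package local so it does not leak out of the regionserver.
   * @param hri {@link HRegionInfo} of the destination (daughter) region
   * @param familyName Column Family Name
   * @param f File to split
   * @param splitRow Split Row
   * @param top true if we are referring to the top half of the hfile
   * @return Path to the created reference file, or null if the store file does not
   *         contain any rows on the requested side of the split row
   * @throws IOException
   */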
  Path splitStoreFile(final HRegionInfo hri, final String familyName,
      final StoreFile f, final byte[] splitRow, final boolean top) throws IOException {
    // Check whether the split row lies within the key range of the store file.
    // If it does not, there is nothing to reference on this side of the split.
    if (top) {
      // check if larger than last key.
      KeyValue splitKey = KeyValue.createFirstOnRow(splitRow);
      byte[] lastKey = f.createReader().getLastKey();
      // If lastKey is null the storefile is empty.
      if (lastKey == null) return null;
      if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
          splitKey.getKeyOffset(), splitKey.getKeyLength(), lastKey, 0, lastKey.length) > 0) {
        return null;
      }
    } else {
      // check if smaller than first key.
      KeyValue splitKey = KeyValue.createLastOnRow(splitRow);
      byte[] firstKey = f.createReader().getFirstKey();
      // If firstKey is null the storefile is empty.
      if (firstKey == null) return null;
      if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
          splitKey.getKeyOffset(), splitKey.getKeyLength(), firstKey, 0, firstKey.length) < 0) {
        return null;
      }
    }

    f.getReader().close(true);

    Path splitDir = new Path(getSplitsDir(hri), familyName);
    // Create a reference covering the top or bottom half of the store file.
    Reference r =
      top ? Reference.createTopReference(splitRow) : Reference.createBottomReference(splitRow);
    // The reference is written into the daughter's split dir, named after the
    // original store file with the parent region's encoded name as suffix.
    String parentRegionName = regionInfo.getEncodedName();
    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
    return r.write(fs, p);
  }

  // ===========================================================================
  //  Merge Helpers
  // ===========================================================================
  /** @return {@link Path} to the temp directory used during merge operations */
  Path getMergesDir() {
    return new Path(getRegionDir(), REGION_MERGES_DIR);
  }

  Path getMergesDir(final HRegionInfo hri) {
    return new Path(getMergesDir(), hri.getEncodedName());
  }

  /**
   * Clean up any merge detritus that may have been left around from previous merge attempts.
   */
  void cleanupMergesDir() throws IOException {
    deleteDir(getMergesDir());
  }

  /**
   * Remove the merged region's directory from the table directory.
   * @param mergedRegion {@link HRegionInfo} of the merged region
   * @throws IOException
   */
  void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException {
    Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName());
    if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
      throw new IOException("Failed delete of " + regionDir);
    }
  }

  /**
   * Create the region merges directory, removing any pre-existing one.
   * @throws IOException if the directory cannot be deleted or created
   */
  void createMergesDir() throws IOException {
    Path mergesdir = getMergesDir();
    if (fs.exists(mergesdir)) {
      LOG.info("The " + mergesdir + " directory exists. Hence deleting it to recreate it");
      if (!fs.delete(mergesdir, true)) {
        throw new IOException("Failed deletion of " + mergesdir + " before creating it again.");
      }
    }
    if (!fs.mkdirs(mergesdir)) {
      throw new IOException("Failed create of " + mergesdir);
    }
  }

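  /**
   * Write out a merge reference under the merged region's directory.
   * @param mergedRegion {@link HRegionInfo} of the merged region
   * @param familyName Column Family Name
   * @param f File to create a reference for
   * @param mergedDir directory that contains the merged region
   * @return Path to the created reference file
   * @throws IOException
   */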
  Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName,
      final StoreFile f, final Path mergedDir)
      throws IOException {
    Path referenceDir = new Path(new Path(mergedDir,
        mergedRegion.getEncodedName()), familyName);
    // A whole reference to the store file.
    Reference r = Reference.createTopReference(regionInfo.getStartKey());
    // The reference is named after the original store file with this (merging)
    // region's encoded name as suffix, and written into the merged region's
    // directory under the same family.
    String mergingRegionName = regionInfo.getEncodedName();
    Path p = new Path(referenceDir, f.getPath().getName() + "."
        + mergingRegionName);
    return r.write(fs, p);
  }

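  /**
   * Commit a merged region, moving it from the merges temporary directory to
   * the proper location in the filesystem.
   * @param mergedRegionInfo merged region {@link HRegionInfo}
   * @throws IOException
   */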
  void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
    Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName());
    Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo);
    // Move the tmp dir to the expected location
    if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) {
      if (!fs.rename(mergedRegionTmpDir, regionDir)) {
        throw new IOException("Unable to rename " + mergedRegionTmpDir + " to "
            + regionDir);
      }
    }
  }

  // ===========================================================================
  //  Create/Open/Delete Helpers
  // ===========================================================================
  /**
   * Log the current state of the region directory.
   * @param LOG log to output information
   * @throws IOException if an unexpected exception occurs
   */
  void logFileSystemState(final Log LOG) throws IOException {
    FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
  }

  /**
   * @param hri {@link HRegionInfo}
   * @return Content of the file we write out to the filesystem under a region
   * @throws IOException
   */
  private static byte[] getRegionInfoFileContent(final HRegionInfo hri) throws IOException {
    return hri.toDelimitedByteArray();
  }

  /**
   * Create an {@link HRegionInfo} from the serialized version on-disk.
   * @param fs {@link FileSystem} that contains the Region Info file
   * @param regionDir {@link Path} to the Region Directory that contains the Info file
   * @return An {@link HRegionInfo} instance gotten from the Region Info file
   * @throws IOException if an error occurred during file open/read
   */
  public static HRegionInfo loadRegionInfoFileContent(final FileSystem fs, final Path regionDir)
      throws IOException {
    FSDataInputStream in = fs.open(new Path(regionDir, REGION_INFO_FILE));
    try {
      return HRegionInfo.parseFrom(in);
    } finally {
      in.close();
    }
  }

  /**
   * Write the given content out as the .regioninfo file.
   */
  private static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs,
      final Path regionInfoFile, final byte[] content) throws IOException {
    // First get the permissions to use for the file
    FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
    // Then write the RegionInfo file content
    FSDataOutputStream out = FSUtils.create(fs, regionInfoFile, perms, null);
    try {
      out.write(content);
    } finally {
      out.close();
    }
  }

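  /**
   * Write out the .regioninfo file under the region directory if it is missing
   * or stale. Useful for recovering mangled regions: if the file already exists
   * and has the expected length, it is left untouched.
   */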
  void checkRegionInfoOnFilesystem() throws IOException {
    // Compose the content of the file so we can compare its length with what
    // is already on the filesystem; if they differ, rewrite the file.
    byte[] content = getRegionInfoFileContent(regionInfo);
    try {
      Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);

      FileStatus status = fs.getFileStatus(regionInfoFile);
      if (status != null && status.getLen() == content.length) {
        // Assume the content is good and move on.
        return;
      }

      LOG.info("Rewriting .regioninfo file at: " + regionInfoFile);
      if (!fs.delete(regionInfoFile, false)) {
        throw new IOException("Unable to remove existing " + regionInfoFile);
      }
    } catch (FileNotFoundException e) {
      LOG.warn(REGION_INFO_FILE + " file not found for region: " + regionInfo.getEncodedName());
    }

    // Write the region info, using a temp file and rename to make the write safer.
    writeRegionInfoOnFilesystem(content, true);
  }

  /**
   * Write out the .regioninfo file to the filesystem.
   * @param useTempDir indicate whether or not to use the region .tmp dir for a safer file creation
   */
  private void writeRegionInfoOnFilesystem(boolean useTempDir) throws IOException {
    byte[] content = getRegionInfoFileContent(regionInfo);
    writeRegionInfoOnFilesystem(content, useTempDir);
  }

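  /**
   * Write out the .regioninfo file to the filesystem.
   * @param regionInfoContent serialized content of the .regioninfo file
   * @param useTempDir indicate whether or not to use the region .tmp dir for a safer file creation
   */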
  private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent,
      final boolean useTempDir) throws IOException {
    Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
    if (useTempDir) {
      // Create the file under the region's .tmp directory and then move it into
      // place, so a crash between create and close does not leave a half-written
      // .regioninfo file in the region directory.
      Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);

      // A previous attempt may have left a file behind in .tmp; delete it
      // before recreating the file.
      if (FSUtils.isExists(fs, tmpPath)) {
        FSUtils.delete(fs, tmpPath, true);
      }

      // Write the region info to the temporary file
      writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);

      // Move the created file to its final location
      if (fs.exists(tmpPath) && !rename(tmpPath, regionInfoFile)) {
        throw new IOException("Unable to rename " + tmpPath + " to " + regionInfoFile);
      }
    } else {
      // Write the region info file directly in place
      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
    }
  }

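  /**
   * Create a new region on the filesystem.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} on which to create the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region to be added
   * @return the {@link HRegionFileSystem} for the newly created region
   * @throws IOException if the region directory already exists or cannot be created
   */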
  public static HRegionFileSystem createRegionOnFileSystem(final Configuration conf,
      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (fs.exists(regionDir)) {
      LOG.warn("Trying to create a region that already exists on disk: " + regionDir);
      throw new IOException("The specified region already exists on disk: " + regionDir);
    }

    // Create the region directory
    if (!createDirOnFileSystem(fs, conf, regionDir)) {
      LOG.warn("Unable to create the region directory: " + regionDir);
      throw new IOException("Unable to create region directory: " + regionDir);
    }

    // Write out the .regioninfo file so the region directory is self-describing
    regionFs.writeRegionInfoOnFilesystem(false);
    return regionFs;
  }

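  /**
   * Open a region from the filesystem.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} that contains the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region to be opened
   * @param readOnly true if you don't want to edit the region data
   * @return the {@link HRegionFileSystem} for the opened region
   * @throws IOException if the region does not exist on disk
   */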
  public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, boolean readOnly)
      throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (!fs.exists(regionDir)) {
      LOG.warn("Trying to open a region that does not exist on disk: " + regionDir);
      throw new IOException("The specified region does not exist on disk: " + regionDir);
    }

    if (!readOnly) {
      // Cleanup temporary directories
      regionFs.cleanupTempDir();
      regionFs.cleanupSplitsDir();
      regionFs.cleanupMergesDir();

      // If it doesn't exist, write the .regioninfo file (e.g. recovering a mangled region)
      regionFs.checkRegionInfoOnFilesystem();
    }

    return regionFs;
  }

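  /**
   * Remove the region from the table directory, archiving the region's hfiles.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} from which to remove the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region to be deleted
   * @throws IOException if the request cannot be completed
   */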
  public static void deleteRegionFromFileSystem(final Configuration conf,
      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (!fs.exists(regionDir)) {
      LOG.warn("Trying to delete a region that does not exist on disk: " + regionDir);
      return;
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("DELETING region " + regionDir);
    }

    // Archive the region's hfiles
    Path rootDir = FSUtils.getRootDir(conf);
    HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);

    // Delete the now-empty region dir
    if (!fs.delete(regionDir, true)) {
      LOG.warn("Failed delete of " + regionDir);
    }
  }

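  /**
   * Creates a directory, retrying on filesystem errors. Assumes the operation is
   * idempotent: if the directory is found to exist after a failed attempt, the
   * creation is considered successful.
   * @param dir directory to create
   * @return the result of fs.mkdirs(), or true if the directory already exists
   * @throws IOException if the creation keeps failing after the configured number of retries
   */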
  boolean createDir(Path dir) throws IOException {
    int i = 0;
    IOException lastIOE = null;
    do {
      try {
        return fs.mkdirs(dir);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (fs.exists(dir)) return true; // directory is present
        sleepBeforeRetry("Create Directory", i + 1);
      }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in createDir", lastIOE);
  }

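  /**
   * Renames a path, retrying on filesystem errors. Assumes the operation is
   * idempotent: if the source is gone and the destination exists after a failed
   * attempt, the rename is considered successful.
   * @param srcpath source path
   * @param dstPath destination path
   * @return the result of fs.rename(), or true if the rename already took effect
   * @throws IOException if the rename keeps failing after the configured number of retries
   */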
  boolean rename(Path srcpath, Path dstPath) throws IOException {
    IOException lastIOE = null;
    int i = 0;
    do {
      try {
        return fs.rename(srcpath, dstPath);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (!fs.exists(srcpath) && fs.exists(dstPath)) return true;
        // source is still there, retry after a small delay
        sleepBeforeRetry("Rename Directory", i + 1);
      }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in rename", lastIOE);
  }

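  /**
   * Deletes a directory, retrying on filesystem errors. Assumes the operation is
   * idempotent: if the directory is gone after a failed attempt, the delete is
   * considered successful.
   * @param dir directory to delete
   * @return the result of fs.delete(), or true if the directory no longer exists
   * @throws IOException if the delete keeps failing after the configured number of retries
   */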
  boolean deleteDir(Path dir) throws IOException {
    IOException lastIOE = null;
    int i = 0;
    do {
      try {
        return fs.delete(dir, true);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (!fs.exists(dir)) return true;
        // directory is still there, retry deleting after some time
        sleepBeforeRetry("Delete Directory", i + 1);
      }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in DeleteDir", lastIOE);
  }

  /**
   * Sleeps for the configured base delay multiplied by the current attempt number.
   */
  private void sleepBeforeRetry(String msg, int sleepMultiplier) {
    sleepBeforeRetry(msg, sleepMultiplier, baseSleepBeforeRetries, hdfsClientRetriesNumber);
  }

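  /**
   * Creates a directory for a filesystem and configuration object, reading the
   * retry settings from the configuration rather than from instance state.
   * Assumes the caller has already checked for the directory's existence.
   * @param fs filesystem object
   * @param conf configuration to read the retry settings from
   * @param dir directory to create
   * @return the result of fs.mkdirs(), or true if the directory is found to exist
   *         after a failed attempt
   * @throws IOException if the creation keeps failing after the configured number of retries
   */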
  private static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir)
      throws IOException {
    int i = 0;
    IOException lastIOE = null;
    int hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
        DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
    int baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
        DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
    do {
      try {
        return fs.mkdirs(dir);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (fs.exists(dir)) return true; // directory is present
        sleepBeforeRetry("Create Directory", i + 1, baseSleepBeforeRetries, hdfsClientRetriesNumber);
      }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in createDir", lastIOE);
  }

  /**
   * Sleeps for the given base delay multiplied by the current attempt number, and logs
   * a message when the configured number of retries has been exhausted.
   */
  private static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSleepBeforeRetries,
      int hdfsClientRetriesNumber) {
    if (sleepMultiplier > hdfsClientRetriesNumber) {
      LOG.debug(msg + ", retries exhausted");
      return;
    }
    LOG.debug(msg + ", sleeping " + baseSleepBeforeRetries + " times " + sleepMultiplier);
    Threads.sleep((long)baseSleepBeforeRetries * sleepMultiplier);
  }
}