/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSHDFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;

/**
 * View to an on-disk Region on a FileSystem.
 * <p>
 * This class manages the on-disk layout of a region: the region directory under the table
 * directory, the {@code .regioninfo} file, the per-family store directories, and the
 * temporary directories used by splits, merges and store file creation.
 */
@InterfaceAudience.Private
public class HRegionFileSystem {
  public static final Log LOG = LogFactory.getLog(HRegionFileSystem.class);

  /** Name of the region info file that resides just under the region directory. */
  public final static String REGION_INFO_FILE = ".regioninfo";

  /** Temporary subdirectory of the region directory used for merges. */
  public static final String REGION_MERGES_DIR = ".merges";

  /** Temporary subdirectory of the region directory used for splits. */
  public static final String REGION_SPLITS_DIR = ".splits";

  /** Temporary subdirectory of the region directory used for files that are later committed
   *  into place (e.g. flush and compaction output). */
  private static final String REGION_TEMP_DIR = ".tmp";

  private final HRegionInfo regionInfo;
  // regionInfo used for interacting with the FileSystem (getting encodedName, etc.)
  private final HRegionInfo regionInfoForFs;
  private final Configuration conf;
  private final Path tableDir;
  private final FileSystem fs;

  /**
   * In order to handle NameNode connectivity hiccups, non-idempotent filesystem operations
   * are retried at the client level.
   */
  private final int hdfsClientRetriesNumber;
  private final int baseSleepBeforeRetries;
  private static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10;
  private static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000;

  /**
   * Create a view to the on-disk region.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} that contains the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region
   */
  HRegionFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir,
      final HRegionInfo regionInfo) {
    this.fs = fs;
    this.conf = conf;
    this.tableDir = tableDir;
    this.regionInfo = regionInfo;
    this.regionInfoForFs = ServerRegionReplicaUtil.getRegionInfoForFs(regionInfo);
    this.hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
      DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
    this.baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
      DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
  }

  /** @return the underlying {@link FileSystem} */
  public FileSystem getFileSystem() {
    return this.fs;
  }

  /** @return the {@link HRegionInfo} that describes this on-disk region view */
  public HRegionInfo getRegionInfo() {
    return this.regionInfo;
  }

  /** @return {@link Path} to the table directory that contains this region */
  public Path getTableDir() {
    return this.tableDir;
  }

  /** @return {@link Path} to the region directory (named after the region's encoded name) */
  public Path getRegionDir() {
    return new Path(this.tableDir, this.regionInfoForFs.getEncodedName());
  }

  // ===========================================================================
  //  Temp Helpers
  // ===========================================================================
  /** @return {@link Path} to the region's temp directory, used for file creations */
  Path getTempDir() {
    return new Path(getRegionDir(), REGION_TEMP_DIR);
  }

  /**
   * Clean up any temp detritus that may have been left around from previous operation attempts.
   */
  void cleanupTempDir() throws IOException {
    deleteDir(getTempDir());
  }

  // ===========================================================================
  //  Store/StoreFile Helpers
  // ===========================================================================
  /**
   * Returns the directory path of the specified family.
   * @param familyName Column Family Name
   * @return {@link Path} to the directory of the specified family
   */
  public Path getStoreDir(final String familyName) {
    return new Path(this.getRegionDir(), familyName);
  }

  /**
   * Create the store directory for the specified family name.
   * @param familyName Column Family Name
   * @return {@link Path} to the directory of the specified family
   * @throws IOException if the directory creation fails
   */
  Path createStoreDir(final String familyName) throws IOException {
    Path storeDir = getStoreDir(familyName);
    if (!fs.exists(storeDir) && !createDir(storeDir))
      throw new IOException("Failed creating " + storeDir);
    return storeDir;
  }

  /**
   * Returns the store files available for the family, filtering out files that do not look
   * like valid store files.
   * @param familyName Column Family Name
   * @return a set of {@link StoreFileInfo} for the specified family
   */
  public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
    return getStoreFiles(Bytes.toString(familyName));
  }

  public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
    return getStoreFiles(familyName, true);
  }

  /**
   * Returns the store files available for the family.
   * @param familyName Column Family Name
   * @param validate if true, files that do not look like valid store files are skipped
   * @return a set of {@link StoreFileInfo} for the specified family, or null if the family
   *         directory does not exist or is empty
   */
  public Collection<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
      throws IOException {
    Path familyDir = getStoreDir(familyName);
    FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
    if (files == null) {
      LOG.debug("No StoreFiles for: " + familyDir);
      return null;
    }

    ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
    for (FileStatus status: files) {
      if (validate && !StoreFileInfo.isValid(status)) {
        LOG.warn("Invalid StoreFile: " + status.getPath());
        continue;
      }
      StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
        regionInfoForFs, familyName, status);
      storeFiles.add(info);
    }
    return storeFiles;
  }

  /**
   * Return the qualified {@link Path} of the specified family/file.
   * @param familyName Column Family Name
   * @param fileName File Name
   * @return The qualified Path for the specified family/file
   */
  Path getStoreFilePath(final String familyName, final String fileName) {
    Path familyDir = getStoreDir(familyName);
    return new Path(familyDir, fileName).makeQualified(this.fs);
  }

  /**
   * Return the store file information of the specified family/file.
   * @param familyName Column Family Name
   * @param fileName File Name
   * @return The {@link StoreFileInfo} for the specified family/file
   */
  StoreFileInfo getStoreFileInfo(final String familyName, final String fileName)
      throws IOException {
    Path familyDir = getStoreDir(familyName);
    FileStatus status = fs.getFileStatus(new Path(familyDir, fileName));
    return new StoreFileInfo(this.conf, this.fs, status);
  }

  /**
   * Returns true if the specified family has reference files.
   * @param familyName Column Family Name
   * @return true if the family contains reference files
   * @throws IOException
   */
  public boolean hasReferences(final String familyName) throws IOException {
    FileStatus[] files = FSUtils.listStatus(fs, getStoreDir(familyName),
        new FSUtils.ReferenceFileFilter(fs));
    return files != null && files.length > 0;
  }

  /**
   * Check whether any of the region's families has reference files.
   * @param htd table descriptor of the region
   * @return true if at least one family contains reference files
   * @throws IOException
   */
  public boolean hasReferences(final HTableDescriptor htd) throws IOException {
    for (HColumnDescriptor family : htd.getFamilies()) {
      if (hasReferences(family.getNameAsString())) {
        return true;
      }
    }
    return false;
  }

  /**
   * @return the set of families present on disk, or null if the region directory
   *         contains no family directories
   * @throws IOException
   */
  public Collection<String> getFamilies() throws IOException {
    FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
    if (fds == null) return null;

    ArrayList<String> families = new ArrayList<String>(fds.length);
    for (FileStatus status: fds) {
      families.add(status.getPath().getName());
    }

    return families;
  }

  /**
   * Remove the region family from disk, archiving the store files.
   * @param familyName Column Family Name
   * @throws IOException if an error occurs during the archiving
   */
  public void deleteFamily(final String familyName) throws IOException {
    // archive family store files
    HFileArchiver.archiveFamily(fs, conf, regionInfoForFs, tableDir, Bytes.toBytes(familyName));

    // delete the family folder
    Path familyDir = getStoreDir(familyName);
    if (fs.exists(familyDir) && !deleteDir(familyDir))
      throw new IOException("Could not delete family " + familyName
          + " from FileSystem for region " + regionInfoForFs.getRegionNameAsString() + "("
          + regionInfoForFs.getEncodedName() + ")");
  }

  /**
   * Generate a unique file name, used by createTempName() and commitStoreFile().
   * @param suffix extra information to append to the generated name
   * @return Unique file name
   */
  private static String generateUniqueName(final String suffix) {
    String name = UUID.randomUUID().toString().replaceAll("-", "");
    if (suffix != null) name += suffix;
    return name;
  }

  /**
   * Generate a unique temporary Path. Used in conjunction with commitStoreFile() for safer file
   * creation: the file is first written under the region .tmp directory and then renamed into
   * the family store directory.
   * @return Unique {@link Path} of the temporary file
   */
  public Path createTempName() {
    return createTempName(null);
  }

  /**
   * Generate a unique temporary Path. Used in conjunction with commitStoreFile() for safer file
   * creation: the file is first written under the region .tmp directory and then renamed into
   * the family store directory.
   * @param suffix extra information to append to the generated name
   * @return Unique {@link Path} of the temporary file
   */
  public Path createTempName(final String suffix) {
    return new Path(getTempDir(), generateUniqueName(suffix));
  }

  /**
   * Move the file from a build/temp location to the main family store directory.
   * @param familyName Family that will gain the file
   * @param buildPath {@link Path} to the file to commit
   * @return The new {@link Path} of the committed file
   * @throws IOException
   */
  public Path commitStoreFile(final String familyName, final Path buildPath) throws IOException {
    return commitStoreFile(familyName, buildPath, -1, false);
  }

  /**
   * Move the file from a build/temp location to the main family store directory.
   * @param familyName Family that will gain the file
   * @param buildPath {@link Path} to the file to commit
   * @param seqNum Sequence Number to append to the file name (less than 0 if no sequence number)
   * @param generateNewName false if you want to keep the buildPath name
   * @return The new {@link Path} of the committed file
   * @throws IOException
   */
  private Path commitStoreFile(final String familyName, final Path buildPath,
      final long seqNum, final boolean generateNewName) throws IOException {
    Path storeDir = getStoreDir(familyName);
    if (!fs.exists(storeDir) && !createDir(storeDir))
      throw new IOException("Failed creating " + storeDir);

    String name = buildPath.getName();
    if (generateNewName) {
      name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_");
    }
    Path dstPath = new Path(storeDir, name);
    if (!fs.exists(buildPath)) {
      throw new FileNotFoundException(buildPath.toString());
    }
    LOG.debug("Committing store file " + buildPath + " as " + dstPath);
    // buildPath exists, therefore not doing an exists() check before the rename
    if (!rename(buildPath, dstPath)) {
      throw new IOException("Failed rename of " + buildPath + " to " + dstPath);
    }
    return dstPath;
  }

  /**
   * Moves multiple store files to the relative region's family store directory.
   * @param storeFiles list of store files divided by family
   * @throws IOException
   */
  void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles) throws IOException {
    for (Map.Entry<byte[], List<StoreFile>> es: storeFiles.entrySet()) {
      String familyName = Bytes.toString(es.getKey());
      for (StoreFile sf: es.getValue()) {
        commitStoreFile(familyName, sf.getPath());
      }
    }
  }

  /**
   * Archives the specified store file from the specified family.
   * @param familyName Family that contains the store file
   * @param filePath {@link Path} to the store file to remove
   * @throws IOException if the archiving fails
   */
  public void removeStoreFile(final String familyName, final Path filePath)
      throws IOException {
    HFileArchiver.archiveStoreFile(this.conf, this.fs, this.regionInfoForFs,
        this.tableDir, Bytes.toBytes(familyName), filePath);
  }

  /**
   * Archives the specified store files from the specified family.
   * @param familyName Family that contains the store files
   * @param storeFiles set of store files to remove
   * @throws IOException if the archiving fails
   */
  public void removeStoreFiles(final String familyName, final Collection<StoreFile> storeFiles)
      throws IOException {
    HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfoForFs,
        this.tableDir, Bytes.toBytes(familyName), storeFiles);
  }

  /**
   * Bulk load: Add a specified store file to the specified family. If the source file is on the
   * same filesystem as the destination store it is moved into place, otherwise it is copied over
   * first.
   * @param familyName Family that will gain the file
   * @param srcPath {@link Path} to the file to import
   * @param seqNum Bulk Load sequence number
   * @return The destination {@link Path} of the bulk loaded file
   * @throws IOException
   */
  Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
      throws IOException {
    // Copy the file if it's on another filesystem
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;

    // If the source file lives on a different HDFS than the destination store,
    // copy it into this region's temp directory on the destination filesystem first.
    if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
      LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
          "the destination store. Copying file over to destination filesystem.");
      Path tmpPath = createTempName();
      FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
      LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
      srcPath = tmpPath;
    }

    return commitStoreFile(familyName, srcPath, seqNum, true);
  }

  // ===========================================================================
  //  Splits Helpers
  // ===========================================================================
  /** @return {@link Path} to the temp directory used during split operations */
  Path getSplitsDir() {
    return new Path(getRegionDir(), REGION_SPLITS_DIR);
  }

  Path getSplitsDir(final HRegionInfo hri) {
    return new Path(getSplitsDir(), hri.getEncodedName());
  }

  /**
   * Clean up any split detritus that may have been left around from previous split attempts.
   */
  void cleanupSplitsDir() throws IOException {
    deleteDir(getSplitsDir());
  }

  /**
   * Clean up any split detritus that may have been left around from previous split attempts.
   * Call this method on initial region deploy.
   * @throws IOException
   */
  void cleanupAnySplitDetritus() throws IOException {
    Path splitdir = this.getSplitsDir();
    if (!fs.exists(splitdir)) return;
    // The splits dir may still hold the encoded names of daughter regions from a failed split.
    // If a matching daughter directory was already created under the table dir but the split
    // never completed, remove that daughter directory as well before removing the splits dir.
    FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
    if (daughters != null) {
      for (FileStatus daughter: daughters) {
        Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
        if (fs.exists(daughterDir) && !deleteDir(daughterDir)) {
          throw new IOException("Failed delete of " + daughterDir);
        }
      }
    }
    cleanupSplitsDir();
    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
  }

  /**
   * Remove the daughter region directory from the filesystem.
   * @param regionInfo daughter {@link HRegionInfo}
   * @throws IOException
   */
  void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
    if (this.fs.exists(regionDir) && !deleteDir(regionDir)) {
      throw new IOException("Failed delete of " + regionDir);
    }
  }

  /**
   * Commit a daughter region, moving it from the split temporary directory
   * to the proper location in the filesystem.
   * @param regionInfo daughter {@link HRegionInfo}
   * @return {@link Path} to the committed daughter region directory
   * @throws IOException
   */
  Path commitDaughterRegion(final HRegionInfo regionInfo)
      throws IOException {
    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
    Path daughterTmpDir = this.getSplitsDir(regionInfo);

    if (fs.exists(daughterTmpDir)) {
      // Write HRI to a file in case we need to recover hbase:meta
      Path regionInfoFile = new Path(daughterTmpDir, REGION_INFO_FILE);
      byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);

      // Move the daughter temp dir to the table dir
      if (!rename(daughterTmpDir, regionDir)) {
        throw new IOException("Unable to rename " + daughterTmpDir + " to " + regionDir);
      }
    }

    return regionDir;
  }

  /**
   * Create the region splits directory, removing any leftover directory from a previous attempt.
   */
  void createSplitsDir() throws IOException {
    Path splitdir = getSplitsDir();
    if (fs.exists(splitdir)) {
      LOG.info("The " + splitdir + " directory exists. Hence deleting it to recreate it");
      if (!deleteDir(splitdir)) {
        throw new IOException("Failed deletion of " + splitdir + " before creating it again.");
      }
    }
    if (!createDir(splitdir)) {
      throw new IOException("Failed create of " + splitdir);
    }
  }

  /**
   * Write out a split reference. Package local so it doesn't leak out of regionserver.
   * @param hri {@link HRegionInfo} of the destination (daughter) region
   * @param familyName Column Family Name
   * @param f File to split
   * @param splitRow Split Row
   * @param top True if we are referring to the top half of the hfile
   * @param splitPolicy the split policy; if it allows skipping the store file range check, a
   *          reference is written even when the split row falls outside the file's key range
   * @return Path to the created reference, or null if the split row is outside the key range
   *         of <code>f</code> (no reference is needed in that case)
   * @throws IOException
   */
  Path splitStoreFile(final HRegionInfo hri, final String familyName, final StoreFile f,
      final byte[] splitRow, final boolean top, RegionSplitPolicy splitPolicy) throws IOException {

    if (splitPolicy == null || !splitPolicy.skipStoreFileRangeCheck()) {
      // Check whether the split row lies in the range of the store file.
      // If it is outside the range, return directly.
      if (top) {
        // check if larger than last key.
        KeyValue splitKey = KeyValueUtil.createFirstOnRow(splitRow);
        byte[] lastKey = f.createReader().getLastKey();
        // A null lastKey means the store file is empty.
        if (lastKey == null) return null;
        if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
            splitKey.getKeyOffset(), splitKey.getKeyLength(), lastKey, 0, lastKey.length) > 0) {
          return null;
        }
      } else {
        // check if smaller than first key
        KeyValue splitKey = KeyValueUtil.createLastOnRow(splitRow);
        byte[] firstKey = f.createReader().getFirstKey();
        // A null firstKey means the store file is empty.
        if (firstKey == null) return null;
        if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
            splitKey.getKeyOffset(), splitKey.getKeyLength(), firstKey, 0, firstKey.length) < 0) {
          return null;
        }
      }
    }

    f.closeReader(true);

    Path splitDir = new Path(getSplitsDir(hri), familyName);
    // A reference to the top or bottom half of the parent store file.
    Reference r =
      top ? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
    // The reference file name carries the parent region's encoded name as a dot-separated
    // suffix, so the referred-to file can be found under the parent region directory.
    String parentRegionName = regionInfoForFs.getEncodedName();
    // Write the reference with the same file id, the parent region name as suffix,
    // into the new region location (under the same family).
    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
    return r.write(fs, p);
  }

  // ===========================================================================
  //  Merge Helpers
  // ===========================================================================
  /** @return {@link Path} to the temp directory used during merge operations */
  Path getMergesDir() {
    return new Path(getRegionDir(), REGION_MERGES_DIR);
  }

  Path getMergesDir(final HRegionInfo hri) {
    return new Path(getMergesDir(), hri.getEncodedName());
  }

  /**
   * Clean up any merge detritus that may have been left around from previous merge attempts.
   */
  void cleanupMergesDir() throws IOException {
    deleteDir(getMergesDir());
  }

  /**
   * Remove the merged region directory from the filesystem.
   * @param mergedRegion {@link HRegionInfo} of the merged region
   * @throws IOException
   */
  void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException {
    Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName());
    if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
      throw new IOException("Failed delete of " + regionDir);
    }
  }

  /**
   * Create the region merges directory, removing any leftover directory from a previous attempt.
   * @throws IOException if the directory cannot be deleted or (re)created
   */
  void createMergesDir() throws IOException {
    Path mergesdir = getMergesDir();
    if (fs.exists(mergesdir)) {
      LOG.info("The " + mergesdir + " directory exists. Hence deleting it to recreate it");
      if (!fs.delete(mergesdir, true)) {
        throw new IOException("Failed deletion of " + mergesdir + " before creating it again.");
      }
    }
    if (!fs.mkdirs(mergesdir))
      throw new IOException("Failed create of " + mergesdir);
  }

  /**
   * Write out a merge reference under the given merges directory. Package local so it doesn't
   * leak out of regionserver.
   * @param mergedRegion {@link HRegionInfo} of the merged region
   * @param familyName Column Family Name
   * @param f File to create the reference for
   * @param mergedDir merges directory under which the reference is created
   * @return Path to the created reference
   * @throws IOException
   */
  Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName,
      final StoreFile f, final Path mergedDir)
      throws IOException {
    Path referenceDir = new Path(new Path(mergedDir,
        mergedRegion.getEncodedName()), familyName);
    // A whole-file reference to the merging region's store file.
    Reference r = Reference.createTopReference(regionInfoForFs.getStartKey());
    // The reference file name carries the merging region's encoded name as a dot-separated
    // suffix, so the referred-to file can be found under the merging region's directory.
    String mergingRegionName = regionInfoForFs.getEncodedName();
    // Write the reference with the same file id, the merging region name as suffix,
    // into the merged region location (under the same family).
    Path p = new Path(referenceDir, f.getPath().getName() + "."
        + mergingRegionName);
    return r.write(fs, p);
  }

  /**
   * Commit a merged region, moving it from the merges temporary directory to
   * the proper location in the filesystem.
   * @param mergedRegionInfo merged region {@link HRegionInfo}
   * @throws IOException
   */
  void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
    Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName());
    Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo);
    // Move the tmp dir into the expected location
    if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) {
      if (!fs.rename(mergedRegionTmpDir, regionDir)) {
        throw new IOException("Unable to rename " + mergedRegionTmpDir + " to "
            + regionDir);
      }
    }
  }

  // ===========================================================================
  //  Create/Open/Delete Helpers
  // ===========================================================================
  /**
   * Log the current state of the region's directory on the filesystem.
   * @param LOG log to output information
   * @throws IOException if an unexpected exception occurs
   */
  void logFileSystemState(final Log LOG) throws IOException {
    FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
  }

  /**
   * @param hri the {@link HRegionInfo} to serialize
   * @return Content of the file we write out to the filesystem under a region
   * @throws IOException
   */
  private static byte[] getRegionInfoFileContent(final HRegionInfo hri) throws IOException {
    return hri.toDelimitedByteArray();
  }

  /**
   * Create a {@link HRegionInfo} from the serialized version on-disk.
   * @param fs {@link FileSystem} that contains the Region Info file
   * @param regionDir {@link Path} to the Region Directory that contains the Info file
   * @return An {@link HRegionInfo} instance gotten from the Region Info file
   * @throws IOException if an error occurred during file open/read operation
   */
  public static HRegionInfo loadRegionInfoFileContent(final FileSystem fs, final Path regionDir)
      throws IOException {
    FSDataInputStream in = fs.open(new Path(regionDir, REGION_INFO_FILE));
    try {
      return HRegionInfo.parseFrom(in);
    } finally {
      in.close();
    }
  }

  /**
   * Write the .regioninfo file on-disk.
   */
  private static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs,
      final Path regionInfoFile, final byte[] content) throws IOException {
    // First check to get the permissions
    FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
    // Write the RegionInfo file content
    FSDataOutputStream out = FSUtils.create(fs, regionInfoFile, perms, null);
    try {
      out.write(content);
    } finally {
      out.close();
    }
  }

  /**
   * Write out the .regioninfo file under the region directory; useful for recovering a mangled
   * hbase:meta. If the file already exists on disk with the expected content, this is a no-op.
   */
  void checkRegionInfoOnFilesystem() throws IOException {
    // Compose the content of the file so we can compare to the length in the filesystem.
    // If they differ, rewrite it (it may have been written in an old format). The format and
    // content of the file are the same across replicas because regionInfoForFs is used.
    byte[] content = getRegionInfoFileContent(regionInfoForFs);
    try {
      Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);

      FileStatus status = fs.getFileStatus(regionInfoFile);
      if (status != null && status.getLen() == content.length) {
        // Then assume the content is good and move on.
        // NOTE: a matching length is not sufficient to prove the content matches.
        return;
      }

      LOG.info("Rewriting .regioninfo file at: " + regionInfoFile);
      if (!fs.delete(regionInfoFile, false)) {
        throw new IOException("Unable to remove existing " + regionInfoFile);
      }
    } catch (FileNotFoundException e) {
      LOG.warn(REGION_INFO_FILE + " file not found for region: " + regionInfoForFs.getEncodedName() +
          " on table " + regionInfo.getTable());
    }

    // Write HRI to a file in case we need to recover hbase:meta
    writeRegionInfoOnFilesystem(content, true);
  }

  /**
   * Write out the .regioninfo file under the region directory.
   * @param useTempDir indicate whether or not to use the region .tmp dir for a safer file creation
   */
  private void writeRegionInfoOnFilesystem(boolean useTempDir) throws IOException {
    byte[] content = getRegionInfoFileContent(regionInfoForFs);
    writeRegionInfoOnFilesystem(content, useTempDir);
  }

  /**
   * Write out the .regioninfo file under the region directory.
   * @param regionInfoContent serialized version of the {@link HRegionInfo}
   * @param useTempDir indicate whether or not to use the region .tmp dir for a safer file creation
   */
  private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent,
      final boolean useTempDir) throws IOException {
    Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
    if (useTempDir) {
      // Create the file in the .tmp dir and then move it into place, so a crash between
      // create and close does not leave a partial .regioninfo file in the region directory.
      Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);

      // If a previous attempt left a file in the .tmp dir, delete it before re-creating it.
      if (FSUtils.isExists(fs, tmpPath)) {
        FSUtils.delete(fs, tmpPath, true);
      }

      // Write HRI to a file in case we need to recover hbase:meta
      writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);

      // Move the created file to its final location
      if (fs.exists(tmpPath) && !rename(tmpPath, regionInfoFile)) {
        throw new IOException("Unable to rename " + tmpPath + " to " + regionInfoFile);
      }
    } else {
      // Write HRI to a file in case we need to recover hbase:meta
      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
    }
  }

  /**
   * Create a new Region on the file-system.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} on which to create the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region to be added
   * @return the created {@link HRegionFileSystem}
   * @throws IOException if the region creation fails due to a FileSystem exception
   */
  public static HRegionFileSystem createRegionOnFileSystem(final Configuration conf,
      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (fs.exists(regionDir)) {
      LOG.warn("Trying to create a region that already exists on disk: " + regionDir);
      throw new IOException("The specified region already exists on disk: " + regionDir);
    }

    // Create the region directory
    if (!createDirOnFileSystem(fs, conf, regionDir)) {
      LOG.warn("Unable to create the region directory: " + regionDir);
      throw new IOException("Unable to create region directory: " + regionDir);
    }

    // Write HRI to a file in case we need to recover hbase:meta
    regionFs.writeRegionInfoOnFilesystem(false);
    return regionFs;
  }

  /**
   * Open a Region from the file-system.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} that contains the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region to be opened
   * @param readOnly true if you don't want to edit the region data
   * @return the opened {@link HRegionFileSystem}
   * @throws IOException if the region does not exist or cleanup fails due to a FileSystem exception
   */
  public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, boolean readOnly)
      throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (!fs.exists(regionDir)) {
      LOG.warn("Trying to open a region that does not exist on disk: " + regionDir);
      throw new IOException("The specified region does not exist on disk: " + regionDir);
    }

    if (!readOnly) {
      // Cleanup temporary directories
      regionFs.cleanupTempDir();
      regionFs.cleanupSplitsDir();
      regionFs.cleanupMergesDir();

      // If it doesn't exist, write the .regioninfo file (useful for recovering hbase:meta)
      regionFs.checkRegionInfoOnFilesystem();
    }

    return regionFs;
  }

  /**
   * Remove the region from the table directory, archiving the region's hfiles.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} from which to remove the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region to be deleted
   * @throws IOException if the request cannot be completed
   */
  public static void deleteRegionFromFileSystem(final Configuration conf,
      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (!fs.exists(regionDir)) {
      LOG.warn("Trying to delete a region that does not exist on disk: " + regionDir);
      return;
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("DELETING region " + regionDir);
    }

    // Archive region
    Path rootDir = FSUtils.getRootDir(conf);
    HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);

    // Delete empty region dir
    if (!fs.delete(regionDir, true)) {
      LOG.warn("Failed delete of " + regionDir);
    }
  }

  /**
   * Creates a directory, retrying on IOException. Assumes the caller has already checked for
   * this directory's existence.
   * @param dir directory to create
   * @return the result of fs.mkdirs(). In case the underlying fs throws an IOException, it
   *         checks whether the directory exists or not, and returns true if it exists.
   * @throws IOException if the retries are exhausted
   */
  boolean createDir(Path dir) throws IOException {
    int i = 0;
    IOException lastIOE = null;
    do {
      try {
        return fs.mkdirs(dir);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (fs.exists(dir)) return true; // mkdirs failed but the directory is now present
        try {
          sleepBeforeRetry("Create Directory", i + 1);
        } catch (InterruptedException e) {
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        }
      }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in createDir", lastIOE);
  }

  /**
   * Renames a directory, retrying on IOException. Assumes the caller has already checked for
   * this directory's existence.
   * @param srcpath source path
   * @param dstPath destination path
   * @return true if the rename is successful
   * @throws IOException if the retries are exhausted
   */
  boolean rename(Path srcpath, Path dstPath) throws IOException {
    IOException lastIOE = null;
    int i = 0;
    do {
      try {
        return fs.rename(srcpath, dstPath);
      } catch (IOException ioe) {
        lastIOE = ioe;
        // the move already happened: source is gone and destination exists
        if (!fs.exists(srcpath) && fs.exists(dstPath)) return true;
        try {
          sleepBeforeRetry("Rename Directory", i + 1);
        } catch (InterruptedException e) {
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        }
      }
    } while (++i <= hdfsClientRetriesNumber);

    throw new IOException("Exception in rename", lastIOE);
  }

  /**
   * Deletes a directory, retrying on IOException. Assumes the caller has already checked for
   * this directory's existence.
   * @param dir directory to delete
   * @return true if the directory is deleted
   * @throws IOException if the retries are exhausted
   */
  boolean deleteDir(Path dir) throws IOException {
    IOException lastIOE = null;
    int i = 0;
    do {
      try {
        return fs.delete(dir, true);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (!fs.exists(dir)) return true; // the directory is already gone
        try {
          sleepBeforeRetry("Delete Directory", i + 1);
        } catch (InterruptedException e) {
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        }
      }
    } while (++i <= hdfsClientRetriesNumber);

    throw new IOException("Exception in DeleteDir", lastIOE);
  }

  /**
   * Sleeping logic; handles the interrupt exception.
   */
  private void sleepBeforeRetry(String msg, int sleepMultiplier) throws InterruptedException {
    sleepBeforeRetry(msg, sleepMultiplier, baseSleepBeforeRetries, hdfsClientRetriesNumber);
  }

  /**
   * Creates a directory for a filesystem and configuration object, retrying on IOException.
   * Assumes the caller has already checked for this directory's existence.
   * @param fs the {@link FileSystem} to use
   * @param conf the {@link Configuration} that provides the retry settings
   * @param dir directory to create
   * @return the result of fs.mkdirs(). In case the underlying fs throws an IOException, it
   *         checks whether the directory exists or not, and returns true if it exists.
   * @throws IOException if the retries are exhausted
   */
  private static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir)
      throws IOException {
    int i = 0;
    IOException lastIOE = null;
    int hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
      DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
    int baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
      DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
    do {
      try {
        return fs.mkdirs(dir);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (fs.exists(dir)) return true; // mkdirs failed but the directory is now present
        try {
          sleepBeforeRetry("Create Directory", i + 1, baseSleepBeforeRetries, hdfsClientRetriesNumber);
        } catch (InterruptedException e) {
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        }
      }
    } while (++i <= hdfsClientRetriesNumber);

    throw new IOException("Exception in createDir", lastIOE);
  }

  /**
   * Sleeping logic for static methods; handles the interrupt exception. A static version is kept
   * to avoid re-reading the retry settings from the configuration.
   */
  private static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSleepBeforeRetries,
      int hdfsClientRetriesNumber) throws InterruptedException {
    if (sleepMultiplier > hdfsClientRetriesNumber) {
      LOG.debug(msg + ", retries exhausted");
      return;
    }
    LOG.debug(msg + ", sleeping " + baseSleepBeforeRetries + " times " + sleepMultiplier);
    Thread.sleep((long)baseSleepBeforeRetries * sleepMultiplier);
  }
}