package org.apache.hadoop.hbase.util;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.io.SequenceFile;

import java.io.DataInputStream;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.Map;

/**
 * Utility methods for interacting with the underlying file system.
 */
public class FSUtils {
  private static final Log LOG = LogFactory.getLog(FSUtils.class);

  /**
   * Not instantiable; this class holds only static utility methods.
   */
  private FSUtils() {
    super();
  }

  /**
   * Delete the directory if it exists.
   * @param fs filesystem object
   * @param dir directory to delete
   * @return true if <code>dir</code> existed and was deleted, otherwise false
   * @throws IOException e
   */
  public static boolean deleteDirectory(final FileSystem fs, final Path dir)
  throws IOException {
    return fs.exists(dir) && fs.delete(dir, true);
  }

  /**
   * Check if the directory exists; if it does not, create it.
   * @param fs filesystem object
   * @param dir path to check
   * @return the passed <code>dir</code>
   * @throws IOException e
   */
  public static Path checkdir(final FileSystem fs, final Path dir)
  throws IOException {
    if (!fs.exists(dir)) {
      fs.mkdirs(dir);
    }
    return dir;
  }

  /**
   * Create a new file.
   * @param fs filesystem object
   * @param p path of the file to create
   * @return the passed <code>p</code>
   * @throws IOException if the file already exists or cannot be created
   */
  public static Path create(final FileSystem fs, final Path p)
  throws IOException {
    if (fs.exists(p)) {
      throw new IOException("File already exists " + p.toString());
    }
    if (!fs.createNewFile(p)) {
      throw new IOException("Failed create of " + p);
    }
    return p;
  }

  /**
   * Checks to see if the specified file system is available.
   * @param fs filesystem to check
   * @throws IOException if the file system is not available
   */
  public static void checkFileSystemAvailable(final FileSystem fs)
  throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    IOException exception = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      if (dfs.exists(new Path("/"))) {
        return;
      }
    } catch (IOException e) {
      exception = RemoteExceptionHandler.checkIOException(e);
    }
    try {
      fs.close();
    } catch (Exception e) {
      LOG.error("file system close failed: ", e);
    }
    IOException io = new IOException("File system is not available");
    io.initCause(exception);
    throw io;
  }
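
  // Illustrative usage sketch (not part of the original class): fail fast at
  // startup if HDFS is unreachable.  Assumes "conf" already carries the
  // cluster's filesystem settings.
  //
  //   FileSystem fs = FileSystem.get(conf);
  //   FSUtils.checkFileSystemAvailable(fs);  // throws IOException if HDFS is down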

  /**
   * Reads the current version of the file system layout.
   * @param fs filesystem object
   * @param rootdir root directory of the HBase installation
   * @return null if no version file exists, otherwise the version string
   * @throws IOException e
   */
  public static String getVersion(FileSystem fs, Path rootdir)
  throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    String version = null;
    if (fs.exists(versionFile)) {
      FSDataInputStream s = fs.open(versionFile);
      try {
        version = DataInputStream.readUTF(s);
      } finally {
        s.close();
      }
    }
    return version;
  }

  /**
   * Verifies the current version of the file system layout.
   * @param fs filesystem object
   * @param rootdir root directory of the HBase installation
   * @param message if true, print a warning to stdout before throwing
   * @throws IOException if the version file cannot be read
   * @throws FileSystemVersionException if the on-disk version does not match
   * {@link HConstants#FILE_SYSTEM_VERSION}
   */
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message) throws IOException {
    String version = getVersion(fs, rootdir);

    if (version == null) {
      if (!rootRegionExists(fs, rootdir)) {
        // rootdir is empty (no version file and no root region):
        // this is a new install, so just write out the current version.
        FSUtils.setVersion(fs, rootdir);
        return;
      }
    } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) {
      return;
    }

    // The version is out of date, or missing while data already exists:
    // ask the user to run the migration script.
    String msg = "File system needs to be upgraded."
      + " You have version " + version
      + " and I want version " + HConstants.FILE_SYSTEM_VERSION
      + ". Run the '${HBASE_HOME}/bin/hbase migrate' script.";
    if (message) {
      System.out.println("WARNING! " + msg);
    }
    throw new FileSystemVersionException(msg);
  }
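
  // Illustrative usage sketch (not part of the original class): a typical
  // startup-time check of the on-disk layout version, assuming "conf" has
  // hbase.rootdir configured.
  //
  //   Path root = FSUtils.getRootDir(conf);
  //   FileSystem fs = root.getFileSystem(conf);
  //   FSUtils.checkVersion(fs, root, true);  // writes the version file on a
  //                                          // fresh install, throws otherwise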

  /**
   * Sets the file system version to the current
   * {@link HConstants#FILE_SYSTEM_VERSION}.
   * @param fs filesystem object
   * @param rootdir hbase root directory
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir)
  throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION);
  }

  /**
   * Sets the file system version to the passed <code>version</code>.
   * @param fs filesystem object
   * @param rootdir hbase root directory
   * @param version version string to write
   * @throws IOException e
   */
  public static void setVersion(FileSystem fs, Path rootdir, String version)
  throws IOException {
    FSDataOutputStream s =
      fs.create(new Path(rootdir, HConstants.VERSION_FILE_NAME));
    s.writeUTF(version);
    s.close();
    LOG.debug("Created version file at " + rootdir.toString() +
      " with version=" + version);
  }

  /**
   * Verifies that the root path is a valid URI with a scheme.
   * @param root root path to validate
   * @return the passed <code>root</code> if it is valid
   * @throws IOException if the root path is not a valid URI or has no scheme
   */
  public static Path validateRootPath(Path root) throws IOException {
    try {
      URI rootURI = new URI(root.toString());
      String scheme = rootURI.getScheme();
      if (scheme == null) {
        throw new IOException("Root directory does not have a scheme");
      }
      return root;
    } catch (URISyntaxException e) {
      IOException io = new IOException("Root directory path is not a valid " +
        "URI -- check your " + HConstants.HBASE_DIR + " configuration");
      io.initCause(e);
      throw io;
    }
  }

  /**
   * If we are on a DFS, block until it is up and out of safe mode.
   * @param conf configuration
   * @param wait sleep time, in milliseconds, between retries
   * @throws IOException e
   */
  public static void waitOnSafeMode(final Configuration conf,
    final long wait)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) return;
    DistributedFileSystem dfs = (DistributedFileSystem)fs;
    // First wait until at least one datanode has reported in; otherwise the
    // safe mode check below may pass while the cluster is still unusable.
    try {
      while (dfs.getDataNodeStats().length == 0) {
        LOG.info("Waiting for dfs to come up...");
        try {
          Thread.sleep(wait);
        } catch (InterruptedException e) {
          // continue waiting
        }
      }
    } catch (IOException e) {
      // getDataNodeStats can fail while the namenode is still coming up;
      // fall through and let the safe mode loop below do the waiting.
    }
    // Now block until the namenode reports it has left safe mode.
    while (dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET)) {
      LOG.info("Waiting for dfs to exit safe mode...");
      try {
        Thread.sleep(wait);
      } catch (InterruptedException e) {
        // continue waiting
      }
    }
  }
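
  // Illustrative usage sketch (not part of the original class): wait for HDFS
  // to leave safe mode before touching the root directory, polling every 10s.
  //
  //   FSUtils.waitOnSafeMode(conf, 10 * 1000);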

  /**
   * Return the 'path' component of a Path.  In Hadoop, Path is a URI; this
   * method returns only its 'path' component: e.g. for
   * <code>hdfs://example.org:9000/hbase/-ROOT-</code> it returns
   * <code>/hbase/-ROOT-</code>.  Useful when printing a Path without the
   * qualifying filesystem instance.
   * @param p filesystem Path whose 'path' component we are to return
   * @return path component of the Path's URI
   */
  public static String getPath(Path p) {
    return p.toUri().getPath();
  }

  /**
   * Get the HBase root directory as configured by {@link HConstants#HBASE_DIR}.
   * @param c configuration
   * @return Path to the hbase root directory
   * @throws IOException e
   */
  public static Path getRootDir(final Configuration c) throws IOException {
    return new Path(c.get(HConstants.HBASE_DIR));
  }

  /**
   * Checks if the root region directory exists under the given root dir.
   * @param fs filesystem object
   * @param rootdir hbase root directory
   * @return true if the root region directory exists
   * @throws IOException e
   */
  public static boolean rootRegionExists(FileSystem fs, Path rootdir)
  throws IOException {
    Path rootRegionDir =
      HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
    return fs.exists(rootRegionDir);
  }

  /**
   * Runs through the hbase rootdir and checks that all stores have only one
   * file in them -- that is, they've been major compacted.  Looks at the root
   * and meta tables too.
   * @param fs filesystem object
   * @param hbaseRootDir hbase root directory
   * @return true if every store holds at most one file
   * @throws IOException e
   */
  public static boolean isMajorCompacted(final FileSystem fs,
      final Path hbaseRootDir)
  throws IOException {
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      // Skip the .logs directory.  All others should be tables.  Inside a
      // table, there are compaction.dir directories to skip; all else should
      // be regions.  In each region there should only be family directories,
      // and under each of these, one file only.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it's a region name.  Now look in the region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          // Now in family, make sure there is only one file.
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
              " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

  /**
   * Returns the total overall fragmentation percentage.  Includes .META. and
   * -ROOT- as well.
   * @param master the master to get the file system and root directory from
   * @return overall fragmentation percentage, or -1 if nothing was computed
   * @throws IOException e
   */
  public static int getTotalTableFragmentation(final HMaster master)
  throws IOException {
    Map<String, Integer> map = getTableFragmentation(master);
    return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
  }

  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them.  Checks -ROOT- and .META. too.  The
   * percentage across all tables is stored under the special key "-TOTAL-".
   * @param master the master to get the file system and root directory from
   * @return a map keyed by table name with the percentage of fragmented stores
   * @throws IOException e
   */
  public static Map<String, Integer> getTableFragmentation(
    final HMaster master)
  throws IOException {
    Path path = master.getRootDir();
    // Obtain the file system via the root path rather than from the master.
    FileSystem fs = path.getFileSystem(master.getConfiguration());
    return getTableFragmentation(fs, path);
  }

  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them.  Checks -ROOT- and .META. too.  The
   * percentage across all tables is stored under the special key "-TOTAL-".
   * @param fs filesystem object
   * @param hbaseRootDir hbase root directory
   * @return a map keyed by table name with the percentage of fragmented stores
   * @throws IOException e
   */
  public static Map<String, Integer> getTableFragmentation(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException {
    Map<String, Integer> frags = new HashMap<String, Integer>();
    int cfCountTotal = 0;
    int cfFragTotal = 0;
    DirFilter df = new DirFilter(fs);
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (FileStatus tableDir : tableDirs) {
      // Skip the .logs directory.  All others should be tables.  Inside a
      // table, there are compaction.dir directories to skip; all else should
      // be regions.  In each region there should only be family directories.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      int cfCount = 0;
      int cfFrag = 0;
      FileStatus[] regionDirs = fs.listStatus(d, df);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it's a region name.  Now look in the region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, df);
        for (FileStatus familyDir : familyDirs) {
          cfCount++;
          cfCountTotal++;
          Path family = familyDir.getPath();
          // A store with more than one file is considered fragmented
          // (i.e. not major compacted).
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            cfFrag++;
            cfFragTotal++;
          }
        }
      }
      // Compute and store the percentage for this table.
      frags.put(d.getName(), Math.round((float) cfFrag / cfCount * 100));
    }
    // Set the overall percentage across all tables.
    frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
    return frags;
  }
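
  // Illustrative usage sketch (not part of the original class): report how
  // fragmented each table is, e.g. from a monitoring task that holds a
  // reference to the running master.
  //
  //   Map<String, Integer> frags = FSUtils.getTableFragmentation(master);
  //   LOG.info("Overall fragmentation: " + frags.get("-TOTAL-") + "%");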

  /**
   * Checks for the pre-0.20 file layout by looking for a 'mapfiles' directory
   * under the -ROOT- region.  Expects to find the -ROOT- directory.
   * @param fs filesystem object
   * @param hbaseRootDir hbase root directory
   * @return true if this is a pre-0.20 layout
   * @throws IOException e
   */
  public static boolean isPre020FileLayout(final FileSystem fs,
    final Path hbaseRootDir)
  throws IOException {
    // 70236052 is the encoded directory name of the -ROOT- region.
    Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
      "70236052"), "info"), "mapfiles");
    return fs.exists(mapfiles);
  }

  /**
   * Runs through the hbase rootdir and checks that all stores have only one
   * file in them -- that is, they've been major compacted.  Looks at the root
   * and meta tables too.  Unlike {@link #isMajorCompacted(FileSystem, Path)},
   * this expects the pre-0.20 layout with 'info' and 'mapfiles' directories
   * inside each family directory.
   * @param fs filesystem object
   * @param hbaseRootDir hbase root directory
   * @return true if every store's mapfiles directory holds at most one file
   * @throws IOException e
   */
  public static boolean isMajorCompactedPre020(final FileSystem fs,
      final Path hbaseRootDir)
  throws IOException {
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      // Skip the .logs directory.  Inside a table, there are compaction.dir
      // directories to skip; all else should be regions.  In each region
      // there should only be family directories holding an 'info' and a
      // 'mapfiles' directory, and the latter should contain one file only.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it's a region name.  Now look in the region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          FileStatus[] infoAndMapfile = fs.listStatus(family);
          // Assert that only info and mapfiles are in the family dir.
          if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) {
            LOG.debug(family.toString() +
              " has more than just info and mapfile: " + infoAndMapfile.length);
            return false;
          }
          // An empty family directory has nothing further to check.
          if (infoAndMapfile.length == 0) {
            continue;
          }
          // Make sure the directories are named info or mapfiles.
          for (int ll = 0; ll < 2; ll++) {
            if (infoAndMapfile[ll].getPath().getName().equals("info") ||
                infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
              continue;
            LOG.debug("Unexpected directory name: " +
              infoAndMapfile[ll].getPath());
            return false;
          }
          // Now in family, look only in the 'mapfiles' subdirectory and make
          // sure there is at most one file.
          FileStatus[] familyStatus =
            fs.listStatus(new Path(family, "mapfiles"));
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
              " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

  /**
   * A {@link PathFilter} that accepts only directories.
   */
  public static class DirFilter implements PathFilter {
    private final FileSystem fs;

    public DirFilter(final FileSystem fs) {
      this.fs = fs;
    }

    public boolean accept(Path p) {
      boolean isdir = false;
      try {
        isdir = this.fs.getFileStatus(p).isDir();
      } catch (IOException e) {
        // If the status lookup fails, treat the path as a non-directory.
        e.printStackTrace();
      }
      return isdir;
    }
  }

  /**
   * Heuristic to determine whether the cluster supports append/sync: either
   * dfs.support.append is enabled and SequenceFile.Writer has a syncFs()
   * method, or FSDataOutputStream has the newer hflush() method.
   * @param conf configuration
   * @return true if append/sync appears to be available
   */
  public static boolean isAppendSupported(final Configuration conf) {
    boolean append = conf.getBoolean("dfs.support.append", false);
    if (append) {
      try {
        // syncFs is only present on SequenceFile.Writer in builds that carry
        // working append support; without it, append is not really usable.
        SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
        append = true;
      } catch (SecurityException e) {
      } catch (NoSuchMethodException e) {
        append = false;
      }
    } else {
      try {
        // Look for the newer-style sync support on FSDataOutputStream.
        FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
        append = true;
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    return append;
  }
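
  // Illustrative usage sketch (not part of the original class): callers guard
  // append-dependent behaviour on this check, e.g.
  //
  //   if (!FSUtils.isAppendSupported(conf)) {
  //     LOG.warn("HDFS append/sync not available; log recovery may lose data");
  //   }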

  /**
   * Recover the lease on a file by repeatedly trying to open it for append
   * until the attempt succeeds or a non-retriable error occurs.  No-op unless
   * append is supported and the file system is HDFS.
   * @param fs filesystem object
   * @param p path of the file to recover
   * @param conf configuration
   * @throws IOException if the file cannot be opened for append
   */
  public static void recoverFileLease(final FileSystem fs, final Path p, Configuration conf)
  throws IOException {
    if (!isAppendSupported(conf)) {
      LOG.warn("Running on HDFS without append enabled may result in data loss");
      return;
    }
    // Lease recovery only makes sense against HDFS.
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    LOG.info("Recovering file " + p);
    long startWaiting = System.currentTimeMillis();

    // Trying recovery
    boolean recovered = false;
    while (!recovered) {
      try {
        FSDataOutputStream out = fs.append(p);
        out.close();
        recovered = true;
      } catch (IOException e) {
        e = RemoteExceptionHandler.checkIOException(e);
        if (e instanceof AlreadyBeingCreatedException) {
          // The previous holder still has the lease; once the namenode's
          // soft lease limit expires the append should succeed, so keep
          // retrying and only warn once we have waited past that limit.
          long waitedFor = System.currentTimeMillis() - startWaiting;
          if (waitedFor > FSConstants.LEASE_SOFTLIMIT_PERIOD) {
            LOG.warn("Waited " + waitedFor + "ms for lease recovery on " + p +
              ":" + e.getMessage());
          }
          try {
            Thread.sleep(1000);
          } catch (InterruptedException ex) {
            // ignore it and try again
          }
        } else {
          throw new IOException("Failed to open " + p + " for append", e);
        }
      }
    }
    LOG.info("Finished lease recover attempt for " + p);
  }
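
  // Illustrative usage sketch (not part of the original class): recover the
  // write lease on a dead region server's log before splitting it.  The
  // "logPath" below is a hypothetical Path to one of its log files.
  //
  //   FSUtils.recoverFileLease(fs, logPath, conf);
  //   // once this returns, the log can safely be opened for reading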
}