package org.apache.hadoop.hbase.util;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.io.SequenceFile;

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.Map;

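/**
 * Utility methods for interacting with the underlying file system.
 */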
public class FSUtils {
  private static final Log LOG = LogFactory.getLog(FSUtils.class);

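  /**
   * Not instantiable -- all members are static.
   */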
  private FSUtils() {
    super();
  }

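  /**
   * Delete if exists.
   * @param fs filesystem object
   * @param dir directory to delete
   * @return True if deleted <code>dir</code>
   * @throws IOException e
   */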
  public static boolean deleteDirectory(final FileSystem fs, final Path dir)
  throws IOException {
    return fs.exists(dir) && fs.delete(dir, true);
  }

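  /**
   * Check if directory exists.  If it does not, create it.
   * @param fs filesystem object
   * @param dir path to check
   * @return the passed <code>dir</code>
   * @throws IOException e
   */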
  public static Path checkdir(final FileSystem fs, final Path dir)
  throws IOException {
    if (!fs.exists(dir)) {
      fs.mkdirs(dir);
    }
    return dir;
  }

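  /**
   * Create the specified file on the filesystem.  Fails if the file already
   * exists or cannot be created.
   * @param fs filesystem object
   * @param p path to create
   * @return the passed <code>p</code>
   * @throws IOException e
   */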
  public static Path create(final FileSystem fs, final Path p)
  throws IOException {
    if (fs.exists(p)) {
      throw new IOException("File already exists " + p.toString());
    }
    if (!fs.createNewFile(p)) {
      throw new IOException("Failed create of " + p);
    }
    return p;
  }

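  /**
   * Checks to see if the specified file system is available.  No-op unless the
   * passed filesystem is an instance of DistributedFileSystem.
   * @param fs filesystem to check
   * @throws IOException if the filesystem is not available
   */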
  public static void checkFileSystemAvailable(final FileSystem fs)
  throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    IOException exception = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      if (dfs.exists(new Path("/"))) {
        return;
      }
    } catch (IOException e) {
      exception = RemoteExceptionHandler.checkIOException(e);
    }
    try {
      fs.close();
    } catch (Exception e) {
      LOG.error("file system close failed: ", e);
    }
    IOException io = new IOException("File system is not available");
    io.initCause(exception);
    throw io;
  }

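  /**
   * Reads the version of the hbase layout on the filesystem.
   * @param fs filesystem object
   * @param rootdir hbase root directory
   * @return the content of the version file, or null if no version file exists
   * @throws IOException e
   */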
  public static String getVersion(FileSystem fs, Path rootdir)
  throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    String version = null;
    if (fs.exists(versionFile)) {
      FSDataInputStream s = fs.open(versionFile);
      try {
        version = DataInputStream.readUTF(s);
      } catch (EOFException eof) {
        LOG.warn("Version file was empty, odd, will try to set it.");
      } finally {
        s.close();
      }
    }
    return version;
  }

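  /**
   * Verifies that the version of the filesystem layout matches the version
   * this software expects; if the root directory is empty, a new version file
   * is written.
   * @param fs filesystem object
   * @param rootdir hbase root directory
   * @param message if true, print a warning to stdout before throwing
   * @throws IOException e
   * @throws FileSystemVersionException if the versions do not match
   */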
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message) throws IOException {
    String version = getVersion(fs, rootdir);

    if (version == null) {
      if (!rootRegionExists(fs, rootdir)) {
        // rootDir is empty (no version file and no root region):
        // just create the version file.
        FSUtils.setVersion(fs, rootdir);
        return;
      }
    } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) {
      return;
    }

    // The version is out of date: the filesystem needs migration.
    // Optionally print on stdout so the user sees console output.
    String msg = "File system needs to be upgraded."
      + " You have version " + version
      + " and I want version " + HConstants.FILE_SYSTEM_VERSION
      + ". Run the '${HBASE_HOME}/bin/hbase migrate' script.";
    if (message) {
      System.out.println("WARNING! " + msg);
    }
    throw new FileSystemVersionException(msg);
  }

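  /**
   * Sets the version file to the current filesystem version,
   * {@link HConstants#FILE_SYSTEM_VERSION}.
   * @param fs filesystem object
   * @param rootdir hbase root directory
   * @throws IOException e
   */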
  public static void setVersion(FileSystem fs, Path rootdir)
  throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION);
  }

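  /**
   * Writes the given version string into the version file under
   * <code>rootdir</code>.
   * @param fs filesystem object
   * @param rootdir hbase root directory
   * @param version version string to write
   * @throws IOException e
   */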
  public static void setVersion(FileSystem fs, Path rootdir, String version)
  throws IOException {
    FSDataOutputStream s =
      fs.create(new Path(rootdir, HConstants.VERSION_FILE_NAME));
    s.writeUTF(version);
    s.close();
    LOG.debug("Created version file at " + rootdir.toString() +
      " with version=" + version);
  }

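  /**
   * Verifies that the root path is a fully qualified URI with a scheme.
   * @param root root directory path
   * @return the passed <code>root</code> argument
   * @throws IOException if the path is not a valid URI with a scheme
   */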
  public static Path validateRootPath(Path root) throws IOException {
    try {
      URI rootURI = new URI(root.toString());
      String scheme = rootURI.getScheme();
      if (scheme == null) {
        throw new IOException("Root directory does not have a scheme");
      }
      return root;
    } catch (URISyntaxException e) {
      IOException io = new IOException("Root directory path is not a valid " +
        "URI -- check your " + HConstants.HBASE_DIR + " configuration");
      io.initCause(e);
      throw io;
    }
  }

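  /**
   * If we are on a DistributedFileSystem, wait until at least one datanode has
   * reported in and the namenode has left safe mode.
   * @param conf configuration
   * @param wait milliseconds to sleep between retries
   * @throws IOException e
   */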
  public static void waitOnSafeMode(final Configuration conf,
    final long wait)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) return;
    DistributedFileSystem dfs = (DistributedFileSystem)fs;
    // Are there any data nodes up yet?
    // The safe mode check below falls through if the namenode is up but no
    // datanodes have reported in yet, so wait for at least one first.
    try {
      while (dfs.getDataNodeStats().length == 0) {
        LOG.info("Waiting for dfs to come up...");
        try {
          Thread.sleep(wait);
        } catch (InterruptedException e) {
          // continue waiting
        }
      }
    } catch (IOException e) {
      // getDataNodeStats can fail (e.g. if the caller lacks the privilege to
      // run the datanode report); ignore and fall through to the safe mode
      // check.
    }
    // Make sure dfs is not in safe mode.
    while (dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET)) {
      LOG.info("Waiting for dfs to exit safe mode...");
      try {
        Thread.sleep(wait);
      } catch (InterruptedException e) {
        // continue waiting
      }
    }
  }

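  /**
   * Return the 'path' component of a Path.  In Hadoop, Path is a URI.  This
   * method returns the 'path' component of the Path's URI: e.g. for
   * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>
   * it returns <code>/hbase_trunk/TestTable/compaction.dir</code>.  Useful if
   * you want to print a Path without qualifying the Filesystem instance.
   * @param p Filesystem Path whose 'path' component we are to return
   * @return path portion of the passed Path
   */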
  public static String getPath(Path p) {
    return p.toUri().getPath();
  }

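  /**
   * @param c configuration
   * @return Path to the hbase root directory, the value of
   * {@link HConstants#HBASE_DIR} from the passed configuration
   * @throws IOException e
   */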
  public static Path getRootDir(final Configuration c) throws IOException {
    return new Path(c.get(HConstants.HBASE_DIR));
  }

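  /**
   * Checks if the root region directory exists under <code>rootdir</code>.
   * @param fs filesystem object
   * @param rootdir hbase root directory
   * @return true if the root region directory exists
   * @throws IOException e
   */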
  public static boolean rootRegionExists(FileSystem fs, Path rootdir)
  throws IOException {
    Path rootRegionDir =
      HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
    return fs.exists(rootRegionDir);
  }

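  /**
   * Runs through the hbase rootdir and checks that every store has only one
   * file in it -- that is, everything has been major compacted.  Looks at the
   * root and meta tables too.
   * @param fs filesystem object
   * @param hbaseRootDir hbase root directory
   * @return True if this hbase install is major compacted
   * @throws IOException e
   */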
  public static boolean isMajorCompacted(final FileSystem fs,
    final Path hbaseRootDir)
  throws IOException {
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      // Skip the log directory.  All others should be tables.  Inside a
      // table there are compaction.dir directories to skip.  Otherwise, all
      // else should be regions.  In each region there should only be family
      // directories, and under each of these there should be one file only.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it is a region name.  Now look in the region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          // Now in family, make sure there is only one file.
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
              " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

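  /**
   * Returns the overall fragmentation percentage across all tables, including
   * the root and meta tables.
   * @param master the master defining the HBase root and file system
   * @return the overall fragmentation percentage, or -1 if it could not be
   * determined
   * @throws IOException when scanning the directory fails
   */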
  public static int getTotalTableFragmentation(final HMaster master)
  throws IOException {
    Map<String, Integer> map = getTableFragmentation(master);
    return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
  }

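  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them.  Checks the root and meta tables too.
   * The total percentage across all tables is stored under the special key
   * "-TOTAL-".
   * @param master the master defining the HBase root and file system
   * @return a map of table name to fragmentation percentage
   * @throws IOException when scanning the directory fails
   */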
  public static Map<String, Integer> getTableFragmentation(
    final HMaster master)
  throws IOException {
    Path path = getRootDir(master.getConfiguration());
    FileSystem fs = path.getFileSystem(master.getConfiguration());
    return getTableFragmentation(fs, path);
  }

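  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them.  Checks the root and meta tables too.
   * The total percentage across all tables is stored under the special key
   * "-TOTAL-".
   * @param fs the file system to use
   * @param hbaseRootDir the root directory to scan
   * @return a map of table name to fragmentation percentage
   * @throws IOException when scanning the directory fails
   */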
  public static Map<String, Integer> getTableFragmentation(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException {
    Map<String, Integer> frags = new HashMap<String, Integer>();
    int cfCountTotal = 0;
    int cfFragTotal = 0;
    DirFilter df = new DirFilter(fs);
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (FileStatus tableDir : tableDirs) {
      // Skip the log directory.  All others should be tables.  Inside a
      // table there are compaction.dir directories to skip.  Otherwise, all
      // else should be regions, and in each region there should only be
      // family directories.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      int cfCount = 0;
      int cfFrag = 0;
      FileStatus[] regionDirs = fs.listStatus(d, df);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it is a region name.  Now look in the region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, df);
        for (FileStatus familyDir : familyDirs) {
          cfCount++;
          cfCountTotal++;
          Path family = familyDir.getPath();
          // A family counts as fragmented if it has more than one store file.
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            cfFrag++;
            cfFragTotal++;
          }
        }
      }
      // Compute the percentage per table and store it in the result map.
      frags.put(d.getName(), Math.round((float) cfFrag / cfCount * 100));
    }
    // Set the overall percentage for all tables in this filesystem.
    frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
    return frags;
  }

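  /**
   * Heuristic that decides whether the layout on disk is the pre-0.20 format:
   * looks for a <code>mapfiles</code> directory under the root region's
   * <code>info</code> family.
   * @param fs filesystem object
   * @param hbaseRootDir hbase root directory
   * @return True if this looks like a pre-0.20 hbase file layout
   * @throws IOException e
   */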
  public static boolean isPre020FileLayout(final FileSystem fs,
    final Path hbaseRootDir)
  throws IOException {
    Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
      "70236052"), "info"), "mapfiles");
    return fs.exists(mapfiles);
  }

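  /**
   * Runs through the hbase rootdir and checks that every store has only one
   * file in it -- that is, everything has been major compacted.  Looks at the
   * root and meta tables too.  This version differs from
   * {@link #isMajorCompacted(FileSystem, Path)} in that it expects the
   * pre-0.20 hbase layout on the filesystem.
   * @param fs filesystem object
   * @param hbaseRootDir hbase root directory
   * @return True if this hbase install is major compacted
   * @throws IOException e
   */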
  public static boolean isMajorCompactedPre020(final FileSystem fs,
    final Path hbaseRootDir)
  throws IOException {
    // Presumes any directory under hbase.rootdir is a table.
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
    for (FileStatus tableDir : tableDirs) {
      // Skip the log directory.  All others should be tables.  Inside a
      // table there are compaction.dir directories to skip.  Otherwise, all
      // else should be regions.  In each region there should only be family
      // directories, each holding an 'info' and a 'mapfiles' directory.
      Path d = tableDir.getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // Else it is a region name.  Now look in the region for families.
        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
        for (FileStatus familyDir : familyDirs) {
          Path family = familyDir.getPath();
          FileStatus[] infoAndMapfile = fs.listStatus(family);
          // Assert that the family dir holds only 'info' and 'mapfiles'.
          if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) {
            LOG.debug(family.toString() +
              " has more than just info and mapfile: " + infoAndMapfile.length);
            return false;
          }
          // Make sure the directories are named 'info' or 'mapfiles'.
          // Iterate over the actual length so an empty family directory does
          // not throw an ArrayIndexOutOfBoundsException.
          for (int ll = 0; ll < infoAndMapfile.length; ll++) {
            if (infoAndMapfile[ll].getPath().getName().equals("info") ||
                infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
              continue;
            LOG.debug("Unexpected directory name: " +
              infoAndMapfile[ll].getPath());
            return false;
          }
          // Now look in the 'mapfiles' subdirectory and make sure there is
          // only one file.
          FileStatus[] familyStatus =
            fs.listStatus(new Path(family, "mapfiles"));
          if (familyStatus.length > 1) {
            LOG.debug(family.toString() + " has " + familyStatus.length +
              " files.");
            return false;
          }
        }
      }
    }
    return true;
  }

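  /**
   * A {@link PathFilter} that accepts only directories.
   */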
  public static class DirFilter implements PathFilter {
    private final FileSystem fs;

    public DirFilter(final FileSystem fs) {
      this.fs = fs;
    }

    public boolean accept(Path p) {
      boolean isdir = false;
      try {
        isdir = this.fs.getFileStatus(p).isDir();
      } catch (IOException e) {
        LOG.warn("Failed to check if " + p + " is a directory", e);
      }
      return isdir;
    }
  }

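  /**
   * Heuristic to determine whether it is safe to open a file for append.
   * Checks the <code>dfs.support.append</code> configuration flag and then
   * uses reflection to look for <code>SequenceFile.Writer.syncFs()</code>
   * or <code>FSDataOutputStream.hflush()</code>.
   * @param conf configuration
   * @return True if append is supported
   */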
  public static boolean isAppendSupported(final Configuration conf) {
    boolean append = conf.getBoolean("dfs.support.append", false);
    if (append) {
      try {
        // dfs.support.append is set; additionally check that the
        // SequenceFile.Writer.syncFs() method is present, which is used here
        // as a proxy for a working sync/append implementation.
        SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
        append = true;
      } catch (SecurityException e) {
        // ignore; keep the configured value
      } catch (NoSuchMethodException e) {
        append = false;
      }
    } else {
      try {
        // Look for the newer-style append evidence: FSDataOutputStream.hflush().
        FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
        append = true;
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    return append;
  }

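  /**
   * @param conf configuration
   * @return True if the filesystem named in the configuration uses the
   * 'hdfs' scheme
   * @throws IOException e
   */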
  public static boolean isHDFS(final Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    String scheme = fs.getUri().getScheme();
    return scheme.equalsIgnoreCase("hdfs");
  }

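  /**
   * Recover the lease on the given file by repeatedly opening it for append
   * until the append succeeds, or fail with a FileNotFoundException if the
   * file no longer exists.  No-op unless append is supported and the
   * filesystem is a DistributedFileSystem.
   * @param fs filesystem object
   * @param p path of the file whose lease should be recovered
   * @param conf configuration
   * @throws IOException e
   */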
  public static void recoverFileLease(final FileSystem fs, final Path p,
      Configuration conf)
  throws IOException {
    if (!isAppendSupported(conf)) {
      LOG.warn("Running on HDFS without append enabled may result in data loss");
      return;
    }
    // Lease recovery is only meaningful on DistributedFileSystem; the local
    // filesystem has no leases to recover.
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    LOG.info("Recovering file " + p);
    long startWaiting = System.currentTimeMillis();

    // Try recovery by opening the file for append and closing it again.
    boolean recovered = false;
    while (!recovered) {
      try {
        FSDataOutputStream out = fs.append(p);
        out.close();
        recovered = true;
      } catch (IOException e) {
        e = RemoteExceptionHandler.checkIOException(e);
        if (e instanceof AlreadyBeingCreatedException) {
          // We expect this while the previous lease is still within its soft
          // limit.  If we are still getting it after the soft limit has
          // passed, the old holder may still have the file open, so log a
          // warning and keep retrying.
          long waitedFor = System.currentTimeMillis() - startWaiting;
          if (waitedFor > FSConstants.LEASE_SOFTLIMIT_PERIOD) {
            LOG.warn("Waited " + waitedFor + "ms for lease recovery on " + p +
              ":" + e.getMessage());
          }
          try {
            Thread.sleep(1000);
          } catch (InterruptedException ex) {
            // ignore and retry
          }
        } else if (e instanceof LeaseExpiredException &&
            e.getMessage().contains("File does not exist")) {
          // The namenode reports a missing file as LeaseExpiredException
          // rather than FileNotFoundException; translate it.
          throw new FileNotFoundException(
            "The given HLog wasn't found at " + p.toString());
        } else {
          throw new IOException("Failed to open " + p + " for append", e);
        }
      }
    }
    LOG.info("Finished lease recover attempt for " + p);
  }
}