/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HBaseFileSystem;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableInfoMissingException;

/**
 * Implementation of {@link TableDescriptors} that reads descriptors from the
 * passed filesystem.  It expects descriptors to be in a file under the
 * table's directory in the FS.  It can be read-only (i.e. it never modifies
 * the filesystem) or read-write.
 *
 * <p>Also has utilities for maintaining the table descriptor's tableinfo file.
 * The table schema file is kept under the table directory in the filesystem.
 * It has a {@link #TABLEINFO_NAME} prefix and then a suffix that is the
 * edit sequenceid: e.g. <code>.tableinfo.0000000003</code>.  This sequenceid
 * is always increasing and starts at zero.  The table schema file with the
 * highest sequenceid has the most recent schema edit.  Usually there is only
 * one file, the most recent, but there may be short periods where there is
 * more than one.  Old files are eventually cleaned up.  The presumption is
 * that there will not be lots of concurrent clients making table schema
 * edits; if so, the below needs a bit of reworking and perhaps some
 * supporting API in HDFS.
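 *
 * <p>A minimal usage sketch (the filesystem, root directory, and table name
 * here are illustrative, not mandated by this class):
 * <pre>
 *   TableDescriptors tds = new FSTableDescriptors(fs, rootdir);
 *   HTableDescriptor htd = tds.get("myTable");  // reads the latest .tableinfo file
 * </pre>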
 */
public class FSTableDescriptors implements TableDescriptors {
  private static final Log LOG = LogFactory.getLog(FSTableDescriptors.class);
  private final FileSystem fs;
  private final Path rootdir;
  private final boolean fsreadonly;
  long cachehits = 0;
  long invocations = 0;

  /** The file name prefix used to store HTD in HDFS */
  public static final String TABLEINFO_NAME = ".tableinfo";

  // This cache does not age out the old stuff.  Thinking is that the amount
  // of data we keep up in here is so small, no need to do occasional purge.
  // TODO.
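  // Keys are table names; on each get() a cached entry is revalidated against
  // the table directory and tableinfo file modification times before use.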
  private final Map<String, TableDescriptorModtime> cache =
    new ConcurrentHashMap<String, TableDescriptorModtime>();

  /**
   * Data structure to cache a table descriptor, the time it was modified,
   * and the time the table directory was modified.
   */
  static class TableDescriptorModtime {
    private final HTableDescriptor descriptor;
    private final long modtime;
    private final long dirmodtime;

    TableDescriptorModtime(final long modtime, final long dirmodtime, final HTableDescriptor htd) {
      this.descriptor = htd;
      this.modtime = modtime;
      this.dirmodtime = dirmodtime;
    }

    long getModtime() {
      return this.modtime;
    }

    long getDirModtime() {
      return this.dirmodtime;
    }

    HTableDescriptor getTableDescriptor() {
      return this.descriptor;
    }
  }

  public FSTableDescriptors(final FileSystem fs, final Path rootdir) {
    this(fs, rootdir, false);
  }

  /**
   * @param fs filesystem to read from and, if not read-only, write to
   * @param rootdir root directory of the HBase installation
   * @param fsreadOnly True if we are read-only when it comes to filesystem
   * operations; i.e. on remove, we do not do delete in fs.
   */
  public FSTableDescriptors(final FileSystem fs, final Path rootdir,
      final boolean fsreadOnly) {
    super();
    this.fs = fs;
    this.rootdir = rootdir;
    this.fsreadonly = fsreadOnly;
  }

  /* (non-Javadoc)
   * @see org.apache.hadoop.hbase.TableDescriptors#get(byte[])
   */
  @Override
  public HTableDescriptor get(final byte [] tablename)
  throws IOException {
    return get(Bytes.toString(tablename));
  }

  /* (non-Javadoc)
   * @see org.apache.hadoop.hbase.TableDescriptors#get(java.lang.String)
   */
  @Override
  public HTableDescriptor get(final String tablename)
  throws IOException {
    invocations++;
    if (HTableDescriptor.ROOT_TABLEDESC.getNameAsString().equals(tablename)) {
      cachehits++;
      return HTableDescriptor.ROOT_TABLEDESC;
    }
    if (HTableDescriptor.META_TABLEDESC.getNameAsString().equals(tablename)) {
      cachehits++;
      return HTableDescriptor.META_TABLEDESC;
    }
    // .META. and -ROOT- are already handled.  If someone tries to get the
    // descriptor for .logs, .oldlogs or .corrupt, throw an exception.
    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename)) {
      throw new IOException("No descriptor found for table = " + tablename);
    }

    // Look in cache of descriptors.
    TableDescriptorModtime cachedtdm = this.cache.get(tablename);

    if (cachedtdm != null) {
      // Check mod time has not changed (this is a trip to the NameNode).
      // First check directory modtime as it doesn't require a scan of the full table directory.
      long tableDirModtime = getTableDirModtime(fs, this.rootdir, tablename);
      boolean cachehit = false;
      if (tableDirModtime <= cachedtdm.getDirModtime()) {
        // Table dir not changed since our cached entry.
        cachehit = true;
      } else if (getTableInfoModtime(this.fs, this.rootdir, tablename) <= cachedtdm.getModtime()) {
        // The table dir has changed (perhaps a region split) but the info file itself has not,
        // so the cached descriptor is good; we just need to update the entry.
        this.cache.put(tablename, new TableDescriptorModtime(cachedtdm.getModtime(),
            tableDirModtime, cachedtdm.getTableDescriptor()));
        cachehit = true;
      }  // else the table info file has been changed; need to read it.
      if (cachehit) {
        cachehits++;
        return cachedtdm.getTableDescriptor();
      }
    }

    TableDescriptorModtime tdmt = null;
    try {
      tdmt = getTableDescriptorModtime(this.fs, this.rootdir, tablename, true);
    } catch (NullPointerException e) {
      LOG.debug("Exception during readTableDescriptor. Current table name = "
          + tablename, e);
    } catch (IOException ioe) {
      LOG.debug("Exception during readTableDescriptor. Current table name = "
          + tablename, ioe);
    }

    if (tdmt == null) {
      LOG.warn("The following folder is in HBase's root directory and " +
        "doesn't contain a table descriptor; " +
        "consider deleting it: " + tablename);
    } else {
      this.cache.put(tablename, tdmt);
    }
    return tdmt == null ? null : tdmt.getTableDescriptor();
  }

  /* (non-Javadoc)
   * @see org.apache.hadoop.hbase.TableDescriptors#getAll()
   */
  @Override
  public Map<String, HTableDescriptor> getAll()
  throws IOException {
    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
    List<Path> tableDirs = FSUtils.getTableDirs(fs, rootdir);
    for (Path d: tableDirs) {
      HTableDescriptor htd = null;
      try {
        htd = get(d.getName());
      } catch (FileNotFoundException fnfe) {
        // Inability to retrieve one HTD shouldn't stop us getting the remaining ones.
        LOG.warn("Trouble retrieving htd", fnfe);
      }
      if (htd == null) continue;
      htds.put(d.getName(), htd);
    }
    return htds;
  }

  @Override
  public void add(HTableDescriptor htd) throws IOException {
    if (Bytes.equals(HConstants.ROOT_TABLE_NAME, htd.getName())) {
      throw new NotImplementedException();
    }
    if (Bytes.equals(HConstants.META_TABLE_NAME, htd.getName())) {
      throw new NotImplementedException();
    }
    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
      throw new NotImplementedException();
    }
    if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd);
    String tableName = htd.getNameAsString();
    long modtime = getTableInfoModtime(this.fs, this.rootdir, tableName);
    long dirmodtime = getTableDirModtime(this.fs, this.rootdir, tableName);
    this.cache.put(tableName, new TableDescriptorModtime(modtime, dirmodtime, htd));
  }

  @Override
  public HTableDescriptor remove(final String tablename)
  throws IOException {
    if (!this.fsreadonly) {
      Path tabledir = FSUtils.getTablePath(this.rootdir, tablename);
      if (this.fs.exists(tabledir)) {
        if (!HBaseFileSystem.deleteDirFromFileSystem(fs, tabledir)) {
          throw new IOException("Failed delete of " + tabledir.toString());
        }
      }
    }
    TableDescriptorModtime tdm = this.cache.remove(tablename);
    return tdm == null ? null : tdm.getTableDescriptor();
  }

  /**
   * Checks if a <code>.tableinfo</code> file exists for the given table.
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @param tableName name of table
   * @return true if exists
   * @throws IOException
   */
  public static boolean isTableInfoExists(FileSystem fs, Path rootdir,
      String tableName) throws IOException {
    FileStatus status = getTableInfoPath(fs, rootdir, tableName);
    return status == null ? false : fs.exists(status.getPath());
  }

  private static FileStatus getTableInfoPath(final FileSystem fs,
      final Path rootdir, final String tableName)
  throws IOException {
    Path tabledir = FSUtils.getTablePath(rootdir, tableName);
    return getTableInfoPath(fs, tabledir);
  }

  /**
   * Looks under the table directory in the filesystem for files with a
   * {@link #TABLEINFO_NAME} prefix.  Returns a reference to the 'latest' instance.
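   * <p>For example, if the table directory holds
   * <code>.tableinfo.0000000002</code> and <code>.tableinfo.0000000003</code>,
   * the status for <code>.tableinfo.0000000003</code> is returned and the
   * older file is cleaned up as a side effect.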
   * @param fs
   * @param tabledir
   * @return The 'current' tableinfo file.
   * @throws IOException
   */
  public static FileStatus getTableInfoPath(final FileSystem fs,
      final Path tabledir)
  throws IOException {
    FileStatus [] status = FSUtils.listStatus(fs, tabledir, new PathFilter() {
      @Override
      public boolean accept(Path p) {
        // Accept any file that starts with TABLEINFO_NAME
        return p.getName().startsWith(TABLEINFO_NAME);
      }
    });
    if (status == null || status.length < 1) return null;
    Arrays.sort(status, new FileStatusFileNameComparator());
    if (status.length > 1) {
      // Clean away old versions of .tableinfo
      for (int i = 1; i < status.length; i++) {
        Path p = status[i].getPath();
        if (!HBaseFileSystem.deleteFileFromFileSystem(fs, p)) {
          LOG.warn("Failed cleanup of " + p);
        } else {
          LOG.debug("Cleaned up old tableinfo file " + p);
        }
      }
    }
    return status[0];
  }

  /**
   * Compare {@link FileStatus} instances by {@link Path#getName()}.
   * Returns in reverse order.
   */
  static class FileStatusFileNameComparator
  implements Comparator<FileStatus> {
    @Override
    public int compare(FileStatus left, FileStatus right) {
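      // Reverse name order so the file with the highest sequenceid sorts first.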
      return right.getPath().getName().compareTo(left.getPath().getName());
    }
  }

  /**
   * Width of the sequenceid that is a suffix on a tableinfo file.
   */
  static final int WIDTH_OF_SEQUENCE_ID = 10;

  /*
   * @param number Number to use as suffix.
   * @return Returns a zero-prefixed decimal version of the passed number that
   * is WIDTH_OF_SEQUENCE_ID characters wide (takes the absolute value in case
   * the number is negative).
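   * e.g. formatTableInfoSequenceId(3) returns "0000000003".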
   */
  static String formatTableInfoSequenceId(final int number) {
    byte [] b = new byte[WIDTH_OF_SEQUENCE_ID];
    int d = Math.abs(number);
    for (int i = b.length - 1; i >= 0; i--) {
      b[i] = (byte)((d % 10) + '0');
      d /= 10;
    }
    return Bytes.toString(b);
  }

  /**
   * Regex to eat up the sequenceid suffix on a .tableinfo file.
   * Uses a regex because we may encounter old-style .tableinfo files that
   * have no sequenceid on the end.
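   * <p>For example, both <code>.tableinfo</code> (old style, no suffix) and
   * <code>.tableinfo.0000000003</code> match; in the latter case group 2
   * captures the sequenceid <code>0000000003</code>.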
   */
  private static final Pattern SUFFIX =
    Pattern.compile(TABLEINFO_NAME + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$");


  /**
   * @param p Path to a <code>.tableinfo</code> file.
   * @return The current edit sequenceid, or 0 if none found.
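   * e.g. <code>.tableinfo.0000000003</code> yields 3, while a bare
   * <code>.tableinfo</code> (old style, no suffix) yields 0.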
   */
  static int getTableInfoSequenceid(final Path p) {
    if (p == null) return 0;
    Matcher m = SUFFIX.matcher(p.getName());
    if (!m.matches()) throw new IllegalArgumentException(p.toString());
    String suffix = m.group(2);
    if (suffix == null || suffix.length() <= 0) return 0;
    return Integer.parseInt(suffix);
  }

  /**
   * @param tabledir
   * @param sequenceid
   * @return Path of the tableinfo file for the given sequenceid.
   */
  static Path getTableInfoFileName(final Path tabledir, final int sequenceid) {
    return new Path(tabledir,
      TABLEINFO_NAME + "." + formatTableInfoSequenceId(sequenceid));
  }

  static long getTableDirModtime(final FileSystem fs, final Path rootdir,
      final String tableName)
  throws IOException {
    Path tabledir = FSUtils.getTablePath(rootdir, tableName);
    FileStatus status = fs.getFileStatus(tabledir);
    return status == null ? 0 : status.getModificationTime();
  }

  /**
   * @param fs
   * @param rootdir
   * @param tableName
   * @return Modification time for the table {@link #TABLEINFO_NAME} file
   * or <code>0</code> if no tableinfo file found.
   * @throws IOException
   */
  static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
      final String tableName)
  throws IOException {
    FileStatus status = getTableInfoPath(fs, rootdir, tableName);
    return status == null ? 0 : status.getModificationTime();
  }

  /**
   * Get HTD from HDFS.
   * @param fs
   * @param hbaseRootDir
   * @param tableName
   * @return Descriptor or null if none found.
   * @throws IOException
   */
  public static HTableDescriptor getTableDescriptor(FileSystem fs,
      Path hbaseRootDir, byte[] tableName)
  throws IOException {
    HTableDescriptor htd = null;
    try {
      TableDescriptorModtime tdmt =
        getTableDescriptorModtime(fs, hbaseRootDir, Bytes.toString(tableName), false);
      htd = tdmt == null ? null : tdmt.getTableDescriptor();
    } catch (NullPointerException e) {
      LOG.debug("Exception during readTableDescriptor. Current table name = "
          + Bytes.toString(tableName), e);
    }
    return htd;
  }

  static HTableDescriptor getTableDescriptor(FileSystem fs,
      Path hbaseRootDir, String tableName) throws NullPointerException, IOException {
    TableDescriptorModtime tdmt = getTableDescriptorModtime(fs, hbaseRootDir, tableName, false);
    return tdmt == null ? null : tdmt.getTableDescriptor();
  }

  static TableDescriptorModtime getTableDescriptorModtime(FileSystem fs,
      Path hbaseRootDir, String tableName, boolean readDirModtime)
  throws NullPointerException, IOException {
    // Ignore both -ROOT- and .META. tables.
    if (Bytes.compareTo(Bytes.toBytes(tableName), HConstants.ROOT_TABLE_NAME) == 0
        || Bytes.compareTo(Bytes.toBytes(tableName), HConstants.META_TABLE_NAME) == 0) {
      return null;
    }
    return getTableDescriptorModtime(fs, FSUtils.getTablePath(hbaseRootDir, tableName), readDirModtime);
  }

  static TableDescriptorModtime getTableDescriptorModtime(FileSystem fs, Path tableDir, boolean readDirModtime)
  throws NullPointerException, IOException {
    if (tableDir == null) throw new NullPointerException();
    FileStatus status = getTableInfoPath(fs, tableDir);
    if (status == null) {
      throw new TableInfoMissingException("No .tableinfo file under "
          + tableDir.toUri());
    }
    FSDataInputStream fsDataInputStream = fs.open(status.getPath());
    HTableDescriptor hTableDescriptor = null;
    try {
      hTableDescriptor = new HTableDescriptor();
      hTableDescriptor.readFields(fsDataInputStream);
    } finally {
      fsDataInputStream.close();
    }
    long dirModtime = 0;
    if (readDirModtime) {
      dirModtime = fs.getFileStatus(tableDir).getModificationTime();
    }
    return new TableDescriptorModtime(status.getModificationTime(), dirModtime, hTableDescriptor);
  }

  public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
  throws IOException, NullPointerException {
    TableDescriptorModtime tdmt = getTableDescriptorModtime(fs, tableDir, false);
    return tdmt == null ? null : tdmt.getTableDescriptor();
  }

  /**
   * Update the table descriptor.
   * @param fs
   * @param rootdir
   * @param hTableDescriptor
   * @return Path to the new tableinfo file.
   * @throws IOException Thrown if the update failed.
   */
  static Path updateHTableDescriptor(FileSystem fs, Path rootdir,
      HTableDescriptor hTableDescriptor)
  throws IOException {
    Path tableDir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName());
    Path p = writeTableDescriptor(fs, hTableDescriptor, tableDir,
      getTableInfoPath(fs, tableDir));
    if (p == null) throw new IOException("Failed update");
    LOG.info("Updated tableinfo=" + p);
    return p;
  }

  /**
   * Deletes a table's <code>.tableinfo</code> file from the file system if it
   * exists.  Used in unit tests.
   */
  public static void deleteTableDescriptorIfExists(String tableName,
      Configuration conf) throws IOException {
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    FileStatus status = getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
    // The below deleteDirectory works for either file or directory.
    if (status != null && fs.exists(status.getPath())) {
      FSUtils.deleteDirectory(fs, status.getPath());
    }
  }

  /**
   * @param fs
   * @param hTableDescriptor
   * @param tableDir
   * @param status
   * @return Descriptor file or null if the write failed.
   * @throws IOException
   */
  private static Path writeTableDescriptor(final FileSystem fs,
      final HTableDescriptor hTableDescriptor, final Path tableDir,
      final FileStatus status)
  throws IOException {
    // Get a temporary dir into which we'll first write a file to avoid the
    // half-written file phenomenon.
    Path tmpTableDir = new Path(tableDir, ".tmp");
    // What is the current sequenceid?  We read it from the current file.
    // After we read it, another thread could come in and compete with us
    // writing out the next version of the file.  The retries below should
    // help some in that case, but it is hard to give guarantees in the face
    // of concurrent schema edits.
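    // For example, if the current file is .tableinfo.0000000003, we write
    // .tmp/.tableinfo.0000000004 and then rename it into the table directory,
    // retrying with 0000000005 and so on if we lose a race.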
    int currentSequenceid =
      status == null ? 0 : getTableInfoSequenceid(status.getPath());
    int sequenceid = currentSequenceid;
    // Put an arbitrary upper bound on how often we retry.
    int retries = 10;
    int retrymax = currentSequenceid + retries;
    Path tableInfoPath = null;
    do {
      sequenceid += 1;
      Path p = getTableInfoFileName(tmpTableDir, sequenceid);
      if (fs.exists(p)) {
        LOG.debug(p + " exists; retrying up to " + retries + " times");
        continue;
      }
      try {
        writeHTD(fs, p, hTableDescriptor);
        tableInfoPath = getTableInfoFileName(tableDir, sequenceid);
        if (!HBaseFileSystem.renameDirForFileSystem(fs, p, tableInfoPath)) {
          throw new IOException("Failed rename of " + p + " to " + tableInfoPath);
        }
      } catch (IOException ioe) {
        // Presume a clash of names or something; go around again.
        LOG.debug("Failed write and/or rename; retrying", ioe);
        if (!FSUtils.deleteDirectory(fs, p)) {
          LOG.warn("Failed cleanup of " + p);
        }
        tableInfoPath = null;
        continue;
      }
      // Clean up the old schema file.
      if (status != null) {
        if (!FSUtils.deleteDirectory(fs, status.getPath())) {
          LOG.warn("Failed delete of " + status.getPath() + "; continuing");
        }
      }
      break;
    } while (sequenceid < retrymax);
    return tableInfoPath;
  }

  private static void writeHTD(final FileSystem fs, final Path p,
      final HTableDescriptor htd)
  throws IOException {
    FSDataOutputStream out = HBaseFileSystem.createPathOnFileSystem(fs, p, false);
    try {
      htd.write(out);
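      // Append a human-readable rendition of the descriptor after the
      // serialized bytes; readFields() consumes only the Writable fields
      // written above, so the trailing text is purely for debugging.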
      out.write('\n');
      out.write('\n');
      out.write(Bytes.toBytes(htd.toString()));
    } finally {
      out.close();
    }
  }

  /**
   * Create a new HTableDescriptor in HDFS.  Happens when we are creating a table.
   *
   * @param htableDescriptor
   * @param conf
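   *
   * <p>A minimal usage sketch (the table name is illustrative; the
   * Configuration must point at the cluster's root directory):
   * <pre>
   *   HTableDescriptor htd = new HTableDescriptor("myTable");
   *   FSTableDescriptors.createTableDescriptor(htd, conf);
   * </pre>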
   */
  public static boolean createTableDescriptor(final HTableDescriptor htableDescriptor,
      Configuration conf)
  throws IOException {
    return createTableDescriptor(htableDescriptor, conf, false);
  }

  /**
   * Create a new HTableDescriptor in HDFS.  Happens when we are creating a table.
   * If forceCreation is true, then even if a previous table descriptor is
   * present it will be overwritten.
   *
   * @param htableDescriptor
   * @param conf
   * @param forceCreation True if we are to overwrite an existing file.
   */
  static boolean createTableDescriptor(final HTableDescriptor htableDescriptor,
      final Configuration conf, boolean forceCreation)
  throws IOException {
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor,
        forceCreation);
  }

  /**
   * Create a new HTableDescriptor in HDFS.  Happens when we are creating a table.
   * Used by tests.
   * @param fs
   * @param htableDescriptor
   * @param rootdir
   */
  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
      HTableDescriptor htableDescriptor)
  throws IOException {
    return createTableDescriptor(fs, rootdir, htableDescriptor, false);
  }

  /**
   * Create a new HTableDescriptor in HDFS.  Happens when we are creating a table.
   * If forceCreation is true, then even if a previous table descriptor is
   * present it will be overwritten.
   *
   * @param fs
   * @param htableDescriptor
   * @param rootdir
   * @param forceCreation
   * @return True if we successfully created the file.
   */
  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
      HTableDescriptor htableDescriptor, boolean forceCreation)
  throws IOException {
    Path tabledir = FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString());
    return createTableDescriptorForTableDirectory(fs, tabledir, htableDescriptor, forceCreation);
  }

  /**
   * Create a new HTableDescriptor in HDFS in the specified table directory.  Happens
   * when we create a new table or snapshot a table.
   * @param fs filesystem where the descriptor should be written
   * @param tabledir directory under which we should write the file
   * @param htableDescriptor description of the table to write
   * @param forceCreation if <tt>true</tt>, then even if a previous table descriptor is
   *          present it will be overwritten
   * @return <tt>true</tt> if we successfully created the file, <tt>false</tt> if the file
   *         already exists and we weren't forcing the descriptor creation.
   * @throws IOException if a filesystem error occurs
   */
  public static boolean createTableDescriptorForTableDirectory(FileSystem fs, Path tabledir,
      HTableDescriptor htableDescriptor, boolean forceCreation) throws IOException {
    FileStatus status = getTableInfoPath(fs, tabledir);
    if (status != null) {
      LOG.info("Current tableInfoPath = " + status.getPath());
      if (!forceCreation) {
        if (fs.exists(status.getPath()) && status.getLen() > 0) {
          LOG.info("TableInfo already exists; skipping creation");
          return false;
        }
      }
    }
    Path p = writeTableDescriptor(fs, htableDescriptor, tabledir, status);
    return p != null;
  }
}