/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.TableInfoMissingException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

import com.google.common.primitives.Ints;


/**
 * Implementation of {@link TableDescriptors} that reads descriptors from the
 * passed filesystem.  It expects descriptors to be in a file under the
 * table's directory in the filesystem.  Can be read-only (i.e. it never
 * modifies the filesystem) or read-write.
 *
 * <p>Also has utilities for maintaining the table descriptor's tableinfo file.
 * The table schema file is kept under the table directory in the filesystem.
 * Its name has a {@link #TABLEINFO_NAME} prefix followed by a suffix that is
 * the edit sequenceid: e.g. <code>.tableinfo.0000000003</code>.  This
 * sequenceid is always increasing and starts at zero.  The table schema file
 * with the highest sequenceid holds the most recent schema edit. Usually there
 * is only one file, the most recent, but there may be short periods where more
 * than one exists. Old files are eventually cleaned up.  The presumption is
 * that there will not be lots of concurrent clients making table schema edits;
 * if there are, the below needs reworking and perhaps some supporting api in
 * hdfs.
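 *
 * <p>A minimal usage sketch; <code>fs</code> and <code>rootdir</code> are
 * assumed to point at a live filesystem and HBase root directory:
 * <pre>
 * TableDescriptors tds = new FSTableDescriptors(fs, rootdir);
 * HTableDescriptor htd = tds.get("myTable"); // reads and caches the .tableinfo
 * </pre>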
 */
@InterfaceAudience.Private
public class FSTableDescriptors implements TableDescriptors {
  private static final Log LOG = LogFactory.getLog(FSTableDescriptors.class);
  private final FileSystem fs;
  private final Path rootdir;
  private final boolean fsreadonly;
  long cachehits = 0;
  long invocations = 0;

  /** The file name prefix used to store HTD in HDFS */
  public static final String TABLEINFO_NAME = ".tableinfo";

  // This cache does not age out the old stuff.  The thinking is that the
  // amount of data we keep in here is so small there is no need for an
  // occasional purge.
  // TODO.
  private final Map<String, TableDescriptorModtime> cache =
    new ConcurrentHashMap<String, TableDescriptorModtime>();

  /**
   * Data structure to hold modification time and table descriptor.
   */
  static class TableDescriptorModtime {
    private final HTableDescriptor descriptor;
    private final long modtime;

    TableDescriptorModtime(final long modtime, final HTableDescriptor htd) {
      this.descriptor = htd;
      this.modtime = modtime;
    }

    long getModtime() {
      return this.modtime;
    }

    HTableDescriptor getTableDescriptor() {
      return this.descriptor;
    }
  }

  public FSTableDescriptors(final FileSystem fs, final Path rootdir) {
    this(fs, rootdir, false);
  }

  /**
   * @param fs filesystem to read descriptors from and, if not read-only, write them to
   * @param rootdir root directory of the HBase installation
   * @param fsreadOnly True if we are read-only when it comes to filesystem
   * operations; i.e. on remove, we do not delete from the fs.
   */
  public FSTableDescriptors(final FileSystem fs, final Path rootdir,
      final boolean fsreadOnly) {
    super();
    this.fs = fs;
    this.rootdir = rootdir;
    this.fsreadonly = fsreadOnly;
  }

  /* (non-Javadoc)
   * @see org.apache.hadoop.hbase.TableDescriptors#get(byte[])
   */
  @Override
  public HTableDescriptor get(final byte [] tablename)
  throws IOException {
    return get(Bytes.toString(tablename));
  }

  /* (non-Javadoc)
   * @see org.apache.hadoop.hbase.TableDescriptors#get(java.lang.String)
   */
  @Override
  public HTableDescriptor get(final String tablename)
  throws IOException {
    invocations++;
    if (HTableDescriptor.ROOT_TABLEDESC.getNameAsString().equals(tablename)) {
      cachehits++;
      return HTableDescriptor.ROOT_TABLEDESC;
    }
    if (HTableDescriptor.META_TABLEDESC.getNameAsString().equals(tablename)) {
      cachehits++;
      return HTableDescriptor.META_TABLEDESC;
    }
    // .META. and -ROOT- are already handled. If someone tries to get the
    // descriptor for .logs, .oldlogs or .corrupt, throw an exception.
    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename)) {
      throw new IOException("No descriptor found for table = " + tablename);
    }

    // Look in cache of descriptors.
    TableDescriptorModtime cachedtdm = this.cache.get(tablename);

    if (cachedtdm != null) {
      // Check mod time has not changed (this is a trip to the NN).
      if (getTableInfoModtime(this.fs, this.rootdir, tablename) <= cachedtdm.getModtime()) {
        cachehits++;
        return cachedtdm.getTableDescriptor();
      }
    }

    TableDescriptorModtime tdmt = null;
    try {
      tdmt = getTableDescriptorModtime(this.fs, this.rootdir, tablename);
    } catch (NullPointerException e) {
      LOG.debug("Exception during readTableDescriptor. Current table name = "
          + tablename, e);
    } catch (IOException ioe) {
      LOG.debug("Exception during readTableDescriptor. Current table name = "
          + tablename, ioe);
    }

    if (tdmt == null) {
      LOG.warn("The following folder is in HBase's root directory and " +
        "doesn't contain a table descriptor; consider deleting it: " + tablename);
    } else {
      this.cache.put(tablename, tdmt);
    }
    return tdmt == null ? null : tdmt.getTableDescriptor();
  }

  /* (non-Javadoc)
   * @see org.apache.hadoop.hbase.TableDescriptors#getAll()
   */
  @Override
  public Map<String, HTableDescriptor> getAll()
  throws IOException {
    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
    List<Path> tableDirs = FSUtils.getTableDirs(fs, rootdir);
    for (Path d: tableDirs) {
      HTableDescriptor htd = null;
      try {
        htd = get(d.getName());
      } catch (FileNotFoundException fnfe) {
        // Failure to retrieve one HTD shouldn't stop us getting the rest.
        LOG.warn("Trouble retrieving htd", fnfe);
      }
      if (htd == null) continue;
      htds.put(d.getName(), htd);
    }
    return htds;
  }

  @Override
  public void add(HTableDescriptor htd) throws IOException {
    if (Bytes.equals(HConstants.ROOT_TABLE_NAME, htd.getName())) {
      throw new NotImplementedException();
    }
    if (Bytes.equals(HConstants.META_TABLE_NAME, htd.getName())) {
      throw new NotImplementedException();
    }
    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
      throw new NotImplementedException();
    }
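    // In read-only mode we skip the filesystem write but still refresh the
    // cache so the in-memory view tracks what the caller handed us.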
    if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd);
    long modtime = getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
    this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd));
  }

  @Override
  public HTableDescriptor remove(final String tablename)
  throws IOException {
    if (!this.fsreadonly) {
      Path tabledir = FSUtils.getTablePath(this.rootdir, tablename);
      if (this.fs.exists(tabledir)) {
        if (!this.fs.delete(tabledir, true)) {
          throw new IOException("Failed delete of " + tabledir.toString());
        }
      }
    }
    TableDescriptorModtime tdm = this.cache.remove(tablename);
    return tdm == null ? null : tdm.getTableDescriptor();
  }

  /**
   * Checks if a <code>.tableinfo</code> file exists for the given table.
   *
   * @param fs file system
   * @param rootdir root directory of HBase installation
   * @param tableName name of table
   * @return true if exists
   * @throws IOException
   */
  public static boolean isTableInfoExists(FileSystem fs, Path rootdir,
      String tableName) throws IOException {
    FileStatus status = getTableInfoPath(fs, rootdir, tableName);
    return status == null ? false : fs.exists(status.getPath());
  }

  private static FileStatus getTableInfoPath(final FileSystem fs,
      final Path rootdir, final String tableName)
  throws IOException {
    Path tabledir = FSUtils.getTablePath(rootdir, tableName);
    return getTableInfoPath(fs, tabledir);
  }

  /**
   * Looks under the table directory in the filesystem for files with a
   * {@link #TABLEINFO_NAME} prefix.  Returns a reference to the 'latest'
   * instance and, as a side effect, deletes any older tableinfo files found.
   * @param fs filesystem to look in
   * @param tabledir directory of the table
   * @return The 'current' tableinfo file, or null if none found.
   * @throws IOException
   */
  public static FileStatus getTableInfoPath(final FileSystem fs,
      final Path tabledir)
  throws IOException {
    FileStatus [] status = FSUtils.listStatus(fs, tabledir, new PathFilter() {
      @Override
      public boolean accept(Path p) {
        // Accept any file that starts with TABLEINFO_NAME
        return p.getName().startsWith(TABLEINFO_NAME);
      }
    });
    if (status == null || status.length < 1) return null;
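    // Sort descending by file name so the tableinfo with the highest
    // sequenceid (the most recent schema edit) comes first.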
    Arrays.sort(status, new FileStatusFileNameComparator());
    if (status.length > 1) {
      // Clean away old versions of .tableinfo
      for (int i = 1; i < status.length; i++) {
        Path p = status[i].getPath();
        if (!fs.delete(p, false)) {
          LOG.warn("Failed cleanup of " + p);
        } else {
          LOG.debug("Cleaned up old tableinfo file " + p);
        }
      }
    }
    return status[0];
  }

  /**
   * Compare {@link FileStatus} instances by {@link Path#getName()} in
   * descending order, so the tableinfo file with the highest sequenceid
   * sorts first.
   */
  static class FileStatusFileNameComparator
  implements Comparator<FileStatus> {
    @Override
    public int compare(FileStatus left, FileStatus right) {
      // Reverse the natural (path) ordering to get a descending sort.
      return right.compareTo(left);
    }
  }

  /**
   * Width of the sequenceid that is a suffix on a tableinfo file.
   */
  static final int WIDTH_OF_SEQUENCE_ID = 10;

  /*
   * @param number Number to use as suffix.
   * @return Returns a zero-prefixed decimal version of the passed number,
   * WIDTH_OF_SEQUENCE_ID characters wide (takes the absolute value in case
   * the number is negative).
   */
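  // e.g. formatTableInfoSequenceId(3) returns "0000000003".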
  static String formatTableInfoSequenceId(final int number) {
    byte [] b = new byte[WIDTH_OF_SEQUENCE_ID];
    int d = Math.abs(number);
    for (int i = b.length - 1; i >= 0; i--) {
      b[i] = (byte)((d % 10) + '0');
      d /= 10;
    }
    return Bytes.toString(b);
  }

  /**
   * Regex to eat up the sequenceid suffix on a .tableinfo file.
   * Uses a regex because we may encounter old-style .tableinfo files that
   * have no sequenceid on the end.
   */
  private static final Pattern SUFFIX =
    Pattern.compile(TABLEINFO_NAME + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$");
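  // The optional group means both bare ".tableinfo" (old style) and
  // ".tableinfo.0000000003" match; group(2) captures just the digits.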


  /**
   * @param p Path to a <code>.tableinfo</code> file.
   * @return The current edit sequenceid, or 0 if none found.
   */
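  // e.g. ".tableinfo.0000000003" yields 3; bare ".tableinfo" (old style) yields 0.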
  static int getTableInfoSequenceid(final Path p) {
    if (p == null) return 0;
    Matcher m = SUFFIX.matcher(p.getName());
    if (!m.matches()) throw new IllegalArgumentException(p.toString());
    String suffix = m.group(2);
    if (suffix == null || suffix.length() <= 0) return 0;
    return Integer.parseInt(suffix);
  }

  /**
   * @param tabledir directory of the table
   * @param sequenceid sequenceid to use as the file suffix
   * @return Name of tableinfo file.
   */
  static Path getTableInfoFileName(final Path tabledir, final int sequenceid) {
    return new Path(tabledir,
      TABLEINFO_NAME + "." + formatTableInfoSequenceId(sequenceid));
  }

  /**
   * @param fs filesystem to read from
   * @param rootdir root directory of the HBase installation
   * @param tableName name of the table
   * @return Modification time of the table's {@link #TABLEINFO_NAME} file,
   * or <code>0</code> if no tableinfo file is found.
   * @throws IOException
   */
  static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
      final String tableName)
  throws IOException {
    FileStatus status = getTableInfoPath(fs, rootdir, tableName);
    return status == null ? 0 : status.getModificationTime();
  }

  /**
   * Get HTD from HDFS.
   * @param fs filesystem to read from
   * @param hbaseRootDir root directory of the HBase installation
   * @param tableName name of the table
   * @return Descriptor or null if none found.
   * @throws IOException
   */
  public static HTableDescriptor getTableDescriptor(FileSystem fs,
      Path hbaseRootDir, byte[] tableName)
  throws IOException {
    HTableDescriptor htd = null;
    try {
      TableDescriptorModtime tdmt =
        getTableDescriptorModtime(fs, hbaseRootDir, Bytes.toString(tableName));
      htd = tdmt == null ? null : tdmt.getTableDescriptor();
    } catch (NullPointerException e) {
      LOG.debug("Exception during readTableDescriptor. Current table name = "
          + Bytes.toString(tableName), e);
    }
    return htd;
  }

  static HTableDescriptor getTableDescriptor(FileSystem fs,
      Path hbaseRootDir, String tableName) throws NullPointerException, IOException {
    TableDescriptorModtime tdmt = getTableDescriptorModtime(fs, hbaseRootDir, tableName);
    return tdmt == null ? null : tdmt.getTableDescriptor();
  }

  static TableDescriptorModtime getTableDescriptorModtime(FileSystem fs,
      Path hbaseRootDir, String tableName) throws NullPointerException, IOException {
    // Ignore both the -ROOT- and .META. tables.
    if (Bytes.compareTo(Bytes.toBytes(tableName), HConstants.ROOT_TABLE_NAME) == 0
        || Bytes.compareTo(Bytes.toBytes(tableName), HConstants.META_TABLE_NAME) == 0) {
      return null;
    }
    return getTableDescriptorModtime(fs, FSUtils.getTablePath(hbaseRootDir, tableName));
  }

  static TableDescriptorModtime getTableDescriptorModtime(FileSystem fs, Path tableDir)
  throws NullPointerException, IOException {
    if (tableDir == null) throw new NullPointerException();
    FileStatus status = getTableInfoPath(fs, tableDir);
    if (status == null) {
      throw new TableInfoMissingException("No .tableinfo file under "
          + tableDir.toUri());
    }
    int len = Ints.checkedCast(status.getLen());
    byte [] content = new byte[len];
    FSDataInputStream fsDataInputStream = fs.open(status.getPath());
    try {
      fsDataInputStream.readFully(content);
    } finally {
      fsDataInputStream.close();
    }
    HTableDescriptor htd = null;
    try {
      htd = HTableDescriptor.parseFrom(content);
    } catch (DeserializationException e) {
      throw new IOException("content=" + Bytes.toStringBinary(content), e);
    }
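    // Note: parseFrom also understands the legacy Writable serialization, so
    // a successful parse does not mean the file on disk is already pb.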
    if (!ProtobufUtil.isPBMagicPrefix(content)) {
      // Convert the file over to be pb before leaving here.
      createTableDescriptor(fs, tableDir.getParent(), htd, true);
    }
    return new TableDescriptorModtime(status.getModificationTime(), htd);
  }

  public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
  throws IOException, NullPointerException {
    TableDescriptorModtime tdmt = getTableDescriptorModtime(fs, tableDir);
    return tdmt == null ? null : tdmt.getTableDescriptor();
  }

  /**
   * Update table descriptor on the file system.
   * @param fs filesystem to write to
   * @param rootdir root directory of the HBase installation
   * @param hTableDescriptor descriptor to write
   * @return New tableinfo path.
   * @throws IOException Thrown if the update failed.
   */
  static Path updateHTableDescriptor(FileSystem fs, Path rootdir,
      HTableDescriptor hTableDescriptor)
  throws IOException {
    Path tableDir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName());
    Path p = writeTableDescriptor(fs, hTableDescriptor, tableDir,
      getTableInfoPath(fs, tableDir));
    if (p == null) throw new IOException("Failed update");
    LOG.info("Updated tableinfo=" + p);
    return p;
  }

  /**
   * Deletes a table's tableinfo file from the filesystem if it exists.
   * Used in unit tests.
   */
  public static void deleteTableDescriptorIfExists(String tableName,
      Configuration conf) throws IOException {
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    FileStatus status = getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
    // The below deleteDirectory works for either file or directory.
    if (status != null && fs.exists(status.getPath())) {
      FSUtils.deleteDirectory(fs, status.getPath());
    }
  }

  /**
   * @param fs filesystem to write to
   * @param hTableDescriptor descriptor to serialize
   * @param tableDir directory of the table
   * @param status current tableinfo file, or null if there is none yet
   * @return Descriptor file, or null if we failed to write one.
   * @throws IOException
   */
  private static Path writeTableDescriptor(final FileSystem fs,
      final HTableDescriptor hTableDescriptor, final Path tableDir,
      final FileStatus status)
  throws IOException {
    // Get a temporary dir into which we'll first write, to avoid the
    // half-written-file phenomenon.
    Path tmpTableDir = new Path(tableDir, ".tmp");
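    // The finished file is moved into place with rename below; rename is
    // atomic in HDFS, so readers never observe a half-written tableinfo.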
    // What is the current sequenceid?  We read it from the current file.
    // After we read it, another thread could come in and compete with us to
    // write out the next version of the file.  The retries below should help
    // some in this case, but it's hard to make guarantees in the face of
    // concurrent schema edits.
    int currentSequenceid = status == null ? 0 : getTableInfoSequenceid(status.getPath());
    int sequenceid = currentSequenceid;
    // Put an arbitrary upperbound on how often we retry.
    int retries = 10;
    int retrymax = currentSequenceid + retries;
    Path tableInfoPath = null;
    do {
      sequenceid += 1;
      Path p = getTableInfoFileName(tmpTableDir, sequenceid);
      if (fs.exists(p)) {
        LOG.debug(p + " exists; retrying up to " + retries + " times");
        continue;
      }
      try {
        writeHTD(fs, p, hTableDescriptor);
        tableInfoPath = getTableInfoFileName(tableDir, sequenceid);
        if (!fs.rename(p, tableInfoPath)) {
          throw new IOException("Failed rename of " + p + " to " + tableInfoPath);
        }
      } catch (IOException ioe) {
        // Presume a clash of names or something; go around again.
        LOG.debug("Failed write and/or rename; retrying", ioe);
        if (!FSUtils.deleteDirectory(fs, p)) {
          LOG.warn("Failed cleanup of " + p);
        }
        tableInfoPath = null;
        continue;
      }
      // Cleanup old schema file.
      if (status != null) {
        if (!FSUtils.deleteDirectory(fs, status.getPath())) {
          LOG.warn("Failed delete of " + status.getPath() + "; continuing");
        }
      }
      break;
    } while (sequenceid < retrymax);
    return tableInfoPath;
  }

  private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
  throws IOException {
    FSDataOutputStream out = fs.create(p, false);
    try {
      // We used to write this file out as a serialized HTD Writable followed by two '\n's and then
      // the toString version of HTD.  Now we just write out the pb serialization.
      out.write(htd.toByteArray());
    } finally {
      out.close();
    }
  }

  /**
   * Create a new HTableDescriptor in HDFS. Happens when we are creating a table.
   *
   * @param htableDescriptor descriptor to write
   * @param conf configuration used to locate the filesystem and root dir
   */
  public static boolean createTableDescriptor(final HTableDescriptor htableDescriptor,
      Configuration conf)
  throws IOException {
    return createTableDescriptor(htableDescriptor, conf, false);
  }

  /**
   * Create a new HTableDescriptor in HDFS. Happens when we are creating a
   * table. If forceCreation is true, an existing table descriptor is
   * overwritten.
   *
   * @param htableDescriptor descriptor to write
   * @param conf configuration used to locate the filesystem and root dir
   * @param forceCreation True if we are to overwrite an existing file.
   */
  static boolean createTableDescriptor(final HTableDescriptor htableDescriptor,
      final Configuration conf, boolean forceCreation)
  throws IOException {
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor, forceCreation);
  }

  /**
   * Create a new HTableDescriptor in HDFS. Happens when we are creating a table.
   * Used by tests.
   * @param fs filesystem to write to
   * @param rootdir root directory of the HBase installation
   * @param htableDescriptor descriptor to write
   */
  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
      HTableDescriptor htableDescriptor)
  throws IOException {
    return createTableDescriptor(fs, rootdir, htableDescriptor, false);
  }

  /**
   * Create a new HTableDescriptor in HDFS. Happens when we are creating a
   * table. If forceCreation is true, an existing table descriptor is
   * overwritten.
   *
   * @param fs filesystem to write to
   * @param rootdir root directory of the HBase installation
   * @param htableDescriptor descriptor to write
   * @param forceCreation True if we are to overwrite an existing file.
   * @return True if we successfully created the file.
   */
  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
      HTableDescriptor htableDescriptor, boolean forceCreation)
  throws IOException {
    Path tabledir = FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString());
    return createTableDescriptorForTableDirectory(fs, tabledir, htableDescriptor, forceCreation);
  }

  /**
   * Create a new HTableDescriptor in HDFS in the specified table directory. Happens when we create
   * a new table or snapshot a table.
   * @param fs filesystem where the descriptor should be written
   * @param tabledir directory under which we should write the file
   * @param htableDescriptor description of the table to write
   * @param forceCreation if <tt>true</tt>, then even if a previous table descriptor is present it
   *          will be overwritten
   * @return <tt>true</tt> if we successfully created the file, <tt>false</tt> if the file
   *         already exists and we weren't forcing the descriptor creation.
   * @throws IOException if a filesystem error occurs
   */
  public static boolean createTableDescriptorForTableDirectory(FileSystem fs, Path tabledir,
      HTableDescriptor htableDescriptor, boolean forceCreation) throws IOException {
    FileStatus status = getTableInfoPath(fs, tabledir);
    if (status != null) {
      LOG.info("Current tableInfoPath = " + status.getPath());
      if (!forceCreation) {
        if (fs.exists(status.getPath()) && status.getLen() > 0) {
          if (getTableDescriptor(fs, status.getPath().getParent()).equals(htableDescriptor)) {
            LOG.info("TableInfo already exists; skipping creation");
            return false;
          }
        }
      }
    }
    Path p = writeTableDescriptor(fs, htableDescriptor, tabledir, status);
    return p != null;
  }
}