View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.DataInput;
22  import java.io.DataOutput;
23  import java.io.IOException;
24  import java.util.ArrayList;
25  import java.util.Collection;
26  import java.util.Collections;
27  import java.util.HashMap;
28  import java.util.HashSet;
29  import java.util.Iterator;
30  import java.util.List;
31  import java.util.Map;
32  import java.util.Set;
33  import java.util.TreeMap;
34  import java.util.TreeSet;
35  import java.util.regex.Matcher;
36  
37  import org.apache.commons.logging.Log;
38  import org.apache.commons.logging.LogFactory;
39  import org.apache.hadoop.classification.InterfaceAudience;
40  import org.apache.hadoop.classification.InterfaceStability;
41  import org.apache.hadoop.fs.Path;
42  import org.apache.hadoop.hbase.client.Durability;
43  import org.apache.hadoop.hbase.exceptions.DeserializationException;
44  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
45  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
46  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
47  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
48  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
49  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
50  import org.apache.hadoop.hbase.regionserver.BloomType;
51  import org.apache.hadoop.hbase.security.User;
52  import org.apache.hadoop.hbase.util.Bytes;
53  import org.apache.hadoop.hbase.util.Writables;
54  import org.apache.hadoop.io.WritableComparable;
55  
56  import com.google.protobuf.ByteString;
57  import com.google.protobuf.InvalidProtocolBufferException;
58  
59  /**
60   * HTableDescriptor contains the details about an HBase table  such as the descriptors of
61   * all the column families, is the table a catalog table, <code> -ROOT- </code> or
62   * <code> .META. </code>, if the table is read only, the maximum size of the memstore,
63   * when the region split should occur, coprocessors associated with it etc...
64   */
65  @InterfaceAudience.Public
66  @InterfaceStability.Evolving
67  public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
68  
69    private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
70  
71    /**
72     *  Changes prior to version 3 were not recorded here.
73     *  Version 3 adds metadata as a map where keys and values are byte[].
74     *  Version 4 adds indexes
75     *  Version 5 removed transactional pollution -- e.g. indexes
76     *  Version 6 changed metadata to BytesBytesPair in PB
77     *  Version 7 adds table-level configuration
78     */
79    private static final byte TABLE_DESCRIPTOR_VERSION = 7;
80  
81    private TableName name = null;
82  
83    /**
84     * A map which holds the metadata information of the table. This metadata
85     * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
86     * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
87     */
88    private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
89      new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
90  
91    /**
92     * A map which holds the configuration specific to the table.
93     * The keys of the map have the same names as config keys and override the defaults with
94     * table-specific settings. Example usage may be for compactions, etc.
95     */
96    private final Map<String, String> configuration = new HashMap<String, String>();
97  
98    public static final String SPLIT_POLICY = "SPLIT_POLICY";
99  
100   /**
101    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
102    * attribute which denotes the maximum size of the store file after which
103    * a region split occurs
104    *
105    * @see #getMaxFileSize()
106    */
107   public static final String MAX_FILESIZE = "MAX_FILESIZE";
108   private static final ImmutableBytesWritable MAX_FILESIZE_KEY =
109     new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE));
110 
111   public static final String OWNER = "OWNER";
112   public static final ImmutableBytesWritable OWNER_KEY =
113     new ImmutableBytesWritable(Bytes.toBytes(OWNER));
114 
115   /**
116    * <em>INTERNAL</em> Used by rest interface to access this metadata
117    * attribute which denotes if the table is Read Only
118    *
119    * @see #isReadOnly()
120    */
121   public static final String READONLY = "READONLY";
122   private static final ImmutableBytesWritable READONLY_KEY =
123     new ImmutableBytesWritable(Bytes.toBytes(READONLY));
124 
125   /**
126    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
127    * attribute which denotes if the table is compaction enabled
128    *
129    * @see #isCompactionEnabled()
130    */
131   public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
132   private static final ImmutableBytesWritable COMPACTION_ENABLED_KEY =
133     new ImmutableBytesWritable(Bytes.toBytes(COMPACTION_ENABLED));
134 
135   /**
136    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
137    * attribute which represents the maximum size of the memstore after which
138    * its contents are flushed onto the disk
139    *
140    * @see #getMemStoreFlushSize()
141    */
142   public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
143   private static final ImmutableBytesWritable MEMSTORE_FLUSHSIZE_KEY =
144     new ImmutableBytesWritable(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
145 
146   /**
147    * <em>INTERNAL</em> Used by rest interface to access this metadata
148    * attribute which denotes if the table is a -ROOT- region or not
149    *
150    * @see #isRootRegion()
151    */
152   public static final String IS_ROOT = "IS_ROOT";
153   private static final ImmutableBytesWritable IS_ROOT_KEY =
154     new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT));
155 
156   /**
157    * <em>INTERNAL</em> Used by rest interface to access this metadata
158    * attribute which denotes if it is a catalog table, either
159    * <code> .META. </code> or <code> -ROOT- </code>
160    *
161    * @see #isMetaRegion()
162    */
163   public static final String IS_META = "IS_META";
164   private static final ImmutableBytesWritable IS_META_KEY =
165     new ImmutableBytesWritable(Bytes.toBytes(IS_META));
166 
167   /**
168    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
169    * attribute which denotes if the deferred log flush option is enabled.
170    * @deprecated Use {@link #DURABILITY} instead.
171    */
172   @Deprecated
173   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
174   @Deprecated
175   private static final ImmutableBytesWritable DEFERRED_LOG_FLUSH_KEY =
176     new ImmutableBytesWritable(Bytes.toBytes(DEFERRED_LOG_FLUSH));
177 
178   /**
179    * <em>INTERNAL</em> {@link Durability} setting for the table.
180    */
181   public static final String DURABILITY = "DURABILITY";
182   private static final ImmutableBytesWritable DURABILITY_KEY =
183       new ImmutableBytesWritable(Bytes.toBytes("DURABILITY"));
184 
185   /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
186   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
187 
188   /*
189    *  The below are ugly but better than creating them each time till we
190    *  replace booleans being saved as Strings with plain booleans.  Need a
191    *  migration script to do this.  TODO.
192    */
193   private static final ImmutableBytesWritable FALSE =
194     new ImmutableBytesWritable(Bytes.toBytes(Boolean.FALSE.toString()));
195 
196   private static final ImmutableBytesWritable TRUE =
197     new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));
198 
199   private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
200 
201   /**
202    * Constant that denotes whether the table is READONLY by default and is false
203    */
204   public static final boolean DEFAULT_READONLY = false;
205 
206   /**
207    * Constant that denotes whether the table is compaction enabled by default
208    */
209   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
210 
211   /**
212    * Constant that denotes the maximum default size of the memstore after which
213    * the contents are flushed to the store files
214    */
215   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
216 
  // Table of attribute defaults; used to suppress default values when printing
  // a descriptor (see getValues(boolean)).
  private final static Map<String, String> DEFAULT_VALUES
    = new HashMap<String, String>();
  // Keys users may not set directly; populated from DEFAULT_VALUES plus the
  // internal IS_ROOT/IS_META flags below.
  private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
    = new HashSet<ImmutableBytesWritable>();
  static {
    DEFAULT_VALUES.put(MAX_FILESIZE,
        String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
        String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
        String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
    // Every defaulted attribute key is also reserved.
    for (String s : DEFAULT_VALUES.keySet()) {
      RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
    }
    RESERVED_KEYWORDS.add(IS_ROOT_KEY);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }
236 
237   /**
238    * Cache of whether this is a meta table or not.
239    */
240   private volatile Boolean meta = null;
241   /**
242    * Cache of whether this is root table or not.
243    */
244   private volatile Boolean root = null;
245 
246   /**
247    * Durability setting for the table
248    */
249   private Durability durability = null;
250 
251   /**
252    * Maps column family name to the respective HColumnDescriptors
253    */
254   private final Map<byte [], HColumnDescriptor> families =
255     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
256 
257   /**
258    * <em> INTERNAL </em> Private constructor used internally creating table descriptors for
259    * catalog tables, <code>.META.</code> and <code>-ROOT-</code>.
260    */
261   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
262     setName(name);
263     for(HColumnDescriptor descriptor : families) {
264       this.families.put(descriptor.getName(), descriptor);
265     }
266   }
267 
268   /**
269    * <em> INTERNAL </em>Private constructor used internally creating table descriptors for
270    * catalog tables, <code>.META.</code> and <code>-ROOT-</code>.
271    */
272   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
273       Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
274     setName(name);
275     for(HColumnDescriptor descriptor : families) {
276       this.families.put(descriptor.getName(), descriptor);
277     }
278     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
279         values.entrySet()) {
280       setValue(entry.getKey(), entry.getValue());
281     }
282   }
283 
284   /**
285    * Default constructor which constructs an empty object.
286    * For deserializing an HTableDescriptor instance only.
287    * @deprecated Used by Writables and Writables are going away.
288    */
  @Deprecated
  public HTableDescriptor() {
    // Intentionally empty: state is populated later during deserialization.
    super();
  }
293 
294   /**
295    * Construct a table descriptor specifying a TableName object
296    * @param name Table name.
297    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
298    */
  public HTableDescriptor(final TableName name) {
    super();
    // setName also derives the IS_ROOT/IS_META flags for the table.
    setName(name);
  }
303 
304   /**
305    * Construct a table descriptor specifying a byte array table name
306    * @param name Table name.
307    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
308    */
  @Deprecated
  public HTableDescriptor(final byte[] name) {
    // Delegates to the TableName-based constructor; kept for API compatibility.
    this(TableName.valueOf(name));
  }
313 
314   /**
315    * Construct a table descriptor specifying a String table name
316    * @param name Table name.
317    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
318    */
  @Deprecated
  public HTableDescriptor(final String name) {
    // Delegates to the TableName-based constructor; kept for API compatibility.
    this(TableName.valueOf(name));
  }
323 
324   /**
325    * Construct a table descriptor by cloning the descriptor passed as a parameter.
326    * <p>
327    * Makes a deep copy of the supplied descriptor.
328    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
329    * @param desc The descriptor.
330    */
331   public HTableDescriptor(final HTableDescriptor desc) {
332     super();
333     setName(desc.name);
334     setMetaFlags(this.name);
335     for (HColumnDescriptor c: desc.families.values()) {
336       this.families.put(c.getName(), new HColumnDescriptor(c));
337     }
338     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
339         desc.values.entrySet()) {
340       setValue(e.getKey(), e.getValue());
341     }
342     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
343       this.configuration.put(e.getKey(), e.getValue());
344     }
345   }
346 
347   /*
348    * Set meta flags on this table.
349    * IS_ROOT_KEY is set if its a -ROOT- table
350    * IS_META_KEY is set either if its a -ROOT- or a .META. table
351    * Called by constructors.
352    * @param name
353    */
354   private void setMetaFlags(final TableName name) {
355     setMetaRegion(isRootRegion() ||
356         name.equals(TableName.META_TABLE_NAME));
357   }
358 
359   /**
360    * Check if the descriptor represents a <code> -ROOT- </code> region.
361    *
362    * @return true if this is a <code> -ROOT- </code> region
363    */
364   public boolean isRootRegion() {
365     if (this.root == null) {
366       this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
367     }
368     return this.root.booleanValue();
369   }
370 
371   /**
372    * <em> INTERNAL </em> Used to denote if the current table represents
373    * <code> -ROOT- </code> region. This is used internally by the
374    * HTableDescriptor constructors
375    *
376    * @param isRoot true if this is the <code> -ROOT- </code> region
377    */
378   protected void setRootRegion(boolean isRoot) {
379     // TODO: Make the value a boolean rather than String of boolean.
380     setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
381   }
382 
383   /**
384    * Checks if this table is <code> .META. </code>
385    * region.
386    *
387    * @return true if this table is <code> .META. </code>
388    * region
389    */
390   public boolean isMetaRegion() {
391     if (this.meta == null) {
392       this.meta = calculateIsMetaRegion();
393     }
394     return this.meta.booleanValue();
395   }
396 
397   private synchronized Boolean calculateIsMetaRegion() {
398     byte [] value = getValue(IS_META_KEY);
399     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
400   }
401 
402   private boolean isSomething(final ImmutableBytesWritable key,
403       final boolean valueIfNull) {
404     byte [] value = getValue(key);
405     if (value != null) {
406       return Boolean.valueOf(Bytes.toString(value));
407     }
408     return valueIfNull;
409   }
410 
411   /**
412    * <em> INTERNAL </em> Used to denote if the current table represents
413    * <code> -ROOT- </code> or <code> .META. </code> region. This is used
414    * internally by the HTableDescriptor constructors
415    *
416    * @param isMeta true if its either <code> -ROOT- </code> or
417    * <code> .META. </code> region
418    */
419   protected void setMetaRegion(boolean isMeta) {
420     setValue(IS_META_KEY, isMeta? TRUE: FALSE);
421   }
422 
423   /**
424    * Checks if the table is a <code>.META.</code> table
425    *
426    * @return true if table is <code> .META. </code> region.
427    */
428   public boolean isMetaTable() {
429     return isMetaRegion() && !isRootRegion();
430   }
431 
432   /**
433    * Checks of the tableName being passed is a system table
434    *
435    *
436    * @return true if a tableName is a member of the system
437    * namesapce (aka hbase)
438    */
439   public static boolean isSystemTable(final TableName tableName) {
440     return tableName.getNamespaceAsString()
441         .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
442   }
443 
444   /**
445    * Getter for accessing the metadata associated with the key
446    *
447    * @param key The key.
448    * @return The value.
449    * @see #values
450    */
451   public byte[] getValue(byte[] key) {
452     return getValue(new ImmutableBytesWritable(key));
453   }
454 
455   private byte[] getValue(final ImmutableBytesWritable key) {
456     ImmutableBytesWritable ibw = values.get(key);
457     if (ibw == null)
458       return null;
459     return ibw.get();
460   }
461 
462   /**
463    * Getter for accessing the metadata associated with the key
464    *
465    * @param key The key.
466    * @return The value.
467    * @see #values
468    */
469   public String getValue(String key) {
470     byte[] value = getValue(Bytes.toBytes(key));
471     if (value == null)
472       return null;
473     return Bytes.toString(value);
474   }
475 
476   /**
477    * Getter for fetching an unmodifiable {@link #values} map.
478    *
479    * @return unmodifiable map {@link #values}.
480    * @see #values
481    */
482   public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
483     // shallow pointer copy
484     return Collections.unmodifiableMap(values);
485   }
486 
487   /**
488    * Setter for storing metadata as a (key, value) pair in {@link #values} map
489    *
490    * @param key The key.
491    * @param value The value.
492    * @see #values
493    */
494   public void setValue(byte[] key, byte[] value) {
495     setValue(new ImmutableBytesWritable(key), new ImmutableBytesWritable(value));
496   }
497 
498   /*
499    * @param key The key.
500    * @param value The value.
501    */
502   private void setValue(final ImmutableBytesWritable key,
503       final String value) {
504     setValue(key, new ImmutableBytesWritable(Bytes.toBytes(value)));
505   }
506 
507   /*
508    * Setter for storing metadata as a (key, value) pair in {@link #values} map
509    *
510    * @param key The key.
511    * @param value The value.
512    */
513   public void setValue(final ImmutableBytesWritable key,
514       final ImmutableBytesWritable value) {
515     if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
516       boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
517       LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
518           "use " + DURABILITY + " instead");
519       setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
520       return;
521     }
522     values.put(key, value);
523   }
524 
525   /**
526    * Setter for storing metadata as a (key, value) pair in {@link #values} map
527    *
528    * @param key The key.
529    * @param value The value.
530    * @see #values
531    */
532   public void setValue(String key, String value) {
533     if (value == null) {
534       remove(key);
535     } else {
536       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
537     }
538   }
539 
540   /**
541    * Remove metadata represented by the key from the {@link #values} map
542    *
543    * @param key Key whose key and value we're to remove from HTableDescriptor
544    * parameters.
545    */
546   public void remove(final String key) {
547     remove(new ImmutableBytesWritable(Bytes.toBytes(key)));
548   }
549 
550   /**
551    * Remove metadata represented by the key from the {@link #values} map
552    *
553    * @param key Key whose key and value we're to remove from HTableDescriptor
554    * parameters.
555    */
  public void remove(ImmutableBytesWritable key) {
    // Removing an absent key is a harmless no-op (Map.remove semantics).
    values.remove(key);
  }
559 
560   /**
561    * Check if the readOnly flag of the table is set. If the readOnly flag is
562    * set then the contents of the table can only be read from but not modified.
563    *
564    * @return true if all columns in the table should be read only
565    */
  public boolean isReadOnly() {
    // Defaults to DEFAULT_READONLY (false) when the READONLY key is unset.
    return isSomething(READONLY_KEY, DEFAULT_READONLY);
  }
569 
570   /**
571    * Setting the table as read only sets all the columns in the table as read
572    * only. By default all tables are modifiable, but if the readOnly flag is
573    * set to true then the contents of the table can only be read but not modified.
574    *
575    * @param readOnly True if all of the columns in the table should be read
576    * only.
577    */
578   public void setReadOnly(final boolean readOnly) {
579     setValue(READONLY_KEY, readOnly? TRUE: FALSE);
580   }
581 
582   /**
583    * Check if the compaction enable flag of the table is true. If flag is
584    * false then no minor/major compactions will be done in real.
585    *
586    * @return true if table compaction enabled
587    */
  public boolean isCompactionEnabled() {
    // Defaults to DEFAULT_COMPACTION_ENABLED (true) when the key is unset.
    return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
  }
591 
592   /**
593    * Setting the table compaction enable flag.
594    *
595    * @param isEnable True if enable compaction.
596    */
597   public void setCompactionEnabled(final boolean isEnable) {
598     setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
599   }
600 
601   /**
602    * Check if deferred log edits are enabled on the table.
603    *
604    * @return true if that deferred log flush is enabled on the table
605    *
606    * @see #setDeferredLogFlush(boolean)
607    * @deprecated use {@link #getDurability()}
608    */
  @Deprecated
  public synchronized boolean isDeferredLogFlush() {
    // Deferred log flush is now expressed via the DURABILITY setting (ASYNC_WAL).
    return getDurability() == Durability.ASYNC_WAL;
  }
613 
614   /**
   * This is used to defer the log edits syncing to the file system. Every time
616    * an edit is sent to the server it is first sync'd to the file system by the
617    * log writer. This sync is an expensive operation and thus can be deferred so
618    * that the edits are kept in memory for a specified period of time as represented
619    * by <code> hbase.regionserver.optionallogflushinterval </code> and not flushed
620    * for every edit.
621    * <p>
622    * NOTE:- This option might result in data loss if the region server crashes
623    * before these deferred edits in memory are flushed onto the filesystem.
624    * </p>
625    *
626    * @param isDeferredLogFlush
627    * @deprecated use {@link #setDurability(Durability)}
628    */
629   @Deprecated
630   public synchronized void setDeferredLogFlush(final boolean isDeferredLogFlush) {
631     this.setDurability(isDeferredLogFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
632   }
633 
634   /**
635    * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
636    * @param durability enum value
637    */
  public void setDurability(Durability durability) {
    // Keep the cached field and the persisted metadata entry in sync.
    this.durability = durability;
    setValue(DURABILITY_KEY, durability.name());
  }
642 
643   /**
644    * Returns the durability setting for the table.
645    * @return durability setting for the table.
646    */
647   public Durability getDurability() {
648     if (this.durability == null) {
649       byte[] durabilityValue = getValue(DURABILITY_KEY);
650       if (durabilityValue == null) {
651         this.durability = DEFAULT_DURABLITY;
652       } else {
653         try {
654           this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
655         } catch (IllegalArgumentException ex) {
656           LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
657             + " is not known. Durability:" + Bytes.toString(durabilityValue));
658           this.durability = DEFAULT_DURABLITY;
659         }
660       }
661     }
662     return this.durability;
663   }
664 
665   /**
666    * Get the name of the table
667    *
668    * @return TableName
669    */
  public TableName getTableName() {
    // May be null if this descriptor was default-constructed and not yet deserialized.
    return name;
  }
673 
674   /**
675    * Get the name of the table as a byte array.
676    *
677    * @return name of table
678    */
  public byte[] getName() {
    // Byte-array form of the table name, delegated to TableName.
    return name.getName();
  }
682 
683   /**
684    * Get the name of the table as a String
685    *
686    * @return name of table as a String
687    */
  public String getNameAsString() {
    // String form of the table name, delegated to TableName.
    return name.getNameAsString();
  }
691 
692   /**
693    * This get the class associated with the region split policy which
694    * determines when a region split should occur.  The class used by
695    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
696    *
697    * @return the class name of the region split policy for this table.
698    * If this returns null, the default split policy is used.
699    */
700    public String getRegionSplitPolicyClassName() {
701     return getValue(SPLIT_POLICY);
702   }
703 
704   /**
705    * Set the name of the table.
706    *
707    * @param name name of table
708    */
  @Deprecated
  public void setName(byte[] name) {
    // Delegates to the TableName overload; kept for API compatibility.
    setName(TableName.valueOf(name));
  }
713 
  @Deprecated
  public void setName(TableName name) {
    this.name = name;
    // Re-derive the IS_ROOT/IS_META flags for the new name.
    setMetaFlags(this.name);
  }
719 
720   /**
   * Returns the maximum size up to which a region can grow, after which a region
722    * split is triggered. The region size is represented by the size of the biggest
723    * store file in that region.
724    *
725    * @return max hregion size for table, -1 if not set.
726    *
727    * @see #setMaxFileSize(long)
728    */
729   public long getMaxFileSize() {
730     byte [] value = getValue(MAX_FILESIZE_KEY);
731     if (value != null) {
732       return Long.parseLong(Bytes.toString(value));
733     }
734     return -1;
735   }
736 
737   /**
   * Sets the maximum size up to which a region can grow, after which a region
739    * split is triggered. The region size is represented by the size of the biggest
740    * store file in that region, i.e. If the biggest store file grows beyond the
741    * maxFileSize, then the region split is triggered. This defaults to a value of
742    * 256 MB.
743    * <p>
744    * This is not an absolute value and might vary. Assume that a single row exceeds
745    * the maxFileSize then the storeFileSize will be greater than maxFileSize since
746    * a single row cannot be split across multiple regions
747    * </p>
748    *
749    * @param maxFileSize The maximum file size that a store file can grow to
750    * before a split is triggered.
751    */
752   public void setMaxFileSize(long maxFileSize) {
753     setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
754   }
755 
756   /**
757    * Returns the size of the memstore after which a flush to filesystem is triggered.
758    *
759    * @return memory cache flush size for each hregion, -1 if not set.
760    *
761    * @see #setMemStoreFlushSize(long)
762    */
763   public long getMemStoreFlushSize() {
764     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
765     if (value != null) {
766       return Long.parseLong(Bytes.toString(value));
767     }
768     return -1;
769   }
770 
771   /**
772    * Represents the maximum size of the memstore after which the contents of the
773    * memstore are flushed to the filesystem. This defaults to a size of 64 MB.
774    *
775    * @param memstoreFlushSize memory cache flush size for each hregion
776    */
777   public void setMemStoreFlushSize(long memstoreFlushSize) {
778     setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
779   }
780 
781   /**
782    * Adds a column family.
783    * @param family HColumnDescriptor of family to add.
784    */
785   public void addFamily(final HColumnDescriptor family) {
786     if (family.getName() == null || family.getName().length <= 0) {
787       throw new NullPointerException("Family name cannot be null or empty");
788     }
789     this.families.put(family.getName(), family);
790   }
791 
792   /**
793    * Checks to see if this table contains the given column family
794    * @param familyName Family name or column name.
795    * @return true if the table contains the specified family name
796    */
  public boolean hasFamily(final byte [] familyName) {
    // families uses a raw-byte comparator, so byte[] keys compare by content.
    return families.containsKey(familyName);
  }
800 
801   /**
802    * @return Name of this table and then a map of all of the column family
803    * descriptors.
804    * @see #getNameAsString()
805    */
806   @Override
807   public String toString() {
808     StringBuilder s = new StringBuilder();
809     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
810     s.append(getValues(true));
811     for (HColumnDescriptor f : families.values()) {
812       s.append(", ").append(f);
813     }
814     return s.toString();
815   }
816 
817   /**
818    * @return Name of this table and then a map of all of the column family
819    * descriptors (with only the non-default column family attributes)
820    */
821   public String toStringCustomizedValues() {
822     StringBuilder s = new StringBuilder();
823     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
824     s.append(getValues(false));
825     for(HColumnDescriptor hcd : families.values()) {
826       s.append(", ").append(hcd.toStringCustomizedValues());
827     }
828     return s.toString();
829   }
830 
831   private StringBuilder getValues(boolean printDefaults) {
832     StringBuilder s = new StringBuilder();
833 
834     // step 1: set partitioning and pruning
835     Set<ImmutableBytesWritable> reservedKeys = new TreeSet<ImmutableBytesWritable>();
836     Set<ImmutableBytesWritable> userKeys = new TreeSet<ImmutableBytesWritable>();
837     for (ImmutableBytesWritable k : values.keySet()) {
838       if (k == null || k.get() == null) continue;
839       String key = Bytes.toString(k.get());
840       // in this section, print out reserved keywords + coprocessor info
841       if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
842         userKeys.add(k);
843         continue;
844       }
845       // only print out IS_ROOT/IS_META if true
846       String value = Bytes.toString(values.get(k).get());
847       if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
848         if (Boolean.valueOf(value) == false) continue;
849       }
850       // see if a reserved key is a default value. may not want to print it out
851       if (printDefaults
852           || !DEFAULT_VALUES.containsKey(key)
853           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
854         reservedKeys.add(k);
855       }
856     }
857 
858     // early exit optimization
859     boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
860     if (!hasAttributes && configuration.isEmpty()) return s;
861 
862     s.append(", {");
863     // step 2: printing attributes
864     if (hasAttributes) {
865       s.append("TABLE_ATTRIBUTES => {");
866 
867       // print all reserved keys first
868       boolean printCommaForAttr = false;
869       for (ImmutableBytesWritable k : reservedKeys) {
870         String key = Bytes.toString(k.get());
871         String value = Bytes.toString(values.get(k).get());
872         if (printCommaForAttr) s.append(", ");
873         printCommaForAttr = true;
874         s.append(key);
875         s.append(" => ");
876         s.append('\'').append(value).append('\'');
877       }
878 
879       if (!userKeys.isEmpty()) {
880         // print all non-reserved, advanced config keys as a separate subset
881         if (printCommaForAttr) s.append(", ");
882         printCommaForAttr = true;
883         s.append(HConstants.METADATA).append(" => ");
884         s.append("{");
885         boolean printCommaForCfg = false;
886         for (ImmutableBytesWritable k : userKeys) {
887           String key = Bytes.toString(k.get());
888           String value = Bytes.toString(values.get(k).get());
889           if (printCommaForCfg) s.append(", ");
890           printCommaForCfg = true;
891           s.append('\'').append(key).append('\'');
892           s.append(" => ");
893           s.append('\'').append(value).append('\'');
894         }
895         s.append("}");
896       }
897     }
898 
899     // step 3: printing all configuration:
900     if (!configuration.isEmpty()) {
901       if (hasAttributes) {
902         s.append(", ");
903       }
904       s.append(HConstants.CONFIGURATION).append(" => ");
905       s.append('{');
906       boolean printCommaForConfig = false;
907       for (Map.Entry<String, String> e : configuration.entrySet()) {
908         if (printCommaForConfig) s.append(", ");
909         printCommaForConfig = true;
910         s.append('\'').append(e.getKey()).append('\'');
911         s.append(" => ");
912         s.append('\'').append(e.getValue()).append('\'');
913       }
914       s.append("}");
915     }
916     s.append("}"); // end METHOD
917     return s;
918   }
919 
920   /**
921    * Compare the contents of the descriptor with another one passed as a parameter.
922    * Checks if the obj passed is an instance of HTableDescriptor, if yes then the
923    * contents of the descriptors are compared.
924    *
925    * @return true if the contents of the the two descriptors exactly match
926    *
927    * @see java.lang.Object#equals(java.lang.Object)
928    */
929   @Override
930   public boolean equals(Object obj) {
931     if (this == obj) {
932       return true;
933     }
934     if (obj == null) {
935       return false;
936     }
937     if (!(obj instanceof HTableDescriptor)) {
938       return false;
939     }
940     return compareTo((HTableDescriptor)obj) == 0;
941   }
942 
943   /**
944    * @see java.lang.Object#hashCode()
945    */
946   @Override
947   public int hashCode() {
948     int result = this.name.hashCode();
949     result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode();
950     if (this.families != null && this.families.size() > 0) {
951       for (HColumnDescriptor e: this.families.values()) {
952         result ^= e.hashCode();
953       }
954     }
955     result ^= values.hashCode();
956     result ^= configuration.hashCode();
957     return result;
958   }
959 
960   /**
961    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
962    * and is used for de-serialization of the HTableDescriptor over RPC
963    * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
964    */
965   @Deprecated
966   @Override
967   public void readFields(DataInput in) throws IOException {
968     int version = in.readInt();
969     if (version < 3)
970       throw new IOException("versions < 3 are not supported (and never existed!?)");
971     // version 3+
972     name = TableName.valueOf(Bytes.readByteArray(in));
973     setRootRegion(in.readBoolean());
974     setMetaRegion(in.readBoolean());
975     values.clear();
976     configuration.clear();
977     int numVals = in.readInt();
978     for (int i = 0; i < numVals; i++) {
979       ImmutableBytesWritable key = new ImmutableBytesWritable();
980       ImmutableBytesWritable value = new ImmutableBytesWritable();
981       key.readFields(in);
982       value.readFields(in);
983       setValue(key, value);
984     }
985     families.clear();
986     int numFamilies = in.readInt();
987     for (int i = 0; i < numFamilies; i++) {
988       HColumnDescriptor c = new HColumnDescriptor();
989       c.readFields(in);
990       families.put(c.getName(), c);
991     }
992     if (version >= 7) {
993       int numConfigs = in.readInt();
994       for (int i = 0; i < numConfigs; i++) {
995         ImmutableBytesWritable key = new ImmutableBytesWritable();
996         ImmutableBytesWritable value = new ImmutableBytesWritable();
997         key.readFields(in);
998         value.readFields(in);
999         configuration.put(
1000           Bytes.toString(key.get(), key.getOffset(), key.getLength()),
1001           Bytes.toString(value.get(), value.getOffset(), value.getLength()));
1002       }
1003     }
1004   }
1005 
1006   /**
1007    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
1008    * and is used for serialization of the HTableDescriptor over RPC
1009    * @deprecated Writables are going away.
1010    * Use {@link com.google.protobuf.MessageLite#toByteArray} instead.
1011    */
1012   @Deprecated
1013   @Override
1014   public void write(DataOutput out) throws IOException {
1015 	  out.writeInt(TABLE_DESCRIPTOR_VERSION);
1016     Bytes.writeByteArray(out, name.toBytes());
1017     out.writeBoolean(isRootRegion());
1018     out.writeBoolean(isMetaRegion());
1019     out.writeInt(values.size());
1020     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1021         values.entrySet()) {
1022       e.getKey().write(out);
1023       e.getValue().write(out);
1024     }
1025     out.writeInt(families.size());
1026     for(Iterator<HColumnDescriptor> it = families.values().iterator();
1027         it.hasNext(); ) {
1028       HColumnDescriptor family = it.next();
1029       family.write(out);
1030     }
1031     out.writeInt(configuration.size());
1032     for (Map.Entry<String, String> e : configuration.entrySet()) {
1033       new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
1034       new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
1035     }
1036   }
1037 
1038   // Comparable
1039 
1040   /**
1041    * Compares the descriptor with another descriptor which is passed as a parameter.
1042    * This compares the content of the two descriptors and not the reference.
1043    *
1044    * @return 0 if the contents of the descriptors are exactly matching,
1045    * 		 1 if there is a mismatch in the contents
1046    */
1047   @Override
1048   public int compareTo(final HTableDescriptor other) {
1049     int result = this.name.compareTo(other.name);
1050     if (result == 0) {
1051       result = families.size() - other.families.size();
1052     }
1053     if (result == 0 && families.size() != other.families.size()) {
1054       result = Integer.valueOf(families.size()).compareTo(
1055           Integer.valueOf(other.families.size()));
1056     }
1057     if (result == 0) {
1058       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
1059           it2 = other.families.values().iterator(); it.hasNext(); ) {
1060         result = it.next().compareTo(it2.next());
1061         if (result != 0) {
1062           break;
1063         }
1064       }
1065     }
1066     if (result == 0) {
1067       // punt on comparison for ordering, just calculate difference
1068       result = this.values.hashCode() - other.values.hashCode();
1069       if (result < 0)
1070         result = -1;
1071       else if (result > 0)
1072         result = 1;
1073     }
1074     if (result == 0) {
1075       result = this.configuration.hashCode() - other.configuration.hashCode();
1076       if (result < 0)
1077         result = -1;
1078       else if (result > 0)
1079         result = 1;
1080     }
1081     return result;
1082   }
1083 
1084   /**
1085    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
1086    * of all the column families of the table.
1087    *
1088    * @return Immutable collection of {@link HColumnDescriptor} of all the
1089    * column families.
1090    */
1091   public Collection<HColumnDescriptor> getFamilies() {
1092     return Collections.unmodifiableCollection(this.families.values());
1093   }
1094 
1095   /**
1096    * Returns all the column family names of the current table. The map of
1097    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1098    * This returns all the keys of the family map which represents the column
1099    * family names of the table.
1100    *
1101    * @return Immutable sorted set of the keys of the families.
1102    */
1103   public Set<byte[]> getFamiliesKeys() {
1104     return Collections.unmodifiableSet(this.families.keySet());
1105   }
1106 
1107   /**
1108    * Returns an array all the {@link HColumnDescriptor} of the column families
1109    * of the table.
1110    *
1111    * @return Array of all the HColumnDescriptors of the current table
1112    *
1113    * @see #getFamilies()
1114    */
1115   public HColumnDescriptor[] getColumnFamilies() {
1116     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1117     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1118   }
1119 
1120 
1121   /**
1122    * Returns the HColumnDescriptor for a specific column family with name as
1123    * specified by the parameter column.
1124    *
1125    * @param column Column family name
1126    * @return Column descriptor for the passed family name or the family on
1127    * passed in column.
1128    */
1129   public HColumnDescriptor getFamily(final byte [] column) {
1130     return this.families.get(column);
1131   }
1132 
1133 
1134   /**
1135    * Removes the HColumnDescriptor with name specified by the parameter column
1136    * from the table descriptor
1137    *
1138    * @param column Name of the column family to be removed.
1139    * @return Column descriptor for the passed family name or the family on
1140    * passed in column.
1141    */
1142   public HColumnDescriptor removeFamily(final byte [] column) {
1143     return this.families.remove(column);
1144   }
1145 
1146 
1147   /**
1148    * Add a table coprocessor to this table. The coprocessor
1149    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1150    * or Endpoint.
1151    * It won't check if the class can be loaded or not.
1152    * Whether a coprocessor is loadable or not will be determined when
1153    * a region is opened.
1154    * @param className Full class name.
1155    * @throws IOException
1156    */
1157   public void addCoprocessor(String className) throws IOException {
1158     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1159   }
1160 
1161 
1162   /**
1163    * Add a table coprocessor to this table. The coprocessor
1164    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1165    * or Endpoint.
1166    * It won't check if the class can be loaded or not.
1167    * Whether a coprocessor is loadable or not will be determined when
1168    * a region is opened.
1169    * @param jarFilePath Path of the jar file. If it's null, the class will be
1170    * loaded from default classloader.
1171    * @param className Full class name.
1172    * @param priority Priority
1173    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1174    * @throws IOException
1175    */
1176   public void addCoprocessor(String className, Path jarFilePath,
1177                              int priority, final Map<String, String> kvs)
1178   throws IOException {
1179     if (hasCoprocessor(className)) {
1180       throw new IOException("Coprocessor " + className + " already exists.");
1181     }
1182     // validate parameter kvs
1183     StringBuilder kvString = new StringBuilder();
1184     if (kvs != null) {
1185       for (Map.Entry<String, String> e: kvs.entrySet()) {
1186         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1187           throw new IOException("Illegal parameter key = " + e.getKey());
1188         }
1189         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1190           throw new IOException("Illegal parameter (" + e.getKey() +
1191               ") value = " + e.getValue());
1192         }
1193         if (kvString.length() != 0) {
1194           kvString.append(',');
1195         }
1196         kvString.append(e.getKey());
1197         kvString.append('=');
1198         kvString.append(e.getValue());
1199       }
1200     }
1201 
1202     // generate a coprocessor key
1203     int maxCoprocessorNumber = 0;
1204     Matcher keyMatcher;
1205     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1206         this.values.entrySet()) {
1207       keyMatcher =
1208           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1209               Bytes.toString(e.getKey().get()));
1210       if (!keyMatcher.matches()) {
1211         continue;
1212       }
1213       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)),
1214           maxCoprocessorNumber);
1215     }
1216     maxCoprocessorNumber++;
1217 
1218     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1219     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1220         "|" + className + "|" + Integer.toString(priority) + "|" +
1221         kvString.toString();
1222     setValue(key, value);
1223   }
1224 
1225 
1226   /**
1227    * Check if the table has an attached co-processor represented by the name className
1228    *
1229    * @param className - Class name of the co-processor
1230    * @return true of the table has a co-processor className
1231    */
1232   public boolean hasCoprocessor(String className) {
1233     Matcher keyMatcher;
1234     Matcher valueMatcher;
1235     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1236         this.values.entrySet()) {
1237       keyMatcher =
1238           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1239               Bytes.toString(e.getKey().get()));
1240       if (!keyMatcher.matches()) {
1241         continue;
1242       }
1243       valueMatcher =
1244         HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(
1245             Bytes.toString(e.getValue().get()));
1246       if (!valueMatcher.matches()) {
1247         continue;
1248       }
1249       // get className and compare
1250       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1251       if (clazz.equals(className.trim())) {
1252         return true;
1253       }
1254     }
1255     return false;
1256   }
1257 
1258   /**
1259    * Return the list of attached co-processor represented by their name className
1260    *
1261    * @return The list of co-processors classNames
1262    */
1263   public List<String> getCoprocessors() {
1264     List<String> result = new ArrayList<String>();
1265     Matcher keyMatcher;
1266     Matcher valueMatcher;
1267     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values.entrySet()) {
1268       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1269       if (!keyMatcher.matches()) {
1270         continue;
1271       }
1272       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1273           .toString(e.getValue().get()));
1274       if (!valueMatcher.matches()) {
1275         continue;
1276       }
1277       result.add(valueMatcher.group(2).trim()); // classname is the 2nd field
1278     }
1279     return result;
1280   }
1281 
1282   /**
1283    * Remove a coprocessor from those set on the table
1284    * @param className Class name of the co-processor
1285    */
1286   public void removeCoprocessor(String className) {
1287     ImmutableBytesWritable match = null;
1288     Matcher keyMatcher;
1289     Matcher valueMatcher;
1290     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values
1291         .entrySet()) {
1292       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1293           .getKey().get()));
1294       if (!keyMatcher.matches()) {
1295         continue;
1296       }
1297       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1298           .toString(e.getValue().get()));
1299       if (!valueMatcher.matches()) {
1300         continue;
1301       }
1302       // get className and compare
1303       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1304       // remove the CP if it is present
1305       if (clazz.equals(className.trim())) {
1306         match = e.getKey();
1307         break;
1308       }
1309     }
1310     // if we found a match, remove it
1311     if (match != null)
1312       remove(match);
1313   }
1314 
1315   /**
1316    * Returns the {@link Path} object representing the table directory under
1317    * path rootdir
1318    *
1319    * Deprecated use FSUtils.getTableDir() instead.
1320    *
1321    * @param rootdir qualified path of HBase root directory
1322    * @param tableName name of table
1323    * @return {@link Path} for table
1324    */
1325   @Deprecated
1326   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1327     //This is bad I had to mirror code from FSUTils.getTableDir since
1328     //there is no module dependency between hbase-client and hbase-server
1329     TableName name = TableName.valueOf(tableName);
1330     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1331               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1332   }
1333 
  /** Table descriptor for <code>-ROOT-</code> catalog table */
  public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor(
      TableName.ROOT_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setTimeToLive(HConstants.FOREVER)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
      });
1346 
  /** Table descriptor for <code>.META.</code> catalog table */
  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
      TableName.META_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
              .setBloomFilterType(BloomType.NONE)
      });
1360 
  // Attach the multi-row mutation endpoint to the META descriptor at class
  // load time; META must support atomic multi-row mutations.
  static {
    try {
      META_TABLEDESC.addCoprocessor(
          "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
          null, Coprocessor.PRIORITY_SYSTEM, null);
    } catch (IOException ex) {
      // Fail class initialization loudly: META without this coprocessor
      // would be broken, so wrapping in an unchecked exception is deliberate.
      throw new RuntimeException(ex);
    }
  }
1371 
  // Column family of the namespace table holding namespace metadata.
  public final static String NAMESPACE_FAMILY_INFO = "info";
  // Byte form of the family name, precomputed for row construction.
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  // Qualifier under which the serialized namespace descriptor is stored.
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
1375 
  /** Table descriptor for namespace table */
  public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
      TableName.NAMESPACE_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
      });
1387 
1388   @Deprecated
1389   public void setOwner(User owner) {
1390     setOwnerString(owner != null ? owner.getShortName() : null);
1391   }
1392 
1393   // used by admin.rb:alter(table_name,*args) to update owner.
1394   @Deprecated
1395   public void setOwnerString(String ownerString) {
1396     if (ownerString != null) {
1397       setValue(OWNER_KEY, ownerString);
1398     } else {
1399       remove(OWNER_KEY);
1400     }
1401   }
1402 
1403   @Deprecated
1404   public String getOwnerString() {
1405     if (getValue(OWNER_KEY) != null) {
1406       return Bytes.toString(getValue(OWNER_KEY));
1407     }
1408     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1409     // .META. and -ROOT- should return system user as owner, not null (see
1410     // MasterFileSystem.java:bootstrap()).
1411     return null;
1412   }
1413 
1414   /**
1415    * @return This instance serialized with pb with pb magic prefix
1416    * @see #parseFrom(byte[])
1417    */
1418   public byte [] toByteArray() {
1419     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1420   }
1421 
1422   /**
1423    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1424    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1425    * @throws DeserializationException
1426    * @throws IOException
1427    * @see #toByteArray()
1428    */
1429   public static HTableDescriptor parseFrom(final byte [] bytes)
1430   throws DeserializationException, IOException {
1431     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1432       return (HTableDescriptor)Writables.getWritable(bytes, new HTableDescriptor());
1433     }
1434     int pblen = ProtobufUtil.lengthOfPBMagic();
1435     TableSchema.Builder builder = TableSchema.newBuilder();
1436     TableSchema ts;
1437     try {
1438       ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1439     } catch (InvalidProtocolBufferException e) {
1440       throw new DeserializationException(e);
1441     }
1442     return convert(ts);
1443   }
1444 
1445   /**
1446    * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
1447    */
1448   public TableSchema convert() {
1449     TableSchema.Builder builder = TableSchema.newBuilder();
1450     builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
1451     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
1452       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1453       aBuilder.setFirst(ByteString.copyFrom(e.getKey().get()));
1454       aBuilder.setSecond(ByteString.copyFrom(e.getValue().get()));
1455       builder.addAttributes(aBuilder.build());
1456     }
1457     for (HColumnDescriptor hcd: getColumnFamilies()) {
1458       builder.addColumnFamilies(hcd.convert());
1459     }
1460     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1461       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1462       aBuilder.setName(e.getKey());
1463       aBuilder.setValue(e.getValue());
1464       builder.addConfiguration(aBuilder.build());
1465     }
1466     return builder.build();
1467   }
1468 
1469   /**
1470    * @param ts A pb TableSchema instance.
1471    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
1472    */
1473   public static HTableDescriptor convert(final TableSchema ts) {
1474     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
1475     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
1476     int index = 0;
1477     for (ColumnFamilySchema cfs: list) {
1478       hcds[index++] = HColumnDescriptor.convert(cfs);
1479     }
1480     HTableDescriptor htd = new HTableDescriptor(
1481         ProtobufUtil.toTableName(ts.getTableName()),
1482         hcds);
1483     for (BytesBytesPair a: ts.getAttributesList()) {
1484       htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1485     }
1486     for (NameStringPair a: ts.getConfigurationList()) {
1487       htd.setConfiguration(a.getName(), a.getValue());
1488     }
1489     return htd;
1490   }
1491 
1492   /**
1493    * Getter for accessing the configuration value by key
1494    */
1495   public String getConfigurationValue(String key) {
1496     return configuration.get(key);
1497   }
1498 
1499   /**
1500    * Getter for fetching an unmodifiable {@link #configuration} map.
1501    */
1502   public Map<String, String> getConfiguration() {
1503     // shallow pointer copy
1504     return Collections.unmodifiableMap(configuration);
1505   }
1506 
1507   /**
1508    * Setter for storing a configuration setting in {@link #configuration} map.
1509    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1510    * @param value String value. If null, removes the setting.
1511    */
1512   public void setConfiguration(String key, String value) {
1513     if (value == null) {
1514       removeConfiguration(key);
1515     } else {
1516       configuration.put(key, value);
1517     }
1518   }
1519 
1520   /**
1521    * Remove a config setting represented by the key from the {@link #configuration} map
1522    */
1523   public void removeConfiguration(final String key) {
1524     configuration.remove(key);
1525   }
1526 }