View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.DataInput;
22  import java.io.DataOutput;
23  import java.io.IOException;
24  import java.util.ArrayList;
25  import java.util.Collection;
26  import java.util.Collections;
27  import java.util.HashMap;
28  import java.util.HashSet;
29  import java.util.Iterator;
30  import java.util.List;
31  import java.util.Map;
32  import java.util.Set;
33  import java.util.TreeMap;
34  import java.util.TreeSet;
35  import java.util.regex.Matcher;
36  
37  import com.google.protobuf.HBaseZeroCopyByteString;
38  import org.apache.commons.logging.Log;
39  import org.apache.commons.logging.LogFactory;
40  import org.apache.hadoop.classification.InterfaceAudience;
41  import org.apache.hadoop.classification.InterfaceStability;
42  import org.apache.hadoop.fs.Path;
43  import org.apache.hadoop.hbase.client.Durability;
44  import org.apache.hadoop.hbase.exceptions.DeserializationException;
45  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
46  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
47  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
48  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
49  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
50  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
51  import org.apache.hadoop.hbase.regionserver.BloomType;
52  import org.apache.hadoop.hbase.security.User;
53  import org.apache.hadoop.hbase.util.Bytes;
54  import org.apache.hadoop.hbase.util.Writables;
55  import org.apache.hadoop.io.WritableComparable;
56  
57  import com.google.protobuf.InvalidProtocolBufferException;
58  
59  /**
60   * HTableDescriptor contains the details about an HBase table  such as the descriptors of
61   * all the column families, is the table a catalog table, <code> -ROOT- </code> or
62   * <code> hbase:meta </code>, if the table is read only, the maximum size of the memstore,
63   * when the region split should occur, coprocessors associated with it etc...
64   */
65  @InterfaceAudience.Public
66  @InterfaceStability.Evolving
67  public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
68  
69    private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
70  
71    /**
72     *  Changes prior to version 3 were not recorded here.
73     *  Version 3 adds metadata as a map where keys and values are byte[].
74     *  Version 4 adds indexes
75     *  Version 5 removed transactional pollution -- e.g. indexes
76     *  Version 6 changed metadata to BytesBytesPair in PB
77     *  Version 7 adds table-level configuration
78     */
79    private static final byte TABLE_DESCRIPTOR_VERSION = 7;
80  
81    private TableName name = null;
82  
83    /**
84     * A map which holds the metadata information of the table. This metadata
85     * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
86     * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
87     */
88    private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
89      new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
90  
91    /**
92     * A map which holds the configuration specific to the table.
93     * The keys of the map have the same names as config keys and override the defaults with
94     * table-specific settings. Example usage may be for compactions, etc.
95     */
96    private final Map<String, String> configuration = new HashMap<String, String>();
97  
  /**
   * Metadata key under which the region split policy class name is stored.
   * @see #getRegionSplitPolicyClassName()
   */
  public static final String SPLIT_POLICY = "SPLIT_POLICY";
99  
100   /**
101    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
102    * attribute which denotes the maximum size of the store file after which
103    * a region split occurs
104    *
105    * @see #getMaxFileSize()
106    */
107   public static final String MAX_FILESIZE = "MAX_FILESIZE";
108   private static final ImmutableBytesWritable MAX_FILESIZE_KEY =
109     new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE));
110 
  /** Metadata key under which the table owner is stored (presumably a user name; confirm with security code). */
  public static final String OWNER = "OWNER";
  /** Serialized form of {@link #OWNER}, used as the key in the {@link #values} map. */
  public static final ImmutableBytesWritable OWNER_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(OWNER));
114 
115   /**
116    * <em>INTERNAL</em> Used by rest interface to access this metadata
117    * attribute which denotes if the table is Read Only
118    *
119    * @see #isReadOnly()
120    */
121   public static final String READONLY = "READONLY";
122   private static final ImmutableBytesWritable READONLY_KEY =
123     new ImmutableBytesWritable(Bytes.toBytes(READONLY));
124 
125   /**
126    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
127    * attribute which denotes if the table is compaction enabled
128    *
129    * @see #isCompactionEnabled()
130    */
131   public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
132   private static final ImmutableBytesWritable COMPACTION_ENABLED_KEY =
133     new ImmutableBytesWritable(Bytes.toBytes(COMPACTION_ENABLED));
134 
135   /**
136    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
137    * attribute which represents the maximum size of the memstore after which
138    * its contents are flushed onto the disk
139    *
140    * @see #getMemStoreFlushSize()
141    */
142   public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
143   private static final ImmutableBytesWritable MEMSTORE_FLUSHSIZE_KEY =
144     new ImmutableBytesWritable(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
145 
146   /**
147    * <em>INTERNAL</em> Used by rest interface to access this metadata
148    * attribute which denotes if the table is a -ROOT- region or not
149    *
150    * @see #isRootRegion()
151    */
152   public static final String IS_ROOT = "IS_ROOT";
153   private static final ImmutableBytesWritable IS_ROOT_KEY =
154     new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT));
155 
156   /**
157    * <em>INTERNAL</em> Used by rest interface to access this metadata
158    * attribute which denotes if it is a catalog table, either
159    * <code> hbase:meta </code> or <code> -ROOT- </code>
160    *
161    * @see #isMetaRegion()
162    */
163   public static final String IS_META = "IS_META";
164   private static final ImmutableBytesWritable IS_META_KEY =
165     new ImmutableBytesWritable(Bytes.toBytes(IS_META));
166 
167   /**
168    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
169    * attribute which denotes if the deferred log flush option is enabled.
170    * @deprecated Use {@link #DURABILITY} instead.
171    */
172   @Deprecated
173   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
174   @Deprecated
175   private static final ImmutableBytesWritable DEFERRED_LOG_FLUSH_KEY =
176     new ImmutableBytesWritable(Bytes.toBytes(DEFERRED_LOG_FLUSH));
177 
178   /**
179    * <em>INTERNAL</em> {@link Durability} setting for the table.
180    */
181   public static final String DURABILITY = "DURABILITY";
182   private static final ImmutableBytesWritable DURABILITY_KEY =
183       new ImmutableBytesWritable(Bytes.toBytes("DURABILITY"));
184 
185   /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
186   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
187 
188   /*
189    *  The below are ugly but better than creating them each time till we
190    *  replace booleans being saved as Strings with plain booleans.  Need a
191    *  migration script to do this.  TODO.
192    */
193   private static final ImmutableBytesWritable FALSE =
194     new ImmutableBytesWritable(Bytes.toBytes(Boolean.FALSE.toString()));
195 
196   private static final ImmutableBytesWritable TRUE =
197     new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));
198 
199   private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
200 
201   /**
202    * Constant that denotes whether the table is READONLY by default and is false
203    */
204   public static final boolean DEFAULT_READONLY = false;
205 
206   /**
207    * Constant that denotes whether the table is compaction enabled by default
208    */
209   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
210 
211   /**
212    * Constant that denotes the maximum default size of the memstore after which
213    * the contents are flushed to the store files
214    */
215   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
216 
  /** Reserved keys mapped to their default values (as Strings); used when rendering in getValues. */
  private final static Map<String, String> DEFAULT_VALUES
    = new HashMap<String, String>();
  /** Keys managed by HBase itself; used e.g. to separate system from user metadata when printing. */
  private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
    = new HashSet<ImmutableBytesWritable>();
  static {
    DEFAULT_VALUES.put(MAX_FILESIZE,
        String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
        String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
        String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
    // Every key that has a default is reserved, plus the two catalog-table flags.
    for (String s : DEFAULT_VALUES.keySet()) {
      RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
    }
    RESERVED_KEYWORDS.add(IS_ROOT_KEY);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }
236 
237   /**
238    * Cache of whether this is a meta table or not.
239    */
240   private volatile Boolean meta = null;
241   /**
242    * Cache of whether this is root table or not.
243    */
244   private volatile Boolean root = null;
245 
246   /**
247    * Durability setting for the table
248    */
249   private Durability durability = null;
250 
251   /**
252    * Maps column family name to the respective HColumnDescriptors
253    */
254   private final Map<byte [], HColumnDescriptor> families =
255     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
256 
257   /**
258    * <em> INTERNAL </em> Private constructor used internally creating table descriptors for
259    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
260    */
261   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
262     setName(name);
263     for(HColumnDescriptor descriptor : families) {
264       this.families.put(descriptor.getName(), descriptor);
265     }
266   }
267 
268   /**
269    * <em> INTERNAL </em>Private constructor used internally creating table descriptors for
270    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
271    */
272   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
273       Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
274     setName(name);
275     for(HColumnDescriptor descriptor : families) {
276       this.families.put(descriptor.getName(), descriptor);
277     }
278     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
279         values.entrySet()) {
280       setValue(entry.getKey(), entry.getValue());
281     }
282   }
283 
  /**
   * Default constructor which constructs an empty object.
   * For deserializing an HTableDescriptor instance only.
   * Do not use to build a descriptor by hand; use {@link #HTableDescriptor(TableName)} instead.
   * @deprecated Used by Writables and Writables are going away.
   */
  @Deprecated
  public HTableDescriptor() {
    super();
  }
293 
294   /**
295    * Construct a table descriptor specifying a TableName object
296    * @param name Table name.
297    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
298    */
299   public HTableDescriptor(final TableName name) {
300     super();
301     setName(name);
302   }
303 
  /**
   * Construct a table descriptor specifying a byte array table name
   * @param name Table name.
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   * @deprecated Use {@link #HTableDescriptor(TableName)} instead.
   */
  @Deprecated
  public HTableDescriptor(final byte[] name) {
    this(TableName.valueOf(name));
  }
313 
  /**
   * Construct a table descriptor specifying a String table name
   * @param name Table name.
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   * @deprecated Use {@link #HTableDescriptor(TableName)} instead.
   */
  @Deprecated
  public HTableDescriptor(final String name) {
    this(TableName.valueOf(name));
  }
323 
324   /**
325    * Construct a table descriptor by cloning the descriptor passed as a parameter.
326    * <p>
327    * Makes a deep copy of the supplied descriptor.
328    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
329    * @param desc The descriptor.
330    */
331   public HTableDescriptor(final HTableDescriptor desc) {
332     super();
333     setName(desc.name);
334     setMetaFlags(this.name);
335     for (HColumnDescriptor c: desc.families.values()) {
336       this.families.put(c.getName(), new HColumnDescriptor(c));
337     }
338     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
339         desc.values.entrySet()) {
340       setValue(e.getKey(), e.getValue());
341     }
342     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
343       this.configuration.put(e.getKey(), e.getValue());
344     }
345   }
346 
  /*
   * Set meta flags on this table.
   * IS_ROOT_KEY is set if its a -ROOT- table
   * IS_META_KEY is set either if its a -ROOT- or a hbase:meta table
   * Called by constructors.
   * @param name
   */
  private void setMetaFlags(final TableName name) {
    // A -ROOT- table is also a meta region, hence the OR with isRootRegion().
    setMetaRegion(isRootRegion() ||
        name.equals(TableName.META_TABLE_NAME));
  }
358 
359   /**
360    * Check if the descriptor represents a <code> -ROOT- </code> region.
361    *
362    * @return true if this is a <code> -ROOT- </code> region
363    */
364   public boolean isRootRegion() {
365     if (this.root == null) {
366       this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
367     }
368     return this.root.booleanValue();
369   }
370 
  /**
   * <em> INTERNAL </em> Used to denote if the current table represents
   * <code> -ROOT- </code> region. This is used internally by the
   * HTableDescriptor constructors
   *
   * @param isRoot true if this is the <code> -ROOT- </code> region
   * @see #isRootRegion()
   */
  protected void setRootRegion(boolean isRoot) {
    // TODO: Make the value a boolean rather than String of boolean.
    setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
  }
382 
383   /**
384    * Checks if this table is <code> hbase:meta </code>
385    * region.
386    *
387    * @return true if this table is <code> hbase:meta </code>
388    * region
389    */
390   public boolean isMetaRegion() {
391     if (this.meta == null) {
392       this.meta = calculateIsMetaRegion();
393     }
394     return this.meta.booleanValue();
395   }
396 
397   private synchronized Boolean calculateIsMetaRegion() {
398     byte [] value = getValue(IS_META_KEY);
399     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
400   }
401 
402   private boolean isSomething(final ImmutableBytesWritable key,
403       final boolean valueIfNull) {
404     byte [] value = getValue(key);
405     if (value != null) {
406       return Boolean.valueOf(Bytes.toString(value));
407     }
408     return valueIfNull;
409   }
410 
  /**
   * <em> INTERNAL </em> Used to denote if the current table represents
   * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
   * internally by the HTableDescriptor constructors
   *
   * @param isMeta true if its either <code> -ROOT- </code> or
   * <code> hbase:meta </code> region
   * @see #isMetaRegion()
   */
  protected void setMetaRegion(boolean isMeta) {
    // Stored as the String "true"/"false"; see the TODO on setRootRegion.
    setValue(IS_META_KEY, isMeta? TRUE: FALSE);
  }
422 
  /**
   * Checks if the table is a <code>hbase:meta</code> table
   *
   * @return true if table is <code> hbase:meta </code> region.
   */
  public boolean isMetaTable() {
    // -ROOT- also carries the meta flag (see setMetaFlags), so exclude it explicitly.
    return isMetaRegion() && !isRootRegion();
  }
431 
  /**
   * Getter for accessing the metadata associated with the key
   *
   * @param key The key.
   * @return The value, or null if the key is not present.
   * @see #values
   */
  public byte[] getValue(byte[] key) {
    // Wrap the raw bytes so lookup uses ImmutableBytesWritable equality.
    return getValue(new ImmutableBytesWritable(key));
  }
442 
443   private byte[] getValue(final ImmutableBytesWritable key) {
444     ImmutableBytesWritable ibw = values.get(key);
445     if (ibw == null)
446       return null;
447     return ibw.get();
448   }
449 
450   /**
451    * Getter for accessing the metadata associated with the key
452    *
453    * @param key The key.
454    * @return The value.
455    * @see #values
456    */
457   public String getValue(String key) {
458     byte[] value = getValue(Bytes.toBytes(key));
459     if (value == null)
460       return null;
461     return Bytes.toString(value);
462   }
463 
  /**
   * Getter for fetching an unmodifiable {@link #values} map.
   *
   * @return unmodifiable map {@link #values}.
   * @see #values
   */
  public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
    // shallow pointer copy: a read-only live view over the backing map, not a snapshot
    return Collections.unmodifiableMap(values);
  }
474 
  /**
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * <p>Note: the DEFERRED_LOG_FLUSH key is intercepted by the
   * {@link #setValue(ImmutableBytesWritable, ImmutableBytesWritable)} overload
   * and translated into a durability setting instead of being stored.
   *
   * @param key The key.
   * @param value The value.
   * @see #values
   */
  public void setValue(byte[] key, byte[] value) {
    setValue(new ImmutableBytesWritable(key), new ImmutableBytesWritable(value));
  }
485 
  /*
   * Stores a metadata pair, converting the String value to bytes first.
   * @param key The key.
   * @param value The value.
   */
  private void setValue(final ImmutableBytesWritable key,
      final String value) {
    setValue(key, new ImmutableBytesWritable(Bytes.toBytes(value)));
  }
494 
495   /*
496    * Setter for storing metadata as a (key, value) pair in {@link #values} map
497    *
498    * @param key The key.
499    * @param value The value.
500    */
501   public void setValue(final ImmutableBytesWritable key,
502       final ImmutableBytesWritable value) {
503     if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
504       boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
505       LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
506           "use " + DURABILITY + " instead");
507       setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
508       return;
509     }
510     values.put(key, value);
511   }
512 
513   /**
514    * Setter for storing metadata as a (key, value) pair in {@link #values} map
515    *
516    * @param key The key.
517    * @param value The value.
518    * @see #values
519    */
520   public void setValue(String key, String value) {
521     if (value == null) {
522       remove(key);
523     } else {
524       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
525     }
526   }
527 
  /**
   * Remove metadata represented by the key from the {@link #values} map
   *
   * @param key Key whose key and value we're to remove from HTableDescriptor
   * parameters. Converted to bytes before lookup.
   */
  public void remove(final String key) {
    remove(new ImmutableBytesWritable(Bytes.toBytes(key)));
  }
537 
  /**
   * Remove metadata represented by the key from the {@link #values} map.
   * Removing an absent key is a no-op.
   *
   * @param key Key whose key and value we're to remove from HTableDescriptor
   * parameters.
   */
  public void remove(ImmutableBytesWritable key) {
    values.remove(key);
  }
547 
  /**
   * Remove metadata represented by the key from the {@link #values} map
   *
   * @param key Key whose key and value we're to remove from HTableDescriptor
   * parameters. Wrapped before lookup so byte-content equality applies.
   */
  public void remove(final byte [] key) {
    remove(new ImmutableBytesWritable(key));
  }
557 
  /**
   * Check if the readOnly flag of the table is set. If the readOnly flag is
   * set then the contents of the table can only be read from but not modified.
   *
   * @return true if all columns in the table should be read only;
   * defaults to {@link #DEFAULT_READONLY} when unset
   */
  public boolean isReadOnly() {
    return isSomething(READONLY_KEY, DEFAULT_READONLY);
  }
567 
  /**
   * Setting the table as read only sets all the columns in the table as read
   * only. By default all tables are modifiable, but if the readOnly flag is
   * set to true then the contents of the table can only be read but not modified.
   *
   * @param readOnly True if all of the columns in the table should be read
   * only.
   * @see #isReadOnly()
   */
  public void setReadOnly(final boolean readOnly) {
    setValue(READONLY_KEY, readOnly? TRUE: FALSE);
  }
579 
  /**
   * Check if the compaction enable flag of the table is true. If flag is
   * false then no minor/major compactions will be done in real.
   *
   * @return true if table compaction enabled;
   * defaults to {@link #DEFAULT_COMPACTION_ENABLED} when unset
   */
  public boolean isCompactionEnabled() {
    return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
  }
589 
  /**
   * Setting the table compaction enable flag.
   *
   * @param isEnable True if enable compaction.
   * @see #isCompactionEnabled()
   */
  public void setCompactionEnabled(final boolean isEnable) {
    setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
  }
598 
  /**
   * Check if deferred log edits are enabled on the table.
   *
   * @return true if that deferred log flush is enabled on the table
   *
   * @see #setDeferredLogFlush(boolean)
   * @deprecated use {@link #getDurability()}
   */
  @Deprecated
  public synchronized boolean isDeferredLogFlush() {
    // Deferred log flush is modeled as the ASYNC_WAL durability level.
    return getDurability() == Durability.ASYNC_WAL;
  }
611 
  /**
   * This is used to defer the log edits syncing to the file system. Everytime
   * an edit is sent to the server it is first sync'd to the file system by the
   * log writer. This sync is an expensive operation and thus can be deferred so
   * that the edits are kept in memory for a specified period of time as represented
   * by <code> hbase.regionserver.optionallogflushinterval </code> and not flushed
   * for every edit.
   * <p>
   * NOTE:- This option might result in data loss if the region server crashes
   * before these deferred edits in memory are flushed onto the filesystem.
   * </p>
   *
   * @param isDeferredLogFlush true maps to {@link Durability#ASYNC_WAL},
   * false to the default durability
   * @deprecated use {@link #setDurability(Durability)}
   */
  @Deprecated
  public synchronized void setDeferredLogFlush(final boolean isDeferredLogFlush) {
    this.setDurability(isDeferredLogFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
  }
631 
  /**
   * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
   * Updates both the cached field and the persisted metadata entry.
   * @param durability enum value
   */
  public void setDurability(Durability durability) {
    this.durability = durability;
    setValue(DURABILITY_KEY, durability.name());
  }
640 
641   /**
642    * Returns the durability setting for the table.
643    * @return durability setting for the table.
644    */
645   public Durability getDurability() {
646     if (this.durability == null) {
647       byte[] durabilityValue = getValue(DURABILITY_KEY);
648       if (durabilityValue == null) {
649         this.durability = DEFAULT_DURABLITY;
650       } else {
651         try {
652           this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
653         } catch (IllegalArgumentException ex) {
654           LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
655             + " is not known. Durability:" + Bytes.toString(durabilityValue));
656           this.durability = DEFAULT_DURABLITY;
657         }
658       }
659     }
660     return this.durability;
661   }
662 
  /**
   * Get the name of the table
   *
   * @return TableName (may be null if the deprecated no-arg constructor was used
   * and the descriptor has not been deserialized yet)
   */
  public TableName getTableName() {
    return name;
  }
671 
  /**
   * Get the name of the table as a byte array.
   *
   * @return name of table
   */
  public byte[] getName() {
    return name.getName();
  }
680 
  /**
   * Get the name of the table as a String
   *
   * @return name of table as a String
   */
  public String getNameAsString() {
    return name.getNameAsString();
  }
689 
  /**
   * This get the class associated with the region split policy which
   * determines when a region split should occur.  The class used by
   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
   *
   * @return the class name of the region split policy for this table.
   * If this returns null, the default split policy is used.
   * @see #SPLIT_POLICY
   */
   public String getRegionSplitPolicyClassName() {
    return getValue(SPLIT_POLICY);
  }
701 
  /**
   * Set the name of the table.
   *
   * @param name name of table
   * @deprecated Construct a new descriptor with the desired {@link TableName} instead.
   */
  @Deprecated
  public void setName(byte[] name) {
    setName(TableName.valueOf(name));
  }
711 
  /**
   * Set the name of the table and refresh the cached meta flags derived from it.
   * @param name name of table
   * @deprecated Construct a new descriptor with the desired {@link TableName} instead.
   */
  @Deprecated
  public void setName(TableName name) {
    this.name = name;
    setMetaFlags(this.name);
  }
717 
718   /**
719    * Returns the maximum size upto which a region can grow to after which a region
720    * split is triggered. The region size is represented by the size of the biggest
721    * store file in that region.
722    *
723    * @return max hregion size for table, -1 if not set.
724    *
725    * @see #setMaxFileSize(long)
726    */
727   public long getMaxFileSize() {
728     byte [] value = getValue(MAX_FILESIZE_KEY);
729     if (value != null) {
730       return Long.parseLong(Bytes.toString(value));
731     }
732     return -1;
733   }
734 
  /**
   * Sets the maximum size upto which a region can grow to after which a region
   * split is triggered. The region size is represented by the size of the biggest
   * store file in that region, i.e. If the biggest store file grows beyond the
   * maxFileSize, then the region split is triggered. When unset this defaults to
   * HConstants.DEFAULT_MAX_FILE_SIZE.
   * <p>
   * This is not an absolute value and might vary. Assume that a single row exceeds
   * the maxFileSize then the storeFileSize will be greater than maxFileSize since
   * a single row cannot be split across multiple regions
   * </p>
   *
   * @param maxFileSize The maximum file size that a store file can grow to
   * before a split is triggered.
   * @see #getMaxFileSize()
   */
  public void setMaxFileSize(long maxFileSize) {
    setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
  }
753 
754   /**
755    * Returns the size of the memstore after which a flush to filesystem is triggered.
756    *
757    * @return memory cache flush size for each hregion, -1 if not set.
758    *
759    * @see #setMemStoreFlushSize(long)
760    */
761   public long getMemStoreFlushSize() {
762     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
763     if (value != null) {
764       return Long.parseLong(Bytes.toString(value));
765     }
766     return -1;
767   }
768 
  /**
   * Represents the maximum size of the memstore after which the contents of the
   * memstore are flushed to the filesystem. When unset this defaults to
   * {@link #DEFAULT_MEMSTORE_FLUSH_SIZE} (128 MB).
   *
   * @param memstoreFlushSize memory cache flush size for each hregion
   * @see #getMemStoreFlushSize()
   */
  public void setMemStoreFlushSize(long memstoreFlushSize) {
    setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
  }
778 
779   /**
780    * Adds a column family.
781    * @param family HColumnDescriptor of family to add.
782    */
783   public void addFamily(final HColumnDescriptor family) {
784     if (family.getName() == null || family.getName().length <= 0) {
785       throw new NullPointerException("Family name cannot be null or empty");
786     }
787     this.families.put(family.getName(), family);
788   }
789 
  /**
   * Checks to see if this table contains the given column family
   * @param familyName Family name or column name.
   * @return true if the table contains the specified family name
   */
  public boolean hasFamily(final byte [] familyName) {
    // families is keyed with Bytes.BYTES_RAWCOMPARATOR, so byte-content lookup works.
    return families.containsKey(familyName);
  }
798 
799   /**
800    * @return Name of this table and then a map of all of the column family
801    * descriptors.
802    * @see #getNameAsString()
803    */
804   @Override
805   public String toString() {
806     StringBuilder s = new StringBuilder();
807     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
808     s.append(getValues(true));
809     for (HColumnDescriptor f : families.values()) {
810       s.append(", ").append(f);
811     }
812     return s.toString();
813   }
814 
815   /**
816    * @return Name of this table and then a map of all of the column family
817    * descriptors (with only the non-default column family attributes)
818    */
819   public String toStringCustomizedValues() {
820     StringBuilder s = new StringBuilder();
821     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
822     s.append(getValues(false));
823     for(HColumnDescriptor hcd : families.values()) {
824       s.append(", ").append(hcd.toStringCustomizedValues());
825     }
826     return s.toString();
827   }
828 
  /**
   * Builds the shell-style rendering of this table's attribute map, e.g.
   * {@code ", {TABLE_ATTRIBUTES => {...}, CONFIGURATION => {...}}"}, shared by
   * the toString variants.
   *
   * @param printDefaults if true, reserved keys are printed even when their
   * value equals the known default; if false such entries are pruned.
   * @return the formatted attributes, or an empty builder when there is
   * nothing to print (no attributes and no configuration).
   */
  private StringBuilder getValues(boolean printDefaults) {
    StringBuilder s = new StringBuilder();

    // step 1: set partitioning and pruning
    // Split the raw key set into reserved keywords (+ coprocessor entries) and
    // user-defined metadata keys; TreeSet gives a deterministic print order.
    Set<ImmutableBytesWritable> reservedKeys = new TreeSet<ImmutableBytesWritable>();
    Set<ImmutableBytesWritable> userKeys = new TreeSet<ImmutableBytesWritable>();
    for (ImmutableBytesWritable k : values.keySet()) {
      if (k == null || k.get() == null) continue;
      String key = Bytes.toString(k.get());
      // in this section, print out reserved keywords + coprocessor info
      if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
        userKeys.add(k);
        continue;
      }
      // only print out IS_ROOT/IS_META if true
      String value = Bytes.toString(values.get(k).get());
      if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
        if (Boolean.valueOf(value) == false) continue;
      }
      // see if a reserved key is a default value. may not want to print it out
      if (printDefaults
          || !DEFAULT_VALUES.containsKey(key)
          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
        reservedKeys.add(k);
      }
    }

    // early exit optimization
    boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
    if (!hasAttributes && configuration.isEmpty()) return s;

    s.append(", {");
    // step 2: printing attributes
    if (hasAttributes) {
      s.append("TABLE_ATTRIBUTES => {");

      // print all reserved keys first
      boolean printCommaForAttr = false;
      for (ImmutableBytesWritable k : reservedKeys) {
        String key = Bytes.toString(k.get());
        String value = Bytes.toString(values.get(k).get());
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(key);
        s.append(" => ");
        s.append('\'').append(value).append('\'');
      }

      if (!userKeys.isEmpty()) {
        // print all non-reserved, advanced config keys as a separate subset
        // grouped under the METADATA pseudo-key.
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(HConstants.METADATA).append(" => ");
        s.append("{");
        boolean printCommaForCfg = false;
        for (ImmutableBytesWritable k : userKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toString(values.get(k).get());
          if (printCommaForCfg) s.append(", ");
          printCommaForCfg = true;
          s.append('\'').append(key).append('\'');
          s.append(" => ");
          s.append('\'').append(value).append('\'');
        }
        s.append("}");
      }
    }

    // step 3: printing all configuration:
    if (!configuration.isEmpty()) {
      if (hasAttributes) {
        s.append(", ");
      }
      s.append(HConstants.CONFIGURATION).append(" => ");
      s.append('{');
      boolean printCommaForConfig = false;
      for (Map.Entry<String, String> e : configuration.entrySet()) {
        if (printCommaForConfig) s.append(", ");
        printCommaForConfig = true;
        s.append('\'').append(e.getKey()).append('\'');
        s.append(" => ");
        s.append('\'').append(e.getValue()).append('\'');
      }
      s.append("}");
    }
    s.append("}"); // end METHOD
    return s;
  }
917 
918   /**
919    * Compare the contents of the descriptor with another one passed as a parameter.
920    * Checks if the obj passed is an instance of HTableDescriptor, if yes then the
921    * contents of the descriptors are compared.
922    *
923    * @return true if the contents of the the two descriptors exactly match
924    *
925    * @see java.lang.Object#equals(java.lang.Object)
926    */
927   @Override
928   public boolean equals(Object obj) {
929     if (this == obj) {
930       return true;
931     }
932     if (obj == null) {
933       return false;
934     }
935     if (!(obj instanceof HTableDescriptor)) {
936       return false;
937     }
938     return compareTo((HTableDescriptor)obj) == 0;
939   }
940 
941   /**
942    * @see java.lang.Object#hashCode()
943    */
944   @Override
945   public int hashCode() {
946     int result = this.name.hashCode();
947     result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode();
948     if (this.families != null && this.families.size() > 0) {
949       for (HColumnDescriptor e: this.families.values()) {
950         result ^= e.hashCode();
951       }
952     }
953     result ^= values.hashCode();
954     result ^= configuration.hashCode();
955     return result;
956   }
957 
  /**
   * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
   * and is used for de-serialization of the HTableDescriptor over RPC.
   * Reads, in order: version int, table name bytes, root/meta flags, the
   * values map, the family descriptors, and (version 7+) the configuration
   * map. The read order must mirror {@link #write(DataOutput)} exactly.
   * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
   */
  @Deprecated
  @Override
  public void readFields(DataInput in) throws IOException {
    int version = in.readInt();
    if (version < 3)
      throw new IOException("versions < 3 are not supported (and never existed!?)");
    // version 3+
    name = TableName.valueOf(Bytes.readByteArray(in));
    setRootRegion(in.readBoolean());
    setMetaRegion(in.readBoolean());
    // Replace (not merge) any existing state with the deserialized state.
    values.clear();
    configuration.clear();
    int numVals = in.readInt();
    for (int i = 0; i < numVals; i++) {
      ImmutableBytesWritable key = new ImmutableBytesWritable();
      ImmutableBytesWritable value = new ImmutableBytesWritable();
      key.readFields(in);
      value.readFields(in);
      setValue(key, value);
    }
    families.clear();
    int numFamilies = in.readInt();
    for (int i = 0; i < numFamilies; i++) {
      HColumnDescriptor c = new HColumnDescriptor();
      c.readFields(in);
      families.put(c.getName(), c);
    }
    // The configuration map was added in descriptor version 7; older streams
    // simply end after the families.
    if (version >= 7) {
      int numConfigs = in.readInt();
      for (int i = 0; i < numConfigs; i++) {
        ImmutableBytesWritable key = new ImmutableBytesWritable();
        ImmutableBytesWritable value = new ImmutableBytesWritable();
        key.readFields(in);
        value.readFields(in);
        configuration.put(
          Bytes.toString(key.get(), key.getOffset(), key.getLength()),
          Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      }
    }
  }
1003 
1004   /**
1005    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
1006    * and is used for serialization of the HTableDescriptor over RPC
1007    * @deprecated Writables are going away.
1008    * Use {@link com.google.protobuf.MessageLite#toByteArray} instead.
1009    */
1010   @Deprecated
1011   @Override
1012   public void write(DataOutput out) throws IOException {
1013 	  out.writeInt(TABLE_DESCRIPTOR_VERSION);
1014     Bytes.writeByteArray(out, name.toBytes());
1015     out.writeBoolean(isRootRegion());
1016     out.writeBoolean(isMetaRegion());
1017     out.writeInt(values.size());
1018     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1019         values.entrySet()) {
1020       e.getKey().write(out);
1021       e.getValue().write(out);
1022     }
1023     out.writeInt(families.size());
1024     for(Iterator<HColumnDescriptor> it = families.values().iterator();
1025         it.hasNext(); ) {
1026       HColumnDescriptor family = it.next();
1027       family.write(out);
1028     }
1029     out.writeInt(configuration.size());
1030     for (Map.Entry<String, String> e : configuration.entrySet()) {
1031       new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
1032       new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
1033     }
1034   }
1035 
1036   // Comparable
1037 
1038   /**
1039    * Compares the descriptor with another descriptor which is passed as a parameter.
1040    * This compares the content of the two descriptors and not the reference.
1041    *
1042    * @return 0 if the contents of the descriptors are exactly matching,
1043    * 		 1 if there is a mismatch in the contents
1044    */
1045   @Override
1046   public int compareTo(final HTableDescriptor other) {
1047     int result = this.name.compareTo(other.name);
1048     if (result == 0) {
1049       result = families.size() - other.families.size();
1050     }
1051     if (result == 0 && families.size() != other.families.size()) {
1052       result = Integer.valueOf(families.size()).compareTo(
1053           Integer.valueOf(other.families.size()));
1054     }
1055     if (result == 0) {
1056       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
1057           it2 = other.families.values().iterator(); it.hasNext(); ) {
1058         result = it.next().compareTo(it2.next());
1059         if (result != 0) {
1060           break;
1061         }
1062       }
1063     }
1064     if (result == 0) {
1065       // punt on comparison for ordering, just calculate difference
1066       result = this.values.hashCode() - other.values.hashCode();
1067       if (result < 0)
1068         result = -1;
1069       else if (result > 0)
1070         result = 1;
1071     }
1072     if (result == 0) {
1073       result = this.configuration.hashCode() - other.configuration.hashCode();
1074       if (result < 0)
1075         result = -1;
1076       else if (result > 0)
1077         result = 1;
1078     }
1079     return result;
1080   }
1081 
1082   /**
1083    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
1084    * of all the column families of the table.
1085    *
1086    * @return Immutable collection of {@link HColumnDescriptor} of all the
1087    * column families.
1088    */
1089   public Collection<HColumnDescriptor> getFamilies() {
1090     return Collections.unmodifiableCollection(this.families.values());
1091   }
1092 
1093   /**
1094    * Returns all the column family names of the current table. The map of
1095    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1096    * This returns all the keys of the family map which represents the column
1097    * family names of the table.
1098    *
1099    * @return Immutable sorted set of the keys of the families.
1100    */
1101   public Set<byte[]> getFamiliesKeys() {
1102     return Collections.unmodifiableSet(this.families.keySet());
1103   }
1104 
1105   /**
1106    * Returns an array all the {@link HColumnDescriptor} of the column families
1107    * of the table.
1108    *
1109    * @return Array of all the HColumnDescriptors of the current table
1110    *
1111    * @see #getFamilies()
1112    */
1113   public HColumnDescriptor[] getColumnFamilies() {
1114     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1115     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1116   }
1117 
1118 
1119   /**
1120    * Returns the HColumnDescriptor for a specific column family with name as
1121    * specified by the parameter column.
1122    *
1123    * @param column Column family name
1124    * @return Column descriptor for the passed family name or the family on
1125    * passed in column.
1126    */
1127   public HColumnDescriptor getFamily(final byte [] column) {
1128     return this.families.get(column);
1129   }
1130 
1131 
1132   /**
1133    * Removes the HColumnDescriptor with name specified by the parameter column
1134    * from the table descriptor
1135    *
1136    * @param column Name of the column family to be removed.
1137    * @return Column descriptor for the passed family name or the family on
1138    * passed in column.
1139    */
1140   public HColumnDescriptor removeFamily(final byte [] column) {
1141     return this.families.remove(column);
1142   }
1143 
1144 
1145   /**
1146    * Add a table coprocessor to this table. The coprocessor
1147    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1148    * or Endpoint.
1149    * It won't check if the class can be loaded or not.
1150    * Whether a coprocessor is loadable or not will be determined when
1151    * a region is opened.
1152    * @param className Full class name.
1153    * @throws IOException
1154    */
1155   public void addCoprocessor(String className) throws IOException {
1156     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1157   }
1158 
1159 
1160   /**
1161    * Add a table coprocessor to this table. The coprocessor
1162    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1163    * or Endpoint.
1164    * It won't check if the class can be loaded or not.
1165    * Whether a coprocessor is loadable or not will be determined when
1166    * a region is opened.
1167    * @param jarFilePath Path of the jar file. If it's null, the class will be
1168    * loaded from default classloader.
1169    * @param className Full class name.
1170    * @param priority Priority
1171    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1172    * @throws IOException
1173    */
1174   public void addCoprocessor(String className, Path jarFilePath,
1175                              int priority, final Map<String, String> kvs)
1176   throws IOException {
1177     if (hasCoprocessor(className)) {
1178       throw new IOException("Coprocessor " + className + " already exists.");
1179     }
1180     // validate parameter kvs
1181     StringBuilder kvString = new StringBuilder();
1182     if (kvs != null) {
1183       for (Map.Entry<String, String> e: kvs.entrySet()) {
1184         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1185           throw new IOException("Illegal parameter key = " + e.getKey());
1186         }
1187         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1188           throw new IOException("Illegal parameter (" + e.getKey() +
1189               ") value = " + e.getValue());
1190         }
1191         if (kvString.length() != 0) {
1192           kvString.append(',');
1193         }
1194         kvString.append(e.getKey());
1195         kvString.append('=');
1196         kvString.append(e.getValue());
1197       }
1198     }
1199 
1200     // generate a coprocessor key
1201     int maxCoprocessorNumber = 0;
1202     Matcher keyMatcher;
1203     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1204         this.values.entrySet()) {
1205       keyMatcher =
1206           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1207               Bytes.toString(e.getKey().get()));
1208       if (!keyMatcher.matches()) {
1209         continue;
1210       }
1211       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)),
1212           maxCoprocessorNumber);
1213     }
1214     maxCoprocessorNumber++;
1215 
1216     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1217     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1218         "|" + className + "|" + Integer.toString(priority) + "|" +
1219         kvString.toString();
1220     setValue(key, value);
1221   }
1222 
1223 
1224   /**
1225    * Check if the table has an attached co-processor represented by the name className
1226    *
1227    * @param className - Class name of the co-processor
1228    * @return true of the table has a co-processor className
1229    */
1230   public boolean hasCoprocessor(String className) {
1231     Matcher keyMatcher;
1232     Matcher valueMatcher;
1233     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1234         this.values.entrySet()) {
1235       keyMatcher =
1236           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1237               Bytes.toString(e.getKey().get()));
1238       if (!keyMatcher.matches()) {
1239         continue;
1240       }
1241       valueMatcher =
1242         HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(
1243             Bytes.toString(e.getValue().get()));
1244       if (!valueMatcher.matches()) {
1245         continue;
1246       }
1247       // get className and compare
1248       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1249       if (clazz.equals(className.trim())) {
1250         return true;
1251       }
1252     }
1253     return false;
1254   }
1255 
1256   /**
1257    * Return the list of attached co-processor represented by their name className
1258    *
1259    * @return The list of co-processors classNames
1260    */
1261   public List<String> getCoprocessors() {
1262     List<String> result = new ArrayList<String>();
1263     Matcher keyMatcher;
1264     Matcher valueMatcher;
1265     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values.entrySet()) {
1266       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1267       if (!keyMatcher.matches()) {
1268         continue;
1269       }
1270       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1271           .toString(e.getValue().get()));
1272       if (!valueMatcher.matches()) {
1273         continue;
1274       }
1275       result.add(valueMatcher.group(2).trim()); // classname is the 2nd field
1276     }
1277     return result;
1278   }
1279 
1280   /**
1281    * Remove a coprocessor from those set on the table
1282    * @param className Class name of the co-processor
1283    */
1284   public void removeCoprocessor(String className) {
1285     ImmutableBytesWritable match = null;
1286     Matcher keyMatcher;
1287     Matcher valueMatcher;
1288     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values
1289         .entrySet()) {
1290       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1291           .getKey().get()));
1292       if (!keyMatcher.matches()) {
1293         continue;
1294       }
1295       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1296           .toString(e.getValue().get()));
1297       if (!valueMatcher.matches()) {
1298         continue;
1299       }
1300       // get className and compare
1301       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1302       // remove the CP if it is present
1303       if (clazz.equals(className.trim())) {
1304         match = e.getKey();
1305         break;
1306       }
1307     }
1308     // if we found a match, remove it
1309     if (match != null)
1310       remove(match);
1311   }
1312 
1313   /**
1314    * Returns the {@link Path} object representing the table directory under
1315    * path rootdir
1316    *
1317    * Deprecated use FSUtils.getTableDir() instead.
1318    *
1319    * @param rootdir qualified path of HBase root directory
1320    * @param tableName name of table
1321    * @return {@link Path} for table
1322    */
1323   @Deprecated
1324   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1325     //This is bad I had to mirror code from FSUTils.getTableDir since
1326     //there is no module dependency between hbase-client and hbase-server
1327     TableName name = TableName.valueOf(tableName);
1328     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1329               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1330   }
1331 
  /** Table descriptor for <code>hbase:meta</code> catalog table */
  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
      TableName.META_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
              .setBloomFilterType(BloomType.NONE)
      });

  // Attach the multi-row-mutation endpoint to hbase:meta at class-load time;
  // a failure here is fatal since meta would be missing a required coprocessor.
  static {
    try {
      META_TABLEDESC.addCoprocessor(
          "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
          null, Coprocessor.PRIORITY_SYSTEM, null);
    } catch (IOException ex) {
      //LOG.warn("exception in loading coprocessor for the hbase:meta table");
      throw new RuntimeException(ex);
    }
  }
1356 
  // Family name holding namespace metadata in the namespace table.
  public final static String NAMESPACE_FAMILY_INFO = "info";
  // Byte form of NAMESPACE_FAMILY_INFO, precomputed for row construction.
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  // Qualifier under which the serialized namespace descriptor is stored.
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

  /** Table descriptor for namespace table */
  public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
      TableName.NAMESPACE_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
              // Ten is arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
      });
1372 
1373   @Deprecated
1374   public void setOwner(User owner) {
1375     setOwnerString(owner != null ? owner.getShortName() : null);
1376   }
1377 
1378   // used by admin.rb:alter(table_name,*args) to update owner.
1379   @Deprecated
1380   public void setOwnerString(String ownerString) {
1381     if (ownerString != null) {
1382       setValue(OWNER_KEY, ownerString);
1383     } else {
1384       remove(OWNER_KEY);
1385     }
1386   }
1387 
1388   @Deprecated
1389   public String getOwnerString() {
1390     if (getValue(OWNER_KEY) != null) {
1391       return Bytes.toString(getValue(OWNER_KEY));
1392     }
1393     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1394     // hbase:meta and -ROOT- should return system user as owner, not null (see
1395     // MasterFileSystem.java:bootstrap()).
1396     return null;
1397   }
1398 
1399   /**
1400    * @return This instance serialized with pb with pb magic prefix
1401    * @see #parseFrom(byte[])
1402    */
1403   public byte [] toByteArray() {
1404     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1405   }
1406 
1407   /**
1408    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1409    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1410    * @throws DeserializationException
1411    * @throws IOException
1412    * @see #toByteArray()
1413    */
1414   public static HTableDescriptor parseFrom(final byte [] bytes)
1415   throws DeserializationException, IOException {
1416     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1417       return (HTableDescriptor)Writables.getWritable(bytes, new HTableDescriptor());
1418     }
1419     int pblen = ProtobufUtil.lengthOfPBMagic();
1420     TableSchema.Builder builder = TableSchema.newBuilder();
1421     TableSchema ts;
1422     try {
1423       ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1424     } catch (InvalidProtocolBufferException e) {
1425       throw new DeserializationException(e);
1426     }
1427     return convert(ts);
1428   }
1429 
1430   /**
1431    * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
1432    */
1433   public TableSchema convert() {
1434     TableSchema.Builder builder = TableSchema.newBuilder();
1435     builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
1436     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
1437       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1438       aBuilder.setFirst(HBaseZeroCopyByteString.wrap(e.getKey().get()));
1439       aBuilder.setSecond(HBaseZeroCopyByteString.wrap(e.getValue().get()));
1440       builder.addAttributes(aBuilder.build());
1441     }
1442     for (HColumnDescriptor hcd: getColumnFamilies()) {
1443       builder.addColumnFamilies(hcd.convert());
1444     }
1445     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1446       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1447       aBuilder.setName(e.getKey());
1448       aBuilder.setValue(e.getValue());
1449       builder.addConfiguration(aBuilder.build());
1450     }
1451     return builder.build();
1452   }
1453 
1454   /**
1455    * @param ts A pb TableSchema instance.
1456    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
1457    */
1458   public static HTableDescriptor convert(final TableSchema ts) {
1459     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
1460     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
1461     int index = 0;
1462     for (ColumnFamilySchema cfs: list) {
1463       hcds[index++] = HColumnDescriptor.convert(cfs);
1464     }
1465     HTableDescriptor htd = new HTableDescriptor(
1466         ProtobufUtil.toTableName(ts.getTableName()),
1467         hcds);
1468     for (BytesBytesPair a: ts.getAttributesList()) {
1469       htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1470     }
1471     for (NameStringPair a: ts.getConfigurationList()) {
1472       htd.setConfiguration(a.getName(), a.getValue());
1473     }
1474     return htd;
1475   }
1476 
1477   /**
1478    * Getter for accessing the configuration value by key
1479    */
1480   public String getConfigurationValue(String key) {
1481     return configuration.get(key);
1482   }
1483 
1484   /**
1485    * Getter for fetching an unmodifiable {@link #configuration} map.
1486    */
1487   public Map<String, String> getConfiguration() {
1488     // shallow pointer copy
1489     return Collections.unmodifiableMap(configuration);
1490   }
1491 
1492   /**
1493    * Setter for storing a configuration setting in {@link #configuration} map.
1494    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1495    * @param value String value. If null, removes the setting.
1496    */
1497   public void setConfiguration(String key, String value) {
1498     if (value == null) {
1499       removeConfiguration(key);
1500     } else {
1501       configuration.put(key, value);
1502     }
1503   }
1504 
1505   /**
1506    * Remove a config setting represented by the key from the {@link #configuration} map
1507    */
1508   public void removeConfiguration(final String key) {
1509     configuration.remove(key);
1510   }
1511 }