/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.regex.Matcher;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.WritableComparable;

import com.google.protobuf.InvalidProtocolBufferException;

/**
 * HTableDescriptor contains the details about an HBase table, such as the descriptors of
 * all the column families, whether the table is a catalog table (<code>-ROOT-</code> or
 * <code>hbase:meta</code>), whether the table is read only, the maximum size of the
 * memstore, when the region split should occur, the coprocessors associated with it, etc.
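 *
 * <p>A minimal construction sketch (illustrative only; the table name "t" and the
 * column family name "f" below are made-up examples):
 * <pre>
 * HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t"));
 * htd.addFamily(new HColumnDescriptor("f"));
 * </pre>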
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HTableDescriptor implements WritableComparable<HTableDescriptor> {

  private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);

  /**
   *  Changes prior to version 3 were not recorded here.
   *  Version 3 adds metadata as a map where keys and values are byte[].
   *  Version 4 adds indexes
   *  Version 5 removed transactional pollution -- e.g. indexes
   *  Version 6 changed metadata to BytesBytesPair in PB
   *  Version 7 adds table-level configuration
   */
  private static final byte TABLE_DESCRIPTOR_VERSION = 7;

  private TableName name = null;

  /**
   * A map which holds the metadata information of the table. This metadata
   * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
   * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
   */
  private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
    new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();

  /**
   * A map which holds the configuration specific to the table.
   * The keys of the map have the same names as config keys and override the defaults with
   * table-specific settings. Example usage may be for compactions, etc.
   */
  private final Map<String, String> configuration = new HashMap<String, String>();

  public static final String SPLIT_POLICY = "SPLIT_POLICY";

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes the maximum size of the store file after which
   * a region split occurs
   *
   * @see #getMaxFileSize()
   */
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  private static final ImmutableBytesWritable MAX_FILESIZE_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE));

  public static final String OWNER = "OWNER";
  public static final ImmutableBytesWritable OWNER_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(OWNER));

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if the table is Read Only
   *
   * @see #isReadOnly()
   */
  public static final String READONLY = "READONLY";
  private static final ImmutableBytesWritable READONLY_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(READONLY));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes if compaction is enabled for the table
   *
   * @see #isCompactionEnabled()
   */
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final ImmutableBytesWritable COMPACTION_ENABLED_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which represents the maximum size of the memstore after which
   * its contents are flushed onto the disk
   *
   * @see #getMemStoreFlushSize()
   */
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final ImmutableBytesWritable MEMSTORE_FLUSHSIZE_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if the table is a -ROOT- region or not
   *
   * @see #isRootRegion()
   */
  public static final String IS_ROOT = "IS_ROOT";
  private static final ImmutableBytesWritable IS_ROOT_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT));

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if it is a catalog table, either
   * <code> hbase:meta </code> or <code> -ROOT- </code>
   *
   * @see #isMetaRegion()
   */
  public static final String IS_META = "IS_META";
  private static final ImmutableBytesWritable IS_META_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(IS_META));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes if the deferred log flush option is enabled.
   * @deprecated Use {@link #DURABILITY} instead.
   */
  @Deprecated
  public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
  @Deprecated
  private static final ImmutableBytesWritable DEFERRED_LOG_FLUSH_KEY =
    new ImmutableBytesWritable(Bytes.toBytes(DEFERRED_LOG_FLUSH));

  /**
   * <em>INTERNAL</em> {@link Durability} setting for the table.
   */
  public static final String DURABILITY = "DURABILITY";
  private static final ImmutableBytesWritable DURABILITY_KEY =
      new ImmutableBytesWritable(Bytes.toBytes(DURABILITY));

  /**
   * <em>INTERNAL</em> number of region replicas for the table.
   */
  public static final String REGION_REPLICATION = "REGION_REPLICATION";
  private static final ImmutableBytesWritable REGION_REPLICATION_KEY =
      new ImmutableBytesWritable(Bytes.toBytes(REGION_REPLICATION));

  /** Default durability for HTD is USE_DEFAULT, which defers to the HBase-global default value */
  private static final Durability DEFAULT_DURABILITY = Durability.USE_DEFAULT;

  /*
   *  The below are ugly but better than creating them each time until we
   *  replace booleans being saved as Strings with plain booleans.  Need a
   *  migration script to do this.  TODO.
   */
  private static final ImmutableBytesWritable FALSE =
    new ImmutableBytesWritable(Bytes.toBytes(Boolean.FALSE.toString()));

  private static final ImmutableBytesWritable TRUE =
    new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));

  private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;

  /**
   * Constant that denotes whether the table is READONLY by default; the default is false.
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes the maximum default size of the memstore after which
   * the contents are flushed to the store files
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;

  public static final int DEFAULT_REGION_REPLICATION = 1;

  private final static Map<String, String> DEFAULT_VALUES
    = new HashMap<String, String>();
  private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
    = new HashSet<ImmutableBytesWritable>();
  static {
    DEFAULT_VALUES.put(MAX_FILESIZE,
        String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
        String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
        String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABILITY.name()); //use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    for (String s : DEFAULT_VALUES.keySet()) {
      RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
    }
    RESERVED_KEYWORDS.add(IS_ROOT_KEY);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }

  /**
   * Cache of whether this is a meta table or not.
   */
  private volatile Boolean meta = null;
  /**
   * Cache of whether this is root table or not.
   */
  private volatile Boolean root = null;

  /**
   * Durability setting for the table
   */
  private Durability durability = null;

  /**
   * Maps column family name to the respective HColumnDescriptors
   */
  private final Map<byte [], HColumnDescriptor> families =
    new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);

  /**
   * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
   * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
   */
  @InterfaceAudience.Private
  protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
    setName(name);
    for(HColumnDescriptor descriptor : families) {
      this.families.put(descriptor.getName(), descriptor);
    }
  }

  /**
   * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
   * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
   */
  protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
      Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
    setName(name);
    for(HColumnDescriptor descriptor : families) {
      this.families.put(descriptor.getName(), descriptor);
    }
    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
        values.entrySet()) {
      setValue(entry.getKey(), entry.getValue());
    }
  }

  /**
   * Default constructor which constructs an empty object.
   * For deserializing an HTableDescriptor instance only.
   * @deprecated Used by Writables and Writables are going away.
   */
  @Deprecated
  public HTableDescriptor() {
    super();
  }

  /**
   * Construct a table descriptor specifying a TableName object
   * @param name Table name.
   * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  public HTableDescriptor(final TableName name) {
    super();
    setName(name);
  }

  /**
   * Construct a table descriptor specifying a byte array table name
   * @param name Table name.
   * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final byte[] name) {
    this(TableName.valueOf(name));
  }

  /**
   * Construct a table descriptor specifying a String table name
   * @param name Table name.
   * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final String name) {
    this(TableName.valueOf(name));
  }

  /**
   * Construct a table descriptor by cloning the descriptor passed as a parameter.
   * <p>
   * Makes a deep copy of the supplied descriptor.
   * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
   * @param desc The descriptor.
   */
  public HTableDescriptor(final HTableDescriptor desc) {
    super();
    setName(desc.name);
    setMetaFlags(this.name);
    for (HColumnDescriptor c: desc.families.values()) {
      this.families.put(c.getName(), new HColumnDescriptor(c));
    }
    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
        desc.values.entrySet()) {
      setValue(e.getKey(), e.getValue());
    }
    for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
      this.configuration.put(e.getKey(), e.getValue());
    }
  }

  /*
   * Set meta flags on this table.
   * IS_ROOT_KEY is set if it's a -ROOT- table.
   * IS_META_KEY is set if it's either a -ROOT- or a hbase:meta table.
   * Called by constructors.
   * @param name
   */
  private void setMetaFlags(final TableName name) {
    setMetaRegion(isRootRegion() ||
        name.equals(TableName.META_TABLE_NAME));
  }

  /**
   * Check if the descriptor represents a <code> -ROOT- </code> region.
   *
   * @return true if this is a <code> -ROOT- </code> region
   */
  public boolean isRootRegion() {
    if (this.root == null) {
      this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
    }
    return this.root.booleanValue();
  }

  /**
   * <em> INTERNAL </em> Used to denote if the current table represents the
   * <code> -ROOT- </code> region. This is used internally by the
   * HTableDescriptor constructors
   *
   * @param isRoot true if this is the <code> -ROOT- </code> region
   */
  protected void setRootRegion(boolean isRoot) {
    // TODO: Make the value a boolean rather than String of boolean.
    setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
  }

  /**
   * Checks if this table is the <code> hbase:meta </code>
   * region.
   *
   * @return true if this table is the <code> hbase:meta </code>
   * region
   */
  public boolean isMetaRegion() {
    if (this.meta == null) {
      this.meta = calculateIsMetaRegion();
    }
    return this.meta.booleanValue();
  }

  private synchronized Boolean calculateIsMetaRegion() {
    byte [] value = getValue(IS_META_KEY);
    return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
  }

  private boolean isSomething(final ImmutableBytesWritable key,
      final boolean valueIfNull) {
    byte [] value = getValue(key);
    if (value != null) {
      return Boolean.valueOf(Bytes.toString(value));
    }
    return valueIfNull;
  }

  /**
   * <em> INTERNAL </em> Used to denote if the current table represents the
   * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
   * internally by the HTableDescriptor constructors
   *
   * @param isMeta true if it is either the <code> -ROOT- </code> or the
   * <code> hbase:meta </code> region
   */
  protected void setMetaRegion(boolean isMeta) {
    setValue(IS_META_KEY, isMeta? TRUE: FALSE);
  }

  /**
   * Checks if the table is a <code>hbase:meta</code> table
   *
   * @return true if table is <code> hbase:meta </code> region.
   */
  public boolean isMetaTable() {
    return isMetaRegion() && !isRootRegion();
  }

  /**
   * Getter for accessing the metadata associated with the key
   *
   * @param key The key.
   * @return The value.
   * @see #values
   */
  public byte[] getValue(byte[] key) {
    return getValue(new ImmutableBytesWritable(key));
  }

  private byte[] getValue(final ImmutableBytesWritable key) {
    ImmutableBytesWritable ibw = values.get(key);
    if (ibw == null)
      return null;
    return ibw.get();
  }

  /**
   * Getter for accessing the metadata associated with the key
   *
   * @param key The key.
   * @return The value.
   * @see #values
   */
  public String getValue(String key) {
    byte[] value = getValue(Bytes.toBytes(key));
    if (value == null)
      return null;
    return Bytes.toString(value);
  }

  /**
   * Getter for fetching an unmodifiable {@link #values} map.
   *
   * @return unmodifiable map {@link #values}.
   * @see #values
   */
  public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
    // shallow pointer copy
    return Collections.unmodifiableMap(values);
  }

  /**
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * @param key The key.
   * @param value The value.
   * @see #values
   */
  public HTableDescriptor setValue(byte[] key, byte[] value) {
    setValue(new ImmutableBytesWritable(key), new ImmutableBytesWritable(value));
    return this;
  }

  /*
   * @param key The key.
   * @param value The value.
   */
  private HTableDescriptor setValue(final ImmutableBytesWritable key,
      final String value) {
    setValue(key, new ImmutableBytesWritable(Bytes.toBytes(value)));
    return this;
  }

  /*
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * @param key The key.
   * @param value The value.
   */
  public HTableDescriptor setValue(final ImmutableBytesWritable key,
      final ImmutableBytesWritable value) {
    if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
      boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
      LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
          "use " + DURABILITY + " instead");
      setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABILITY);
      return this;
    }
    values.put(key, value);
    return this;
  }

  /**
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
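   * <p>Illustrative sketch only; the attribute name and value below are made-up
   * examples, not reserved keys:
   * <pre>
   * htd.setValue("MY_ATTRIBUTE", "my_value");
   * String v = htd.getValue("MY_ATTRIBUTE"); // returns "my_value"
   * </pre>
   *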
   * @param key The key.
   * @param value The value.
   * @see #values
   */
  public HTableDescriptor setValue(String key, String value) {
    if (value == null) {
      remove(key);
    } else {
      setValue(Bytes.toBytes(key), Bytes.toBytes(value));
    }
    return this;
  }

  /**
   * Remove metadata represented by the key from the {@link #values} map
   *
   * @param key Key whose entry is to be removed from the descriptor's
   * parameters.
   */
  public void remove(final String key) {
    remove(new ImmutableBytesWritable(Bytes.toBytes(key)));
  }

  /**
   * Remove metadata represented by the key from the {@link #values} map
   *
   * @param key Key whose entry is to be removed from the descriptor's
   * parameters.
   */
  public void remove(ImmutableBytesWritable key) {
    values.remove(key);
  }

  /**
   * Remove metadata represented by the key from the {@link #values} map
   *
   * @param key Key whose entry is to be removed from the descriptor's
   * parameters.
   */
  public void remove(final byte [] key) {
    remove(new ImmutableBytesWritable(key));
  }

  /**
   * Check if the readOnly flag of the table is set. If the readOnly flag is
   * set, then the contents of the table can only be read, not modified.
   *
   * @return true if all columns in the table should be read only
   */
  public boolean isReadOnly() {
    return isSomething(READONLY_KEY, DEFAULT_READONLY);
  }

  /**
   * Setting the table as read only sets all the columns in the table as read
   * only. By default all tables are modifiable, but if the readOnly flag is
   * set to true, then the contents of the table can only be read, not modified.
   *
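   * <p>For example, to mark a table read only (sketch; assumes an existing
   * descriptor <code>htd</code>):
   * <pre>
   * htd.setReadOnly(true);
   * assert htd.isReadOnly();
   * </pre>
   *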
   * @param readOnly True if all of the columns in the table should be read
   * only.
   */
  public HTableDescriptor setReadOnly(final boolean readOnly) {
    return setValue(READONLY_KEY, readOnly? TRUE: FALSE);
  }

  /**
   * Check if the compaction enable flag of the table is true. If the flag is
   * false, then no minor or major compactions will be performed.
   *
   * @return true if compaction is enabled for the table
   */
  public boolean isCompactionEnabled() {
    return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
  }

  /**
   * Set the table compaction enable flag.
   *
   * @param isEnable True to enable compaction.
   */
  public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
    setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
    return this;
  }

  /**
   * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
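   *
   * <p>Sketch (assumes an existing descriptor <code>htd</code>); for example, to
   * use asynchronous WAL writes for this table:
   * <pre>
   * htd.setDurability(Durability.ASYNC_WAL);
   * </pre>
   *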
   * @param durability enum value
   */
  public HTableDescriptor setDurability(Durability durability) {
    this.durability = durability;
    setValue(DURABILITY_KEY, durability.name());
    return this;
  }

  /**
   * Returns the durability setting for the table.
   * @return durability setting for the table.
   */
  public Durability getDurability() {
    if (this.durability == null) {
      byte[] durabilityValue = getValue(DURABILITY_KEY);
      if (durabilityValue == null) {
        this.durability = DEFAULT_DURABILITY;
      } else {
        try {
          this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
        } catch (IllegalArgumentException ex) {
          LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
            + " is not known. Durability:" + Bytes.toString(durabilityValue));
          this.durability = DEFAULT_DURABILITY;
        }
      }
    }
    return this.durability;
  }

  /**
   * Get the name of the table
   *
   * @return TableName
   */
  public TableName getTableName() {
    return name;
  }

  /**
   * Get the name of the table as a byte array.
   *
   * @return name of table
   * @deprecated Use {@link #getTableName()} instead
   */
  @Deprecated
  public byte[] getName() {
    return name.getName();
  }

  /**
   * Get the name of the table as a String
   *
   * @return name of table as a String
   */
  public String getNameAsString() {
    return name.getNameAsString();
  }

  /**
   * This sets the class associated with the region split policy which
   * determines when a region split should occur.  The class used by
   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
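   *
   * <p>Sketch (assumes an existing descriptor <code>htd</code>); the policy named
   * below is one of the split policies shipped with HBase:
   * <pre>
   * htd.setRegionSplitPolicyClassName(
   *     "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");
   * </pre>
   *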
   * @param clazz the class name
   */
  public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
    setValue(SPLIT_POLICY, clazz);
    return this;
  }

  /**
   * This gets the class associated with the region split policy which
   * determines when a region split should occur.  The class used by
   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
   *
   * @return the class name of the region split policy for this table.
   * If this returns null, the default split policy is used.
   */
  public String getRegionSplitPolicyClassName() {
    return getValue(SPLIT_POLICY);
  }

  /**
   * Set the name of the table.
   *
   * @param name name of table
   */
  @Deprecated
  public HTableDescriptor setName(byte[] name) {
    setName(TableName.valueOf(name));
    return this;
  }

  @Deprecated
  public HTableDescriptor setName(TableName name) {
    this.name = name;
    setMetaFlags(this.name);
    return this;
  }

  /**
   * Returns the maximum size up to which a region can grow, after which a region
   * split is triggered. The region size is represented by the size of the biggest
   * store file in that region.
   *
   * @return max hregion size for table, -1 if not set.
   *
   * @see #setMaxFileSize(long)
   */
  public long getMaxFileSize() {
    byte [] value = getValue(MAX_FILESIZE_KEY);
    if (value != null) {
      return Long.parseLong(Bytes.toString(value));
    }
    return -1;
  }

  /**
   * Sets the maximum size up to which a region can grow, after which a region
   * split is triggered. The region size is represented by the size of the biggest
   * store file in that region, i.e. if the biggest store file grows beyond the
   * maxFileSize, then a region split is triggered. This defaults to
   * {@link HConstants#DEFAULT_MAX_FILE_SIZE}.
   * <p>
   * This is not an absolute value and might vary. If a single row exceeds
   * the maxFileSize, then the store file size will be greater than maxFileSize,
   * since a single row cannot be split across multiple regions.
   * </p>
   *
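   * <p>Sketch (assumes an existing descriptor <code>htd</code>): split regions once
   * the biggest store file reaches roughly 10 GB:
   * <pre>
   * htd.setMaxFileSize(10L * 1024 * 1024 * 1024);
   * </pre>
   *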
   * @param maxFileSize The maximum file size that a store file can grow to
   * before a split is triggered.
   */
  public HTableDescriptor setMaxFileSize(long maxFileSize) {
    setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    return this;
  }

  /**
   * Returns the size of the memstore after which a flush to filesystem is triggered.
   *
   * @return memory cache flush size for each hregion, -1 if not set.
   *
   * @see #setMemStoreFlushSize(long)
   */
  public long getMemStoreFlushSize() {
    byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
    if (value != null) {
      return Long.parseLong(Bytes.toString(value));
    }
    return -1;
  }

  /**
   * Sets the maximum size of the memstore after which the contents of the
   * memstore are flushed to the filesystem. This defaults to
   * {@link #DEFAULT_MEMSTORE_FLUSH_SIZE} (128 MB).
   *
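   * <p>Sketch (assumes an existing descriptor <code>htd</code>): flush memstores at
   * 256 MB instead of the default:
   * <pre>
   * htd.setMemStoreFlushSize(256L * 1024 * 1024);
   * </pre>
   *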
   * @param memstoreFlushSize memory cache flush size for each hregion
   */
  public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
    setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    return this;
  }

  /**
   * Adds a column family.
   * To update an existing family, use {@link #modifyFamily(HColumnDescriptor)} instead.
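   *
   * <p>Sketch (assumes an existing descriptor <code>htd</code>; the family name "f"
   * is a made-up example):
   * <pre>
   * htd.addFamily(new HColumnDescriptor("f").setMaxVersions(3));
   * </pre>
   *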
   * @param family HColumnDescriptor of family to add.
   */
  public HTableDescriptor addFamily(final HColumnDescriptor family) {
    if (family.getName() == null || family.getName().length <= 0) {
      throw new IllegalArgumentException("Family name cannot be null or empty");
    }
    if (hasFamily(family.getName())) {
      throw new IllegalArgumentException("Family '" +
        family.getNameAsString() + "' already exists so cannot be added");
    }
    this.families.put(family.getName(), family);
    return this;
  }

  /**
   * Modifies the existing column family.
   * @param family HColumnDescriptor of family to update
   * @return this (for chained invocation)
   */
  public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
    if (family.getName() == null || family.getName().length <= 0) {
      throw new IllegalArgumentException("Family name cannot be null or empty");
    }
    if (!hasFamily(family.getName())) {
      throw new IllegalArgumentException("Column family '" + family.getNameAsString()
        + "' does not exist");
    }
    this.families.put(family.getName(), family);
    return this;
  }

  /**
   * Checks to see if this table contains the given column family
   * @param familyName Family name or column name.
   * @return true if the table contains the specified family name
   */
  public boolean hasFamily(final byte [] familyName) {
    return families.containsKey(familyName);
  }

  /**
   * @return Name of this table and then a map of all of the column family
   * descriptors.
   * @see #getNameAsString()
   */
  @Override
  public String toString() {
    StringBuilder s = new StringBuilder();
    s.append('\'').append(Bytes.toString(name.getName())).append('\'');
    s.append(getValues(true));
    for (HColumnDescriptor f : families.values()) {
      s.append(", ").append(f);
    }
    return s.toString();
  }

  /**
   * @return Name of this table and then a map of all of the column family
   * descriptors (with only the non-default column family attributes)
   */
  public String toStringCustomizedValues() {
    StringBuilder s = new StringBuilder();
    s.append('\'').append(Bytes.toString(name.getName())).append('\'');
    s.append(getValues(false));
    for(HColumnDescriptor hcd : families.values()) {
      s.append(", ").append(hcd.toStringCustomizedValues());
    }
    return s.toString();
  }

  /**
   * @return map of all table attributes formatted into string.
   */
  public String toStringTableAttributes() {
    return getValues(true).toString();
  }

  private StringBuilder getValues(boolean printDefaults) {
    StringBuilder s = new StringBuilder();

    // step 1: set partitioning and pruning
    Set<ImmutableBytesWritable> reservedKeys = new TreeSet<ImmutableBytesWritable>();
    Set<ImmutableBytesWritable> userKeys = new TreeSet<ImmutableBytesWritable>();
    for (ImmutableBytesWritable k : values.keySet()) {
      if (k == null || k.get() == null) continue;
      String key = Bytes.toString(k.get());
      // in this section, print out reserved keywords + coprocessor info
      if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
        userKeys.add(k);
        continue;
      }
      // only print out IS_ROOT/IS_META if true
      String value = Bytes.toString(values.get(k).get());
      if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
        if (!Boolean.parseBoolean(value)) continue;
      }
      // see if a reserved key is a default value. may not want to print it out
      if (printDefaults
          || !DEFAULT_VALUES.containsKey(key)
          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
        reservedKeys.add(k);
      }
    }

    // early exit optimization
    boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
    if (!hasAttributes && configuration.isEmpty()) return s;

    s.append(", {");
    // step 2: printing attributes
    if (hasAttributes) {
      s.append("TABLE_ATTRIBUTES => {");

      // print all reserved keys first
      boolean printCommaForAttr = false;
      for (ImmutableBytesWritable k : reservedKeys) {
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(key);
        s.append(" => ");
        s.append('\'').append(value).append('\'');
      }

      if (!userKeys.isEmpty()) {
        // print all non-reserved, advanced config keys as a separate subset
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(HConstants.METADATA).append(" => ");
        s.append("{");
        boolean printCommaForCfg = false;
        for (ImmutableBytesWritable k : userKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForCfg) s.append(", ");
          printCommaForCfg = true;
          s.append('\'').append(key).append('\'');
          s.append(" => ");
          s.append('\'').append(value).append('\'');
        }
        s.append("}");
      }
    }

    // step 3: printing all configuration:
    if (!configuration.isEmpty()) {
      if (hasAttributes) {
        s.append(", ");
      }
      s.append(HConstants.CONFIGURATION).append(" => ");
      s.append('{');
      boolean printCommaForConfig = false;
      for (Map.Entry<String, String> e : configuration.entrySet()) {
        if (printCommaForConfig) s.append(", ");
        printCommaForConfig = true;
        s.append('\'').append(e.getKey()).append('\'');
        s.append(" => ");
        s.append('\'').append(e.getValue()).append('\'');
      }
      s.append("}");
    }
    s.append("}"); // close the attributes/configuration map
    return s;
  }

  /**
   * Compare the contents of the descriptor with another one passed as a parameter.
   * Checks if the obj passed is an instance of HTableDescriptor; if yes, then the
   * contents of the descriptors are compared.
   *
   * @return true if the contents of the two descriptors exactly match
   *
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (!(obj instanceof HTableDescriptor)) {
      return false;
    }
    return compareTo((HTableDescriptor)obj) == 0;
  }

  /**
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {
    int result = this.name.hashCode();
    result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode();
    if (this.families != null && this.families.size() > 0) {
      for (HColumnDescriptor e: this.families.values()) {
        result ^= e.hashCode();
      }
    }
    result ^= values.hashCode();
    result ^= configuration.hashCode();
    return result;
  }

  /**
   * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
   * and is used for de-serialization of the HTableDescriptor over RPC
   * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
   */
  @Deprecated
  @Override
  public void readFields(DataInput in) throws IOException {
    int version = in.readInt();
    if (version < 3)
      throw new IOException("versions < 3 are not supported (and never existed!?)");
    // version 3+
    name = TableName.valueOf(Bytes.readByteArray(in));
    setRootRegion(in.readBoolean());
    setMetaRegion(in.readBoolean());
    values.clear();
    configuration.clear();
    int numVals = in.readInt();
    for (int i = 0; i < numVals; i++) {
      ImmutableBytesWritable key = new ImmutableBytesWritable();
      ImmutableBytesWritable value = new ImmutableBytesWritable();
      key.readFields(in);
      value.readFields(in);
      setValue(key, value);
    }
    families.clear();
    int numFamilies = in.readInt();
    for (int i = 0; i < numFamilies; i++) {
      HColumnDescriptor c = new HColumnDescriptor();
      c.readFields(in);
      families.put(c.getName(), c);
    }
    if (version >= 7) {
      int numConfigs = in.readInt();
      for (int i = 0; i < numConfigs; i++) {
        ImmutableBytesWritable key = new ImmutableBytesWritable();
        ImmutableBytesWritable value = new ImmutableBytesWritable();
        key.readFields(in);
        value.readFields(in);
        configuration.put(
          Bytes.toString(key.get(), key.getOffset(), key.getLength()),
          Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      }
    }
  }

  /**
   * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
   * and is used for serialization of the HTableDescriptor over RPC
   * @deprecated Writables are going away.
   * Use {@link com.google.protobuf.MessageLite#toByteArray} instead.
   */
  @Deprecated
  @Override
  public void write(DataOutput out) throws IOException {
    out.writeInt(TABLE_DESCRIPTOR_VERSION);
    Bytes.writeByteArray(out, name.toBytes());
    out.writeBoolean(isRootRegion());
    out.writeBoolean(isMetaRegion());
    out.writeInt(values.size());
    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
        values.entrySet()) {
      e.getKey().write(out);
      e.getValue().write(out);
    }
    out.writeInt(families.size());
    for(Iterator<HColumnDescriptor> it = families.values().iterator();
        it.hasNext(); ) {
      HColumnDescriptor family = it.next();
      family.write(out);
    }
    out.writeInt(configuration.size());
    for (Map.Entry<String, String> e : configuration.entrySet()) {
      new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
      new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
    }
  }

  // Comparable

  /**
   * Compares the descriptor with another descriptor which is passed as a parameter.
   * This compares the content of the two descriptors and not the reference.
   *
   * @return 0 if the contents of the descriptors match exactly; a negative or
   * positive value if they differ (suitable for ordering only)
   */
  @Override
  public int compareTo(final HTableDescriptor other) {
    int result = this.name.compareTo(other.name);
    if (result == 0) {
      result = families.size() - other.families.size();
    }
    if (result == 0) {
      for (Iterator<HColumnDescriptor> it = families.values().iterator(),
          it2 = other.families.values().iterator(); it.hasNext(); ) {
        result = it.next().compareTo(it2.next());
        if (result != 0) {
          break;
        }
      }
    }
    if (result == 0) {
      // punt on comparison for ordering, just calculate difference
      result = this.values.hashCode() - other.values.hashCode();
      if (result < 0)
        result = -1;
      else if (result > 0)
        result = 1;
    }
    if (result == 0) {
      result = this.configuration.hashCode() - other.configuration.hashCode();
      if (result < 0)
        result = -1;
      else if (result > 0)
        result = 1;
    }
    return result;
  }

  /**
   * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
   * of all the column families of the table.
   *
   * @return Immutable collection of {@link HColumnDescriptor} of all the
   * column families.
   */
  public Collection<HColumnDescriptor> getFamilies() {
    return Collections.unmodifiableCollection(this.families.values());
  }

  /**
   * Returns the configured replicas per region
   */
  public int getRegionReplication() {
    byte[] val = getValue(REGION_REPLICATION_KEY);
    if (val == null || val.length == 0) {
      return DEFAULT_REGION_REPLICATION;
    }
    return Integer.parseInt(Bytes.toString(val));
  }

  /**
   * Sets the number of replicas per region.
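   *
   * <p>Sketch (assumes an existing descriptor <code>htd</code>): keep three replicas
   * of each region, one primary plus two read replicas:
   * <pre>
   * htd.setRegionReplication(3);
   * </pre>
   *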
   * @param regionReplication the replication factor per region
   */
  public HTableDescriptor setRegionReplication(int regionReplication) {
    setValue(REGION_REPLICATION_KEY,
        new ImmutableBytesWritable(Bytes.toBytes(Integer.toString(regionReplication))));
    return this;
  }

  /**
   * Returns all the column family names of the current table. The map of
   * HTableDescriptor contains mapping of family name to HColumnDescriptors.
   * This returns all the keys of the family map which represents the column
   * family names of the table.
   *
   * @return Immutable sorted set of the keys of the families.
   */
  public Set<byte[]> getFamiliesKeys() {
    return Collections.unmodifiableSet(this.families.keySet());
  }

  /**
   * Returns an array of all the {@link HColumnDescriptor} of the column families
   * of the table.
   *
   * @return Array of all the HColumnDescriptors of the current table
   *
   * @see #getFamilies()
   */
  public HColumnDescriptor[] getColumnFamilies() {
    Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
    return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
  }


  /**
   * Returns the HColumnDescriptor for a specific column family with name as
   * specified by the parameter column.
   *
   * @param column Column family name
   * @return The HColumnDescriptor for the specified family name, or null if
   * the family is not present.
   */
  public HColumnDescriptor getFamily(final byte [] column) {
    return this.families.get(column);
  }


  /**
   * Removes the HColumnDescriptor with name specified by the parameter column
   * from the table descriptor
   *
   * @param column Name of the column family to be removed.
   * @return The HColumnDescriptor that was removed, or null if the family was
   * not present.
   */
  public HColumnDescriptor removeFamily(final byte [] column) {
    return this.families.remove(column);
  }


  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
   * @param className Full class name.
   * @throws IOException
   */
  public HTableDescriptor addCoprocessor(String className) throws IOException {
    addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
    return this;
  }


  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
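   *
   * <p>Sketch (assumes an existing descriptor <code>htd</code>; the class name, jar
   * path and parameter below are made-up examples):
   * <pre>
   * Map&lt;String, String&gt; args = new HashMap&lt;String, String&gt;();
   * args.put("arg1", "value1");
   * htd.addCoprocessor("com.example.MyRegionObserver",
   *     new Path("hdfs:///coprocessors/my-observer.jar"),
   *     Coprocessor.PRIORITY_USER, args);
   * </pre>
   *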
   * @param jarFilePath Path of the jar file. If it's null, the class will be
   * loaded from default classloader.
   * @param className Full class name.
   * @param priority Priority
   * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
   * @throws IOException
   */
  public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
                             int priority, final Map<String, String> kvs)
  throws IOException {
    if (hasCoprocessor(className)) {
      throw new IOException("Coprocessor " + className + " already exists.");
    }
    // validate parameter kvs
    StringBuilder kvString = new StringBuilder();
    if (kvs != null) {
      for (Map.Entry<String, String> e: kvs.entrySet()) {
        if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
          throw new IOException("Illegal parameter key = " + e.getKey());
        }
        if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
          throw new IOException("Illegal parameter (" + e.getKey() +
              ") value = " + e.getValue());
        }
        if (kvString.length() != 0) {
          kvString.append(',');
        }
        kvString.append(e.getKey());
        kvString.append('=');
        kvString.append(e.getValue());
      }
    }

    // generate a coprocessor key
    int maxCoprocessorNumber = 0;
    Matcher keyMatcher;
    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
        this.values.entrySet()) {
      keyMatcher =
          HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
              Bytes.toString(e.getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)),
          maxCoprocessorNumber);
    }
    maxCoprocessorNumber++;

    String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
    String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
        "|" + className + "|" + Integer.toString(priority) + "|" +
        kvString.toString();
    setValue(key, value);
    return this;
  }


  /**
   * Check if the table has an attached co-processor represented by the name className
   *
   * @param className - Class name of the co-processor
   * @return true if the table has a co-processor className
   */
  public boolean hasCoprocessor(String className) {
    Matcher keyMatcher;
    Matcher valueMatcher;
    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
        this.values.entrySet()) {
      keyMatcher =
          HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
              Bytes.toString(e.getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      valueMatcher =
        HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(
            Bytes.toString(e.getValue().get()));
      if (!valueMatcher.matches()) {
        continue;
      }
      // get className and compare
      String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
      if (clazz.equals(className.trim())) {
        return true;
      }
    }
    return false;
  }

  /**
   * Return the list of attached co-processors, represented by their class names.
   *
   * @return The list of co-processor class names
   */
  public List<String> getCoprocessors() {
    List<String> result = new ArrayList<String>();
    Matcher keyMatcher;
    Matcher valueMatcher;
    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values.entrySet()) {
      keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
          .toString(e.getValue().get()));
      if (!valueMatcher.matches()) {
        continue;
      }
      result.add(valueMatcher.group(2).trim()); // classname is the 2nd field
    }
    return result;
  }

  /**
   * Remove a coprocessor from those set on the table
   * @param className Class name of the co-processor
   */
  public void removeCoprocessor(String className) {
    ImmutableBytesWritable match = null;
    Matcher keyMatcher;
    Matcher valueMatcher;
    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values
        .entrySet()) {
      keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
          .getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
          .toString(e.getValue().get()));
      if (!valueMatcher.matches()) {
        continue;
      }
      // get className and compare
      String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
      // remove the CP if it is present
      if (clazz.equals(className.trim())) {
        match = e.getKey();
        break;
      }
    }
    // if we found a match, remove it
    if (match != null)
      remove(match);
  }

  /**
   * Returns the {@link Path} object representing the table directory under
   * path rootdir
   *
   * @deprecated Use FSUtils.getTableDir() instead.
   *
   * @param rootdir qualified path of HBase root directory
   * @param tableName name of table
   * @return {@link Path} for table
   */
  @Deprecated
  public static Path getTableDir(Path rootdir, final byte [] tableName) {
    //This is bad. I had to mirror code from FSUtils.getTableDir since
    //there is no module dependency between hbase-client and hbase-server
    TableName name = TableName.valueOf(tableName);
    return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
              new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
  }

  /**
   * Table descriptor for <code>hbase:meta</code> catalog table
   * @deprecated Use TableDescriptors#get(TableName.META_TABLE_NAME) or
   * HBaseAdmin#getTableDescriptor(TableName.META_TABLE_NAME) instead.
   */
  @Deprecated
  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
      TableName.META_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
              // Ten is an arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(HConstants.DEFAULT_HBASE_META_VERSIONS)
              .setInMemory(true)
              .setBlocksize(HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
              .setBloomFilterType(BloomType.NONE)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });

  static {
    try {
      META_TABLEDESC.addCoprocessor(
          "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
          null, Coprocessor.PRIORITY_SYSTEM, null);
    } catch (IOException ex) {
      //LOG.warn("exception in loading coprocessor for the hbase:meta table");
      throw new RuntimeException(ex);
    }
  }

  public final static String NAMESPACE_FAMILY_INFO = "info";
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

  /** Table descriptor for namespace table */
  public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
      TableName.NAMESPACE_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
              // Ten is an arbitrary number.  Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });

  @Deprecated
  public HTableDescriptor setOwner(User owner) {
    return setOwnerString(owner != null ? owner.getShortName() : null);
  }

  // used by admin.rb:alter(table_name,*args) to update owner.
  @Deprecated
  public HTableDescriptor setOwnerString(String ownerString) {
    if (ownerString != null) {
      setValue(OWNER_KEY, ownerString);
    } else {
      remove(OWNER_KEY);
    }
    return this;
  }

  @Deprecated
  public String getOwnerString() {
    if (getValue(OWNER_KEY) != null) {
      return Bytes.toString(getValue(OWNER_KEY));
    }
    // Note that every table should have an owner (i.e. should have OWNER_KEY set).
    // hbase:meta and -ROOT- should return system user as owner, not null (see
    // MasterFileSystem.java:bootstrap()).
    return null;
  }

  /**
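   * Round-trip sketch (assumes an existing descriptor <code>htd</code>; exception
   * handling omitted):
   * <pre>
   * byte [] bytes = htd.toByteArray();
   * HTableDescriptor copy = HTableDescriptor.parseFrom(bytes);
   * assert htd.equals(copy);
   * </pre>
   *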
   * @return This instance serialized as a protobuf with the pb magic prefix
   * @see #parseFrom(byte[])
   */
  public byte [] toByteArray() {
    return ProtobufUtil.prependPBMagic(convert().toByteArray());
  }

  /**
   * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
   * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
   * @throws DeserializationException
   * @throws IOException
   * @see #toByteArray()
   */
  public static HTableDescriptor parseFrom(final byte [] bytes)
  throws DeserializationException, IOException {
    if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
      return (HTableDescriptor)Writables.getWritable(bytes, new HTableDescriptor());
    }
    int pblen = ProtobufUtil.lengthOfPBMagic();
    TableSchema.Builder builder = TableSchema.newBuilder();
    TableSchema ts;
    try {
      ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
    } catch (InvalidProtocolBufferException e) {
      throw new DeserializationException(e);
    }
    return convert(ts);
  }

  /**
   * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
   */
  public TableSchema convert() {
    TableSchema.Builder builder = TableSchema.newBuilder();
    builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
      BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
      aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
      aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
      builder.addAttributes(aBuilder.build());
    }
    for (HColumnDescriptor hcd: getColumnFamilies()) {
      builder.addColumnFamilies(hcd.convert());
    }
    for (Map.Entry<String, String> e : this.configuration.entrySet()) {
      NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
      aBuilder.setName(e.getKey());
      aBuilder.setValue(e.getValue());
      builder.addConfiguration(aBuilder.build());
    }
    return builder.build();
  }

  /**
   * @param ts A pb TableSchema instance.
   * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
   */
  public static HTableDescriptor convert(final TableSchema ts) {
    List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
    HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
    int index = 0;
    for (ColumnFamilySchema cfs: list) {
      hcds[index++] = HColumnDescriptor.convert(cfs);
    }
    HTableDescriptor htd = new HTableDescriptor(
        ProtobufUtil.toTableName(ts.getTableName()),
        hcds);
    for (BytesBytesPair a: ts.getAttributesList()) {
      htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
    }
    for (NameStringPair a: ts.getConfigurationList()) {
      htd.setConfiguration(a.getName(), a.getValue());
    }
    return htd;
  }

  /**
   * Getter for accessing the configuration value by key
   */
  public String getConfigurationValue(String key) {
    return configuration.get(key);
  }

  /**
   * Getter for fetching an unmodifiable {@link #configuration} map.
   */
  public Map<String, String> getConfiguration() {
    // shallow pointer copy
    return Collections.unmodifiableMap(configuration);
  }

  /**
   * Setter for storing a configuration setting in {@link #configuration} map.
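   *
   * <p>Sketch (assumes an existing descriptor <code>htd</code>); the key below is a
   * standard compaction setting, overridden here for this table only:
   * <pre>
   * htd.setConfiguration("hbase.hstore.compaction.min", "5");
   * </pre>
   *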
   * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
   * @param value String value. If null, removes the setting.
   */
  public HTableDescriptor setConfiguration(String key, String value) {
    if (value == null) {
      removeConfiguration(key);
    } else {
      configuration.put(key, value);
    }
    return this;
  }

  /**
   * Remove a config setting represented by the key from the {@link #configuration} map
   */
  public void removeConfiguration(final String key) {
    configuration.remove(key);
  }

  public static HTableDescriptor metaTableDescriptor(final Configuration conf)
      throws IOException {
    HTableDescriptor metaDescriptor = new HTableDescriptor(
      TableName.META_TABLE_NAME,
      new HColumnDescriptor[] {
        new HColumnDescriptor(HConstants.CATALOG_FAMILY)
          .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
            HConstants.DEFAULT_HBASE_META_VERSIONS))
          .setInMemory(true)
          .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
            HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
          .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
          // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
          .setBloomFilterType(BloomType.NONE)
         });
    metaDescriptor.addCoprocessor(
      "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
      null, Coprocessor.PRIORITY_SYSTEM, null);
    return metaDescriptor;
  }

}