View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PrettyPrinter;
import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

import com.google.common.base.Preconditions;
import com.google.protobuf.InvalidProtocolBufferException;
50  
51  /**
52   * An HColumnDescriptor contains information about a column family such as the
53   * number of versions, compression settings, etc.
54   *
55   * It is used as input when creating a table or adding a column.
56   */
57  @InterfaceAudience.Public
58  @InterfaceStability.Evolving
59  public class HColumnDescriptor implements WritableComparable<HColumnDescriptor> {
60    // For future backward compatibility
61  
62    // Version  3 was when column names become byte arrays and when we picked up
63    // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
64    // Version  5 was when bloom filter descriptors were removed.
65    // Version  6 adds metadata as a map where keys and values are byte[].
66    // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
67    // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
68    // Version  9 -- add data block encoding
69    // Version 10 -- change metadata to standard type.
70    // Version 11 -- add column family level configuration.
71    private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;
72  
73    // These constants are used as FileInfo keys
74    public static final String COMPRESSION = "COMPRESSION";
75    public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
76    public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
77        "ENCODE_ON_DISK";
78    public static final String DATA_BLOCK_ENCODING =
79        "DATA_BLOCK_ENCODING";
80    public static final String BLOCKCACHE = "BLOCKCACHE";
81    public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
82    public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
83    public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
84    public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
85    /**
86     * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
87     * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
88     * family will be loaded into the cache as soon as the file is opened. These
89     * loads will not count as cache misses.
90     */
91    public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN";
92  
93    /**
94     * Size of storefile/hfile 'blocks'.  Default is {@link #DEFAULT_BLOCKSIZE}.
95     * Use smaller block sizes for faster random-access at expense of larger
96     * indices (more memory consumption).
97     */
98    public static final String BLOCKSIZE = "BLOCKSIZE";
99  
100   public static final String LENGTH = "LENGTH";
101   public static final String TTL = "TTL";
102   public static final String BLOOMFILTER = "BLOOMFILTER";
103   public static final String FOREVER = "FOREVER";
104   public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
105   public static final byte[] REPLICATION_SCOPE_BYTES = Bytes.toBytes(REPLICATION_SCOPE);
106   public static final String MIN_VERSIONS = "MIN_VERSIONS";
107   public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
108   public static final String COMPRESS_TAGS = "COMPRESS_TAGS";
109 
110   @InterfaceStability.Unstable
111   public static final String ENCRYPTION = "ENCRYPTION";
112   @InterfaceStability.Unstable
113   public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";
114 
115   /**
116    * Default compression type.
117    */
118   public static final String DEFAULT_COMPRESSION =
119     Compression.Algorithm.NONE.getName();
120 
121   /**
122    * Default value of the flag that enables data block encoding on disk, as
123    * opposed to encoding in cache only. We encode blocks everywhere by default,
124    * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
125    */
126   public static final boolean DEFAULT_ENCODE_ON_DISK = true;
127 
128   /** Default data block encoding algorithm. */
129   public static final String DEFAULT_DATA_BLOCK_ENCODING =
130       DataBlockEncoding.NONE.toString();
131 
132   /**
133    * Default number of versions of a record to keep.
134    */
135   public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt(
136     "hbase.column.max.version", 1);
137 
138   /**
139    * Default is not to keep a minimum of versions.
140    */
141   public static final int DEFAULT_MIN_VERSIONS = 0;
142 
143   /*
144    * Cache here the HCD value.
145    * Question: its OK to cache since when we're reenable, we create a new HCD?
146    */
147   private volatile Integer blocksize = null;
148 
149   /**
150    * Default setting for whether to serve from memory or not.
151    */
152   public static final boolean DEFAULT_IN_MEMORY = false;
153 
154   /**
155    * Default setting for preventing deleted from being collected immediately.
156    */
157   public static final KeepDeletedCells DEFAULT_KEEP_DELETED = KeepDeletedCells.FALSE;
158 
159   /**
160    * Default setting for whether to use a block cache or not.
161    */
162   public static final boolean DEFAULT_BLOCKCACHE = true;
163 
164   /**
165    * Default setting for whether to cache data blocks on write if block caching
166    * is enabled.
167    */
168   public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
169 
170   /**
171    * Default setting for whether to cache index blocks on write if block
172    * caching is enabled.
173    */
174   public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false;
175 
176   /**
177    * Default size of blocks in files stored to the filesytem (hfiles).
178    */
179   public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE;
180 
181   /**
182    * Default setting for whether or not to use bloomfilters.
183    */
184   public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();
185 
186   /**
187    * Default setting for whether to cache bloom filter blocks on write if block
188    * caching is enabled.
189    */
190   public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;
191 
192   /**
193    * Default time to live of cell contents.
194    */
195   public static final int DEFAULT_TTL = HConstants.FOREVER;
196 
197   /**
198    * Default scope.
199    */
200   public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;
201 
202   /**
203    * Default setting for whether to evict cached blocks from the blockcache on
204    * close.
205    */
206   public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;
207 
208   /**
209    * Default compress tags along with any type of DataBlockEncoding.
210    */
211   public static final boolean DEFAULT_COMPRESS_TAGS = true;
212 
213   /*
214    * Default setting for whether to prefetch blocks into the blockcache on open.
215    */
216   public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false;
217 
218   private final static Map<String, String> DEFAULT_VALUES
219     = new HashMap<String, String>();
220   private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
221     = new HashSet<ImmutableBytesWritable>();
222   static {
223       DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
224       DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
225       DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
226       DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS));
227       DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
228       DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
229       DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
230       DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
231       DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
232       DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
233       DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
234       DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
235       DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
236       DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
237       DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
238       DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
239       for (String s : DEFAULT_VALUES.keySet()) {
240         RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
241       }
242       RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(ENCRYPTION)));
243       RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(ENCRYPTION_KEY)));
244   }
245 
246   private static final int UNINITIALIZED = -1;
247 
248   // Column family name
249   private byte [] name;
250 
251   // Column metadata
252   private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
253     new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();
254 
255   /**
256    * A map which holds the configuration specific to the column family.
257    * The keys of the map have the same names as config keys and override the defaults with
258    * cf-specific settings. Example usage may be for compactions, etc.
259    */
260   private final Map<String, String> configuration = new HashMap<String, String>();
261 
262   /*
263    * Cache the max versions rather than calculate it every time.
264    */
265   private int cachedMaxVersions = UNINITIALIZED;
266 
267   /**
268    * Default constructor. Must be present for Writable.
269    * @deprecated Used by Writables and Writables are going away.
270    */
271   @Deprecated
272   // Make this private rather than remove after deprecation period elapses.  Its needed by pb
273   // deserializations.
274   public HColumnDescriptor() {
275     this.name = null;
276   }
277 
278   /**
279    * Construct a column descriptor specifying only the family name
280    * The other attributes are defaulted.
281    *
282    * @param familyName Column family name. Must be 'printable' -- digit or
283    * letter -- and may not contain a <code>:<code>
284    */
285   public HColumnDescriptor(final String familyName) {
286     this(Bytes.toBytes(familyName));
287   }
288 
289   /**
290    * Construct a column descriptor specifying only the family name
291    * The other attributes are defaulted.
292    *
293    * @param familyName Column family name. Must be 'printable' -- digit or
294    * letter -- and may not contain a <code>:<code>
295    */
296   public HColumnDescriptor(final byte [] familyName) {
297     this (familyName == null || familyName.length <= 0?
298       HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS,
299       DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
300       DEFAULT_TTL, DEFAULT_BLOOMFILTER);
301   }
302 
303   /**
304    * Constructor.
305    * Makes a deep copy of the supplied descriptor.
306    * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
307    * @param desc The descriptor.
308    */
309   public HColumnDescriptor(HColumnDescriptor desc) {
310     super();
311     this.name = desc.name.clone();
312     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
313         desc.values.entrySet()) {
314       this.values.put(e.getKey(), e.getValue());
315     }
316     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
317       this.configuration.put(e.getKey(), e.getValue());
318     }
319     setMaxVersions(desc.getMaxVersions());
320   }
321 
322   /**
323    * Constructor
324    * @param familyName Column family name. Must be 'printable' -- digit or
325    * letter -- and may not contain a <code>:<code>
326    * @param maxVersions Maximum number of versions to keep
327    * @param compression Compression type
328    * @param inMemory If true, column data should be kept in an HRegionServer's
329    * cache
330    * @param blockCacheEnabled If true, MapFile blocks should be cached
331    * @param timeToLive Time-to-live of cell contents, in seconds
332    * (use HConstants.FOREVER for unlimited TTL)
333    * @param bloomFilter Bloom filter type for this column
334    *
335    * @throws IllegalArgumentException if passed a family name that is made of
336    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
337    * a <code>:</code>
338    * @throws IllegalArgumentException if the number of versions is &lt;= 0
339    * @deprecated use {@link #HColumnDescriptor(String)} and setters
340    */
341   @Deprecated
342   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
343       final String compression, final boolean inMemory,
344       final boolean blockCacheEnabled,
345       final int timeToLive, final String bloomFilter) {
346     this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
347       DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE);
348   }
349 
350   /**
351    * Constructor
352    * @param familyName Column family name. Must be 'printable' -- digit or
353    * letter -- and may not contain a <code>:<code>
354    * @param maxVersions Maximum number of versions to keep
355    * @param compression Compression type
356    * @param inMemory If true, column data should be kept in an HRegionServer's
357    * cache
358    * @param blockCacheEnabled If true, MapFile blocks should be cached
359    * @param blocksize Block size to use when writing out storefiles.  Use
360    * smaller block sizes for faster random-access at expense of larger indices
361    * (more memory consumption).  Default is usually 64k.
362    * @param timeToLive Time-to-live of cell contents, in seconds
363    * (use HConstants.FOREVER for unlimited TTL)
364    * @param bloomFilter Bloom filter type for this column
365    * @param scope The scope tag for this column
366    *
367    * @throws IllegalArgumentException if passed a family name that is made of
368    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
369    * a <code>:</code>
370    * @throws IllegalArgumentException if the number of versions is &lt;= 0
371    * @deprecated use {@link #HColumnDescriptor(String)} and setters
372    */
373   @Deprecated
374   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
375       final String compression, final boolean inMemory,
376       final boolean blockCacheEnabled, final int blocksize,
377       final int timeToLive, final String bloomFilter, final int scope) {
378     this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED,
379         compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING,
380         inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter,
381         scope);
382   }
383 
384   /**
385    * Constructor
386    * @param familyName Column family name. Must be 'printable' -- digit or
387    * letter -- and may not contain a <code>:<code>
388    * @param minVersions Minimum number of versions to keep
389    * @param maxVersions Maximum number of versions to keep
390    * @param keepDeletedCells Whether to retain deleted cells until they expire
391    *        up to maxVersions versions.
392    * @param compression Compression type
393    * @param encodeOnDisk whether to use the specified data block encoding
394    *        on disk. If false, the encoding will be used in cache only.
395    * @param dataBlockEncoding data block encoding
396    * @param inMemory If true, column data should be kept in an HRegionServer's
397    * cache
398    * @param blockCacheEnabled If true, MapFile blocks should be cached
399    * @param blocksize Block size to use when writing out storefiles.  Use
400    * smaller blocksizes for faster random-access at expense of larger indices
401    * (more memory consumption).  Default is usually 64k.
402    * @param timeToLive Time-to-live of cell contents, in seconds
403    * (use HConstants.FOREVER for unlimited TTL)
404    * @param bloomFilter Bloom filter type for this column
405    * @param scope The scope tag for this column
406    *
407    * @throws IllegalArgumentException if passed a family name that is made of
408    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
409    * a <code>:</code>
410    * @throws IllegalArgumentException if the number of versions is &lt;= 0
411    * @deprecated use {@link #HColumnDescriptor(String)} and setters
412    */
413   @Deprecated
414   public HColumnDescriptor(final byte[] familyName, final int minVersions,
415       final int maxVersions, final KeepDeletedCells keepDeletedCells,
416       final String compression, final boolean encodeOnDisk,
417       final String dataBlockEncoding, final boolean inMemory,
418       final boolean blockCacheEnabled, final int blocksize,
419       final int timeToLive, final String bloomFilter, final int scope) {
420     isLegalFamilyName(familyName);
421     this.name = familyName;
422 
423     if (maxVersions <= 0) {
424       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
425       // Until there is support, consider 0 or < 0 -- a configuration error.
426       throw new IllegalArgumentException("Maximum versions must be positive");
427     }
428 
429     if (minVersions > 0) {
430       if (timeToLive == HConstants.FOREVER) {
431         throw new IllegalArgumentException("Minimum versions requires TTL.");
432       }
433       if (minVersions >= maxVersions) {
434         throw new IllegalArgumentException("Minimum versions must be < "
435             + "maximum versions.");
436       }
437     }
438 
439     setMaxVersions(maxVersions);
440     setMinVersions(minVersions);
441     setKeepDeletedCells(keepDeletedCells);
442     setInMemory(inMemory);
443     setBlockCacheEnabled(blockCacheEnabled);
444     setTimeToLive(timeToLive);
445     setCompressionType(Compression.Algorithm.
446       valueOf(compression.toUpperCase()));
447     setDataBlockEncoding(DataBlockEncoding.
448         valueOf(dataBlockEncoding.toUpperCase()));
449     setBloomFilterType(BloomType.
450       valueOf(bloomFilter.toUpperCase()));
451     setBlocksize(blocksize);
452     setScope(scope);
453   }
454 
455   /**
456    * @param b Family name.
457    * @return <code>b</code>
458    * @throws IllegalArgumentException If not null and not a legitimate family
459    * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
460    * <code>b</code> can be null when deserializing).  Cannot start with a '.'
461    * either. Also Family can not be an empty value or equal "recovered.edits".
462    */
463   public static byte [] isLegalFamilyName(final byte [] b) {
464     if (b == null) {
465       return b;
466     }
467     Preconditions.checkArgument(b.length != 0, "Family name can not be empty");
468     if (b[0] == '.') {
469       throw new IllegalArgumentException("Family names cannot start with a " +
470         "period: " + Bytes.toString(b));
471     }
472     for (int i = 0; i < b.length; i++) {
473       if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
474         throw new IllegalArgumentException("Illegal character <" + b[i] +
475           ">. Family names cannot contain control characters or colons: " +
476           Bytes.toString(b));
477       }
478     }
479     byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
480     if (Bytes.equals(recoveredEdit, b)) {
481       throw new IllegalArgumentException("Family name cannot be: " +
482           HConstants.RECOVERED_EDITS_DIR);
483     }
484     return b;
485   }
486 
487   /**
488    * @return Name of this column family
489    */
490   public byte [] getName() {
491     return name;
492   }
493 
494   /**
495    * @return Name of this column family
496    */
497   public String getNameAsString() {
498     return Bytes.toString(this.name);
499   }
500 
501   /**
502    * @param key The key.
503    * @return The value.
504    */
505   public byte[] getValue(byte[] key) {
506     ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key));
507     if (ibw == null)
508       return null;
509     return ibw.get();
510   }
511 
512   /**
513    * @param key The key.
514    * @return The value as a string.
515    */
516   public String getValue(String key) {
517     byte[] value = getValue(Bytes.toBytes(key));
518     if (value == null)
519       return null;
520     return Bytes.toString(value);
521   }
522 
523   /**
524    * @return All values.
525    */
526   public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
527     // shallow pointer copy
528     return Collections.unmodifiableMap(values);
529   }
530 
531   /**
532    * @param key The key.
533    * @param value The value.
534    * @return this (for chained invocation)
535    */
536   public HColumnDescriptor setValue(byte[] key, byte[] value) {
537     values.put(new ImmutableBytesWritable(key),
538       new ImmutableBytesWritable(value));
539     return this;
540   }
541 
542   /**
543    * @param key Key whose key and value we're to remove from HCD parameters.
544    */
545   public void remove(final byte [] key) {
546     values.remove(new ImmutableBytesWritable(key));
547   }
548 
549   /**
550    * @param key The key.
551    * @param value The value.
552    * @return this (for chained invocation)
553    */
554   public HColumnDescriptor setValue(String key, String value) {
555     if (value == null) {
556       remove(Bytes.toBytes(key));
557     } else {
558       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
559     }
560     return this;
561   }
562 
563   /** @return compression type being used for the column family */
564   public Compression.Algorithm getCompression() {
565     String n = getValue(COMPRESSION);
566     if (n == null) {
567       return Compression.Algorithm.NONE;
568     }
569     return Compression.Algorithm.valueOf(n.toUpperCase());
570   }
571 
572   /** @return compression type being used for the column family for major
573       compression */
574   public Compression.Algorithm getCompactionCompression() {
575     String n = getValue(COMPRESSION_COMPACT);
576     if (n == null) {
577       return getCompression();
578     }
579     return Compression.Algorithm.valueOf(n.toUpperCase());
580   }
581 
582   /** @return maximum number of versions */
583   public int getMaxVersions() {
584     if (this.cachedMaxVersions == UNINITIALIZED) {
585       String v = getValue(HConstants.VERSIONS);
586       this.cachedMaxVersions = Integer.parseInt(v);
587     }
588     return this.cachedMaxVersions;
589   }
590 
591   /**
592    * @param maxVersions maximum number of versions
593    * @return this (for chained invocation)
594    */
595   public HColumnDescriptor setMaxVersions(int maxVersions) {
596     if (maxVersions <= 0) {
597       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
598       // Until there is support, consider 0 or < 0 -- a configuration error.
599       throw new IllegalArgumentException("Maximum versions must be positive");
600     }    
601     if (maxVersions < this.getMinVersions()) {      
602         throw new IllegalArgumentException("Set MaxVersion to " + maxVersions
603             + " while minVersion is " + this.getMinVersions()
604             + ". Maximum versions must be >= minimum versions ");      
605     }
606     setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
607     cachedMaxVersions = maxVersions;
608     return this;
609   }
610 
611   /**
612    * Set minimum and maximum versions to keep
613    *
614    * @param minVersions minimal number of versions
615    * @param maxVersions maximum number of versions
616    * @return this (for chained invocation)
617    */
618   public HColumnDescriptor setVersions(int minVersions, int maxVersions) {
619     if (minVersions <= 0) {
620       // TODO: Allow minVersion and maxVersion of 0 to be the way you say "Keep all versions".
621       // Until there is support, consider 0 or < 0 -- a configuration error.
622       throw new IllegalArgumentException("Minimum versions must be positive");
623     }
624 
625     if (maxVersions < minVersions) {
626       throw new IllegalArgumentException("Unable to set MaxVersion to " + maxVersions
627         + " and set MinVersion to " + minVersions
628         + ", as maximum versions must be >= minimum versions.");
629     }
630     setMinVersions(minVersions);
631     setMaxVersions(maxVersions);
632     return this;
633   }
634 
635   /**
636    * @return The storefile/hfile blocksize for this column family.
637    */
638   public synchronized int getBlocksize() {
639     if (this.blocksize == null) {
640       String value = getValue(BLOCKSIZE);
641       this.blocksize = (value != null)?
642         Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
643     }
644     return this.blocksize.intValue();
645   }
646 
647   /**
648    * @param s Blocksize to use when writing out storefiles/hfiles on this
649    * column family.
650    * @return this (for chained invocation)
651    */
652   public HColumnDescriptor setBlocksize(int s) {
653     setValue(BLOCKSIZE, Integer.toString(s));
654     this.blocksize = null;
655     return this;
656   }
657 
658   /**
659    * @return Compression type setting.
660    */
661   public Compression.Algorithm getCompressionType() {
662     return getCompression();
663   }
664 
665   /**
666    * Compression types supported in hbase.
667    * LZO is not bundled as part of the hbase distribution.
668    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
669    * for how to enable it.
670    * @param type Compression type setting.
671    * @return this (for chained invocation)
672    */
673   public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
674     return setValue(COMPRESSION, type.getName().toUpperCase());
675   }
676 
677   /** @return data block encoding algorithm used on disk */
678   @Deprecated
679   public DataBlockEncoding getDataBlockEncodingOnDisk() {
680     return getDataBlockEncoding();
681   }
682 
683   /**
684    * This method does nothing now. Flag ENCODE_ON_DISK is not used
685    * any more. Data blocks have the same encoding in cache as on disk.
686    * @return this (for chained invocation)
687    */
688   @Deprecated
689   public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
690     return this;
691   }
692 
693   /**
694    * @return the data block encoding algorithm used in block cache and
695    *         optionally on disk
696    */
697   public DataBlockEncoding getDataBlockEncoding() {
698     String type = getValue(DATA_BLOCK_ENCODING);
699     if (type == null) {
700       type = DEFAULT_DATA_BLOCK_ENCODING;
701     }
702     return DataBlockEncoding.valueOf(type);
703   }
704 
705   /**
706    * Set data block encoding algorithm used in block cache.
707    * @param type What kind of data block encoding will be used.
708    * @return this (for chained invocation)
709    */
710   public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
711     String name;
712     if (type != null) {
713       name = type.toString();
714     } else {
715       name = DataBlockEncoding.NONE.toString();
716     }
717     return setValue(DATA_BLOCK_ENCODING, name);
718   }
719 
720   /**
721    * Set whether the tags should be compressed along with DataBlockEncoding. When no
722    * DataBlockEncoding is been used, this is having no effect.
723    * 
724    * @param compressTags
725    * @return this (for chained invocation)
726    */
727   public HColumnDescriptor setCompressTags(boolean compressTags) {
728     return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
729   }
730 
731   /**
732    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
733    *         DataBlockEncoding is been used, this is having no effect.
734    */
735   public boolean shouldCompressTags() {
736     String compressTagsStr = getValue(COMPRESS_TAGS);
737     boolean compressTags = DEFAULT_COMPRESS_TAGS;
738     if (compressTagsStr != null) {
739       compressTags = Boolean.valueOf(compressTagsStr);
740     }
741     return compressTags;
742   }
743 
744   /**
745    * @return Compression type setting.
746    */
747   public Compression.Algorithm getCompactionCompressionType() {
748     return getCompactionCompression();
749   }
750 
751   /**
752    * Compression types supported in hbase.
753    * LZO is not bundled as part of the hbase distribution.
754    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
755    * for how to enable it.
756    * @param type Compression type setting.
757    * @return this (for chained invocation)
758    */
759   public HColumnDescriptor setCompactionCompressionType(
760       Compression.Algorithm type) {
761     return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase());
762   }
763 
764   /**
765    * @return True if we are to keep all in use HRegionServer cache.
766    */
767   public boolean isInMemory() {
768     String value = getValue(HConstants.IN_MEMORY);
769     if (value != null)
770       return Boolean.valueOf(value).booleanValue();
771     return DEFAULT_IN_MEMORY;
772   }
773 
774   /**
775    * @param inMemory True if we are to keep all values in the HRegionServer
776    * cache
777    * @return this (for chained invocation)
778    */
779   public HColumnDescriptor setInMemory(boolean inMemory) {
780     return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
781   }
782 
783   public KeepDeletedCells getKeepDeletedCells() {
784     String value = getValue(KEEP_DELETED_CELLS);
785     if (value != null) {
786       // toUpperCase for backwards compatibility
787       return KeepDeletedCells.valueOf(value.toUpperCase());
788     }
789     return DEFAULT_KEEP_DELETED;
790   }
791 
792   /**
793    * @param keepDeletedCells True if deleted rows should not be collected
794    * immediately.
795    * @return this (for chained invocation)
796    * @deprecated use {@link #setKeepDeletedCells(KeepDeletedCells)}
797    */
798   @Deprecated
799   public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) {
800     return setValue(KEEP_DELETED_CELLS, (keepDeletedCells ? KeepDeletedCells.TRUE
801         : KeepDeletedCells.FALSE).toString());
802   }
803 
804   /**
805    * @param keepDeletedCells True if deleted rows should not be collected
806    * immediately.
807    * @return this (for chained invocation)
808    */
809   public HColumnDescriptor setKeepDeletedCells(KeepDeletedCells keepDeletedCells) {
810     return setValue(KEEP_DELETED_CELLS, keepDeletedCells.toString());
811   }
812 
813   /**
814    * @return Time-to-live of cell contents, in seconds.
815    */
816   public int getTimeToLive() {
817     String value = getValue(TTL);
818     return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL;
819   }
820 
821   /**
822    * @param timeToLive Time-to-live of cell contents, in seconds.
823    * @return this (for chained invocation)
824    */
825   public HColumnDescriptor setTimeToLive(int timeToLive) {
826     return setValue(TTL, Integer.toString(timeToLive));
827   }
828 
829   /**
830    * @return The minimum number of versions to keep.
831    */
832   public int getMinVersions() {
833     String value = getValue(MIN_VERSIONS);
834     return (value != null)? Integer.valueOf(value).intValue(): 0;
835   }
836 
837   /**
838    * @param minVersions The minimum number of versions to keep.
839    * (used when timeToLive is set)
840    * @return this (for chained invocation)
841    */
842   public HColumnDescriptor setMinVersions(int minVersions) {
843     return setValue(MIN_VERSIONS, Integer.toString(minVersions));
844   }
845 
846   /**
847    * @return True if MapFile blocks should be cached.
848    */
849   public boolean isBlockCacheEnabled() {
850     String value = getValue(BLOCKCACHE);
851     if (value != null)
852       return Boolean.valueOf(value).booleanValue();
853     return DEFAULT_BLOCKCACHE;
854   }
855 
856   /**
857    * @param blockCacheEnabled True if MapFile blocks should be cached.
858    * @return this (for chained invocation)
859    */
860   public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
861     return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
862   }
863 
864   /**
865    * @return bloom filter type used for new StoreFiles in ColumnFamily
866    */
867   public BloomType getBloomFilterType() {
868     String n = getValue(BLOOMFILTER);
869     if (n == null) {
870       n = DEFAULT_BLOOMFILTER;
871     }
872     return BloomType.valueOf(n.toUpperCase());
873   }
874 
875   /**
876    * @param bt bloom filter type
877    * @return this (for chained invocation)
878    */
879   public HColumnDescriptor setBloomFilterType(final BloomType bt) {
880     return setValue(BLOOMFILTER, bt.toString());
881   }
882 
883    /**
884     * @return the scope tag
885     */
886   public int getScope() {
887     byte[] value = getValue(REPLICATION_SCOPE_BYTES);
888     if (value != null) {
889       return Integer.valueOf(Bytes.toString(value));
890     }
891     return DEFAULT_REPLICATION_SCOPE;
892   }
893 
894  /**
895   * @param scope the scope tag
896   * @return this (for chained invocation)
897   */
898   public HColumnDescriptor setScope(int scope) {
899     return setValue(REPLICATION_SCOPE, Integer.toString(scope));
900   }
901 
902   /**
903    * @return true if we should cache data blocks on write
904    */
905   public boolean shouldCacheDataOnWrite() {
906     String value = getValue(CACHE_DATA_ON_WRITE);
907     if (value != null) {
908       return Boolean.valueOf(value).booleanValue();
909     }
910     return DEFAULT_CACHE_DATA_ON_WRITE;
911   }
912 
913   /**
914    * @param value true if we should cache data blocks on write
915    * @return this (for chained invocation)
916    */
917   public HColumnDescriptor setCacheDataOnWrite(boolean value) {
918     return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
919   }
920 
921   /**
922    * @return true if we should cache index blocks on write
923    */
924   public boolean shouldCacheIndexesOnWrite() {
925     String value = getValue(CACHE_INDEX_ON_WRITE);
926     if (value != null) {
927       return Boolean.valueOf(value).booleanValue();
928     }
929     return DEFAULT_CACHE_INDEX_ON_WRITE;
930   }
931 
932   /**
933    * @param value true if we should cache index blocks on write
934    * @return this (for chained invocation)
935    */
936   public HColumnDescriptor setCacheIndexesOnWrite(boolean value) {
937     return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
938   }
939 
940   /**
941    * @return true if we should cache bloomfilter blocks on write
942    */
943   public boolean shouldCacheBloomsOnWrite() {
944     String value = getValue(CACHE_BLOOMS_ON_WRITE);
945     if (value != null) {
946       return Boolean.valueOf(value).booleanValue();
947     }
948     return DEFAULT_CACHE_BLOOMS_ON_WRITE;
949   }
950 
951   /**
952    * @param value true if we should cache bloomfilter blocks on write
953    * @return this (for chained invocation)
954    */
955   public HColumnDescriptor setCacheBloomsOnWrite(boolean value) {
956     return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
957   }
958 
959   /**
960    * @return true if we should evict cached blocks from the blockcache on
961    * close
962    */
963   public boolean shouldEvictBlocksOnClose() {
964     String value = getValue(EVICT_BLOCKS_ON_CLOSE);
965     if (value != null) {
966       return Boolean.valueOf(value).booleanValue();
967     }
968     return DEFAULT_EVICT_BLOCKS_ON_CLOSE;
969   }
970 
971   /**
972    * @param value true if we should evict cached blocks from the blockcache on
973    * close
974    * @return this (for chained invocation)
975    */
976   public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
977     return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
978   }
979 
980   /**
981    * @return true if we should prefetch blocks into the blockcache on open
982    */
983   public boolean shouldPrefetchBlocksOnOpen() {
984     String value = getValue(PREFETCH_BLOCKS_ON_OPEN);
985    if (value != null) {
986       return Boolean.valueOf(value).booleanValue();
987     }
988     return DEFAULT_PREFETCH_BLOCKS_ON_OPEN;
989   }
990 
991   /**
992    * @param value true if we should prefetch blocks into the blockcache on open
993    * @return this (for chained invocation)
994    */
995   public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) {
996     return setValue(PREFETCH_BLOCKS_ON_OPEN, Boolean.toString(value));
997   }
998 
999   /**
1000    * @see java.lang.Object#toString()
1001    */
1002   @Override
1003   public String toString() {
1004     StringBuilder s = new StringBuilder();
1005 
1006     s.append('{');
1007     s.append(HConstants.NAME);
1008     s.append(" => '");
1009     s.append(Bytes.toString(name));
1010     s.append("'");
1011     s.append(getValues(true));
1012     s.append('}');
1013     return s.toString();
1014   }
1015 
1016   /**
1017    * @return Column family descriptor with only the customized attributes.
1018    */
1019   public String toStringCustomizedValues() {
1020     StringBuilder s = new StringBuilder();
1021     s.append('{');
1022     s.append(HConstants.NAME);
1023     s.append(" => '");
1024     s.append(Bytes.toString(name));
1025     s.append("'");
1026     s.append(getValues(false));
1027     s.append('}');
1028     return s.toString();
1029   }
1030 
  /**
   * Renders this family's attributes and configuration for toString output.
   * Reserved (well-known) keys are printed first; non-reserved keys are grouped
   * under a METADATA sub-map; per-family configuration under CONFIGURATION.
   * @param printDefaults if false, reserved keys whose value equals the default
   *        are omitted
   * @return a builder beginning with ", " when non-empty (caller appends it
   *         after the NAME entry)
   */
  private StringBuilder getValues(boolean printDefaults) {
    StringBuilder s = new StringBuilder();

    boolean hasConfigKeys = false;

    // print all reserved keys first
    for (ImmutableBytesWritable k : values.keySet()) {
      if (!RESERVED_KEYWORDS.contains(k)) {
        // remember that a second METADATA pass is needed
        hasConfigKeys = true;
        continue;
      }
      String key = Bytes.toString(k.get());
      String value = Bytes.toStringBinary(values.get(k).get());
      // skip default-valued entries unless defaults were requested
      if (printDefaults
          || !DEFAULT_VALUES.containsKey(key)
          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
        s.append(", ");
        s.append(key);
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
      }
    }

    // print all non-reserved, advanced config keys as a separate subset
    if (hasConfigKeys) {
      s.append(", ");
      s.append(HConstants.METADATA).append(" => ");
      s.append('{');
      boolean printComma = false;
      for (ImmutableBytesWritable k : values.keySet()) {
        if (RESERVED_KEYWORDS.contains(k)) {
          continue;
        }
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printComma) {
          s.append(", ");
        }
        printComma = true;
        s.append('\'').append(key).append('\'');
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
      }
      s.append('}');
    }

    // per-family configuration map, if any
    if (!configuration.isEmpty()) {
      s.append(", ");
      s.append(HConstants.CONFIGURATION).append(" => ");
      s.append('{');
      boolean printCommaForConfiguration = false;
      for (Map.Entry<String, String> e : configuration.entrySet()) {
        if (printCommaForConfiguration) s.append(", ");
        printCommaForConfiguration = true;
        s.append('\'').append(e.getKey()).append('\'');
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\'');
      }
      s.append("}");
    }
    return s;
  }
1093 
1094   public static Unit getUnit(String key) {
1095     Unit unit;
1096       /* TTL for now, we can add more as we neeed */
1097     if (key.equals(HColumnDescriptor.TTL)) {
1098       unit = Unit.TIME_INTERVAL;
1099     } else {
1100       unit = Unit.NONE;
1101     }
1102     return unit;
1103   }
1104 
  /**
   * @return unmodifiable view of the default attribute values keyed by attribute name
   */
  public static Map<String, String> getDefaultValues() {
    return Collections.unmodifiableMap(DEFAULT_VALUES);
  }
1108 
1109   /**
1110    * @see java.lang.Object#equals(java.lang.Object)
1111    */
1112   @Override
1113   public boolean equals(Object obj) {
1114     if (this == obj) {
1115       return true;
1116     }
1117     if (obj == null) {
1118       return false;
1119     }
1120     if (!(obj instanceof HColumnDescriptor)) {
1121       return false;
1122     }
1123     return compareTo((HColumnDescriptor)obj) == 0;
1124   }
1125 
1126   /**
1127    * @see java.lang.Object#hashCode()
1128    */
1129   @Override
1130   public int hashCode() {
1131     int result = Bytes.hashCode(this.name);
1132     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
1133     result ^= values.hashCode();
1134     result ^= configuration.hashCode();
1135     return result;
1136   }
1137 
1138   /**
1139    * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
1140    */
1141   @Deprecated
1142   public void readFields(DataInput in) throws IOException {
1143     int version = in.readByte();
1144     if (version < 6) {
1145       if (version <= 2) {
1146         Text t = new Text();
1147         t.readFields(in);
1148         this.name = t.getBytes();
1149 //        if(KeyValue.getFamilyDelimiterIndex(this.name, 0, this.name.length)
1150 //            > 0) {
1151 //          this.name = stripColon(this.name);
1152 //        }
1153       } else {
1154         this.name = Bytes.readByteArray(in);
1155       }
1156       this.values.clear();
1157       setMaxVersions(in.readInt());
1158       int ordinal = in.readInt();
1159       setCompressionType(Compression.Algorithm.values()[ordinal]);
1160       setInMemory(in.readBoolean());
1161       setBloomFilterType(in.readBoolean() ? BloomType.ROW : BloomType.NONE);
1162       if (getBloomFilterType() != BloomType.NONE && version < 5) {
1163         // If a bloomFilter is enabled and the column descriptor is less than
1164         // version 5, we need to skip over it to read the rest of the column
1165         // descriptor. There are no BloomFilterDescriptors written to disk for
1166         // column descriptors with a version number >= 5
1167         throw new UnsupportedClassVersionError(this.getClass().getName() +
1168             " does not support backward compatibility with versions older " +
1169             "than version 5");
1170       }
1171       if (version > 1) {
1172         setBlockCacheEnabled(in.readBoolean());
1173       }
1174       if (version > 2) {
1175        setTimeToLive(in.readInt());
1176       }
1177     } else {
1178       // version 6+
1179       this.name = Bytes.readByteArray(in);
1180       this.values.clear();
1181       int numValues = in.readInt();
1182       for (int i = 0; i < numValues; i++) {
1183         ImmutableBytesWritable key = new ImmutableBytesWritable();
1184         ImmutableBytesWritable value = new ImmutableBytesWritable();
1185         key.readFields(in);
1186         value.readFields(in);
1187 
1188         // in version 8, the BloomFilter setting changed from bool to enum
1189         if (version < 8 && Bytes.toString(key.get()).equals(BLOOMFILTER)) {
1190           value.set(Bytes.toBytes(
1191               Boolean.getBoolean(Bytes.toString(value.get()))
1192                 ? BloomType.ROW.toString()
1193                 : BloomType.NONE.toString()));
1194         }
1195 
1196         values.put(key, value);
1197       }
1198       if (version == 6) {
1199         // Convert old values.
1200         setValue(COMPRESSION, Compression.Algorithm.NONE.getName());
1201       }
1202       String value = getValue(HConstants.VERSIONS);
1203       this.cachedMaxVersions = (value != null)?
1204           Integer.valueOf(value).intValue(): DEFAULT_VERSIONS;
1205       if (version > 10) {
1206         configuration.clear();
1207         int numConfigs = in.readInt();
1208         for (int i = 0; i < numConfigs; i++) {
1209           ImmutableBytesWritable key = new ImmutableBytesWritable();
1210           ImmutableBytesWritable val = new ImmutableBytesWritable();
1211           key.readFields(in);
1212           val.readFields(in);
1213           configuration.put(
1214             Bytes.toString(key.get(), key.getOffset(), key.getLength()),
1215             Bytes.toString(val.get(), val.getOffset(), val.getLength()));
1216         }
1217       }
1218     }
1219   }
1220 
1221   /**
1222    * @deprecated Writables are going away.  Use {@link #toByteArray()} instead.
1223    */
1224   @Deprecated
1225   public void write(DataOutput out) throws IOException {
1226     out.writeByte(COLUMN_DESCRIPTOR_VERSION);
1227     Bytes.writeByteArray(out, this.name);
1228     out.writeInt(values.size());
1229     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1230         values.entrySet()) {
1231       e.getKey().write(out);
1232       e.getValue().write(out);
1233     }
1234     out.writeInt(configuration.size());
1235     for (Map.Entry<String, String> e : configuration.entrySet()) {
1236       new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
1237       new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
1238     }
1239   }
1240 
1241   // Comparable
1242 
1243   public int compareTo(HColumnDescriptor o) {
1244     int result = Bytes.compareTo(this.name, o.getName());
1245     if (result == 0) {
1246       // punt on comparison for ordering, just calculate difference
1247       result = this.values.hashCode() - o.values.hashCode();
1248       if (result < 0)
1249         result = -1;
1250       else if (result > 0)
1251         result = 1;
1252     }
1253     if (result == 0) {
1254       result = this.configuration.hashCode() - o.configuration.hashCode();
1255       if (result < 0)
1256         result = -1;
1257       else if (result > 0)
1258         result = 1;
1259     }
1260     return result;
1261   }
1262 
1263   /**
1264    * @return This instance serialized with pb with pb magic prefix
1265    * @see #parseFrom(byte[])
1266    */
1267   public byte [] toByteArray() {
1268     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1269   }
1270 
1271   /**
1272    * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
1273    * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
1274    * @throws DeserializationException
1275    * @see #toByteArray()
1276    */
1277   public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
1278     if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
1279     int pblen = ProtobufUtil.lengthOfPBMagic();
1280     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1281     ColumnFamilySchema cfs = null;
1282     try {
1283       cfs = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1284     } catch (InvalidProtocolBufferException e) {
1285       throw new DeserializationException(e);
1286     }
1287     return convert(cfs);
1288   }
1289 
1290   /**
1291    * @param cfs
1292    * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
1293    */
1294   public static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
1295     // Use the empty constructor so we preserve the initial values set on construction for things
1296     // like maxVersion.  Otherwise, we pick up wrong values on deserialization which makes for
1297     // unrelated-looking test failures that are hard to trace back to here.
1298     HColumnDescriptor hcd = new HColumnDescriptor();
1299     hcd.name = cfs.getName().toByteArray();
1300     for (BytesBytesPair a: cfs.getAttributesList()) {
1301       hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1302     }
1303     for (NameStringPair a: cfs.getConfigurationList()) {
1304       hcd.setConfiguration(a.getName(), a.getValue());
1305     }
1306     return hcd;
1307   }
1308 
1309   /**
1310    * @return Convert this instance to a the pb column family type
1311    */
1312   public ColumnFamilySchema convert() {
1313     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1314     builder.setName(ByteStringer.wrap(getName()));
1315     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
1316       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1317       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1318       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1319       builder.addAttributes(aBuilder.build());
1320     }
1321     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1322       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1323       aBuilder.setName(e.getKey());
1324       aBuilder.setValue(e.getValue());
1325       builder.addConfiguration(aBuilder.build());
1326     }
1327     return builder.build();
1328   }
1329 
1330   /**
1331    * Getter for accessing the configuration value by key.
1332    */
1333   public String getConfigurationValue(String key) {
1334     return configuration.get(key);
1335   }
1336 
1337   /**
1338    * Getter for fetching an unmodifiable {@link #configuration} map.
1339    */
1340   public Map<String, String> getConfiguration() {
1341     // shallow pointer copy
1342     return Collections.unmodifiableMap(configuration);
1343   }
1344 
1345   /**
1346    * Setter for storing a configuration setting in {@link #configuration} map.
1347    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1348    * @param value String value. If null, removes the configuration.
1349    */
1350   public void setConfiguration(String key, String value) {
1351     if (value == null) {
1352       removeConfiguration(key);
1353     } else {
1354       configuration.put(key, value);
1355     }
1356   }
1357 
1358   /**
1359    * Remove a configuration setting represented by the key from the {@link #configuration} map.
1360    */
1361   public void removeConfiguration(final String key) {
1362     configuration.remove(key);
1363   }
1364 
1365   /**
1366    * Return the encryption algorithm in use by this family
1367    */
1368   @InterfaceStability.Unstable
1369   public String getEncryptionType() {
1370     return getValue(ENCRYPTION);
1371   }
1372 
1373   /**
1374    * Set the encryption algorithm for use with this family
1375    * @param algorithm
1376    */
1377   @InterfaceStability.Unstable
1378   public HColumnDescriptor setEncryptionType(String algorithm) {
1379     setValue(ENCRYPTION, algorithm);
1380     return this;
1381   }
1382 
  /** Return the raw crypto key attribute for the family, or null if not set  */
  @InterfaceStability.Unstable
  public byte[] getEncryptionKey() {
    return getValue(Bytes.toBytes(ENCRYPTION_KEY));
  }
1388 
  /** Set the raw crypto key attribute for the family.
   *  @return this (for chained invocation) */
  @InterfaceStability.Unstable
  public HColumnDescriptor setEncryptionKey(byte[] keyBytes) {
    setValue(Bytes.toBytes(ENCRYPTION_KEY), keyBytes);
    return this;
  }
1395 }