View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PrettyPrinter;
import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

import com.google.common.base.Preconditions;
import com.google.protobuf.InvalidProtocolBufferException;
50  
51  /**
52   * An HColumnDescriptor contains information about a column family such as the
53   * number of versions, compression settings, etc.
54   *
55   * It is used as input when creating a table or adding a column.
56   */
57  @InterfaceAudience.Public
58  @InterfaceStability.Evolving
59  public class HColumnDescriptor implements WritableComparable<HColumnDescriptor> {
  // For future backward compatibility

  // Version  3 was when column names become byte arrays and when we picked up
  // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
  // Version  5 was when bloom filter descriptors were removed.
  // Version  6 adds metadata as a map where keys and values are byte[].
  // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
  // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
  // Version  9 -- add data block encoding
  // Version 10 -- change metadata to standard type.
  // Version 11 -- add column family level configuration.
  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;

  // These constants are used as FileInfo keys (names of attributes stored in
  // the values map and serialized with the descriptor).
  public static final String COMPRESSION = "COMPRESSION";
  public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
  public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
      "ENCODE_ON_DISK";
  public static final String DATA_BLOCK_ENCODING =
      "DATA_BLOCK_ENCODING";
  public static final String BLOCKCACHE = "BLOCKCACHE";
  public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
  public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
  public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
  public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
  /**
   * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
   * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
   * family will be loaded into the cache as soon as the file is opened. These
   * loads will not count as cache misses.
   */
  public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN";

  /**
   * Size of storefile/hfile 'blocks'.  Default is {@link #DEFAULT_BLOCKSIZE}.
   * Use smaller block sizes for faster random-access at expense of larger
   * indices (more memory consumption).
   */
  public static final String BLOCKSIZE = "BLOCKSIZE";

  public static final String LENGTH = "LENGTH";
  public static final String TTL = "TTL";
  public static final String BLOOMFILTER = "BLOOMFILTER";
  public static final String FOREVER = "FOREVER";
  public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
  public static final byte[] REPLICATION_SCOPE_BYTES = Bytes.toBytes(REPLICATION_SCOPE);
  public static final String MIN_VERSIONS = "MIN_VERSIONS";
  public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
  public static final String COMPRESS_TAGS = "COMPRESS_TAGS";

  public static final String ENCRYPTION = "ENCRYPTION";
  public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";

  /**
   * Default compression type.
   */
  public static final String DEFAULT_COMPRESSION =
    Compression.Algorithm.NONE.getName();

  /**
   * Default value of the flag that enables data block encoding on disk, as
   * opposed to encoding in cache only. We encode blocks everywhere by default,
   * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
   */
  public static final boolean DEFAULT_ENCODE_ON_DISK = true;

  /** Default data block encoding algorithm. */
  public static final String DEFAULT_DATA_BLOCK_ENCODING =
      DataBlockEncoding.NONE.toString();

  /**
   * Default number of versions of a record to keep.
   * NOTE(review): resolved once at class-load time from a freshly created
   * HBaseConfiguration; later configuration changes will not be reflected.
   */
  public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt(
    "hbase.column.max.version", 1);

  /**
   * Default is not to keep a minimum of versions.
   */
  public static final int DEFAULT_MIN_VERSIONS = 0;

  /*
   * Cache here the HCD value.
   * Question: its OK to cache since when we're reenable, we create a new HCD?
   */
  private volatile Integer blocksize = null;

  /**
   * Default setting for whether to serve from memory or not.
   */
  public static final boolean DEFAULT_IN_MEMORY = false;

  /**
   * Default setting for preventing deleted from being collected immediately.
   */
  public static final KeepDeletedCells DEFAULT_KEEP_DELETED = KeepDeletedCells.FALSE;

  /**
   * Default setting for whether to use a block cache or not.
   */
  public static final boolean DEFAULT_BLOCKCACHE = true;

  /**
   * Default setting for whether to cache data blocks on write if block caching
   * is enabled.
   */
  public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;

  /**
   * Default setting for whether to cache index blocks on write if block
   * caching is enabled.
   */
  public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false;

  /**
   * Default size of blocks in files stored to the filesytem (hfiles).
   */
  public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE;

  /**
   * Default setting for whether or not to use bloomfilters.
   */
  public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();

  /**
   * Default setting for whether to cache bloom filter blocks on write if block
   * caching is enabled.
   */
  public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;

  /**
   * Default time to live of cell contents.
   */
  public static final int DEFAULT_TTL = HConstants.FOREVER;

  /**
   * Default scope.
   */
  public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;

  /**
   * Default setting for whether to evict cached blocks from the blockcache on
   * close.
   */
  public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;

  /**
   * Default compress tags along with any type of DataBlockEncoding.
   */
  public static final boolean DEFAULT_COMPRESS_TAGS = true;

  /*
   * Default setting for whether to prefetch blocks into the blockcache on open.
   */
  public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false;

  // Map of attribute name -> stringified default, used by shell/UI code to
  // suppress printing of unchanged attributes.
  private final static Map<String, String> DEFAULT_VALUES
    = new HashMap<String, String>();
  // Attribute keys users may not set directly (every defaulted key plus the
  // encryption keys).
  private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
    = new HashSet<ImmutableBytesWritable>();
  static {
      DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
      DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
      DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
      DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS));
      DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
      DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
      DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
      DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
      DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
      DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
      DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
      DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
      DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
      DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
      DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
      DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
      for (String s : DEFAULT_VALUES.keySet()) {
        RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
      }
      RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(ENCRYPTION)));
      RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(ENCRYPTION_KEY)));
  }

  // Sentinel meaning "cachedMaxVersions not yet computed".
  private static final int UNINITIALIZED = -1;

  // Column family name
  private byte [] name;

  // Column metadata: serialized attribute key/value pairs.
  private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
    new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();

  /**
   * A map which holds the configuration specific to the column family.
   * The keys of the map have the same names as config keys and override the defaults with
   * cf-specific settings. Example usage may be for compactions, etc.
   */
  private final Map<String, String> configuration = new HashMap<String, String>();

  /*
   * Cache the max versions rather than calculate it every time.
   */
  private int cachedMaxVersions = UNINITIALIZED;
264 
265   /**
266    * Default constructor. Must be present for Writable.
267    * @deprecated Used by Writables and Writables are going away.
268    */
269   @Deprecated
270   // Make this private rather than remove after deprecation period elapses.  Its needed by pb
271   // deserializations.
272   public HColumnDescriptor() {
273     this.name = null;
274   }
275 
276   /**
277    * Construct a column descriptor specifying only the family name
278    * The other attributes are defaulted.
279    *
280    * @param familyName Column family name. Must be 'printable' -- digit or
281    * letter -- and may not contain a <code>:<code>
282    */
283   public HColumnDescriptor(final String familyName) {
284     this(Bytes.toBytes(familyName));
285   }
286 
287   /**
288    * Construct a column descriptor specifying only the family name
289    * The other attributes are defaulted.
290    *
291    * @param familyName Column family name. Must be 'printable' -- digit or
292    * letter -- and may not contain a <code>:<code>
293    */
294   public HColumnDescriptor(final byte [] familyName) {
295     this (familyName == null || familyName.length <= 0?
296       HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS,
297       DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
298       DEFAULT_TTL, DEFAULT_BLOOMFILTER);
299   }
300 
301   /**
302    * Constructor.
303    * Makes a deep copy of the supplied descriptor.
304    * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
305    * @param desc The descriptor.
306    */
307   public HColumnDescriptor(HColumnDescriptor desc) {
308     super();
309     this.name = desc.name.clone();
310     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
311         desc.values.entrySet()) {
312       this.values.put(e.getKey(), e.getValue());
313     }
314     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
315       this.configuration.put(e.getKey(), e.getValue());
316     }
317     setMaxVersions(desc.getMaxVersions());
318   }
319 
320   /**
321    * Constructor
322    * @param familyName Column family name. Must be 'printable' -- digit or
323    * letter -- and may not contain a <code>:<code>
324    * @param maxVersions Maximum number of versions to keep
325    * @param compression Compression type
326    * @param inMemory If true, column data should be kept in an HRegionServer's
327    * cache
328    * @param blockCacheEnabled If true, MapFile blocks should be cached
329    * @param timeToLive Time-to-live of cell contents, in seconds
330    * (use HConstants.FOREVER for unlimited TTL)
331    * @param bloomFilter Bloom filter type for this column
332    *
333    * @throws IllegalArgumentException if passed a family name that is made of
334    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
335    * a <code>:</code>
336    * @throws IllegalArgumentException if the number of versions is &lt;= 0
337    * @deprecated use {@link #HColumnDescriptor(String)} and setters
338    */
339   @Deprecated
340   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
341       final String compression, final boolean inMemory,
342       final boolean blockCacheEnabled,
343       final int timeToLive, final String bloomFilter) {
344     this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
345       DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE);
346   }
347 
348   /**
349    * Constructor
350    * @param familyName Column family name. Must be 'printable' -- digit or
351    * letter -- and may not contain a <code>:<code>
352    * @param maxVersions Maximum number of versions to keep
353    * @param compression Compression type
354    * @param inMemory If true, column data should be kept in an HRegionServer's
355    * cache
356    * @param blockCacheEnabled If true, MapFile blocks should be cached
357    * @param blocksize Block size to use when writing out storefiles.  Use
358    * smaller block sizes for faster random-access at expense of larger indices
359    * (more memory consumption).  Default is usually 64k.
360    * @param timeToLive Time-to-live of cell contents, in seconds
361    * (use HConstants.FOREVER for unlimited TTL)
362    * @param bloomFilter Bloom filter type for this column
363    * @param scope The scope tag for this column
364    *
365    * @throws IllegalArgumentException if passed a family name that is made of
366    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
367    * a <code>:</code>
368    * @throws IllegalArgumentException if the number of versions is &lt;= 0
369    * @deprecated use {@link #HColumnDescriptor(String)} and setters
370    */
371   @Deprecated
372   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
373       final String compression, final boolean inMemory,
374       final boolean blockCacheEnabled, final int blocksize,
375       final int timeToLive, final String bloomFilter, final int scope) {
376     this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED,
377         compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING,
378         inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter,
379         scope);
380   }
381 
  /**
   * Constructor
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   * @param minVersions Minimum number of versions to keep
   * @param maxVersions Maximum number of versions to keep
   * @param keepDeletedCells Whether to retain deleted cells until they expire
   *        up to maxVersions versions.
   * @param compression Compression type
   * @param encodeOnDisk whether to use the specified data block encoding
   *        on disk. If false, the encoding will be used in cache only.
   *        NOTE(review): this parameter is ignored by the body below;
   *        ENCODE_ON_DISK is deprecated.
   * @param dataBlockEncoding data block encoding
   * @param inMemory If true, column data should be kept in an HRegionServer's
   * cache
   * @param blockCacheEnabled If true, MapFile blocks should be cached
   * @param blocksize Block size to use when writing out storefiles.  Use
   * smaller blocksizes for faster random-access at expense of larger indices
   * (more memory consumption).  Default is usually 64k.
   * @param timeToLive Time-to-live of cell contents, in seconds
   * (use HConstants.FOREVER for unlimited TTL)
   * @param bloomFilter Bloom filter type for this column
   * @param scope The scope tag for this column
   *
   * @throws IllegalArgumentException if passed a family name that is made of
   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
   * a <code>:</code>
   * @throws IllegalArgumentException if the number of versions is &lt;= 0
   * @deprecated use {@link #HColumnDescriptor(String)} and setters
   */
  @Deprecated
  public HColumnDescriptor(final byte[] familyName, final int minVersions,
      final int maxVersions, final KeepDeletedCells keepDeletedCells,
      final String compression, final boolean encodeOnDisk,
      final String dataBlockEncoding, final boolean inMemory,
      final boolean blockCacheEnabled, final int blocksize,
      final int timeToLive, final String bloomFilter, final int scope) {
    // Throws IllegalArgumentException if the name is illegal.
    isLegalFamilyName(familyName);
    this.name = familyName;

    if (maxVersions <= 0) {
      // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
      // Until there is support, consider 0 or < 0 -- a configuration error.
      throw new IllegalArgumentException("Maximum versions must be positive");
    }

    if (minVersions > 0) {
      if (timeToLive == HConstants.FOREVER) {
        throw new IllegalArgumentException("Minimum versions requires TTL.");
      }
      if (minVersions >= maxVersions) {
        throw new IllegalArgumentException("Minimum versions must be < "
            + "maximum versions.");
      }
    }

    // NOTE(review): call order matters here -- setMaxVersions() validates
    // against getMinVersions(), which still reads the default (0) until
    // setMinVersions() runs on the following line.
    setMaxVersions(maxVersions);
    setMinVersions(minVersions);
    setKeepDeletedCells(keepDeletedCells);
    setInMemory(inMemory);
    setBlockCacheEnabled(blockCacheEnabled);
    setTimeToLive(timeToLive);
    // NOTE(review): these valueOf lookups upper-case with the JVM default
    // locale; enum lookup can fail under unusual case-mapping locales.
    setCompressionType(Compression.Algorithm.
      valueOf(compression.toUpperCase()));
    setDataBlockEncoding(DataBlockEncoding.
        valueOf(dataBlockEncoding.toUpperCase()));
    setBloomFilterType(BloomType.
      valueOf(bloomFilter.toUpperCase()));
    setBlocksize(blocksize);
    setScope(scope);
  }
452 
453   /**
454    * @param b Family name.
455    * @return <code>b</code>
456    * @throws IllegalArgumentException If not null and not a legitimate family
457    * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
458    * <code>b</code> can be null when deserializing).  Cannot start with a '.'
459    * either. Also Family can not be an empty value or equal "recovered.edits".
460    */
461   public static byte [] isLegalFamilyName(final byte [] b) {
462     if (b == null) {
463       return b;
464     }
465     Preconditions.checkArgument(b.length != 0, "Family name can not be empty");
466     if (b[0] == '.') {
467       throw new IllegalArgumentException("Family names cannot start with a " +
468         "period: " + Bytes.toString(b));
469     }
470     for (int i = 0; i < b.length; i++) {
471       if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
472         throw new IllegalArgumentException("Illegal character <" + b[i] +
473           ">. Family names cannot contain control characters or colons: " +
474           Bytes.toString(b));
475       }
476     }
477     byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
478     if (Bytes.equals(recoveredEdit, b)) {
479       throw new IllegalArgumentException("Family name cannot be: " +
480           HConstants.RECOVERED_EDITS_DIR);
481     }
482     return b;
483   }
484 
485   /**
486    * @return Name of this column family
487    */
488   public byte [] getName() {
489     return name;
490   }
491 
492   /**
493    * @return Name of this column family
494    */
495   public String getNameAsString() {
496     return Bytes.toString(this.name);
497   }
498 
499   /**
500    * @param key The key.
501    * @return The value.
502    */
503   public byte[] getValue(byte[] key) {
504     ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key));
505     if (ibw == null)
506       return null;
507     return ibw.get();
508   }
509 
510   /**
511    * @param key The key.
512    * @return The value as a string.
513    */
514   public String getValue(String key) {
515     byte[] value = getValue(Bytes.toBytes(key));
516     if (value == null)
517       return null;
518     return Bytes.toString(value);
519   }
520 
521   /**
522    * @return All values.
523    */
524   public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
525     // shallow pointer copy
526     return Collections.unmodifiableMap(values);
527   }
528 
529   /**
530    * @param key The key.
531    * @param value The value.
532    * @return this (for chained invocation)
533    */
534   public HColumnDescriptor setValue(byte[] key, byte[] value) {
535     values.put(new ImmutableBytesWritable(key),
536       new ImmutableBytesWritable(value));
537     return this;
538   }
539 
540   /**
541    * @param key Key whose key and value we're to remove from HCD parameters.
542    */
543   public void remove(final byte [] key) {
544     values.remove(new ImmutableBytesWritable(key));
545   }
546 
547   /**
548    * @param key The key.
549    * @param value The value.
550    * @return this (for chained invocation)
551    */
552   public HColumnDescriptor setValue(String key, String value) {
553     if (value == null) {
554       remove(Bytes.toBytes(key));
555     } else {
556       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
557     }
558     return this;
559   }
560 
561   /** @return compression type being used for the column family */
562   public Compression.Algorithm getCompression() {
563     String n = getValue(COMPRESSION);
564     if (n == null) {
565       return Compression.Algorithm.NONE;
566     }
567     return Compression.Algorithm.valueOf(n.toUpperCase());
568   }
569 
570   /** @return compression type being used for the column family for major
571       compression */
572   public Compression.Algorithm getCompactionCompression() {
573     String n = getValue(COMPRESSION_COMPACT);
574     if (n == null) {
575       return getCompression();
576     }
577     return Compression.Algorithm.valueOf(n.toUpperCase());
578   }
579 
580   /** @return maximum number of versions */
581   public int getMaxVersions() {
582     if (this.cachedMaxVersions == UNINITIALIZED) {
583       String v = getValue(HConstants.VERSIONS);
584       this.cachedMaxVersions = Integer.parseInt(v);
585     }
586     return this.cachedMaxVersions;
587   }
588 
589   /**
590    * @param maxVersions maximum number of versions
591    * @return this (for chained invocation)
592    */
593   public HColumnDescriptor setMaxVersions(int maxVersions) {
594     if (maxVersions <= 0) {
595       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
596       // Until there is support, consider 0 or < 0 -- a configuration error.
597       throw new IllegalArgumentException("Maximum versions must be positive");
598     }    
599     if (maxVersions < this.getMinVersions()) {      
600         throw new IllegalArgumentException("Set MaxVersion to " + maxVersions
601             + " while minVersion is " + this.getMinVersions()
602             + ". Maximum versions must be >= minimum versions ");      
603     }
604     setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
605     cachedMaxVersions = maxVersions;
606     return this;
607   }
608 
609   /**
610    * @return The storefile/hfile blocksize for this column family.
611    */
612   public synchronized int getBlocksize() {
613     if (this.blocksize == null) {
614       String value = getValue(BLOCKSIZE);
615       this.blocksize = (value != null)?
616         Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
617     }
618     return this.blocksize.intValue();
619   }
620 
621   /**
622    * @param s Blocksize to use when writing out storefiles/hfiles on this
623    * column family.
624    * @return this (for chained invocation)
625    */
626   public HColumnDescriptor setBlocksize(int s) {
627     setValue(BLOCKSIZE, Integer.toString(s));
628     this.blocksize = null;
629     return this;
630   }
631 
632   /**
633    * @return Compression type setting.
634    */
635   public Compression.Algorithm getCompressionType() {
636     return getCompression();
637   }
638 
639   /**
640    * Compression types supported in hbase.
641    * LZO is not bundled as part of the hbase distribution.
642    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
643    * for how to enable it.
644    * @param type Compression type setting.
645    * @return this (for chained invocation)
646    */
647   public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
648     return setValue(COMPRESSION, type.getName().toUpperCase());
649   }
650 
651   /** @return data block encoding algorithm used on disk */
652   @Deprecated
653   public DataBlockEncoding getDataBlockEncodingOnDisk() {
654     return getDataBlockEncoding();
655   }
656 
657   /**
658    * This method does nothing now. Flag ENCODE_ON_DISK is not used
659    * any more. Data blocks have the same encoding in cache as on disk.
660    * @return this (for chained invocation)
661    */
662   @Deprecated
663   public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
664     return this;
665   }
666 
667   /**
668    * @return the data block encoding algorithm used in block cache and
669    *         optionally on disk
670    */
671   public DataBlockEncoding getDataBlockEncoding() {
672     String type = getValue(DATA_BLOCK_ENCODING);
673     if (type == null) {
674       type = DEFAULT_DATA_BLOCK_ENCODING;
675     }
676     return DataBlockEncoding.valueOf(type);
677   }
678 
679   /**
680    * Set data block encoding algorithm used in block cache.
681    * @param type What kind of data block encoding will be used.
682    * @return this (for chained invocation)
683    */
684   public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
685     String name;
686     if (type != null) {
687       name = type.toString();
688     } else {
689       name = DataBlockEncoding.NONE.toString();
690     }
691     return setValue(DATA_BLOCK_ENCODING, name);
692   }
693 
694   /**
695    * Set whether the tags should be compressed along with DataBlockEncoding. When no
696    * DataBlockEncoding is been used, this is having no effect.
697    * 
698    * @param compressTags
699    * @return this (for chained invocation)
700    */
701   public HColumnDescriptor setCompressTags(boolean compressTags) {
702     return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
703   }
704 
705   /**
706    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
707    *         DataBlockEncoding is been used, this is having no effect.
708    */
709   public boolean shouldCompressTags() {
710     String compressTagsStr = getValue(COMPRESS_TAGS);
711     boolean compressTags = DEFAULT_COMPRESS_TAGS;
712     if (compressTagsStr != null) {
713       compressTags = Boolean.valueOf(compressTagsStr);
714     }
715     return compressTags;
716   }
717 
718   /**
719    * @return Compression type setting.
720    */
721   public Compression.Algorithm getCompactionCompressionType() {
722     return getCompactionCompression();
723   }
724 
725   /**
726    * Compression types supported in hbase.
727    * LZO is not bundled as part of the hbase distribution.
728    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
729    * for how to enable it.
730    * @param type Compression type setting.
731    * @return this (for chained invocation)
732    */
733   public HColumnDescriptor setCompactionCompressionType(
734       Compression.Algorithm type) {
735     return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase());
736   }
737 
738   /**
739    * @return True if we are to keep all in use HRegionServer cache.
740    */
741   public boolean isInMemory() {
742     String value = getValue(HConstants.IN_MEMORY);
743     if (value != null)
744       return Boolean.valueOf(value).booleanValue();
745     return DEFAULT_IN_MEMORY;
746   }
747 
748   /**
749    * @param inMemory True if we are to keep all values in the HRegionServer
750    * cache
751    * @return this (for chained invocation)
752    */
753   public HColumnDescriptor setInMemory(boolean inMemory) {
754     return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
755   }
756 
757   public KeepDeletedCells getKeepDeletedCells() {
758     String value = getValue(KEEP_DELETED_CELLS);
759     if (value != null) {
760       // toUpperCase for backwards compatibility
761       return KeepDeletedCells.valueOf(value.toUpperCase());
762     }
763     return DEFAULT_KEEP_DELETED;
764   }
765 
766   /**
767    * @param keepDeletedCells True if deleted rows should not be collected
768    * immediately.
769    * @return this (for chained invocation)
770    * @deprecated use {@link #setKeepDeletedCells(KeepDeletedCells)}
771    */
772   @Deprecated
773   public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) {
774     return setValue(KEEP_DELETED_CELLS, (keepDeletedCells ? KeepDeletedCells.TRUE
775         : KeepDeletedCells.FALSE).toString());
776   }
777 
778   /**
779    * @param keepDeletedCells True if deleted rows should not be collected
780    * immediately.
781    * @return this (for chained invocation)
782    */
783   public HColumnDescriptor setKeepDeletedCells(KeepDeletedCells keepDeletedCells) {
784     return setValue(KEEP_DELETED_CELLS, keepDeletedCells.toString());
785   }
786 
787   /**
788    * @return Time-to-live of cell contents, in seconds.
789    */
790   public int getTimeToLive() {
791     String value = getValue(TTL);
792     return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL;
793   }
794 
795   /**
796    * @param timeToLive Time-to-live of cell contents, in seconds.
797    * @return this (for chained invocation)
798    */
799   public HColumnDescriptor setTimeToLive(int timeToLive) {
800     return setValue(TTL, Integer.toString(timeToLive));
801   }
802 
803   /**
804    * @return The minimum number of versions to keep.
805    */
806   public int getMinVersions() {
807     String value = getValue(MIN_VERSIONS);
808     return (value != null)? Integer.valueOf(value).intValue(): 0;
809   }
810 
811   /**
812    * @param minVersions The minimum number of versions to keep.
813    * (used when timeToLive is set)
814    * @return this (for chained invocation)
815    */
816   public HColumnDescriptor setMinVersions(int minVersions) {
817     return setValue(MIN_VERSIONS, Integer.toString(minVersions));
818   }
819 
820   /**
821    * @return True if MapFile blocks should be cached.
822    */
823   public boolean isBlockCacheEnabled() {
824     String value = getValue(BLOCKCACHE);
825     if (value != null)
826       return Boolean.valueOf(value).booleanValue();
827     return DEFAULT_BLOCKCACHE;
828   }
829 
830   /**
831    * @param blockCacheEnabled True if MapFile blocks should be cached.
832    * @return this (for chained invocation)
833    */
834   public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
835     return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
836   }
837 
838   /**
839    * @return bloom filter type used for new StoreFiles in ColumnFamily
840    */
841   public BloomType getBloomFilterType() {
842     String n = getValue(BLOOMFILTER);
843     if (n == null) {
844       n = DEFAULT_BLOOMFILTER;
845     }
846     return BloomType.valueOf(n.toUpperCase());
847   }
848 
849   /**
850    * @param bt bloom filter type
851    * @return this (for chained invocation)
852    */
853   public HColumnDescriptor setBloomFilterType(final BloomType bt) {
854     return setValue(BLOOMFILTER, bt.toString());
855   }
856 
857    /**
858     * @return the scope tag
859     */
860   public int getScope() {
861     byte[] value = getValue(REPLICATION_SCOPE_BYTES);
862     if (value != null) {
863       return Integer.valueOf(Bytes.toString(value));
864     }
865     return DEFAULT_REPLICATION_SCOPE;
866   }
867 
868  /**
869   * @param scope the scope tag
870   * @return this (for chained invocation)
871   */
872   public HColumnDescriptor setScope(int scope) {
873     return setValue(REPLICATION_SCOPE, Integer.toString(scope));
874   }
875 
876   /**
877    * @return true if we should cache data blocks on write
878    */
879   public boolean shouldCacheDataOnWrite() {
880     String value = getValue(CACHE_DATA_ON_WRITE);
881     if (value != null) {
882       return Boolean.valueOf(value).booleanValue();
883     }
884     return DEFAULT_CACHE_DATA_ON_WRITE;
885   }
886 
887   /**
888    * @param value true if we should cache data blocks on write
889    * @return this (for chained invocation)
890    */
891   public HColumnDescriptor setCacheDataOnWrite(boolean value) {
892     return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
893   }
894 
895   /**
896    * @return true if we should cache index blocks on write
897    */
898   public boolean shouldCacheIndexesOnWrite() {
899     String value = getValue(CACHE_INDEX_ON_WRITE);
900     if (value != null) {
901       return Boolean.valueOf(value).booleanValue();
902     }
903     return DEFAULT_CACHE_INDEX_ON_WRITE;
904   }
905 
906   /**
907    * @param value true if we should cache index blocks on write
908    * @return this (for chained invocation)
909    */
910   public HColumnDescriptor setCacheIndexesOnWrite(boolean value) {
911     return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
912   }
913 
914   /**
915    * @return true if we should cache bloomfilter blocks on write
916    */
917   public boolean shouldCacheBloomsOnWrite() {
918     String value = getValue(CACHE_BLOOMS_ON_WRITE);
919     if (value != null) {
920       return Boolean.valueOf(value).booleanValue();
921     }
922     return DEFAULT_CACHE_BLOOMS_ON_WRITE;
923   }
924 
925   /**
926    * @param value true if we should cache bloomfilter blocks on write
927    * @return this (for chained invocation)
928    */
929   public HColumnDescriptor setCacheBloomsOnWrite(boolean value) {
930     return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
931   }
932 
933   /**
934    * @return true if we should evict cached blocks from the blockcache on
935    * close
936    */
937   public boolean shouldEvictBlocksOnClose() {
938     String value = getValue(EVICT_BLOCKS_ON_CLOSE);
939     if (value != null) {
940       return Boolean.valueOf(value).booleanValue();
941     }
942     return DEFAULT_EVICT_BLOCKS_ON_CLOSE;
943   }
944 
945   /**
946    * @param value true if we should evict cached blocks from the blockcache on
947    * close
948    * @return this (for chained invocation)
949    */
950   public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
951     return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
952   }
953 
954   /**
955    * @return true if we should prefetch blocks into the blockcache on open
956    */
957   public boolean shouldPrefetchBlocksOnOpen() {
958     String value = getValue(PREFETCH_BLOCKS_ON_OPEN);
959    if (value != null) {
960       return Boolean.valueOf(value).booleanValue();
961     }
962     return DEFAULT_PREFETCH_BLOCKS_ON_OPEN;
963   }
964 
965   /**
966    * @param value true if we should prefetch blocks into the blockcache on open
967    * @return this (for chained invocation)
968    */
969   public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) {
970     return setValue(PREFETCH_BLOCKS_ON_OPEN, Boolean.toString(value));
971   }
972 
973   /**
974    * @see java.lang.Object#toString()
975    */
976   @Override
977   public String toString() {
978     StringBuilder s = new StringBuilder();
979 
980     s.append('{');
981     s.append(HConstants.NAME);
982     s.append(" => '");
983     s.append(Bytes.toString(name));
984     s.append("'");
985     s.append(getValues(true));
986     s.append('}');
987     return s.toString();
988   }
989 
990   /**
991    * @return Column family descriptor with only the customized attributes.
992    */
993   public String toStringCustomizedValues() {
994     StringBuilder s = new StringBuilder();
995     s.append('{');
996     s.append(HConstants.NAME);
997     s.append(" => '");
998     s.append(Bytes.toString(name));
999     s.append("'");
1000     s.append(getValues(false));
1001     s.append('}');
1002     return s.toString();
1003   }
1004 
1005   private StringBuilder getValues(boolean printDefaults) {
1006     StringBuilder s = new StringBuilder();
1007 
1008     boolean hasConfigKeys = false;
1009 
1010     // print all reserved keys first
1011     for (ImmutableBytesWritable k : values.keySet()) {
1012       if (!RESERVED_KEYWORDS.contains(k)) {
1013         hasConfigKeys = true;
1014         continue;
1015       }
1016       String key = Bytes.toString(k.get());
1017       String value = Bytes.toStringBinary(values.get(k).get());
1018       if (printDefaults
1019           || !DEFAULT_VALUES.containsKey(key)
1020           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
1021         s.append(", ");
1022         s.append(key);
1023         s.append(" => ");
1024         s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
1025       }
1026     }
1027 
1028     // print all non-reserved, advanced config keys as a separate subset
1029     if (hasConfigKeys) {
1030       s.append(", ");
1031       s.append(HConstants.METADATA).append(" => ");
1032       s.append('{');
1033       boolean printComma = false;
1034       for (ImmutableBytesWritable k : values.keySet()) {
1035         if (RESERVED_KEYWORDS.contains(k)) {
1036           continue;
1037         }
1038         String key = Bytes.toString(k.get());
1039         String value = Bytes.toStringBinary(values.get(k).get());
1040         if (printComma) {
1041           s.append(", ");
1042         }
1043         printComma = true;
1044         s.append('\'').append(key).append('\'');
1045         s.append(" => ");
1046         s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
1047       }
1048       s.append('}');
1049     }
1050 
1051     if (!configuration.isEmpty()) {
1052       s.append(", ");
1053       s.append(HConstants.CONFIGURATION).append(" => ");
1054       s.append('{');
1055       boolean printCommaForConfiguration = false;
1056       for (Map.Entry<String, String> e : configuration.entrySet()) {
1057         if (printCommaForConfiguration) s.append(", ");
1058         printCommaForConfiguration = true;
1059         s.append('\'').append(e.getKey()).append('\'');
1060         s.append(" => ");
1061         s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\'');
1062       }
1063       s.append("}");
1064     }
1065     return s;
1066   }
1067 
1068   public static Unit getUnit(String key) {
1069     Unit unit;
1070       /* TTL for now, we can add more as we neeed */
1071     if (key.equals(HColumnDescriptor.TTL)) {
1072       unit = Unit.TIME_INTERVAL;
1073     } else {
1074       unit = Unit.NONE;
1075     }
1076     return unit;
1077   }
1078 
  /**
   * @return an unmodifiable view of the per-attribute default values
   */
  public static Map<String, String> getDefaultValues() {
    return Collections.unmodifiableMap(DEFAULT_VALUES);
  }
1082 
1083   /**
1084    * @see java.lang.Object#equals(java.lang.Object)
1085    */
1086   @Override
1087   public boolean equals(Object obj) {
1088     if (this == obj) {
1089       return true;
1090     }
1091     if (obj == null) {
1092       return false;
1093     }
1094     if (!(obj instanceof HColumnDescriptor)) {
1095       return false;
1096     }
1097     return compareTo((HColumnDescriptor)obj) == 0;
1098   }
1099 
1100   /**
1101    * @see java.lang.Object#hashCode()
1102    */
1103   @Override
1104   public int hashCode() {
1105     int result = Bytes.hashCode(this.name);
1106     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
1107     result ^= values.hashCode();
1108     result ^= configuration.hashCode();
1109     return result;
1110   }
1111 
1112   /**
1113    * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
1114    */
1115   @Deprecated
1116   public void readFields(DataInput in) throws IOException {
1117     int version = in.readByte();
1118     if (version < 6) {
1119       if (version <= 2) {
1120         Text t = new Text();
1121         t.readFields(in);
1122         this.name = t.getBytes();
1123 //        if(KeyValue.getFamilyDelimiterIndex(this.name, 0, this.name.length)
1124 //            > 0) {
1125 //          this.name = stripColon(this.name);
1126 //        }
1127       } else {
1128         this.name = Bytes.readByteArray(in);
1129       }
1130       this.values.clear();
1131       setMaxVersions(in.readInt());
1132       int ordinal = in.readInt();
1133       setCompressionType(Compression.Algorithm.values()[ordinal]);
1134       setInMemory(in.readBoolean());
1135       setBloomFilterType(in.readBoolean() ? BloomType.ROW : BloomType.NONE);
1136       if (getBloomFilterType() != BloomType.NONE && version < 5) {
1137         // If a bloomFilter is enabled and the column descriptor is less than
1138         // version 5, we need to skip over it to read the rest of the column
1139         // descriptor. There are no BloomFilterDescriptors written to disk for
1140         // column descriptors with a version number >= 5
1141         throw new UnsupportedClassVersionError(this.getClass().getName() +
1142             " does not support backward compatibility with versions older " +
1143             "than version 5");
1144       }
1145       if (version > 1) {
1146         setBlockCacheEnabled(in.readBoolean());
1147       }
1148       if (version > 2) {
1149        setTimeToLive(in.readInt());
1150       }
1151     } else {
1152       // version 6+
1153       this.name = Bytes.readByteArray(in);
1154       this.values.clear();
1155       int numValues = in.readInt();
1156       for (int i = 0; i < numValues; i++) {
1157         ImmutableBytesWritable key = new ImmutableBytesWritable();
1158         ImmutableBytesWritable value = new ImmutableBytesWritable();
1159         key.readFields(in);
1160         value.readFields(in);
1161 
1162         // in version 8, the BloomFilter setting changed from bool to enum
1163         if (version < 8 && Bytes.toString(key.get()).equals(BLOOMFILTER)) {
1164           value.set(Bytes.toBytes(
1165               Boolean.getBoolean(Bytes.toString(value.get()))
1166                 ? BloomType.ROW.toString()
1167                 : BloomType.NONE.toString()));
1168         }
1169 
1170         values.put(key, value);
1171       }
1172       if (version == 6) {
1173         // Convert old values.
1174         setValue(COMPRESSION, Compression.Algorithm.NONE.getName());
1175       }
1176       String value = getValue(HConstants.VERSIONS);
1177       this.cachedMaxVersions = (value != null)?
1178           Integer.valueOf(value).intValue(): DEFAULT_VERSIONS;
1179       if (version > 10) {
1180         configuration.clear();
1181         int numConfigs = in.readInt();
1182         for (int i = 0; i < numConfigs; i++) {
1183           ImmutableBytesWritable key = new ImmutableBytesWritable();
1184           ImmutableBytesWritable val = new ImmutableBytesWritable();
1185           key.readFields(in);
1186           val.readFields(in);
1187           configuration.put(
1188             Bytes.toString(key.get(), key.getOffset(), key.getLength()),
1189             Bytes.toString(val.get(), val.getOffset(), val.getLength()));
1190         }
1191       }
1192     }
1193   }
1194 
1195   /**
1196    * @deprecated Writables are going away.  Use {@link #toByteArray()} instead.
1197    */
1198   @Deprecated
1199   public void write(DataOutput out) throws IOException {
1200     out.writeByte(COLUMN_DESCRIPTOR_VERSION);
1201     Bytes.writeByteArray(out, this.name);
1202     out.writeInt(values.size());
1203     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1204         values.entrySet()) {
1205       e.getKey().write(out);
1206       e.getValue().write(out);
1207     }
1208     out.writeInt(configuration.size());
1209     for (Map.Entry<String, String> e : configuration.entrySet()) {
1210       new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
1211       new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
1212     }
1213   }
1214 
1215   // Comparable
1216 
1217   public int compareTo(HColumnDescriptor o) {
1218     int result = Bytes.compareTo(this.name, o.getName());
1219     if (result == 0) {
1220       // punt on comparison for ordering, just calculate difference
1221       result = this.values.hashCode() - o.values.hashCode();
1222       if (result < 0)
1223         result = -1;
1224       else if (result > 0)
1225         result = 1;
1226     }
1227     if (result == 0) {
1228       result = this.configuration.hashCode() - o.configuration.hashCode();
1229       if (result < 0)
1230         result = -1;
1231       else if (result > 0)
1232         result = 1;
1233     }
1234     return result;
1235   }
1236 
1237   /**
1238    * @return This instance serialized with pb with pb magic prefix
1239    * @see #parseFrom(byte[])
1240    */
1241   public byte [] toByteArray() {
1242     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1243   }
1244 
1245   /**
1246    * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
1247    * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
1248    * @throws DeserializationException
1249    * @see #toByteArray()
1250    */
1251   public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
1252     if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
1253     int pblen = ProtobufUtil.lengthOfPBMagic();
1254     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1255     ColumnFamilySchema cfs = null;
1256     try {
1257       cfs = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1258     } catch (InvalidProtocolBufferException e) {
1259       throw new DeserializationException(e);
1260     }
1261     return convert(cfs);
1262   }
1263 
1264   /**
1265    * @param cfs
1266    * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
1267    */
1268   public static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
1269     // Use the empty constructor so we preserve the initial values set on construction for things
1270     // like maxVersion.  Otherwise, we pick up wrong values on deserialization which makes for
1271     // unrelated-looking test failures that are hard to trace back to here.
1272     HColumnDescriptor hcd = new HColumnDescriptor();
1273     hcd.name = cfs.getName().toByteArray();
1274     for (BytesBytesPair a: cfs.getAttributesList()) {
1275       hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1276     }
1277     for (NameStringPair a: cfs.getConfigurationList()) {
1278       hcd.setConfiguration(a.getName(), a.getValue());
1279     }
1280     return hcd;
1281   }
1282 
1283   /**
1284    * @return Convert this instance to a the pb column family type
1285    */
1286   public ColumnFamilySchema convert() {
1287     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1288     builder.setName(ByteStringer.wrap(getName()));
1289     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
1290       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1291       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1292       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1293       builder.addAttributes(aBuilder.build());
1294     }
1295     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1296       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1297       aBuilder.setName(e.getKey());
1298       aBuilder.setValue(e.getValue());
1299       builder.addConfiguration(aBuilder.build());
1300     }
1301     return builder.build();
1302   }
1303 
1304   /**
1305    * Getter for accessing the configuration value by key.
1306    */
1307   public String getConfigurationValue(String key) {
1308     return configuration.get(key);
1309   }
1310 
1311   /**
1312    * Getter for fetching an unmodifiable {@link #configuration} map.
1313    */
1314   public Map<String, String> getConfiguration() {
1315     // shallow pointer copy
1316     return Collections.unmodifiableMap(configuration);
1317   }
1318 
1319   /**
1320    * Setter for storing a configuration setting in {@link #configuration} map.
1321    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1322    * @param value String value. If null, removes the configuration.
1323    */
1324   public void setConfiguration(String key, String value) {
1325     if (value == null) {
1326       removeConfiguration(key);
1327     } else {
1328       configuration.put(key, value);
1329     }
1330   }
1331 
1332   /**
1333    * Remove a configuration setting represented by the key from the {@link #configuration} map.
1334    */
1335   public void removeConfiguration(final String key) {
1336     configuration.remove(key);
1337   }
1338 
  /**
   * Return the encryption algorithm in use by this family
   * (null when encryption has not been configured).
   */
  public String getEncryptionType() {
    return getValue(ENCRYPTION);
  }
1345 
1346   /**
1347    * Set the encryption algorithm for use with this family
1348    * @param algorithm
1349    */
1350   public HColumnDescriptor setEncryptionType(String algorithm) {
1351     setValue(ENCRYPTION, algorithm);
1352     return this;
1353   }
1354 
  /** Return the raw crypto key attribute for the family, or null if not set  */
  public byte[] getEncryptionKey() {
    return getValue(Bytes.toBytes(ENCRYPTION_KEY));
  }
1359 
1360   /** Set the raw crypto key attribute for the family */
1361   public HColumnDescriptor setEncryptionKey(byte[] keyBytes) {
1362     setValue(Bytes.toBytes(ENCRYPTION_KEY), keyBytes);
1363     return this;
1364   }
1365 }