
1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.DataInput;
22  import java.io.DataOutput;
23  import java.io.IOException;
24  import java.util.Collections;
25  import java.util.HashMap;
26  import java.util.HashSet;
27  import java.util.Map;
28  import java.util.Set;
29  
30  import org.apache.hadoop.hbase.classification.InterfaceAudience;
31  import org.apache.hadoop.hbase.classification.InterfaceStability;
32  import org.apache.hadoop.hbase.exceptions.DeserializationException;
33  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
34  import org.apache.hadoop.hbase.io.compress.Compression;
35  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
36  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
37  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
38  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
39  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
40  import org.apache.hadoop.hbase.regionserver.BloomType;
41  import org.apache.hadoop.hbase.util.Bytes;
42  import org.apache.hadoop.hbase.util.PrettyPrinter;
43  import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
44  import org.apache.hadoop.io.Text;
45  import org.apache.hadoop.io.WritableComparable;
46  
47  import com.google.common.base.Preconditions;
48  import org.apache.hadoop.hbase.util.ByteStringer;
49  import com.google.protobuf.InvalidProtocolBufferException;
50  
51  /**
52   * An HColumnDescriptor contains information about a column family such as the
53   * number of versions, compression settings, etc.
54   *
55   * It is used as input when creating a table or adding a column.
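 *
 * <p>A minimal, illustrative sketch of typical use (the table name "t" and the
 * family name "f" below are placeholders, not part of this class):
 * <pre>
 *   HColumnDescriptor hcd = new HColumnDescriptor("f");
 *   hcd.setMaxVersions(3);
 *   hcd.setCompressionType(Compression.Algorithm.GZ);
 *   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t"));
 *   htd.addFamily(hcd);
 * </pre>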
56   */
57  @InterfaceAudience.Public
58  @InterfaceStability.Evolving
59  public class HColumnDescriptor implements WritableComparable<HColumnDescriptor> {
60    // For future backward compatibility
61  
62   // Version  3 was when column names became byte arrays and when we picked up
63   // the Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
64    // Version  5 was when bloom filter descriptors were removed.
65    // Version  6 adds metadata as a map where keys and values are byte[].
66    // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
67    // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
68    // Version  9 -- add data block encoding
69    // Version 10 -- change metadata to standard type.
70    // Version 11 -- add column family level configuration.
71    private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;
72  
73    // These constants are used as FileInfo keys
74    public static final String COMPRESSION = "COMPRESSION";
75    public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
76    public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
77        "ENCODE_ON_DISK";
78    public static final String DATA_BLOCK_ENCODING =
79        "DATA_BLOCK_ENCODING";
80    /**
81     * Key for the BLOCKCACHE attribute.
82     * A more exact name would be CACHE_DATA_ON_READ because this flag sets whether or not we
83     * cache DATA blocks.  We always cache INDEX and BLOOM blocks; caching these blocks cannot be
84     * disabled.
85     */
86    public static final String BLOCKCACHE = "BLOCKCACHE";
87    public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
88    public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
89    public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
90    public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
91    /**
92     * Key for cache data into L1 if cache is set up with more than one tier.
93     * To set in the shell, do something like this:
94     * <code>hbase(main):003:0> create 't',
95   *    {NAME => 'f', CACHE_DATA_IN_L1 => 'true'}</code>
96     */
97    public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";
98  
99    /**
100    * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
101    * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
102    * family will be loaded into the cache as soon as the file is opened. These
103    * loads will not count as cache misses.
104    */
105   public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN";
106 
107   /**
108    * Size of storefile/hfile 'blocks'.  Default is {@link #DEFAULT_BLOCKSIZE}.
109    * Use smaller block sizes for faster random-access at the expense of larger
110    * indices (more memory consumption).
111    */
112   public static final String BLOCKSIZE = "BLOCKSIZE";
113 
114   public static final String LENGTH = "LENGTH";
115   public static final String TTL = "TTL";
116   public static final String BLOOMFILTER = "BLOOMFILTER";
117   public static final String FOREVER = "FOREVER";
118   public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
119   public static final byte[] REPLICATION_SCOPE_BYTES = Bytes.toBytes(REPLICATION_SCOPE);
120   public static final String MIN_VERSIONS = "MIN_VERSIONS";
121   public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
122   public static final String COMPRESS_TAGS = "COMPRESS_TAGS";
123 
124   public static final String ENCRYPTION = "ENCRYPTION";
125   public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";
126 
127   /**
128    * Default compression type.
129    */
130   public static final String DEFAULT_COMPRESSION =
131     Compression.Algorithm.NONE.getName();
132 
133   /**
134    * Default value of the flag that enables data block encoding on disk, as
135    * opposed to encoding in cache only. We encode blocks everywhere by default,
136    * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
137    */
138   public static final boolean DEFAULT_ENCODE_ON_DISK = true;
139 
140   /** Default data block encoding algorithm. */
141   public static final String DEFAULT_DATA_BLOCK_ENCODING =
142       DataBlockEncoding.NONE.toString();
143 
144   /**
145    * Default number of versions of a record to keep.
146    */
147   public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt(
148     "hbase.column.max.version", 1);
149 
150   /**
151    * Default is not to keep a minimum of versions.
152    */
153   public static final int DEFAULT_MIN_VERSIONS = 0;
154 
155   /*
156    * Cache the blocksize value of this HCD here.
157    * Question: is it OK to cache, given that a new HCD is created when a table is re-enabled?
158    */
159   private volatile Integer blocksize = null;
160 
161   /**
162    * Default setting for whether to try and serve this column family from memory or not.
163    */
164   public static final boolean DEFAULT_IN_MEMORY = false;
165 
166   /**
167    * Default setting for preventing deleted cells from being collected immediately.
168    */
169   public static final KeepDeletedCells DEFAULT_KEEP_DELETED = KeepDeletedCells.FALSE;
170 
171   /**
172    * Default setting for whether to use a block cache or not.
173    */
174   public static final boolean DEFAULT_BLOCKCACHE = true;
175 
176   /**
177    * Default setting for whether to cache data blocks on write if block caching
178    * is enabled.
179    */
180   public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
181 
182   /**
183    * Default setting for whether to cache data blocks in the L1 tier.  Only makes sense if more
184    * than one tier is in operation, i.e. if we have an L1 and an L2.  This will be the case if we
185    * are using BucketCache.
186    */
187   public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false;
188 
189   /**
190    * Default setting for whether to cache index blocks on write if block
191    * caching is enabled.
192    */
193   public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false;
194 
195   /**
196    * Default size of blocks in files stored to the filesystem (hfiles).
197    */
198   public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE;
199 
200   /**
201    * Default setting for whether or not to use bloomfilters.
202    */
203   public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();
204 
205   /**
206    * Default setting for whether to cache bloom filter blocks on write if block
207    * caching is enabled.
208    */
209   public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;
210 
211   /**
212    * Default time to live of cell contents.
213    */
214   public static final int DEFAULT_TTL = HConstants.FOREVER;
215 
216   /**
217    * Default scope.
218    */
219   public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;
220 
221   /**
222    * Default setting for whether to evict cached blocks from the blockcache on
223    * close.
224    */
225   public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;
226 
227   /**
228    * Default setting for whether to compress tags along with any type of DataBlockEncoding.
229    */
230   public static final boolean DEFAULT_COMPRESS_TAGS = true;
231 
232   /**
233    * Default setting for whether to prefetch blocks into the blockcache on open.
234    */
235   public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false;
236 
237   private final static Map<String, String> DEFAULT_VALUES
238     = new HashMap<String, String>();
239   private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
240     = new HashSet<ImmutableBytesWritable>();
241   static {
242       DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
243       DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
244       DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
245       DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS));
246       DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
247       DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
248       DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
249       DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
250       DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
251       DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
252       DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
253       DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
254       DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1));
255       DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
256       DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
257       DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
258       DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
259       for (String s : DEFAULT_VALUES.keySet()) {
260         RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
261       }
262       RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(ENCRYPTION)));
263       RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(ENCRYPTION_KEY)));
264   }
265 
266   private static final int UNINITIALIZED = -1;
267 
268   // Column family name
269   private byte [] name;
270 
271   // Column metadata
272   private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
273     new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();
274 
275   /**
276    * A map which holds the configuration specific to the column family.
277    * The keys of the map have the same names as config keys and override the defaults with
278    * cf-specific settings. Example usage may be for compactions, etc.
279    */
280   private final Map<String, String> configuration = new HashMap<String, String>();
281 
282   /*
283    * Cache the max versions rather than calculate it every time.
284    */
285   private int cachedMaxVersions = UNINITIALIZED;
286 
287   /**
288    * Default constructor. Must be present for Writable.
289    * @deprecated Used by Writables and Writables are going away.
290    */
291   @Deprecated
292   // Make this private rather than remove after deprecation period elapses.  It's needed by pb
293   // deserializations.
294   public HColumnDescriptor() {
295     this.name = null;
296   }
297 
298   /**
299    * Construct a column descriptor specifying only the family name.
300    * The other attributes are defaulted.
301    *
302    * @param familyName Column family name. Must be 'printable' -- digit or
303    * letter -- and may not contain a <code>:</code>
304    */
305   public HColumnDescriptor(final String familyName) {
306     this(Bytes.toBytes(familyName));
307   }
308 
309   /**
310    * Construct a column descriptor specifying only the family name.
311    * The other attributes are defaulted.
312    *
313    * @param familyName Column family name. Must be 'printable' -- digit or
314    * letter -- and may not contain a <code>:</code>
315    */
316   public HColumnDescriptor(final byte [] familyName) {
317     this (familyName == null || familyName.length <= 0?
318       HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS,
319       DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
320       DEFAULT_TTL, DEFAULT_BLOOMFILTER);
321   }
322 
323   /**
324    * Constructor.
325    * Makes a deep copy of the supplied descriptor.
326    * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
327    * @param desc The descriptor.
328    */
329   public HColumnDescriptor(HColumnDescriptor desc) {
330     super();
331     this.name = desc.name.clone();
332     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
333         desc.values.entrySet()) {
334       this.values.put(e.getKey(), e.getValue());
335     }
336     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
337       this.configuration.put(e.getKey(), e.getValue());
338     }
339     setMaxVersions(desc.getMaxVersions());
340   }
341 
342   /**
343    * Constructor
344    * @param familyName Column family name. Must be 'printable' -- digit or
345    * letter -- and may not contain a <code>:</code>
346    * @param maxVersions Maximum number of versions to keep
347    * @param compression Compression type
348    * @param inMemory If true, column data should be kept in an HRegionServer's
349    * cache
350    * @param blockCacheEnabled If true, MapFile blocks should be cached
351    * @param timeToLive Time-to-live of cell contents, in seconds
352    * (use HConstants.FOREVER for unlimited TTL)
353    * @param bloomFilter Bloom filter type for this column
354    *
355    * @throws IllegalArgumentException if passed a family name that is made of
356    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
357    * a <code>:</code>
358    * @throws IllegalArgumentException if the number of versions is &lt;= 0
359    * @deprecated use {@link #HColumnDescriptor(String)} and setters
360    */
361   @Deprecated
362   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
363       final String compression, final boolean inMemory,
364       final boolean blockCacheEnabled,
365       final int timeToLive, final String bloomFilter) {
366     this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
367       DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE);
368   }
369 
370   /**
371    * Constructor
372    * @param familyName Column family name. Must be 'printable' -- digit or
373    * letter -- and may not contain a <code>:</code>
374    * @param maxVersions Maximum number of versions to keep
375    * @param compression Compression type
376    * @param inMemory If true, column data should be kept in an HRegionServer's
377    * cache
378    * @param blockCacheEnabled If true, MapFile blocks should be cached
379    * @param blocksize Block size to use when writing out storefiles.  Use
380    * smaller block sizes for faster random-access at the expense of larger indices
381    * (more memory consumption).  Default is usually 64k.
382    * @param timeToLive Time-to-live of cell contents, in seconds
383    * (use HConstants.FOREVER for unlimited TTL)
384    * @param bloomFilter Bloom filter type for this column
385    * @param scope The scope tag for this column
386    *
387    * @throws IllegalArgumentException if passed a family name that is made of
388    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
389    * a <code>:</code>
390    * @throws IllegalArgumentException if the number of versions is &lt;= 0
391    * @deprecated use {@link #HColumnDescriptor(String)} and setters
392    */
393   @Deprecated
394   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
395       final String compression, final boolean inMemory,
396       final boolean blockCacheEnabled, final int blocksize,
397       final int timeToLive, final String bloomFilter, final int scope) {
398     this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED,
399         compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING,
400         inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter,
401         scope);
402   }
403 
404   /**
405    * Constructor
406    * @param familyName Column family name. Must be 'printable' -- digit or
407    * letter -- and may not contain a <code>:</code>
408    * @param minVersions Minimum number of versions to keep
409    * @param maxVersions Maximum number of versions to keep
410    * @param keepDeletedCells Whether to retain deleted cells until they expire
411    *        up to maxVersions versions.
412    * @param compression Compression type
413    * @param encodeOnDisk whether to use the specified data block encoding
414    *        on disk. If false, the encoding will be used in cache only.
415    * @param dataBlockEncoding data block encoding
416    * @param inMemory If true, column data should be kept in an HRegionServer's
417    * cache
418    * @param blockCacheEnabled If true, MapFile blocks should be cached
419    * @param blocksize Block size to use when writing out storefiles.  Use
420    * smaller block sizes for faster random-access at the expense of larger indices
421    * (more memory consumption).  Default is usually 64k.
422    * @param timeToLive Time-to-live of cell contents, in seconds
423    * (use HConstants.FOREVER for unlimited TTL)
424    * @param bloomFilter Bloom filter type for this column
425    * @param scope The scope tag for this column
426    *
427    * @throws IllegalArgumentException if passed a family name that is made of
428    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
429    * a <code>:</code>
430    * @throws IllegalArgumentException if the number of versions is &lt;= 0
431    * @deprecated use {@link #HColumnDescriptor(String)} and setters
432    */
433   @Deprecated
434   public HColumnDescriptor(final byte[] familyName, final int minVersions,
435       final int maxVersions, final KeepDeletedCells keepDeletedCells,
436       final String compression, final boolean encodeOnDisk,
437       final String dataBlockEncoding, final boolean inMemory,
438       final boolean blockCacheEnabled, final int blocksize,
439       final int timeToLive, final String bloomFilter, final int scope) {
440     isLegalFamilyName(familyName);
441     this.name = familyName;
442 
443     if (maxVersions <= 0) {
444       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
445     // Until there is support, treat 0 or < 0 as a configuration error.
446       throw new IllegalArgumentException("Maximum versions must be positive");
447     }
448 
449     if (minVersions > 0) {
450       if (timeToLive == HConstants.FOREVER) {
451         throw new IllegalArgumentException("Minimum versions requires TTL.");
452       }
453       if (minVersions >= maxVersions) {
454         throw new IllegalArgumentException("Minimum versions must be < "
455             + "maximum versions.");
456       }
457     }
458 
459     setMaxVersions(maxVersions);
460     setMinVersions(minVersions);
461     setKeepDeletedCells(keepDeletedCells);
462     setInMemory(inMemory);
463     setBlockCacheEnabled(blockCacheEnabled);
464     setTimeToLive(timeToLive);
465     setCompressionType(Compression.Algorithm.
466       valueOf(compression.toUpperCase()));
467     setDataBlockEncoding(DataBlockEncoding.
468         valueOf(dataBlockEncoding.toUpperCase()));
469     setBloomFilterType(BloomType.
470       valueOf(bloomFilter.toUpperCase()));
471     setBlocksize(blocksize);
472     setScope(scope);
473   }
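  // Illustrative sketch: the minVersions/TTL relationship enforced by the constructor
  // above can also be expressed with the preferred String constructor and chained
  // setters (note that the individual setters do not themselves re-validate the
  // combination); the family name "f" is a placeholder:
  //
  //   HColumnDescriptor hcd = new HColumnDescriptor("f")
  //       .setTimeToLive(86400)   // a finite TTL, in seconds
  //       .setMinVersions(1);     // keep at least one version even after TTL expiry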
474 
475   /**
476    * @param b Family name.
477    * @return <code>b</code>
478    * @throws IllegalArgumentException If not null and not a legitimate family
479    * name: i.e. it must be 'printable' and may not contain a ':' (null is allowed
480    * because <code>b</code> can be null when deserializing).  It cannot start with a '.'
481    * either, cannot be empty, and cannot equal "recovered.edits".
482    */
483   public static byte [] isLegalFamilyName(final byte [] b) {
484     if (b == null) {
485       return b;
486     }
487     Preconditions.checkArgument(b.length != 0, "Family name can not be empty");
488     if (b[0] == '.') {
489       throw new IllegalArgumentException("Family names cannot start with a " +
490         "period: " + Bytes.toString(b));
491     }
492     for (int i = 0; i < b.length; i++) {
493       if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
494         throw new IllegalArgumentException("Illegal character <" + b[i] +
495           ">. Family names cannot contain control characters, colons, slashes or backslashes: " +
496           Bytes.toString(b));
497       }
498     }
499     byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
500     if (Bytes.equals(recoveredEdit, b)) {
501       throw new IllegalArgumentException("Family name cannot be: " +
502           HConstants.RECOVERED_EDITS_DIR);
503     }
504     return b;
505   }
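  // Illustrative behaviour of the check above (the family names shown are placeholders):
  //
  //   HColumnDescriptor.isLegalFamilyName(Bytes.toBytes("info"));   // returns the bytes unchanged
  //   HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(".meta"));  // throws IllegalArgumentException
  //   HColumnDescriptor.isLegalFamilyName(Bytes.toBytes("a:b"));    // throws IllegalArgumentException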
506 
507   /**
508    * @return Name of this column family
509    */
510   public byte [] getName() {
511     return name;
512   }
513 
514   /**
515    * @return Name of this column family
516    */
517   public String getNameAsString() {
518     return Bytes.toString(this.name);
519   }
520 
521   /**
522    * @param key The key.
523    * @return The value.
524    */
525   public byte[] getValue(byte[] key) {
526     ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key));
527     if (ibw == null)
528       return null;
529     return ibw.get();
530   }
531 
532   /**
533    * @param key The key.
534    * @return The value as a string.
535    */
536   public String getValue(String key) {
537     byte[] value = getValue(Bytes.toBytes(key));
538     if (value == null)
539       return null;
540     return Bytes.toString(value);
541   }
542 
543   /**
544    * @return All values.
545    */
546   public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
547     // shallow pointer copy
548     return Collections.unmodifiableMap(values);
549   }
550 
551   /**
552    * @param key The key.
553    * @param value The value.
554    * @return this (for chained invocation)
555    */
556   public HColumnDescriptor setValue(byte[] key, byte[] value) {
557     values.put(new ImmutableBytesWritable(key),
558       new ImmutableBytesWritable(value));
559     return this;
560   }
561 
562   /**
563    * @param key Key whose entry (key and value) we're to remove from the HCD parameters.
564    */
565   public void remove(final byte [] key) {
566     values.remove(new ImmutableBytesWritable(key));
567   }
568 
569   /**
570    * @param key The key.
571    * @param value The value.
572    * @return this (for chained invocation)
573    */
574   public HColumnDescriptor setValue(String key, String value) {
575     if (value == null) {
576       remove(Bytes.toBytes(key));
577     } else {
578       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
579     }
580     return this;
581   }
582 
583   /** @return compression type being used for the column family */
584   public Compression.Algorithm getCompression() {
585     String n = getValue(COMPRESSION);
586     if (n == null) {
587       return Compression.Algorithm.NONE;
588     }
589     return Compression.Algorithm.valueOf(n.toUpperCase());
590   }
591 
592   /** @return compression type being used for the column family for major
593       compaction */
594   public Compression.Algorithm getCompactionCompression() {
595     String n = getValue(COMPRESSION_COMPACT);
596     if (n == null) {
597       return getCompression();
598     }
599     return Compression.Algorithm.valueOf(n.toUpperCase());
600   }
601 
602   /** @return maximum number of versions */
603   public int getMaxVersions() {
604     if (this.cachedMaxVersions == UNINITIALIZED) {
605       String v = getValue(HConstants.VERSIONS);
606       this.cachedMaxVersions = Integer.parseInt(v);
607     }
608     return this.cachedMaxVersions;
609   }
610 
611   /**
612    * @param maxVersions maximum number of versions
613    * @return this (for chained invocation)
614    */
615   public HColumnDescriptor setMaxVersions(int maxVersions) {
616     if (maxVersions <= 0) {
617       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
618     // Until there is support, treat 0 or < 0 as a configuration error.
619       throw new IllegalArgumentException("Maximum versions must be positive");
620     }
621     if (maxVersions < this.getMinVersions()) {
622         throw new IllegalArgumentException("Set MaxVersion to " + maxVersions
623             + " while minVersion is " + this.getMinVersions()
624             + ". Maximum versions must be >= minimum versions ");
625     }
626     setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
627     cachedMaxVersions = maxVersions;
628     return this;
629   }
630 
631   /**
632    * @return The storefile/hfile blocksize for this column family.
633    */
634   public synchronized int getBlocksize() {
635     if (this.blocksize == null) {
636       String value = getValue(BLOCKSIZE);
637       this.blocksize = (value != null)?
638         Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
639     }
640     return this.blocksize.intValue();
641   }
642 
643   /**
644    * @param s Blocksize to use when writing out storefiles/hfiles on this
645    * column family.
646    * @return this (for chained invocation)
647    */
648   public HColumnDescriptor setBlocksize(int s) {
649     setValue(BLOCKSIZE, Integer.toString(s));
650     this.blocksize = null;
651     return this;
652   }
653 
654   /**
655    * @return Compression type setting.
656    */
657   public Compression.Algorithm getCompressionType() {
658     return getCompression();
659   }
660 
661   /**
662    * Compression types supported in hbase.
663    * LZO is not bundled as part of the hbase distribution.
664    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
665    * for how to enable it.
666    * @param type Compression type setting.
667    * @return this (for chained invocation)
668    */
669   public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
670     return setValue(COMPRESSION, type.getName().toUpperCase());
671   }
672 
673   /** @return data block encoding algorithm used on disk */
674   @Deprecated
675   public DataBlockEncoding getDataBlockEncodingOnDisk() {
676     return getDataBlockEncoding();
677   }
678 
679   /**
680    * This method does nothing now. Flag ENCODE_ON_DISK is not used
681    * any more. Data blocks have the same encoding in cache as on disk.
682    * @return this (for chained invocation)
683    */
684   @Deprecated
685   public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
686     return this;
687   }
688 
689   /**
690    * @return the data block encoding algorithm used in block cache and
691    *         optionally on disk
692    */
693   public DataBlockEncoding getDataBlockEncoding() {
694     String type = getValue(DATA_BLOCK_ENCODING);
695     if (type == null) {
696       type = DEFAULT_DATA_BLOCK_ENCODING;
697     }
698     return DataBlockEncoding.valueOf(type);
699   }
700 
701   /**
702    * Set data block encoding algorithm used in block cache.
703    * @param type What kind of data block encoding will be used.
704    * @return this (for chained invocation)
705    */
706   public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
707     String name;
708     if (type != null) {
709       name = type.toString();
710     } else {
711       name = DataBlockEncoding.NONE.toString();
712     }
713     return setValue(DATA_BLOCK_ENCODING, name);
714   }
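  // Illustrative sketch: any member of the DataBlockEncoding enum may be passed here,
  // for example FAST_DIFF; as implemented above, passing null falls back to NONE
  // (hcd is a placeholder descriptor):
  //
  //   hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);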
715 
716   /**
717    * Set whether the tags should be compressed along with DataBlockEncoding. When no
718    * DataBlockEncoding is in use, this has no effect.
719    *
720    * @param compressTags whether to compress tags along with the data block encoding
721    * @return this (for chained invocation)
722    */
723   public HColumnDescriptor setCompressTags(boolean compressTags) {
724     return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
725   }
726 
727   /**
728    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
729    *         DataBlockEncoding is in use, this has no effect.
730    * @deprecated Use {@link #isCompressTags()} instead
731    */
732   @Deprecated
733   public boolean shouldCompressTags() {
734     String compressTagsStr = getValue(COMPRESS_TAGS);
735     boolean compressTags = DEFAULT_COMPRESS_TAGS;
736     if (compressTagsStr != null) {
737       compressTags = Boolean.valueOf(compressTagsStr);
738     }
739     return compressTags;
740   }
741 
742   /**
743    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
744    *         DataBlockEncoding is in use, this has no effect.
745    */
746   public boolean isCompressTags() {
747     String compressTagsStr = getValue(COMPRESS_TAGS);
748     boolean compressTags = DEFAULT_COMPRESS_TAGS;
749     if (compressTagsStr != null) {
750       compressTags = Boolean.valueOf(compressTagsStr);
751     }
752     return compressTags;
753   }
754 
755   /**
756    * @return Compression type setting.
757    */
758   public Compression.Algorithm getCompactionCompressionType() {
759     return getCompactionCompression();
760   }
761 
762   /**
763    * Compression types supported in hbase.
764    * LZO is not bundled as part of the hbase distribution.
765    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
766    * for how to enable it.
767    * @param type Compression type setting.
768    * @return this (for chained invocation)
769    */
770   public HColumnDescriptor setCompactionCompressionType(
771       Compression.Algorithm type) {
772     return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase());
773   }
774 
775   /**
776    * @return True if we are to favor keeping all values for this column family in the
777    * HRegionServer cache.
778    */
779   public boolean isInMemory() {
780     String value = getValue(HConstants.IN_MEMORY);
781     if (value != null)
782       return Boolean.valueOf(value).booleanValue();
783     return DEFAULT_IN_MEMORY;
784   }
785 
786   /**
787    * @param inMemory True if we are to favor keeping all values for this column family in the
788    * HRegionServer cache
789    * @return this (for chained invocation)
790    */
791   public HColumnDescriptor setInMemory(boolean inMemory) {
792     return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
793   }
794 
795   public KeepDeletedCells getKeepDeletedCells() {
796     String value = getValue(KEEP_DELETED_CELLS);
797     if (value != null) {
798       // toUpperCase for backwards compatibility
799       return KeepDeletedCells.valueOf(value.toUpperCase());
800     }
801     return DEFAULT_KEEP_DELETED;
802   }
803 
804   /**
805    * @param keepDeletedCells True if deleted cells should not be collected
806    * immediately.
807    * @return this (for chained invocation)
808    * @deprecated use {@link #setKeepDeletedCells(KeepDeletedCells)}
809    */
810   @Deprecated
811   public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) {
812     return setValue(KEEP_DELETED_CELLS, (keepDeletedCells ? KeepDeletedCells.TRUE
813         : KeepDeletedCells.FALSE).toString());
814   }
815 
816   /**
817    * @param keepDeletedCells True if deleted cells should not be collected
818    * immediately.
819    * @return this (for chained invocation)
820    */
821   public HColumnDescriptor setKeepDeletedCells(KeepDeletedCells keepDeletedCells) {
822     return setValue(KEEP_DELETED_CELLS, keepDeletedCells.toString());
823   }
824 
825   /**
826    * @return Time-to-live of cell contents, in seconds.
827    */
828   public int getTimeToLive() {
829     String value = getValue(TTL);
830     return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL;
831   }
832 
833   /**
834    * @param timeToLive Time-to-live of cell contents, in seconds.
835    * @return this (for chained invocation)
836    */
837   public HColumnDescriptor setTimeToLive(int timeToLive) {
838     return setValue(TTL, Integer.toString(timeToLive));
839   }
840 
841   /**
842    * @return The minimum number of versions to keep.
843    */
844   public int getMinVersions() {
845     String value = getValue(MIN_VERSIONS);
846     return (value != null)? Integer.valueOf(value).intValue(): 0;
847   }
848 
849   /**
850    * @param minVersions The minimum number of versions to keep.
851    * (used when timeToLive is set)
852    * @return this (for chained invocation)
853    */
854   public HColumnDescriptor setMinVersions(int minVersions) {
855     return setValue(MIN_VERSIONS, Integer.toString(minVersions));
856   }
857 
858   /**
859    * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX
860    * and BLOOM type blocks).
861    */
862   public boolean isBlockCacheEnabled() {
863     String value = getValue(BLOCKCACHE);
864     if (value != null)
865       return Boolean.valueOf(value).booleanValue();
866     return DEFAULT_BLOCKCACHE;
867   }
868 
869   /**
870    * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache
871    * INDEX and BLOOM blocks; you cannot turn this off).
872    * @return this (for chained invocation)
873    */
874   public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
875     return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
876   }
877 
878   /**
879    * @return bloom filter type used for new StoreFiles in ColumnFamily
880    */
881   public BloomType getBloomFilterType() {
882     String n = getValue(BLOOMFILTER);
883     if (n == null) {
884       n = DEFAULT_BLOOMFILTER;
885     }
886     return BloomType.valueOf(n.toUpperCase());
887   }
888 
889   /**
890    * @param bt bloom filter type
891    * @return this (for chained invocation)
892    */
893   public HColumnDescriptor setBloomFilterType(final BloomType bt) {
894     return setValue(BLOOMFILTER, bt.toString());
895   }
896 
897   /**
898    * @return the scope tag
899    */
900   public int getScope() {
901     byte[] value = getValue(REPLICATION_SCOPE_BYTES);
902     if (value != null) {
903       return Integer.valueOf(Bytes.toString(value));
904     }
905     return DEFAULT_REPLICATION_SCOPE;
906   }
907 
908   /**
909    * @param scope the scope tag
910    * @return this (for chained invocation)
911    */
912   public HColumnDescriptor setScope(int scope) {
913     return setValue(REPLICATION_SCOPE, Integer.toString(scope));
914   }
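  // Illustrative sketch: replication for a family is typically enabled by widening its
  // scope from the default HConstants.REPLICATION_SCOPE_LOCAL (used above for
  // DEFAULT_REPLICATION_SCOPE) to HConstants.REPLICATION_SCOPE_GLOBAL
  // (hcd is a placeholder descriptor):
  //
  //   hcd.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);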
915 
916   /**
917    * @return true if we should cache data blocks on write
918    * @deprecated Use {@link #isCacheDataOnWrite()} instead
919    */
920   @Deprecated
921   public boolean shouldCacheDataOnWrite() {
922     return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
923   }
924 
925   /**
926    * @return true if we should cache data blocks on write
927    */
928   public boolean isCacheDataOnWrite() {
929     return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
930   }
931 
932   /**
933    * @param value true if we should cache data blocks on write
934    * @return this (for chained invocation)
935    */
936   public HColumnDescriptor setCacheDataOnWrite(boolean value) {
937     return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
938   }
939 
940   /**
941    * @return true if we should cache data blocks in the L1 cache (if the block cache deployment
942    * has more than one tier; e.g. we are using CombinedBlockCache).
943    * @deprecated Use {@link #isCacheDataInL1()} instead
944    */
945   @Deprecated
946   public boolean shouldCacheDataInL1() {
947     return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
948   }
949 
950   /**
951    * @return true if we should cache data blocks in the L1 cache (if the block cache deployment has more
952    *         than one tier; e.g. we are using CombinedBlockCache).
953    */
954   public boolean isCacheDataInL1() {
955     return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
956   }
957 
958   /**
959    * @param value true if we should cache data blocks in the L1 cache (if the block cache deployment
960    * has more than one tier; e.g. we are using CombinedBlockCache).
961    * @return this (for chained invocation)
962    */
963   public HColumnDescriptor setCacheDataInL1(boolean value) {
964     return setValue(CACHE_DATA_IN_L1, Boolean.toString(value));
965   }
966 
967   private boolean setAndGetBoolean(final String key, final boolean defaultSetting) {
968     String value = getValue(key);
969     if (value != null) return Boolean.valueOf(value).booleanValue();
970     return defaultSetting;
971   }
972 
973   /**
974    * @return true if we should cache index blocks on write
975    * @deprecated Use {@link #isCacheIndexesOnWrite()} instead
976    */
977   @Deprecated
978   public boolean shouldCacheIndexesOnWrite() {
979     return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
980   }
981 
982   /**
983    * @return true if we should cache index blocks on write
984    */
985   public boolean isCacheIndexesOnWrite() {
986     return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
987   }
988 
989   /**
990    * @param value true if we should cache index blocks on write
991    * @return this (for chained invocation)
992    */
993   public HColumnDescriptor setCacheIndexesOnWrite(boolean value) {
994     return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
995   }
996 
997   /**
998    * @return true if we should cache bloomfilter blocks on write
999    * @deprecated Use {@link #isCacheBloomsOnWrite()} instead
1000    */
1001   @Deprecated
1002   public boolean shouldCacheBloomsOnWrite() {
1003     return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
1004   }
1005 
1006   /**
1007    * @return true if we should cache bloomfilter blocks on write
1008    */
1009   public boolean isCacheBloomsOnWrite() {
1010     return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
1011   }
1012 
1013   /**
1014    * @param value true if we should cache bloomfilter blocks on write
1015    * @return this (for chained invocation)
1016    */
1017   public HColumnDescriptor setCacheBloomsOnWrite(boolean value) {
1018     return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
1019   }
1020 
1021   /**
1022    * @return true if we should evict cached blocks from the blockcache on
1023    * close
1024    * @deprecated Use {@link #isEvictBlocksOnClose()} instead
1025    */
1026   @Deprecated
1027   public boolean shouldEvictBlocksOnClose() {
1028     return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
1029   }
1030 
1031   /**
1032    * @return true if we should evict cached blocks from the blockcache on close
1033    */
1034   public boolean isEvictBlocksOnClose() {
1035     return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
1036   }
1037 
1038   /**
1039    * @param value true if we should evict cached blocks from the blockcache on
1040    * close
1041    * @return this (for chained invocation)
1042    */
1043   public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
1044     return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
1045   }
1046 
1047   /**
1048    * @return true if we should prefetch blocks into the blockcache on open
1049    * @deprecated Use {@link #isPrefetchBlocksOnOpen()} instead
1050    */
1051   @Deprecated
1052   public boolean shouldPrefetchBlocksOnOpen() {
1053     return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
1054   }
1055 
1056   /**
1057    * @return true if we should prefetch blocks into the blockcache on open
1058    */
1059   public boolean isPrefetchBlocksOnOpen() {
1060     return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
1061   }
1062 
1063   /**
1064    * @param value true if we should prefetch blocks into the blockcache on open
1065    * @return this (for chained invocation)
1066    */
1067   public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) {
1068     return setValue(PREFETCH_BLOCKS_ON_OPEN, Boolean.toString(value));
1069   }
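  // Illustrative sketch: the cache-tuning setters above all return this, so a family's
  // caching behaviour can be configured in a single chained expression
  // (hcd is a placeholder descriptor):
  //
  //   hcd.setBlockCacheEnabled(true)
  //      .setCacheDataOnWrite(true)
  //      .setPrefetchBlocksOnOpen(true);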
1070 
1071   /**
1072    * @see java.lang.Object#toString()
1073    */
1074   @Override
1075   public String toString() {
1076     StringBuilder s = new StringBuilder();
1077 
1078     s.append('{');
1079     s.append(HConstants.NAME);
1080     s.append(" => '");
1081     s.append(Bytes.toString(name));
1082     s.append("'");
1083     s.append(getValues(true));
1084     s.append('}');
1085     return s.toString();
1086   }
1087 
1088   /**
1089    * @return Column family descriptor with only the customized attributes.
1090    */
1091   public String toStringCustomizedValues() {
1092     StringBuilder s = new StringBuilder();
1093     s.append('{');
1094     s.append(HConstants.NAME);
1095     s.append(" => '");
1096     s.append(Bytes.toString(name));
1097     s.append("'");
1098     s.append(getValues(false));
1099     s.append('}');
1100     return s.toString();
1101   }
1102 
1103   private StringBuilder getValues(boolean printDefaults) {
1104     StringBuilder s = new StringBuilder();
1105 
1106     boolean hasConfigKeys = false;
1107 
1108     // print all reserved keys first
1109     for (ImmutableBytesWritable k : values.keySet()) {
1110       if (!RESERVED_KEYWORDS.contains(k)) {
1111         hasConfigKeys = true;
1112         continue;
1113       }
1114       String key = Bytes.toString(k.get());
1115       String value = Bytes.toStringBinary(values.get(k).get());
1116       if (printDefaults
1117           || !DEFAULT_VALUES.containsKey(key)
1118           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
1119         s.append(", ");
1120         s.append(key);
1121         s.append(" => ");
1122         s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
1123       }
1124     }
1125 
1126     // print all non-reserved, advanced config keys as a separate subset
1127     if (hasConfigKeys) {
1128       s.append(", ");
1129       s.append(HConstants.METADATA).append(" => ");
1130       s.append('{');
1131       boolean printComma = false;
1132       for (ImmutableBytesWritable k : values.keySet()) {
1133         if (RESERVED_KEYWORDS.contains(k)) {
1134           continue;
1135         }
1136         String key = Bytes.toString(k.get());
1137         String value = Bytes.toStringBinary(values.get(k).get());
1138         if (printComma) {
1139           s.append(", ");
1140         }
1141         printComma = true;
1142         s.append('\'').append(key).append('\'');
1143         s.append(" => ");
1144         s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
1145       }
1146       s.append('}');
1147     }
1148 
1149     if (!configuration.isEmpty()) {
1150       s.append(", ");
1151       s.append(HConstants.CONFIGURATION).append(" => ");
1152       s.append('{');
1153       boolean printCommaForConfiguration = false;
1154       for (Map.Entry<String, String> e : configuration.entrySet()) {
1155         if (printCommaForConfiguration) s.append(", ");
1156         printCommaForConfiguration = true;
1157         s.append('\'').append(e.getKey()).append('\'');
1158         s.append(" => ");
1159         s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\'');
1160       }
1161       s.append("}");
1162     }
1163     return s;
1164   }
1165 
1166   public static Unit getUnit(String key) {
1167     Unit unit;
1168     /* TTL for now, we can add more as we need */
1169     if (key.equals(HColumnDescriptor.TTL)) {
1170       unit = Unit.TIME_INTERVAL;
1171     } else {
1172       unit = Unit.NONE;
1173     }
1174     return unit;
1175   }
1176 
1177   public static Map<String, String> getDefaultValues() {
1178     return Collections.unmodifiableMap(DEFAULT_VALUES);
1179   }
1180 
1181   /**
1182    * @see java.lang.Object#equals(java.lang.Object)
1183    */
1184   @Override
1185   public boolean equals(Object obj) {
1186     if (this == obj) {
1187       return true;
1188     }
1189     if (obj == null) {
1190       return false;
1191     }
1192     if (!(obj instanceof HColumnDescriptor)) {
1193       return false;
1194     }
1195     return compareTo((HColumnDescriptor)obj) == 0;
1196   }
1197 
1198   /**
1199    * @see java.lang.Object#hashCode()
1200    */
1201   @Override
1202   public int hashCode() {
1203     int result = Bytes.hashCode(this.name);
1204     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
1205     result ^= values.hashCode();
1206     result ^= configuration.hashCode();
1207     return result;
1208   }
1209 
1210   /**
1211    * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
1212    */
1213   @Deprecated
1214   public void readFields(DataInput in) throws IOException {
1215     int version = in.readByte();
1216     if (version < 6) {
1217       if (version <= 2) {
1218         Text t = new Text();
1219         t.readFields(in);
1220         this.name = t.getBytes();
1221 //        if(KeyValue.getFamilyDelimiterIndex(this.name, 0, this.name.length)
1222 //            > 0) {
1223 //          this.name = stripColon(this.name);
1224 //        }
1225       } else {
1226         this.name = Bytes.readByteArray(in);
1227       }
1228       this.values.clear();
1229       setMaxVersions(in.readInt());
1230       int ordinal = in.readInt();
1231       setCompressionType(Compression.Algorithm.values()[ordinal]);
1232       setInMemory(in.readBoolean());
1233       setBloomFilterType(in.readBoolean() ? BloomType.ROW : BloomType.NONE);
1234       if (getBloomFilterType() != BloomType.NONE && version < 5) {
1235         // If a bloomFilter is enabled and the column descriptor is less than
1236         // version 5, we need to skip over it to read the rest of the column
1237         // descriptor. There are no BloomFilterDescriptors written to disk for
1238         // column descriptors with a version number >= 5
1239         throw new UnsupportedClassVersionError(this.getClass().getName() +
1240             " does not support backward compatibility with versions older " +
1241             "than version 5");
1242       }
1243       if (version > 1) {
1244         setBlockCacheEnabled(in.readBoolean());
1245       }
1246       if (version > 2) {
1247        setTimeToLive(in.readInt());
1248       }
1249     } else {
1250       // version 6+
1251       this.name = Bytes.readByteArray(in);
1252       this.values.clear();
1253       int numValues = in.readInt();
1254       for (int i = 0; i < numValues; i++) {
1255         ImmutableBytesWritable key = new ImmutableBytesWritable();
1256         ImmutableBytesWritable value = new ImmutableBytesWritable();
1257         key.readFields(in);
1258         value.readFields(in);
1259 
1260         // in version 8, the BloomFilter setting changed from bool to enum
1261         if (version < 8 && Bytes.toString(key.get()).equals(BLOOMFILTER)) {
1262           value.set(Bytes.toBytes(
1263               Boolean.parseBoolean(Bytes.toString(value.get()))
1264                 ? BloomType.ROW.toString()
1265                 : BloomType.NONE.toString()));
1266         }
1267 
1268         values.put(key, value);
1269       }
1270       if (version == 6) {
1271         // Convert old values.
1272         setValue(COMPRESSION, Compression.Algorithm.NONE.getName());
1273       }
1274       String value = getValue(HConstants.VERSIONS);
1275       this.cachedMaxVersions = (value != null)?
1276           Integer.valueOf(value).intValue(): DEFAULT_VERSIONS;
1277       if (version > 10) {
1278         configuration.clear();
1279         int numConfigs = in.readInt();
1280         for (int i = 0; i < numConfigs; i++) {
1281           ImmutableBytesWritable key = new ImmutableBytesWritable();
1282           ImmutableBytesWritable val = new ImmutableBytesWritable();
1283           key.readFields(in);
1284           val.readFields(in);
1285           configuration.put(
1286             Bytes.toString(key.get(), key.getOffset(), key.getLength()),
1287             Bytes.toString(val.get(), val.getOffset(), val.getLength()));
1288         }
1289       }
1290     }
1291   }
1292 
1293   /**
1294    * @deprecated Writables are going away.  Use {@link #toByteArray()} instead.
1295    */
1296   @Deprecated
1297   public void write(DataOutput out) throws IOException {
1298     out.writeByte(COLUMN_DESCRIPTOR_VERSION);
1299     Bytes.writeByteArray(out, this.name);
1300     out.writeInt(values.size());
1301     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1302         values.entrySet()) {
1303       e.getKey().write(out);
1304       e.getValue().write(out);
1305     }
1306     out.writeInt(configuration.size());
1307     for (Map.Entry<String, String> e : configuration.entrySet()) {
1308       new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
1309       new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
1310     }
1311   }
1312 
1313   // Comparable
1314   @Override
1315   public int compareTo(HColumnDescriptor o) {
1316     int result = Bytes.compareTo(this.name, o.getName());
1317     if (result == 0) {
1318       // punt on comparison for ordering, just calculate difference
1319       result = this.values.hashCode() - o.values.hashCode();
1320       if (result < 0)
1321         result = -1;
1322       else if (result > 0)
1323         result = 1;
1324     }
1325     if (result == 0) {
1326       result = this.configuration.hashCode() - o.configuration.hashCode();
1327       if (result < 0)
1328         result = -1;
1329       else if (result > 0)
1330         result = 1;
1331     }
1332     return result;
1333   }
1334 
1335   /**
1336    * @return This instance serialized with pb with pb magic prefix
1337    * @see #parseFrom(byte[])
1338    */
1339   public byte [] toByteArray() {
1340     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1341   }
1342 
1343   /**
1344    * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
1345    * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
1346    * @throws DeserializationException
1347    * @see #toByteArray()
1348    */
1349   public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
1350     if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
1351     int pblen = ProtobufUtil.lengthOfPBMagic();
1352     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1353     ColumnFamilySchema cfs = null;
1354     try {
1355       cfs = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1356     } catch (InvalidProtocolBufferException e) {
1357       throw new DeserializationException(e);
1358     }
1359     return convert(cfs);
1360   }
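  // Illustrative round trip (the family name is a placeholder): the pb-with-magic form
  // written by toByteArray() is exactly what parseFrom() expects back.
  //
  //   byte[] serialized = new HColumnDescriptor("f").toByteArray();
  //   HColumnDescriptor restored = HColumnDescriptor.parseFrom(serialized);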
1361 
1362   /**
1363    * @param cfs the protobuf column family schema to convert
1364    * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
1365    */
1366   public static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
1367     // Use the empty constructor so we preserve the initial values set on construction for things
1368     // like maxVersion.  Otherwise, we pick up wrong values on deserialization which makes for
1369     // unrelated-looking test failures that are hard to trace back to here.
1370     HColumnDescriptor hcd = new HColumnDescriptor();
1371     hcd.name = cfs.getName().toByteArray();
1372     for (BytesBytesPair a: cfs.getAttributesList()) {
1373       hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1374     }
1375     for (NameStringPair a: cfs.getConfigurationList()) {
1376       hcd.setConfiguration(a.getName(), a.getValue());
1377     }
1378     return hcd;
1379   }
1380 
1381   /**
1382    * @return This instance converted to the pb column family type
1383    */
1384   public ColumnFamilySchema convert() {
1385     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1386     builder.setName(ByteStringer.wrap(getName()));
1387     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
1388       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1389       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1390       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1391       builder.addAttributes(aBuilder.build());
1392     }
1393     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1394       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1395       aBuilder.setName(e.getKey());
1396       aBuilder.setValue(e.getValue());
1397       builder.addConfiguration(aBuilder.build());
1398     }
1399     return builder.build();
1400   }
1401 
1402   /**
1403    * Getter for accessing the configuration value by key.
1404    */
1405   public String getConfigurationValue(String key) {
1406     return configuration.get(key);
1407   }
1408 
1409   /**
1410    * Getter for fetching an unmodifiable {@link #configuration} map.
1411    */
1412   public Map<String, String> getConfiguration() {
1413     // shallow pointer copy
1414     return Collections.unmodifiableMap(configuration);
1415   }
1416 
1417   /**
1418    * Setter for storing a configuration setting in {@link #configuration} map.
1419    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1420    * @param value String value. If null, removes the configuration.
1421    */
1422   public HColumnDescriptor setConfiguration(String key, String value) {
1423     if (value == null) {
1424       removeConfiguration(key);
1425     } else {
1426       configuration.put(key, value);
1427     }
1428     return this;
1429   }
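  // Illustrative sketch: scope a cluster configuration key to this family only.  The
  // key shown, hbase.hstore.compaction.min, is just one example of a compaction setting
  // that can be overridden per family (hcd is a placeholder descriptor):
  //
  //   hcd.setConfiguration("hbase.hstore.compaction.min", "5");
  //   String overridden = hcd.getConfigurationValue("hbase.hstore.compaction.min");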
1430 
1431   /**
1432    * Remove a configuration setting represented by the key from the {@link #configuration} map.
1433    */
1434   public void removeConfiguration(final String key) {
1435     configuration.remove(key);
1436   }
1437 
1438   /**
1439    * Return the encryption algorithm in use by this family
1440    */
1441   public String getEncryptionType() {
1442     return getValue(ENCRYPTION);
1443   }
1444 
1445   /**
1446    * Set the encryption algorithm for use with this family
1447    * @param algorithm the encryption algorithm name
1448    */
1449   public HColumnDescriptor setEncryptionType(String algorithm) {
1450     setValue(ENCRYPTION, algorithm);
1451     return this;
1452   }
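  // Illustrative sketch, assuming transparent encryption is configured on the cluster
  // and "AES" names an available cipher (hcd is a placeholder descriptor):
  //
  //   hcd.setEncryptionType("AES");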
1453 
1454   /** Return the raw crypto key attribute for the family, or null if not set  */
1455   public byte[] getEncryptionKey() {
1456     return getValue(Bytes.toBytes(ENCRYPTION_KEY));
1457   }
1458 
1459   /** Set the raw crypto key attribute for the family */
1460   public HColumnDescriptor setEncryptionKey(byte[] keyBytes) {
1461     setValue(Bytes.toBytes(ENCRYPTION_KEY), keyBytes);
1462     return this;
1463   }
1464 }