View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.DataInput;
22  import java.io.DataOutput;
23  import java.io.IOException;
24  import java.util.Collections;
25  import java.util.HashMap;
26  import java.util.HashSet;
27  import java.util.Map;
28  import java.util.Set;
29  
30  import org.apache.hadoop.classification.InterfaceAudience;
31  import org.apache.hadoop.classification.InterfaceStability;
32  import org.apache.hadoop.hbase.exceptions.DeserializationException;
33  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
34  import org.apache.hadoop.hbase.io.compress.Compression;
35  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
36  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
37  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
38  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
39  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
40  import org.apache.hadoop.hbase.regionserver.BloomType;
41  import org.apache.hadoop.hbase.util.Bytes;
42  import org.apache.hadoop.hbase.util.PrettyPrinter;
43  import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
44  import org.apache.hadoop.io.Text;
45  import org.apache.hadoop.io.WritableComparable;
46  
47  import com.google.common.base.Preconditions;
48  import org.apache.hadoop.hbase.util.ByteStringer;
49  import com.google.protobuf.InvalidProtocolBufferException;
50  
51  /**
52   * An HColumnDescriptor contains information about a column family such as the
53   * number of versions, compression settings, etc.
54   *
55   * It is used as input when creating a table or adding a column.
56   */
57  @InterfaceAudience.Public
58  @InterfaceStability.Evolving
59  public class HColumnDescriptor implements WritableComparable<HColumnDescriptor> {
  // For future backward compatibility

  // Version  3 was when column names become byte arrays and when we picked up
  // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
  // Version  5 was when bloom filter descriptors were removed.
  // Version  6 adds metadata as a map where keys and values are byte[].
  // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
  // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
  // Version  9 -- add data block encoding
  // Version 10 -- change metadata to standard type.
  // Version 11 -- add column family level configuration.
  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;

  // These constants are used as FileInfo keys
  public static final String COMPRESSION = "COMPRESSION";
  // Compression used when writing compacted files; getCompactionCompression()
  // falls back to COMPRESSION when this is unset.
  public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
  public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
      "ENCODE_ON_DISK";
  public static final String DATA_BLOCK_ENCODING =
      "DATA_BLOCK_ENCODING";
  public static final String BLOCKCACHE = "BLOCKCACHE";
  public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
  public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
  public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
  public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
  /**
   * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
   * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
   * family will be loaded into the cache as soon as the file is opened. These
   * loads will not count as cache misses.
   */
  public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN";

  /**
   * Size of storefile/hfile 'blocks'.  Default is {@link #DEFAULT_BLOCKSIZE}.
   * Use smaller block sizes for faster random-access at expense of larger
   * indices (more memory consumption).
   */
  public static final String BLOCKSIZE = "BLOCKSIZE";

  public static final String LENGTH = "LENGTH";
  public static final String TTL = "TTL";
  public static final String BLOOMFILTER = "BLOOMFILTER";
  public static final String FOREVER = "FOREVER";
  public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
  // Pre-encoded byte form of REPLICATION_SCOPE, cached to avoid re-encoding.
  public static final byte[] REPLICATION_SCOPE_BYTES = Bytes.toBytes(REPLICATION_SCOPE);
  public static final String MIN_VERSIONS = "MIN_VERSIONS";
  public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
  public static final String COMPRESS_TAGS = "COMPRESS_TAGS";

  public static final String ENCRYPTION = "ENCRYPTION";
  public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";
112 
  /**
   * Default compression type.
   */
  public static final String DEFAULT_COMPRESSION =
    Compression.Algorithm.NONE.getName();

  /**
   * Default value of the flag that enables data block encoding on disk, as
   * opposed to encoding in cache only. We encode blocks everywhere by default,
   * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
   */
  public static final boolean DEFAULT_ENCODE_ON_DISK = true;

  /** Default data block encoding algorithm. */
  public static final String DEFAULT_DATA_BLOCK_ENCODING =
      DataBlockEncoding.NONE.toString();

  /**
   * Default number of versions of a record to keep.
   */
  public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt(
    "hbase.column.max.version", 1);

  /**
   * Default is not to keep a minimum of versions.
   */
  public static final int DEFAULT_MIN_VERSIONS = 0;

  /*
   * Cached copy of the BLOCKSIZE attribute as an Integer. Lazily computed by
   * getBlocksize() and invalidated (set to null) by setBlocksize().
   */
  private volatile Integer blocksize = null;

  /**
   * Default setting for whether to serve from memory or not.
   */
  public static final boolean DEFAULT_IN_MEMORY = false;

  /**
   * Default setting for preventing deleted from being collected immediately.
   */
  public static final boolean DEFAULT_KEEP_DELETED = false;

  /**
   * Default setting for whether to use a block cache or not.
   */
  public static final boolean DEFAULT_BLOCKCACHE = true;

  /**
   * Default setting for whether to cache data blocks on write if block caching
   * is enabled.
   */
  public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;

  /**
   * Default setting for whether to cache index blocks on write if block
   * caching is enabled.
   */
  public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false;

  /**
   * Default size of blocks in files stored to the filesytem (hfiles).
   */
  public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE;

  /**
   * Default setting for whether or not to use bloomfilters.
   */
  public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();

  /**
   * Default setting for whether to cache bloom filter blocks on write if block
   * caching is enabled.
   */
  public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;

  /**
   * Default time to live of cell contents.
   */
  public static final int DEFAULT_TTL = HConstants.FOREVER;

  /**
   * Default scope.
   */
  public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;

  /**
   * Default setting for whether to evict cached blocks from the blockcache on
   * close.
   */
  public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;

  /**
   * Default compress tags along with any type of DataBlockEncoding.
   */
  public static final boolean DEFAULT_COMPRESS_TAGS = true;

  /**
   * Default setting for whether to prefetch blocks into the blockcache on open.
   */
  public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false;
215 
  // Defaults for the well-known attribute keys, keyed by attribute name.
  private final static Map<String, String> DEFAULT_VALUES
    = new HashMap<String, String>();
  // Attribute keys reserved for HBase's own use. NOTE(review): presumably
  // checked by callers that accept arbitrary per-family metadata; the
  // enforcement is not visible in this chunk.
  private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
    = new HashSet<ImmutableBytesWritable>();
  static {
      DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
      DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
      DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
      DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS));
      DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
      DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
      DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
      DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
      DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
      DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
      DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
      DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
      DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
      DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
      DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
      DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
      // Every key that has a default is reserved; the encryption keys are
      // reserved too even though they carry no default value.
      for (String s : DEFAULT_VALUES.keySet()) {
        RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
      }
      RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(ENCRYPTION)));
      RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(ENCRYPTION_KEY)));
  }
243 
  // Sentinel meaning cachedMaxVersions has not been computed yet.
  private static final int UNINITIALIZED = -1;

  // Column family name; null only for the deprecated no-arg constructor until
  // deserialization fills it in.
  private byte [] name;

  // Column metadata: attribute key/value pairs stored as raw bytes.
  private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
    new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();

  /**
   * A map which holds the configuration specific to the column family.
   * The keys of the map have the same names as config keys and override the defaults with
   * cf-specific settings. Example usage may be for compactions, etc.
   */
  private final Map<String, String> configuration = new HashMap<String, String>();

  /*
   * Cache the max versions rather than calculate it every time.
   */
  private int cachedMaxVersions = UNINITIALIZED;
264 
  /**
   * Default constructor. Must be present for Writable.
   * @deprecated Used by Writables and Writables are going away.
   */
  @Deprecated
  // Make this private rather than remove after deprecation period elapses.  Its needed by pb
  // deserializations.
  public HColumnDescriptor() {
    this.name = null; // populated later during deserialization
  }
275 
  /**
   * Construct a column descriptor specifying only the family name.
   * The other attributes are defaulted.
   *
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   */
  public HColumnDescriptor(final String familyName) {
    this(Bytes.toBytes(familyName));
  }
286 
  /**
   * Construct a column descriptor specifying only the family name.
   * The other attributes are defaulted. A null or empty name is normalized
   * to the empty byte array before delegating.
   *
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   */
  public HColumnDescriptor(final byte [] familyName) {
    this (familyName == null || familyName.length <= 0?
      HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS,
      DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
      DEFAULT_TTL, DEFAULT_BLOOMFILTER);
  }
300 
301   /**
302    * Constructor.
303    * Makes a deep copy of the supplied descriptor.
304    * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
305    * @param desc The descriptor.
306    */
307   public HColumnDescriptor(HColumnDescriptor desc) {
308     super();
309     this.name = desc.name.clone();
310     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
311         desc.values.entrySet()) {
312       this.values.put(e.getKey(), e.getValue());
313     }
314     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
315       this.configuration.put(e.getKey(), e.getValue());
316     }
317     setMaxVersions(desc.getMaxVersions());
318   }
319 
  /**
   * Constructor
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   * @param maxVersions Maximum number of versions to keep
   * @param compression Compression type
   * @param inMemory If true, column data should be kept in an HRegionServer's
   * cache
   * @param blockCacheEnabled If true, MapFile blocks should be cached
   * @param timeToLive Time-to-live of cell contents, in seconds
   * (use HConstants.FOREVER for unlimited TTL)
   * @param bloomFilter Bloom filter type for this column
   *
   * @throws IllegalArgumentException if passed a family name that is made of
   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
   * a <code>:</code>
   * @throws IllegalArgumentException if the number of versions is &lt;= 0
   * @deprecated use {@link #HColumnDescriptor(String)} and setters
   */
  @Deprecated
  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
      final String compression, final boolean inMemory,
      final boolean blockCacheEnabled,
      final int timeToLive, final String bloomFilter) {
    // Delegate, filling in the default blocksize and replication scope.
    this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
      DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE);
  }
347 
  /**
   * Constructor
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   * @param maxVersions Maximum number of versions to keep
   * @param compression Compression type
   * @param inMemory If true, column data should be kept in an HRegionServer's
   * cache
   * @param blockCacheEnabled If true, MapFile blocks should be cached
   * @param blocksize Block size to use when writing out storefiles.  Use
   * smaller block sizes for faster random-access at expense of larger indices
   * (more memory consumption).  Default is usually 64k.
   * @param timeToLive Time-to-live of cell contents, in seconds
   * (use HConstants.FOREVER for unlimited TTL)
   * @param bloomFilter Bloom filter type for this column
   * @param scope The scope tag for this column
   *
   * @throws IllegalArgumentException if passed a family name that is made of
   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
   * a <code>:</code>
   * @throws IllegalArgumentException if the number of versions is &lt;= 0
   * @deprecated use {@link #HColumnDescriptor(String)} and setters
   */
  @Deprecated
  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
      final String compression, final boolean inMemory,
      final boolean blockCacheEnabled, final int blocksize,
      final int timeToLive, final String bloomFilter, final int scope) {
    // Delegate, filling in default min-versions, keep-deleted, and encoding.
    this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED,
        compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING,
        inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter,
        scope);
  }
381 
  /**
   * Constructor
   * @param familyName Column family name. Must be 'printable' -- digit or
   * letter -- and may not contain a <code>:</code>
   * @param minVersions Minimum number of versions to keep
   * @param maxVersions Maximum number of versions to keep
   * @param keepDeletedCells Whether to retain deleted cells until they expire
   *        up to maxVersions versions.
   * @param compression Compression type
   * @param encodeOnDisk whether to use the specified data block encoding
   *        on disk. If false, the encoding will be used in cache only.
   *        (Ignored: see {@link #setEncodeOnDisk(boolean)}.)
   * @param dataBlockEncoding data block encoding
   * @param inMemory If true, column data should be kept in an HRegionServer's
   * cache
   * @param blockCacheEnabled If true, MapFile blocks should be cached
   * @param blocksize Block size to use when writing out storefiles.  Use
   * smaller blocksizes for faster random-access at expense of larger indices
   * (more memory consumption).  Default is usually 64k.
   * @param timeToLive Time-to-live of cell contents, in seconds
   * (use HConstants.FOREVER for unlimited TTL)
   * @param bloomFilter Bloom filter type for this column
   * @param scope The scope tag for this column
   *
   * @throws IllegalArgumentException if passed a family name that is made of
   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
   * a <code>:</code>
   * @throws IllegalArgumentException if the number of versions is &lt;= 0
   * @deprecated use {@link #HColumnDescriptor(String)} and setters
   */
  @Deprecated
  public HColumnDescriptor(final byte[] familyName, final int minVersions,
      final int maxVersions, final boolean keepDeletedCells,
      final String compression, final boolean encodeOnDisk,
      final String dataBlockEncoding, final boolean inMemory,
      final boolean blockCacheEnabled, final int blocksize,
      final int timeToLive, final String bloomFilter, final int scope) {
    isLegalFamilyName(familyName);
    this.name = familyName;

    if (maxVersions <= 0) {
      // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
      // Until there is support, consider 0 or < 0 -- a configuration error.
      throw new IllegalArgumentException("Maximum versions must be positive");
    }

    // minVersions only makes sense together with a finite TTL, and must be
    // strictly below maxVersions.
    if (minVersions > 0) {
      if (timeToLive == HConstants.FOREVER) {
        throw new IllegalArgumentException("Minimum versions requires TTL.");
      }
      if (minVersions >= maxVersions) {
        throw new IllegalArgumentException("Minimum versions must be < "
            + "maximum versions.");
      }
    }

    // Apply attributes through the setters so they are stored uniformly in
    // the values map. Max before min so setMaxVersions' cross-check passes.
    setMaxVersions(maxVersions);
    setMinVersions(minVersions);
    setKeepDeletedCells(keepDeletedCells);
    setInMemory(inMemory);
    setBlockCacheEnabled(blockCacheEnabled);
    setTimeToLive(timeToLive);
    setCompressionType(Compression.Algorithm.
      valueOf(compression.toUpperCase()));
    setDataBlockEncoding(DataBlockEncoding.
        valueOf(dataBlockEncoding.toUpperCase()));
    setBloomFilterType(BloomType.
      valueOf(bloomFilter.toUpperCase()));
    setBlocksize(blocksize);
    setScope(scope);
  }
452 
453   /**
454    * @param b Family name.
455    * @return <code>b</code>
456    * @throws IllegalArgumentException If not null and not a legitimate family
457    * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
458    * <code>b</code> can be null when deserializing).  Cannot start with a '.'
459    * either. Also Family can not be an empty value or equal "recovered.edits".
460    */
461   public static byte [] isLegalFamilyName(final byte [] b) {
462     if (b == null) {
463       return b;
464     }
465     Preconditions.checkArgument(b.length != 0, "Family name can not be empty");
466     if (b[0] == '.') {
467       throw new IllegalArgumentException("Family names cannot start with a " +
468         "period: " + Bytes.toString(b));
469     }
470     for (int i = 0; i < b.length; i++) {
471       if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
472         throw new IllegalArgumentException("Illegal character <" + b[i] +
473           ">. Family names cannot contain control characters or colons: " +
474           Bytes.toString(b));
475       }
476     }
477     byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
478     if (Bytes.equals(recoveredEdit, b)) {
479       throw new IllegalArgumentException("Family name cannot be: " +
480           HConstants.RECOVERED_EDITS_DIR);
481     }
482     return b;
483   }
484 
  /**
   * @return Name of this column family.
   * NOTE(review): returns the internal array, not a copy; mutation by the
   * caller would corrupt this descriptor.
   */
  public byte [] getName() {
    return name;
  }
491 
  /**
   * @return Name of this column family, decoded to a String
   */
  public String getNameAsString() {
    return Bytes.toString(this.name);
  }
498 
499   /**
500    * @param key The key.
501    * @return The value.
502    */
503   public byte[] getValue(byte[] key) {
504     ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key));
505     if (ibw == null)
506       return null;
507     return ibw.get();
508   }
509 
510   /**
511    * @param key The key.
512    * @return The value as a string.
513    */
514   public String getValue(String key) {
515     byte[] value = getValue(Bytes.toBytes(key));
516     if (value == null)
517       return null;
518     return Bytes.toString(value);
519   }
520 
  /**
   * @return All values, as an unmodifiable view over the live map
   */
  public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
    // shallow pointer copy: the view reflects later mutations of this descriptor
    return Collections.unmodifiableMap(values);
  }
528 
529   /**
530    * @param key The key.
531    * @param value The value.
532    * @return this (for chained invocation)
533    */
534   public HColumnDescriptor setValue(byte[] key, byte[] value) {
535     values.put(new ImmutableBytesWritable(key),
536       new ImmutableBytesWritable(value));
537     return this;
538   }
539 
  /**
   * Remove an attribute (key and value) from this descriptor's metadata.
   * @param key Key whose key and value we're to remove from HCD parameters.
   */
  public void remove(final byte [] key) {
    values.remove(new ImmutableBytesWritable(key));
  }
546 
547   /**
548    * @param key The key.
549    * @param value The value.
550    * @return this (for chained invocation)
551    */
552   public HColumnDescriptor setValue(String key, String value) {
553     if (value == null) {
554       remove(Bytes.toBytes(key));
555     } else {
556       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
557     }
558     return this;
559   }
560 
561   /** @return compression type being used for the column family */
562   public Compression.Algorithm getCompression() {
563     String n = getValue(COMPRESSION);
564     if (n == null) {
565       return Compression.Algorithm.NONE;
566     }
567     return Compression.Algorithm.valueOf(n.toUpperCase());
568   }
569 
570   /** @return compression type being used for the column family for major
571       compression */
572   public Compression.Algorithm getCompactionCompression() {
573     String n = getValue(COMPRESSION_COMPACT);
574     if (n == null) {
575       return getCompression();
576     }
577     return Compression.Algorithm.valueOf(n.toUpperCase());
578   }
579 
  /** @return maximum number of versions */
  public int getMaxVersions() {
    // Lazily parse and cache; setMaxVersions keeps the cache in sync.
    if (this.cachedMaxVersions == UNINITIALIZED) {
      // NOTE(review): assumes HConstants.VERSIONS is always present on this
      // descriptor; a missing value would NPE inside parseInt — confirm all
      // construction paths set it.
      String v = getValue(HConstants.VERSIONS);
      this.cachedMaxVersions = Integer.parseInt(v);
    }
    return this.cachedMaxVersions;
  }
588 
589   /**
590    * @param maxVersions maximum number of versions
591    * @return this (for chained invocation)
592    */
593   public HColumnDescriptor setMaxVersions(int maxVersions) {
594     if (maxVersions <= 0) {
595       // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
596       // Until there is support, consider 0 or < 0 -- a configuration error.
597       throw new IllegalArgumentException("Maximum versions must be positive");
598     }    
599     if (maxVersions < this.getMinVersions()) {      
600         throw new IllegalArgumentException("Set MaxVersion to " + maxVersions
601             + " while minVersion is " + this.getMinVersions()
602             + ". Maximum versions must be >= minimum versions ");      
603     }
604     setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
605     cachedMaxVersions = maxVersions;
606     return this;
607   }
608 
  /**
   * @return The storefile/hfile blocksize for this column family;
   * {@link #DEFAULT_BLOCKSIZE} when the attribute is unset.
   */
  public synchronized int getBlocksize() {
    // Lazily parse and cache; setBlocksize clears the cache.
    if (this.blocksize == null) {
      String value = getValue(BLOCKSIZE);
      // Integer.decode also accepts hex ("0x...") and octal forms.
      this.blocksize = (value != null)?
        Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
    }
    return this.blocksize.intValue();
  }
620 
  /**
   * @param s Blocksize to use when writing out storefiles/hfiles on this
   * column family.
   * @return this (for chained invocation)
   */
  public HColumnDescriptor setBlocksize(int s) {
    setValue(BLOCKSIZE, Integer.toString(s));
    // Invalidate the cached Integer; getBlocksize() recomputes it lazily.
    this.blocksize = null;
    return this;
  }
631 
  /**
   * @return Compression type setting. Alias for {@link #getCompression()}.
   */
  public Compression.Algorithm getCompressionType() {
    return getCompression();
  }
638 
  /**
   * Compression types supported in hbase.
   * LZO is not bundled as part of the hbase distribution.
   * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
   * for how to enable it.
   * @param type Compression type setting.
   * @return this (for chained invocation)
   */
  public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
    // NOTE(review): toUpperCase() uses the default locale; Locale.ROOT would
    // be safer (e.g. Turkish dotless-i) — confirm before changing.
    return setValue(COMPRESSION, type.getName().toUpperCase());
  }
650 
  /** @return data block encoding algorithm used on disk.
   * @deprecated on-disk and in-cache encoding are no longer separate;
   * use {@link #getDataBlockEncoding()} instead. */
  @Deprecated
  public DataBlockEncoding getDataBlockEncodingOnDisk() {
    return getDataBlockEncoding();
  }
656 
  /**
   * This method does nothing now. Flag ENCODE_ON_DISK is not used
   * any more. Data blocks have the same encoding in cache as on disk.
   * @param encodeOnDisk ignored
   * @return this (for chained invocation)
   */
  @Deprecated
  public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
    // Intentional no-op kept for binary compatibility.
    return this;
  }
666 
667   /**
668    * @return the data block encoding algorithm used in block cache and
669    *         optionally on disk
670    */
671   public DataBlockEncoding getDataBlockEncoding() {
672     String type = getValue(DATA_BLOCK_ENCODING);
673     if (type == null) {
674       type = DEFAULT_DATA_BLOCK_ENCODING;
675     }
676     return DataBlockEncoding.valueOf(type);
677   }
678 
679   /**
680    * Set data block encoding algorithm used in block cache.
681    * @param type What kind of data block encoding will be used.
682    * @return this (for chained invocation)
683    */
684   public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
685     String name;
686     if (type != null) {
687       name = type.toString();
688     } else {
689       name = DataBlockEncoding.NONE.toString();
690     }
691     return setValue(DATA_BLOCK_ENCODING, name);
692   }
693 
  /**
   * Set whether the tags should be compressed along with DataBlockEncoding. When no
   * DataBlockEncoding is been used, this is having no effect.
   *
   * @param compressTags whether to compress cell tags
   * @return this (for chained invocation)
   */
  public HColumnDescriptor setCompressTags(boolean compressTags) {
    return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
  }
704 
705   /**
706    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
707    *         DataBlockEncoding is been used, this is having no effect.
708    */
709   public boolean shouldCompressTags() {
710     String compressTagsStr = getValue(COMPRESS_TAGS);
711     boolean compressTags = DEFAULT_COMPRESS_TAGS;
712     if (compressTagsStr != null) {
713       compressTags = Boolean.valueOf(compressTagsStr);
714     }
715     return compressTags;
716   }
717 
  /**
   * @return Compression type setting for compactions.
   * Alias for {@link #getCompactionCompression()}.
   */
  public Compression.Algorithm getCompactionCompressionType() {
    return getCompactionCompression();
  }
724 
  /**
   * Compression types supported in hbase.
   * LZO is not bundled as part of the hbase distribution.
   * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
   * for how to enable it.
   * @param type Compression type setting used for compacted files.
   * @return this (for chained invocation)
   */
  public HColumnDescriptor setCompactionCompressionType(
      Compression.Algorithm type) {
    // NOTE(review): toUpperCase() uses the default locale; see setCompressionType.
    return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase());
  }
737 
738   /**
739    * @return True if we are to keep all in use HRegionServer cache.
740    */
741   public boolean isInMemory() {
742     String value = getValue(HConstants.IN_MEMORY);
743     if (value != null)
744       return Boolean.valueOf(value).booleanValue();
745     return DEFAULT_IN_MEMORY;
746   }
747 
  /**
   * @param inMemory True if we are to keep all values in the HRegionServer
   * cache
   * @return this (for chained invocation)
   */
  public HColumnDescriptor setInMemory(boolean inMemory) {
    return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
  }
756 
757   public boolean getKeepDeletedCells() {
758     String value = getValue(KEEP_DELETED_CELLS);
759     if (value != null) {
760       return Boolean.valueOf(value).booleanValue();
761     }
762     return DEFAULT_KEEP_DELETED;
763   }
764 
  /**
   * @param keepDeletedCells True if deleted rows should not be collected
   * immediately.
   * @return this (for chained invocation)
   */
  public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) {
    return setValue(KEEP_DELETED_CELLS, Boolean.toString(keepDeletedCells));
  }
773 
774   /**
775    * @return Time-to-live of cell contents, in seconds.
776    */
777   public int getTimeToLive() {
778     String value = getValue(TTL);
779     return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL;
780   }
781 
  /**
   * @param timeToLive Time-to-live of cell contents, in seconds.
   * @return this (for chained invocation)
   */
  public HColumnDescriptor setTimeToLive(int timeToLive) {
    return setValue(TTL, Integer.toString(timeToLive));
  }
789 
790   /**
791    * @return The minimum number of versions to keep.
792    */
793   public int getMinVersions() {
794     String value = getValue(MIN_VERSIONS);
795     return (value != null)? Integer.valueOf(value).intValue(): 0;
796   }
797 
798   /**
799    * @param minVersions The minimum number of versions to keep.
800    * (used when timeToLive is set)
801    * @return this (for chained invocation)
802    */
803   public HColumnDescriptor setMinVersions(int minVersions) {
804     return setValue(MIN_VERSIONS, Integer.toString(minVersions));
805   }
806 
807   /**
808    * @return True if MapFile blocks should be cached.
809    */
810   public boolean isBlockCacheEnabled() {
811     String value = getValue(BLOCKCACHE);
812     if (value != null)
813       return Boolean.valueOf(value).booleanValue();
814     return DEFAULT_BLOCKCACHE;
815   }
816 
817   /**
818    * @param blockCacheEnabled True if MapFile blocks should be cached.
819    * @return this (for chained invocation)
820    */
821   public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
822     return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
823   }
824 
825   /**
826    * @return bloom filter type used for new StoreFiles in ColumnFamily
827    */
828   public BloomType getBloomFilterType() {
829     String n = getValue(BLOOMFILTER);
830     if (n == null) {
831       n = DEFAULT_BLOOMFILTER;
832     }
833     return BloomType.valueOf(n.toUpperCase());
834   }
835 
836   /**
837    * @param bt bloom filter type
838    * @return this (for chained invocation)
839    */
840   public HColumnDescriptor setBloomFilterType(final BloomType bt) {
841     return setValue(BLOOMFILTER, bt.toString());
842   }
843 
844    /**
845     * @return the scope tag
846     */
847   public int getScope() {
848     byte[] value = getValue(REPLICATION_SCOPE_BYTES);
849     if (value != null) {
850       return Integer.valueOf(Bytes.toString(value));
851     }
852     return DEFAULT_REPLICATION_SCOPE;
853   }
854 
855  /**
856   * @param scope the scope tag
857   * @return this (for chained invocation)
858   */
859   public HColumnDescriptor setScope(int scope) {
860     return setValue(REPLICATION_SCOPE, Integer.toString(scope));
861   }
862 
863   /**
864    * @return true if we should cache data blocks on write
865    */
866   public boolean shouldCacheDataOnWrite() {
867     String value = getValue(CACHE_DATA_ON_WRITE);
868     if (value != null) {
869       return Boolean.valueOf(value).booleanValue();
870     }
871     return DEFAULT_CACHE_DATA_ON_WRITE;
872   }
873 
874   /**
875    * @param value true if we should cache data blocks on write
876    * @return this (for chained invocation)
877    */
878   public HColumnDescriptor setCacheDataOnWrite(boolean value) {
879     return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
880   }
881 
882   /**
883    * @return true if we should cache index blocks on write
884    */
885   public boolean shouldCacheIndexesOnWrite() {
886     String value = getValue(CACHE_INDEX_ON_WRITE);
887     if (value != null) {
888       return Boolean.valueOf(value).booleanValue();
889     }
890     return DEFAULT_CACHE_INDEX_ON_WRITE;
891   }
892 
893   /**
894    * @param value true if we should cache index blocks on write
895    * @return this (for chained invocation)
896    */
897   public HColumnDescriptor setCacheIndexesOnWrite(boolean value) {
898     return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
899   }
900 
901   /**
902    * @return true if we should cache bloomfilter blocks on write
903    */
904   public boolean shouldCacheBloomsOnWrite() {
905     String value = getValue(CACHE_BLOOMS_ON_WRITE);
906     if (value != null) {
907       return Boolean.valueOf(value).booleanValue();
908     }
909     return DEFAULT_CACHE_BLOOMS_ON_WRITE;
910   }
911 
912   /**
913    * @param value true if we should cache bloomfilter blocks on write
914    * @return this (for chained invocation)
915    */
916   public HColumnDescriptor setCacheBloomsOnWrite(boolean value) {
917     return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
918   }
919 
920   /**
921    * @return true if we should evict cached blocks from the blockcache on
922    * close
923    */
924   public boolean shouldEvictBlocksOnClose() {
925     String value = getValue(EVICT_BLOCKS_ON_CLOSE);
926     if (value != null) {
927       return Boolean.valueOf(value).booleanValue();
928     }
929     return DEFAULT_EVICT_BLOCKS_ON_CLOSE;
930   }
931 
932   /**
933    * @param value true if we should evict cached blocks from the blockcache on
934    * close
935    * @return this (for chained invocation)
936    */
937   public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
938     return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
939   }
940 
941   /**
942    * @return true if we should prefetch blocks into the blockcache on open
943    */
944   public boolean shouldPrefetchBlocksOnOpen() {
945     String value = getValue(PREFETCH_BLOCKS_ON_OPEN);
946    if (value != null) {
947       return Boolean.valueOf(value).booleanValue();
948     }
949     return DEFAULT_PREFETCH_BLOCKS_ON_OPEN;
950   }
951 
952   /**
953    * @param value true if we should prefetch blocks into the blockcache on open
954    * @return this (for chained invocation)
955    */
956   public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) {
957     return setValue(PREFETCH_BLOCKS_ON_OPEN, Boolean.toString(value));
958   }
959 
960   /**
961    * @see java.lang.Object#toString()
962    */
963   @Override
964   public String toString() {
965     StringBuilder s = new StringBuilder();
966 
967     s.append('{');
968     s.append(HConstants.NAME);
969     s.append(" => '");
970     s.append(Bytes.toString(name));
971     s.append("'");
972     s.append(getValues(true));
973     s.append('}');
974     return s.toString();
975   }
976 
977   /**
978    * @return Column family descriptor with only the customized attributes.
979    */
980   public String toStringCustomizedValues() {
981     StringBuilder s = new StringBuilder();
982     s.append('{');
983     s.append(HConstants.NAME);
984     s.append(" => '");
985     s.append(Bytes.toString(name));
986     s.append("'");
987     s.append(getValues(false));
988     s.append('}');
989     return s.toString();
990   }
991 
  /**
   * Renders this family's attributes as the tail of a descriptor string.
   * Output begins with ", " so it can be appended directly after the NAME
   * entry by {@link #toString()} / {@link #toStringCustomizedValues()}.
   * Order: reserved attributes, then a METADATA => {...} subset for
   * non-reserved attributes, then a CONFIGURATION => {...} subset.
   * @param printDefaults if false, reserved attributes whose value equals
   *   the known default (case-insensitive) are omitted
   * @return the rendered attribute list
   */
  private StringBuilder getValues(boolean printDefaults) {
    StringBuilder s = new StringBuilder();

    boolean hasConfigKeys = false;

    // print all reserved keys first
    for (ImmutableBytesWritable k : values.keySet()) {
      if (!RESERVED_KEYWORDS.contains(k)) {
        // Remember non-reserved keys for the METADATA pass below.
        hasConfigKeys = true;
        continue;
      }
      String key = Bytes.toString(k.get());
      String value = Bytes.toStringBinary(values.get(k).get());
      if (printDefaults
          || !DEFAULT_VALUES.containsKey(key)
          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
        s.append(", ");
        s.append(key);
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
      }
    }

    // print all non-reserved, advanced config keys as a separate subset
    if (hasConfigKeys) {
      s.append(", ");
      s.append(HConstants.METADATA).append(" => ");
      s.append('{');
      boolean printComma = false;
      for (ImmutableBytesWritable k : values.keySet()) {
        if (RESERVED_KEYWORDS.contains(k)) {
          continue;
        }
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printComma) {
          s.append(", ");
        }
        printComma = true;
        s.append('\'').append(key).append('\'');
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
      }
      s.append('}');
    }

    // finally, the per-family configuration overrides, if any
    if (!configuration.isEmpty()) {
      s.append(", ");
      s.append(HConstants.CONFIGURATION).append(" => ");
      s.append('{');
      boolean printCommaForConfiguration = false;
      for (Map.Entry<String, String> e : configuration.entrySet()) {
        if (printCommaForConfiguration) s.append(", ");
        printCommaForConfiguration = true;
        s.append('\'').append(e.getKey()).append('\'');
        s.append(" => ");
        s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\'');
      }
      s.append("}");
    }
    return s;
  }
1054 
1055   public static Unit getUnit(String key) {
1056     Unit unit;
1057       /* TTL for now, we can add more as we neeed */
1058     if (key.equals(HColumnDescriptor.TTL)) {
1059       unit = Unit.TIME_INTERVAL;
1060     } else {
1061       unit = Unit.NONE;
1062     }
1063     return unit;
1064   }
1065 
  /**
   * @return an unmodifiable view of the default attribute values, keyed by
   *   attribute name
   */
  public static Map<String, String> getDefaultValues() {
    return Collections.unmodifiableMap(DEFAULT_VALUES);
  }
1069 
1070   /**
1071    * @see java.lang.Object#equals(java.lang.Object)
1072    */
1073   @Override
1074   public boolean equals(Object obj) {
1075     if (this == obj) {
1076       return true;
1077     }
1078     if (obj == null) {
1079       return false;
1080     }
1081     if (!(obj instanceof HColumnDescriptor)) {
1082       return false;
1083     }
1084     return compareTo((HColumnDescriptor)obj) == 0;
1085   }
1086 
1087   /**
1088    * @see java.lang.Object#hashCode()
1089    */
1090   @Override
1091   public int hashCode() {
1092     int result = Bytes.hashCode(this.name);
1093     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
1094     result ^= values.hashCode();
1095     result ^= configuration.hashCode();
1096     return result;
1097   }
1098 
1099   /**
1100    * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
1101    */
1102   @Deprecated
1103   public void readFields(DataInput in) throws IOException {
1104     int version = in.readByte();
1105     if (version < 6) {
1106       if (version <= 2) {
1107         Text t = new Text();
1108         t.readFields(in);
1109         this.name = t.getBytes();
1110 //        if(KeyValue.getFamilyDelimiterIndex(this.name, 0, this.name.length)
1111 //            > 0) {
1112 //          this.name = stripColon(this.name);
1113 //        }
1114       } else {
1115         this.name = Bytes.readByteArray(in);
1116       }
1117       this.values.clear();
1118       setMaxVersions(in.readInt());
1119       int ordinal = in.readInt();
1120       setCompressionType(Compression.Algorithm.values()[ordinal]);
1121       setInMemory(in.readBoolean());
1122       setBloomFilterType(in.readBoolean() ? BloomType.ROW : BloomType.NONE);
1123       if (getBloomFilterType() != BloomType.NONE && version < 5) {
1124         // If a bloomFilter is enabled and the column descriptor is less than
1125         // version 5, we need to skip over it to read the rest of the column
1126         // descriptor. There are no BloomFilterDescriptors written to disk for
1127         // column descriptors with a version number >= 5
1128         throw new UnsupportedClassVersionError(this.getClass().getName() +
1129             " does not support backward compatibility with versions older " +
1130             "than version 5");
1131       }
1132       if (version > 1) {
1133         setBlockCacheEnabled(in.readBoolean());
1134       }
1135       if (version > 2) {
1136        setTimeToLive(in.readInt());
1137       }
1138     } else {
1139       // version 6+
1140       this.name = Bytes.readByteArray(in);
1141       this.values.clear();
1142       int numValues = in.readInt();
1143       for (int i = 0; i < numValues; i++) {
1144         ImmutableBytesWritable key = new ImmutableBytesWritable();
1145         ImmutableBytesWritable value = new ImmutableBytesWritable();
1146         key.readFields(in);
1147         value.readFields(in);
1148 
1149         // in version 8, the BloomFilter setting changed from bool to enum
1150         if (version < 8 && Bytes.toString(key.get()).equals(BLOOMFILTER)) {
1151           value.set(Bytes.toBytes(
1152               Boolean.getBoolean(Bytes.toString(value.get()))
1153                 ? BloomType.ROW.toString()
1154                 : BloomType.NONE.toString()));
1155         }
1156 
1157         values.put(key, value);
1158       }
1159       if (version == 6) {
1160         // Convert old values.
1161         setValue(COMPRESSION, Compression.Algorithm.NONE.getName());
1162       }
1163       String value = getValue(HConstants.VERSIONS);
1164       this.cachedMaxVersions = (value != null)?
1165           Integer.valueOf(value).intValue(): DEFAULT_VERSIONS;
1166       if (version > 10) {
1167         configuration.clear();
1168         int numConfigs = in.readInt();
1169         for (int i = 0; i < numConfigs; i++) {
1170           ImmutableBytesWritable key = new ImmutableBytesWritable();
1171           ImmutableBytesWritable val = new ImmutableBytesWritable();
1172           key.readFields(in);
1173           val.readFields(in);
1174           configuration.put(
1175             Bytes.toString(key.get(), key.getOffset(), key.getLength()),
1176             Bytes.toString(val.get(), val.getOffset(), val.getLength()));
1177         }
1178       }
1179     }
1180   }
1181 
1182   /**
1183    * @deprecated Writables are going away.  Use {@link #toByteArray()} instead.
1184    */
1185   @Deprecated
1186   public void write(DataOutput out) throws IOException {
1187     out.writeByte(COLUMN_DESCRIPTOR_VERSION);
1188     Bytes.writeByteArray(out, this.name);
1189     out.writeInt(values.size());
1190     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1191         values.entrySet()) {
1192       e.getKey().write(out);
1193       e.getValue().write(out);
1194     }
1195     out.writeInt(configuration.size());
1196     for (Map.Entry<String, String> e : configuration.entrySet()) {
1197       new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
1198       new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
1199     }
1200   }
1201 
1202   // Comparable
1203 
1204   public int compareTo(HColumnDescriptor o) {
1205     int result = Bytes.compareTo(this.name, o.getName());
1206     if (result == 0) {
1207       // punt on comparison for ordering, just calculate difference
1208       result = this.values.hashCode() - o.values.hashCode();
1209       if (result < 0)
1210         result = -1;
1211       else if (result > 0)
1212         result = 1;
1213     }
1214     if (result == 0) {
1215       result = this.configuration.hashCode() - o.configuration.hashCode();
1216       if (result < 0)
1217         result = -1;
1218       else if (result > 0)
1219         result = 1;
1220     }
1221     return result;
1222   }
1223 
1224   /**
1225    * @return This instance serialized with pb with pb magic prefix
1226    * @see #parseFrom(byte[])
1227    */
1228   public byte [] toByteArray() {
1229     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1230   }
1231 
1232   /**
1233    * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
1234    * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
1235    * @throws DeserializationException
1236    * @see #toByteArray()
1237    */
1238   public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
1239     if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
1240     int pblen = ProtobufUtil.lengthOfPBMagic();
1241     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1242     ColumnFamilySchema cfs = null;
1243     try {
1244       cfs = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1245     } catch (InvalidProtocolBufferException e) {
1246       throw new DeserializationException(e);
1247     }
1248     return convert(cfs);
1249   }
1250 
1251   /**
1252    * @param cfs
1253    * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
1254    */
1255   public static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
1256     // Use the empty constructor so we preserve the initial values set on construction for things
1257     // like maxVersion.  Otherwise, we pick up wrong values on deserialization which makes for
1258     // unrelated-looking test failures that are hard to trace back to here.
1259     HColumnDescriptor hcd = new HColumnDescriptor();
1260     hcd.name = cfs.getName().toByteArray();
1261     for (BytesBytesPair a: cfs.getAttributesList()) {
1262       hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1263     }
1264     for (NameStringPair a: cfs.getConfigurationList()) {
1265       hcd.setConfiguration(a.getName(), a.getValue());
1266     }
1267     return hcd;
1268   }
1269 
1270   /**
1271    * @return Convert this instance to a the pb column family type
1272    */
1273   public ColumnFamilySchema convert() {
1274     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
1275     builder.setName(ByteStringer.wrap(getName()));
1276     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
1277       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1278       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1279       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1280       builder.addAttributes(aBuilder.build());
1281     }
1282     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1283       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1284       aBuilder.setName(e.getKey());
1285       aBuilder.setValue(e.getValue());
1286       builder.addConfiguration(aBuilder.build());
1287     }
1288     return builder.build();
1289   }
1290 
1291   /**
1292    * Getter for accessing the configuration value by key.
1293    */
1294   public String getConfigurationValue(String key) {
1295     return configuration.get(key);
1296   }
1297 
1298   /**
1299    * Getter for fetching an unmodifiable {@link #configuration} map.
1300    */
1301   public Map<String, String> getConfiguration() {
1302     // shallow pointer copy
1303     return Collections.unmodifiableMap(configuration);
1304   }
1305 
1306   /**
1307    * Setter for storing a configuration setting in {@link #configuration} map.
1308    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1309    * @param value String value. If null, removes the configuration.
1310    */
1311   public void setConfiguration(String key, String value) {
1312     if (value == null) {
1313       removeConfiguration(key);
1314     } else {
1315       configuration.put(key, value);
1316     }
1317   }
1318 
1319   /**
1320    * Remove a configuration setting represented by the key from the {@link #configuration} map.
1321    */
1322   public void removeConfiguration(final String key) {
1323     configuration.remove(key);
1324   }
1325 
1326   /**
1327    * Return the encryption algorithm in use by this family
1328    */
1329   public String getEncryptionType() {
1330     return getValue(ENCRYPTION);
1331   }
1332 
1333   /**
1334    * Set the encryption algorithm for use with this family
1335    * @param algorithm
1336    */
1337   public HColumnDescriptor setEncryptionType(String algorithm) {
1338     setValue(ENCRYPTION, algorithm);
1339     return this;
1340   }
1341 
  /**
   * Return the raw crypto key attribute for the family, or null if not set.
   * NOTE(review): bytes are returned exactly as stored by
   * {@link #setEncryptionKey(byte[])}; whether they are key-wrapped is up to
   * the caller that stored them — confirm against the writer.
   */
  public byte[] getEncryptionKey() {
    return getValue(Bytes.toBytes(ENCRYPTION_KEY));
  }
1346 
  /**
   * Set the raw crypto key attribute for the family.
   * @param keyBytes key material, stored as-is under the ENCRYPTION_KEY attribute
   * @return this (for chained invocation)
   */
  public HColumnDescriptor setEncryptionKey(byte[] keyBytes) {
    setValue(Bytes.toBytes(ENCRYPTION_KEY), keyBytes);
    return this;
  }
1352 }