1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.DataInput;
22  import java.io.DataOutput;
23  import java.io.IOException;
24  import java.util.ArrayList;
25  import java.util.Collection;
26  import java.util.Collections;
27  import java.util.HashMap;
28  import java.util.HashSet;
29  import java.util.Iterator;
30  import java.util.List;
31  import java.util.Map;
32  import java.util.Set;
33  import java.util.TreeMap;
34  import java.util.TreeSet;
35  import java.util.regex.Matcher;
36  
37  import org.apache.hadoop.hbase.util.ByteStringer;
38  import org.apache.commons.logging.Log;
39  import org.apache.commons.logging.LogFactory;
40  import org.apache.hadoop.classification.InterfaceAudience;
41  import org.apache.hadoop.classification.InterfaceStability;
42  import org.apache.hadoop.fs.Path;
43  import org.apache.hadoop.hbase.client.Durability;
44  import org.apache.hadoop.hbase.exceptions.DeserializationException;
45  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
46  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
47  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
48  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
49  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
50  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
51  import org.apache.hadoop.hbase.regionserver.BloomType;
52  import org.apache.hadoop.hbase.security.User;
53  import org.apache.hadoop.hbase.util.Bytes;
54  import org.apache.hadoop.hbase.util.Writables;
55  import org.apache.hadoop.io.WritableComparable;
56  
57  import com.google.protobuf.InvalidProtocolBufferException;
58  
59  /**
60   * HTableDescriptor contains the details about an HBase table, such as the descriptors of
61   * all the column families, whether the table is a catalog table (<code> -ROOT- </code> or
62   * <code> hbase:meta </code>), whether the table is read only, the maximum size of the
63   * memstore, when a region split should occur, the coprocessors associated with it, etc.
64   */
65  @InterfaceAudience.Public
66  @InterfaceStability.Evolving
67  public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
68  
69    private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
70  
71    /**
72     *  Changes prior to version 3 were not recorded here.
73     *  Version 3 adds metadata as a map where keys and values are byte[].
74     *  Version 4 adds indexes
75     *  Version 5 removed transactional pollution -- e.g. indexes
76     *  Version 6 changed metadata to BytesBytesPair in PB
77     *  Version 7 adds table-level configuration
78     */
79    private static final byte TABLE_DESCRIPTOR_VERSION = 7;
80  
81    private TableName name = null;
82  
83    /**
84     * A map which holds the metadata information of the table. This metadata
85     * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
86    * MAX_FILESIZE, READONLY, MEMSTORE_FLUSHSIZE, etc.
87     */
88    private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
89      new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
90  
91    /**
92     * A map which holds the configuration specific to the table.
93     * The keys of the map have the same names as config keys and override the defaults with
94     * table-specific settings. Example usage may be for compactions, etc.
95     */
96    private final Map<String, String> configuration = new HashMap<String, String>();
97  
98    public static final String SPLIT_POLICY = "SPLIT_POLICY";
99  
100   /**
101    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
102    * attribute which denotes the maximum size of the store file after which
103    * a region split occurs
104    *
105    * @see #getMaxFileSize()
106    */
107   public static final String MAX_FILESIZE = "MAX_FILESIZE";
108   private static final ImmutableBytesWritable MAX_FILESIZE_KEY =
109     new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE));
110 
111   public static final String OWNER = "OWNER";
112   public static final ImmutableBytesWritable OWNER_KEY =
113     new ImmutableBytesWritable(Bytes.toBytes(OWNER));
114 
115   /**
116    * <em>INTERNAL</em> Used by rest interface to access this metadata
117    * attribute which denotes if the table is Read Only
118    *
119    * @see #isReadOnly()
120    */
121   public static final String READONLY = "READONLY";
122   private static final ImmutableBytesWritable READONLY_KEY =
123     new ImmutableBytesWritable(Bytes.toBytes(READONLY));
124 
125   /**
126    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
127    * attribute which denotes whether compaction is enabled on the table
128    *
129    * @see #isCompactionEnabled()
130    */
131   public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
132   private static final ImmutableBytesWritable COMPACTION_ENABLED_KEY =
133     new ImmutableBytesWritable(Bytes.toBytes(COMPACTION_ENABLED));
134 
135   /**
136    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
137    * attribute which represents the maximum size of the memstore after which
138    * its contents are flushed onto the disk
139    *
140    * @see #getMemStoreFlushSize()
141    */
142   public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
143   private static final ImmutableBytesWritable MEMSTORE_FLUSHSIZE_KEY =
144     new ImmutableBytesWritable(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
145 
146   /**
147    * <em>INTERNAL</em> Used by rest interface to access this metadata
148    * attribute which denotes if the table is a -ROOT- region or not
149    *
150    * @see #isRootRegion()
151    */
152   public static final String IS_ROOT = "IS_ROOT";
153   private static final ImmutableBytesWritable IS_ROOT_KEY =
154     new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT));
155 
156   /**
157    * <em>INTERNAL</em> Used by rest interface to access this metadata
158    * attribute which denotes if it is a catalog table, either
159    * <code> hbase:meta </code> or <code> -ROOT- </code>
160    *
161    * @see #isMetaRegion()
162    */
163   public static final String IS_META = "IS_META";
164   private static final ImmutableBytesWritable IS_META_KEY =
165     new ImmutableBytesWritable(Bytes.toBytes(IS_META));
166 
167   /**
168    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
169    * attribute which denotes if the deferred log flush option is enabled.
170    * @deprecated Use {@link #DURABILITY} instead.
171    */
172   @Deprecated
173   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
174   @Deprecated
175   private static final ImmutableBytesWritable DEFERRED_LOG_FLUSH_KEY =
176     new ImmutableBytesWritable(Bytes.toBytes(DEFERRED_LOG_FLUSH));
177 
178   /**
179    * <em>INTERNAL</em> {@link Durability} setting for the table.
180    */
181   public static final String DURABILITY = "DURABILITY";
182   private static final ImmutableBytesWritable DURABILITY_KEY =
183       new ImmutableBytesWritable(Bytes.toBytes("DURABILITY"));
184 
185   /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
186   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
187 
188   /*
189    *  The below are ugly but better than creating them each time till we
190    *  replace booleans being saved as Strings with plain booleans.  Need a
191    *  migration script to do this.  TODO.
192    */
193   private static final ImmutableBytesWritable FALSE =
194     new ImmutableBytesWritable(Bytes.toBytes(Boolean.FALSE.toString()));
195 
196   private static final ImmutableBytesWritable TRUE =
197     new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));
198 
199   private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
200 
201   /**
202    * Constant denoting the default value of the READONLY flag, which is false (tables are modifiable by default)
203    */
204   public static final boolean DEFAULT_READONLY = false;
205 
206   /**
207    * Constant denoting whether compaction is enabled on a table by default, which is true
208    */
209   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
210 
211   /**
212    * Constant that denotes the default maximum size of the memstore, after which
213    * its contents are flushed to the store files
214    */
215   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
216 
217   private final static Map<String, String> DEFAULT_VALUES
218     = new HashMap<String, String>();
219   private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
220     = new HashSet<ImmutableBytesWritable>();
221   static {
222     DEFAULT_VALUES.put(MAX_FILESIZE,
223         String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
224     DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
225     DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
226         String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
227     DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
228         String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
229     DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
230     for (String s : DEFAULT_VALUES.keySet()) {
231       RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
232     }
233     RESERVED_KEYWORDS.add(IS_ROOT_KEY);
234     RESERVED_KEYWORDS.add(IS_META_KEY);
235   }
236 
237   /**
238    * Cache of whether this is a meta table or not.
239    */
240   private volatile Boolean meta = null;
241   /**
242    * Cache of whether this is a root table or not.
243    */
244   private volatile Boolean root = null;
245 
246   /**
247    * Durability setting for the table
248    */
249   private Durability durability = null;
250 
251   /**
252    * Maps column family name to the respective HColumnDescriptors
253    */
254   private final Map<byte [], HColumnDescriptor> families =
255     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
256 
257   /**
258    * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
259    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
260    */
261   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
262     setName(name);
263     for(HColumnDescriptor descriptor : families) {
264       this.families.put(descriptor.getName(), descriptor);
265     }
266   }
267 
268   /**
269    * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
270    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
271    */
272   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
273       Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
274     setName(name);
275     for(HColumnDescriptor descriptor : families) {
276       this.families.put(descriptor.getName(), descriptor);
277     }
278     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
279         values.entrySet()) {
280       setValue(entry.getKey(), entry.getValue());
281     }
282   }
283 
284   /**
285    * Default constructor which constructs an empty object.
286    * For deserializing an HTableDescriptor instance only.
287    * @deprecated Used by Writables and Writables are going away.
288    */
289   @Deprecated
290   public HTableDescriptor() {
291     super();
292   }
293 
294   /**
295    * Construct a table descriptor specifying a TableName object
296    * @param name Table name.
297    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
298    */
299   public HTableDescriptor(final TableName name) {
300     super();
301     setName(name);
302   }
303 
304   /**
305    * Construct a table descriptor specifying a byte array table name
306    * @param name Table name.
307    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
308    */
309   @Deprecated
310   public HTableDescriptor(final byte[] name) {
311     this(TableName.valueOf(name));
312   }
313 
314   /**
315    * Construct a table descriptor specifying a String table name
316    * @param name Table name.
317    * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
318    */
319   @Deprecated
320   public HTableDescriptor(final String name) {
321     this(TableName.valueOf(name));
322   }
323 
324   /**
325    * Construct a table descriptor by cloning the descriptor passed as a parameter.
326    * <p>
327    * Makes a deep copy of the supplied descriptor.
328    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
329    * @param desc The descriptor.
330    */
331   public HTableDescriptor(final HTableDescriptor desc) {
332     super();
333     setName(desc.name);
334     setMetaFlags(this.name);
335     for (HColumnDescriptor c: desc.families.values()) {
336       this.families.put(c.getName(), new HColumnDescriptor(c));
337     }
338     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
339         desc.values.entrySet()) {
340       setValue(e.getKey(), e.getValue());
341     }
342     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
343       this.configuration.put(e.getKey(), e.getValue());
344     }
345   }
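
  // A minimal usage sketch (the table name "example" and family name "cf" are
  // hypothetical), assuming the finished descriptor is later handed to the
  // Admin API to create the table:
  //
  //   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
  //   htd.addFamily(new HColumnDescriptor("cf"));
  //   // admin.createTable(htd);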
346 
347   /*
348    * Set meta flags on this table.
349    * IS_ROOT_KEY is set if it's a -ROOT- table
350    * IS_META_KEY is set if it's either a -ROOT- or a hbase:meta table
351    * Called by constructors.
352    * @param name
353    */
354   private void setMetaFlags(final TableName name) {
355     setMetaRegion(isRootRegion() ||
356         name.equals(TableName.META_TABLE_NAME));
357   }
358 
359   /**
360    * Check if the descriptor represents a <code> -ROOT- </code> region.
361    *
362    * @return true if this is a <code> -ROOT- </code> region
363    */
364   public boolean isRootRegion() {
365     if (this.root == null) {
366       this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
367     }
368     return this.root.booleanValue();
369   }
370 
371   /**
372    * <em> INTERNAL </em> Used to denote if the current table represents
373    * <code> -ROOT- </code> region. This is used internally by the
374    * HTableDescriptor constructors
375    *
376    * @param isRoot true if this is the <code> -ROOT- </code> region
377    */
378   protected void setRootRegion(boolean isRoot) {
379     // TODO: Make the value a boolean rather than String of boolean.
380     setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
381   }
382 
383   /**
384    * Checks if this table is <code> hbase:meta </code>
385    * region.
386    *
387    * @return true if this table is <code> hbase:meta </code>
388    * region
389    */
390   public boolean isMetaRegion() {
391     if (this.meta == null) {
392       this.meta = calculateIsMetaRegion();
393     }
394     return this.meta.booleanValue();
395   }
396 
397   private synchronized Boolean calculateIsMetaRegion() {
398     byte [] value = getValue(IS_META_KEY);
399     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
400   }
401 
402   private boolean isSomething(final ImmutableBytesWritable key,
403       final boolean valueIfNull) {
404     byte [] value = getValue(key);
405     if (value != null) {
406       return Boolean.valueOf(Bytes.toString(value));
407     }
408     return valueIfNull;
409   }
410 
411   /**
412    * <em> INTERNAL </em> Used to denote if the current table represents
413    * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
414    * internally by the HTableDescriptor constructors
415    *
416    * @param isMeta true if it's either <code> -ROOT- </code> or
417    * <code> hbase:meta </code> region
418    */
419   protected void setMetaRegion(boolean isMeta) {
420     setValue(IS_META_KEY, isMeta? TRUE: FALSE);
421   }
422 
423   /**
424    * Checks if the table is a <code>hbase:meta</code> table
425    *
426    * @return true if table is <code> hbase:meta </code> region.
427    */
428   public boolean isMetaTable() {
429     return isMetaRegion() && !isRootRegion();
430   }
431 
432   /**
433    * Getter for accessing the metadata associated with the key
434    *
435    * @param key The key.
436    * @return The value.
437    * @see #values
438    */
439   public byte[] getValue(byte[] key) {
440     return getValue(new ImmutableBytesWritable(key));
441   }
442 
443   private byte[] getValue(final ImmutableBytesWritable key) {
444     ImmutableBytesWritable ibw = values.get(key);
445     if (ibw == null)
446       return null;
447     return ibw.get();
448   }
449 
450   /**
451    * Getter for accessing the metadata associated with the key
452    *
453    * @param key The key.
454    * @return The value.
455    * @see #values
456    */
457   public String getValue(String key) {
458     byte[] value = getValue(Bytes.toBytes(key));
459     if (value == null)
460       return null;
461     return Bytes.toString(value);
462   }
463 
464   /**
465    * Getter for fetching an unmodifiable {@link #values} map.
466    *
467    * @return unmodifiable map {@link #values}.
468    * @see #values
469    */
470   public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
471     // shallow pointer copy
472     return Collections.unmodifiableMap(values);
473   }
474 
475   /**
476    * Setter for storing metadata as a (key, value) pair in {@link #values} map
477    *
478    * @param key The key.
479    * @param value The value.
480    * @see #values
481    */
482   public void setValue(byte[] key, byte[] value) {
483     setValue(new ImmutableBytesWritable(key), new ImmutableBytesWritable(value));
484   }
485 
486   /*
487    * @param key The key.
488    * @param value The value.
489    */
490   private void setValue(final ImmutableBytesWritable key,
491       final String value) {
492     setValue(key, new ImmutableBytesWritable(Bytes.toBytes(value)));
493   }
494 
495   /**
496    * Setter for storing metadata as a (key, value) pair in {@link #values} map
497    *
498    * @param key The key.
499    * @param value The value.
500    */
501   public void setValue(final ImmutableBytesWritable key,
502       final ImmutableBytesWritable value) {
503     if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
504       boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
505       LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
506           "use " + DURABILITY + " instead");
507       setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
508       return;
509     }
510     values.put(key, value);
511   }
512 
513   /**
514    * Setter for storing metadata as a (key, value) pair in {@link #values} map
515    *
516    * @param key The key.
517    * @param value The value.
518    * @see #values
519    */
520   public void setValue(String key, String value) {
521     if (value == null) {
522       remove(key);
523     } else {
524       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
525     }
526   }
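
  // Sketch of storing and reading an arbitrary metadata attribute through the
  // String variants above (the attribute name "MY_ATTRIBUTE" is only illustrative):
  //
  //   htd.setValue("MY_ATTRIBUTE", "some value");
  //   String v = htd.getValue("MY_ATTRIBUTE");   // "some value"
  //   htd.setValue("MY_ATTRIBUTE", null);        // a null value removes the key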
527 
528   /**
529    * Remove metadata represented by the key from the {@link #values} map
530    *
531    * @param key Key of the entry we're to remove from the HTableDescriptor
532    * parameters.
533    */
534   public void remove(final String key) {
535     remove(new ImmutableBytesWritable(Bytes.toBytes(key)));
536   }
537 
538   /**
539    * Remove metadata represented by the key from the {@link #values} map
540    *
541    * @param key Key of the entry we're to remove from the HTableDescriptor
542    * parameters.
543    */
544   public void remove(ImmutableBytesWritable key) {
545     values.remove(key);
546   }
547 
548   /**
549    * Remove metadata represented by the key from the {@link #values} map
550    *
551    * @param key Key of the entry we're to remove from the HTableDescriptor
552    * parameters.
553    */
554   public void remove(final byte [] key) {
555     remove(new ImmutableBytesWritable(key));
556   }
557 
558   /**
559    * Check if the readOnly flag of the table is set. If the readOnly flag is
560    * set then the contents of the table can only be read but not modified.
561    *
562    * @return true if all columns in the table should be read only
563    */
564   public boolean isReadOnly() {
565     return isSomething(READONLY_KEY, DEFAULT_READONLY);
566   }
567 
568   /**
569    * Setting the table as read only sets all the columns in the table as read
570    * only. By default all tables are modifiable, but if the readOnly flag is
571    * set to true then the contents of the table can only be read but not modified.
572    *
573    * @param readOnly True if all of the columns in the table should be read
574    * only.
575    */
576   public void setReadOnly(final boolean readOnly) {
577     setValue(READONLY_KEY, readOnly? TRUE: FALSE);
578   }
579 
580   /**
581    * Check if the compaction enable flag of the table is true. If the flag is
582    * false then no minor/major compactions will actually be performed.
583    *
584    * @return true if table compaction enabled
585    */
586   public boolean isCompactionEnabled() {
587     return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
588   }
589 
590   /**
591    * Set the table compaction enable flag.
592    *
593    * @param isEnable True to enable compaction.
594    */
595   public void setCompactionEnabled(final boolean isEnable) {
596     setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
597   }
598 
599   /**
600    * Check if deferred (asynchronous) flushing of log edits is enabled on the table.
601    *
602    * @return true if async log flush is enabled on the table
603    *
604    * @see #setDeferredLogFlush(boolean)
605    */
606   @Deprecated
607   public synchronized boolean isDeferredLogFlush() {
608     return getDurability() == Durability.ASYNC_WAL;
609   }
610 
611   /**
612    * This is used to defer the syncing of log edits to the file system. Every time
613    * an edit is sent to the server it is first sync'd to the file system by the
614    * log writer. This sync is an expensive operation and can therefore be deferred so
615    * that the edits are kept in memory until the background async writer-sync-notifier
616    * threads perform the sync, rather than being explicitly flushed for every edit.
617    * <p>
618    * NOTE: This option might result in data loss if the region server crashes
619    * before these pending edits in memory are flushed onto the filesystem.
620    * </p>
621    *
622    * @param isAsyncLogFlush true to defer syncing of log edits (sets {@link Durability#ASYNC_WAL})
623    */
624   @Deprecated
625   public synchronized void setDeferredLogFlush(final boolean isAsyncLogFlush) {
626     this.setDurability(isAsyncLogFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
627   }
628 
629   /**
630    * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
631    * @param durability enum value
632    */
633   public void setDurability(Durability durability) {
634     this.durability = durability;
635     setValue(DURABILITY_KEY, durability.name());
636   }
637 
638   /**
639    * Returns the durability setting for the table.
640    * @return durability setting for the table.
641    */
642   public Durability getDurability() {
643     if (this.durability == null) {
644       byte[] durabilityValue = getValue(DURABILITY_KEY);
645       if (durabilityValue == null) {
646         this.durability = DEFAULT_DURABLITY;
647       } else {
648         try {
649           this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
650         } catch (IllegalArgumentException ex) {
651           LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
652             + " is not known. Durability:" + Bytes.toString(durabilityValue));
653           this.durability = DEFAULT_DURABLITY;
654         }
655       }
656     }
657     return this.durability;
658   }
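
  // Sketch of how the durability setting is typically used: set it on the
  // descriptor before the table is created or altered, e.g.
  //
  //   htd.setDurability(Durability.ASYNC_WAL);   // defer WAL sync for this table
  //   Durability d = htd.getDurability();        // ASYNC_WAL
  //
  // If no DURABILITY value has been stored, getDurability() falls back to
  // Durability.USE_DEFAULT, i.e. the cluster-wide default behavior.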
659 
660   /**
661    * Get the name of the table
662    *
663    * @return TableName
664    */
665   public TableName getTableName() {
666     return name;
667   }
668 
669   /**
670    * Get the name of the table as a byte array.
671    *
672    * @return name of table
673    */
674   public byte[] getName() {
675     return name.getName();
676   }
677 
678   /**
679    * Get the name of the table as a String
680    *
681    * @return name of table as a String
682    */
683   public String getNameAsString() {
684     return name.getNameAsString();
685   }
686 
687   /**
688    * This gets the class associated with the region split policy which
689    * determines when a region split should occur.  The class used by
690    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
691    *
692    * @return the class name of the region split policy for this table.
693    * If this returns null, the default split policy is used.
694    */
695    public String getRegionSplitPolicyClassName() {
696     return getValue(SPLIT_POLICY);
697   }
698 
699   /**
700    * Set the name of the table.
701    *
702    * @param name name of table
703    */
704   @Deprecated
705   public void setName(byte[] name) {
706     setName(TableName.valueOf(name));
707   }
708 
709   @Deprecated
710   public void setName(TableName name) {
711     this.name = name;
712     setMetaFlags(this.name);
713   }
714 
715   /**
716    * Returns the maximum size up to which a region can grow, after which a region
717    * split is triggered. The region size is represented by the size of the biggest
718    * store file in that region.
719    *
720    * @return max hregion size for table, -1 if not set.
721    *
722    * @see #setMaxFileSize(long)
723    */
724   public long getMaxFileSize() {
725     byte [] value = getValue(MAX_FILESIZE_KEY);
726     if (value != null) {
727       return Long.parseLong(Bytes.toString(value));
728     }
729     return -1;
730   }
731 
732   /**
733    * Sets the maximum size up to which a region can grow, after which a region
734    * split is triggered. The region size is represented by the size of the biggest
735    * store file in that region, i.e. if the biggest store file grows beyond
736    * maxFileSize, then a region split is triggered. If not set, this defaults to
737    * the value of {@link HConstants#DEFAULT_MAX_FILE_SIZE}.
738    * <p>
739    * This is not an absolute value and might vary. If a single row exceeds
740    * maxFileSize then the store file size will be greater than maxFileSize since
741    * a single row cannot be split across multiple regions.
742    * </p>
743    *
744    * @param maxFileSize The maximum file size that a store file can grow to
745    * before a split is triggered.
746    */
747   public void setMaxFileSize(long maxFileSize) {
748     setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
749   }
750 
751   /**
752    * Returns the size of the memstore after which a flush to filesystem is triggered.
753    *
754    * @return memory cache flush size for each hregion, -1 if not set.
755    *
756    * @see #setMemStoreFlushSize(long)
757    */
758   public long getMemStoreFlushSize() {
759     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
760     if (value != null) {
761       return Long.parseLong(Bytes.toString(value));
762     }
763     return -1;
764   }
765 
766   /**
767    * Sets the maximum size of the memstore after which its contents are flushed
768    * to the filesystem. This defaults to {@link #DEFAULT_MEMSTORE_FLUSH_SIZE} (128 MB).
769    *
770    * @param memstoreFlushSize memory cache flush size for each hregion
771    */
772   public void setMemStoreFlushSize(long memstoreFlushSize) {
773     setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
774   }
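
  // Sketch of tuning the split and flush thresholds on a descriptor; the sizes
  // below are purely illustrative, not recommendations:
  //
  //   htd.setMaxFileSize(10L * 1024 * 1024 * 1024);   // split regions near 10 GB
  //   htd.setMemStoreFlushSize(256L * 1024 * 1024);   // flush memstores at 256 MB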
775 
776   /**
777    * Adds a column family.
778    * @param family HColumnDescriptor of family to add.
779    */
780   public void addFamily(final HColumnDescriptor family) {
781     if (family.getName() == null || family.getName().length <= 0) {
782       throw new NullPointerException("Family name cannot be null or empty");
783     }
784     this.families.put(family.getName(), family);
785   }
786 
787   /**
788    * Checks to see if this table contains the given column family
789    * @param familyName Family name or column name.
790    * @return true if the table contains the specified family name
791    */
792   public boolean hasFamily(final byte [] familyName) {
793     return families.containsKey(familyName);
794   }
795 
796   /**
797    * @return Name of this table and then a map of all of the column family
798    * descriptors.
799    * @see #getNameAsString()
800    */
801   @Override
802   public String toString() {
803     StringBuilder s = new StringBuilder();
804     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
805     s.append(getValues(true));
806     for (HColumnDescriptor f : families.values()) {
807       s.append(", ").append(f);
808     }
809     return s.toString();
810   }
811 
812   /**
813    * @return Name of this table and then a map of all of the column family
814    * descriptors (with only the non-default column family attributes)
815    */
816   public String toStringCustomizedValues() {
817     StringBuilder s = new StringBuilder();
818     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
819     s.append(getValues(false));
820     for(HColumnDescriptor hcd : families.values()) {
821       s.append(", ").append(hcd.toStringCustomizedValues());
822     }
823     return s.toString();
824   }
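
  // Rough sketch of the string form produced above for a table "example" with a
  // single family "cf" (attribute lists abridged; the exact contents depend on
  // what has been set on the descriptor):
  //
  //   'example', {TABLE_ATTRIBUTES => {MAX_FILESIZE => '10737418240'}}, {NAME => 'cf', ...}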
825 
826   private StringBuilder getValues(boolean printDefaults) {
827     StringBuilder s = new StringBuilder();
828 
829     // step 1: set partitioning and pruning
830     Set<ImmutableBytesWritable> reservedKeys = new TreeSet<ImmutableBytesWritable>();
831     Set<ImmutableBytesWritable> userKeys = new TreeSet<ImmutableBytesWritable>();
832     for (ImmutableBytesWritable k : values.keySet()) {
833       if (k == null || k.get() == null) continue;
834       String key = Bytes.toString(k.get());
835       // in this section, print out reserved keywords + coprocessor info
836       if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
837         userKeys.add(k);
838         continue;
839       }
840       // only print out IS_ROOT/IS_META if true
841       String value = Bytes.toString(values.get(k).get());
842       if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
843         if (Boolean.valueOf(value) == false) continue;
844       }
845       // see if a reserved key is a default value. may not want to print it out
846       if (printDefaults
847           || !DEFAULT_VALUES.containsKey(key)
848           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
849         reservedKeys.add(k);
850       }
851     }
852 
853     // early exit optimization
854     boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
855     if (!hasAttributes && configuration.isEmpty()) return s;
856 
857     s.append(", {");
858     // step 2: printing attributes
859     if (hasAttributes) {
860       s.append("TABLE_ATTRIBUTES => {");
861 
862       // print all reserved keys first
863       boolean printCommaForAttr = false;
864       for (ImmutableBytesWritable k : reservedKeys) {
865         String key = Bytes.toString(k.get());
866         String value = Bytes.toStringBinary(values.get(k).get());
867         if (printCommaForAttr) s.append(", ");
868         printCommaForAttr = true;
869         s.append(key);
870         s.append(" => ");
871         s.append('\'').append(value).append('\'');
872       }
873 
874       if (!userKeys.isEmpty()) {
875         // print all non-reserved, advanced config keys as a separate subset
876         if (printCommaForAttr) s.append(", ");
877         printCommaForAttr = true;
878         s.append(HConstants.METADATA).append(" => ");
879         s.append("{");
880         boolean printCommaForCfg = false;
881         for (ImmutableBytesWritable k : userKeys) {
882           String key = Bytes.toString(k.get());
883           String value = Bytes.toStringBinary(values.get(k).get());
884           if (printCommaForCfg) s.append(", ");
885           printCommaForCfg = true;
886           s.append('\'').append(key).append('\'');
887           s.append(" => ");
888           s.append('\'').append(value).append('\'');
889         }
890         s.append("}");
891       }
892     }
893 
894     // step 3: printing all configuration:
895     if (!configuration.isEmpty()) {
896       if (hasAttributes) {
897         s.append(", ");
898       }
899       s.append(HConstants.CONFIGURATION).append(" => ");
900       s.append('{');
901       boolean printCommaForConfig = false;
902       for (Map.Entry<String, String> e : configuration.entrySet()) {
903         if (printCommaForConfig) s.append(", ");
904         printCommaForConfig = true;
905         s.append('\'').append(e.getKey()).append('\'');
906         s.append(" => ");
907         s.append('\'').append(e.getValue()).append('\'');
908       }
909       s.append("}");
910     }
911     s.append("}"); // end METHOD
912     return s;
913   }
914 
915   /**
916    * Compare the contents of the descriptor with another one passed as a parameter.
917    * Checks if the obj passed is an instance of HTableDescriptor; if so, the
918    * contents of the descriptors are compared.
919    *
920    * @return true if the contents of the two descriptors exactly match
921    *
922    * @see java.lang.Object#equals(java.lang.Object)
923    */
924   @Override
925   public boolean equals(Object obj) {
926     if (this == obj) {
927       return true;
928     }
929     if (obj == null) {
930       return false;
931     }
932     if (!(obj instanceof HTableDescriptor)) {
933       return false;
934     }
935     return compareTo((HTableDescriptor)obj) == 0;
936   }
937 
938   /**
939    * @see java.lang.Object#hashCode()
940    */
941   @Override
942   public int hashCode() {
943     int result = this.name.hashCode();
944     result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode();
945     if (this.families != null && this.families.size() > 0) {
946       for (HColumnDescriptor e: this.families.values()) {
947         result ^= e.hashCode();
948       }
949     }
950     result ^= values.hashCode();
951     result ^= configuration.hashCode();
952     return result;
953   }
954 
955   /**
956    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
957    * and is used for de-serialization of the HTableDescriptor over RPC
958    * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
959    */
960   @Deprecated
961   @Override
962   public void readFields(DataInput in) throws IOException {
963     int version = in.readInt();
964     if (version < 3)
965       throw new IOException("versions < 3 are not supported (and never existed!?)");
966     // version 3+
967     name = TableName.valueOf(Bytes.readByteArray(in));
968     setRootRegion(in.readBoolean());
969     setMetaRegion(in.readBoolean());
970     values.clear();
971     configuration.clear();
972     int numVals = in.readInt();
973     for (int i = 0; i < numVals; i++) {
974       ImmutableBytesWritable key = new ImmutableBytesWritable();
975       ImmutableBytesWritable value = new ImmutableBytesWritable();
976       key.readFields(in);
977       value.readFields(in);
978       setValue(key, value);
979     }
980     families.clear();
981     int numFamilies = in.readInt();
982     for (int i = 0; i < numFamilies; i++) {
983       HColumnDescriptor c = new HColumnDescriptor();
984       c.readFields(in);
985       families.put(c.getName(), c);
986     }
987     if (version >= 7) {
988       int numConfigs = in.readInt();
989       for (int i = 0; i < numConfigs; i++) {
990         ImmutableBytesWritable key = new ImmutableBytesWritable();
991         ImmutableBytesWritable value = new ImmutableBytesWritable();
992         key.readFields(in);
993         value.readFields(in);
994         configuration.put(
995           Bytes.toString(key.get(), key.getOffset(), key.getLength()),
996           Bytes.toString(value.get(), value.getOffset(), value.getLength()));
997       }
998     }
999   }
1000 
1001   /**
1002    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
1003    * and is used for serialization of the HTableDescriptor over RPC
1004    * @deprecated Writables are going away.
1005    * Use {@link com.google.protobuf.MessageLite#toByteArray} instead.
1006    */
1007   @Deprecated
1008   @Override
1009   public void write(DataOutput out) throws IOException {
1010     out.writeInt(TABLE_DESCRIPTOR_VERSION);
1011     Bytes.writeByteArray(out, name.toBytes());
1012     out.writeBoolean(isRootRegion());
1013     out.writeBoolean(isMetaRegion());
1014     out.writeInt(values.size());
1015     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1016         values.entrySet()) {
1017       e.getKey().write(out);
1018       e.getValue().write(out);
1019     }
1020     out.writeInt(families.size());
1021     for(Iterator<HColumnDescriptor> it = families.values().iterator();
1022         it.hasNext(); ) {
1023       HColumnDescriptor family = it.next();
1024       family.write(out);
1025     }
1026     out.writeInt(configuration.size());
1027     for (Map.Entry<String, String> e : configuration.entrySet()) {
1028       new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
1029       new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
1030     }
1031   }
1032 
1033   // Comparable
1034 
1035   /**
1036    * Compares the descriptor with another descriptor which is passed as a parameter.
1037    * This compares the content of the two descriptors and not the reference.
1038    *
1039    * @return 0 if the contents of the descriptors exactly match,
1040    *         a non-zero value if there is a mismatch in the contents
1041    */
1042   @Override
1043   public int compareTo(final HTableDescriptor other) {
1044     int result = this.name.compareTo(other.name);
1045     if (result == 0) {
1046       result = families.size() - other.families.size();
1047     }
1048     if (result == 0 && families.size() != other.families.size()) {
1049       result = Integer.valueOf(families.size()).compareTo(
1050           Integer.valueOf(other.families.size()));
1051     }
1052     if (result == 0) {
1053       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
1054           it2 = other.families.values().iterator(); it.hasNext(); ) {
1055         result = it.next().compareTo(it2.next());
1056         if (result != 0) {
1057           break;
1058         }
1059       }
1060     }
1061     if (result == 0) {
1062       // punt on comparison for ordering, just calculate difference
1063       result = this.values.hashCode() - other.values.hashCode();
1064       if (result < 0)
1065         result = -1;
1066       else if (result > 0)
1067         result = 1;
1068     }
1069     if (result == 0) {
1070       result = this.configuration.hashCode() - other.configuration.hashCode();
1071       if (result < 0)
1072         result = -1;
1073       else if (result > 0)
1074         result = 1;
1075     }
1076     return result;
1077   }
1078 
1079   /**
1080    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
1081    * of all the column families of the table.
1082    *
1083    * @return Immutable collection of {@link HColumnDescriptor} of all the
1084    * column families.
1085    */
1086   public Collection<HColumnDescriptor> getFamilies() {
1087     return Collections.unmodifiableCollection(this.families.values());
1088   }
1089 
1090   /**
1091    * Returns all the column family names of the current table. The map of
1092    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1093    * This returns all the keys of the family map which represents the column
1094    * family names of the table.
1095    *
1096    * @return Immutable sorted set of the keys of the families.
1097    */
1098   public Set<byte[]> getFamiliesKeys() {
1099     return Collections.unmodifiableSet(this.families.keySet());
1100   }
1101 
1102   /**
1103    * Returns an array of all the {@link HColumnDescriptor} of the column families
1104    * of the table.
1105    *
1106    * @return Array of all the HColumnDescriptors of the current table
1107    *
1108    * @see #getFamilies()
1109    */
1110   public HColumnDescriptor[] getColumnFamilies() {
1111     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1112     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1113   }
1114 
1115 
1116   /**
1117    * Returns the HColumnDescriptor for a specific column family with name as
1118    * specified by the parameter column.
1119    *
1120    * @param column Column family name
1121    * @return Column descriptor for the passed family name, or null if the
1122    * family is not present.
1123    */
1124   public HColumnDescriptor getFamily(final byte [] column) {
1125     return this.families.get(column);
1126   }
1127 
1128 
1129   /**
1130    * Removes the HColumnDescriptor with name specified by the parameter column
1131    * from the table descriptor
1132    *
1133    * @param column Name of the column family to be removed.
1134    * @return Column descriptor of the removed family, or null if the family
1135    * was not present.
1136    */
1137   public HColumnDescriptor removeFamily(final byte [] column) {
1138     return this.families.remove(column);
1139   }
1140 
1141 
1142   /**
1143    * Add a table coprocessor to this table. The coprocessor
1144    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1145    * or Endpoint.
1146    * It won't check if the class can be loaded or not.
1147    * Whether a coprocessor is loadable or not will be determined when
1148    * a region is opened.
1149    * @param className Full class name.
1150    * @throws IOException
1151    */
1152   public void addCoprocessor(String className) throws IOException {
1153     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1154   }
1155 
1156 
1157   /**
1158    * Add a table coprocessor to this table. The coprocessor
1159    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1160    * or Endpoint.
1161    * It won't check if the class can be loaded or not.
1162    * Whether a coprocessor is loadable or not will be determined when
1163    * a region is opened.
1164    * @param jarFilePath Path of the jar file. If it's null, the class will be
1165    * loaded from the default classloader.
1166    * @param className Full class name.
1167    * @param priority Priority
1168    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1169    * @throws IOException
1170    */
1171   public void addCoprocessor(String className, Path jarFilePath,
1172                              int priority, final Map<String, String> kvs)
1173   throws IOException {
1174     if (hasCoprocessor(className)) {
1175       throw new IOException("Coprocessor " + className + " already exists.");
1176     }
1177     // validate parameter kvs
1178     StringBuilder kvString = new StringBuilder();
1179     if (kvs != null) {
1180       for (Map.Entry<String, String> e: kvs.entrySet()) {
1181         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1182           throw new IOException("Illegal parameter key = " + e.getKey());
1183         }
1184         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1185           throw new IOException("Illegal parameter (" + e.getKey() +
1186               ") value = " + e.getValue());
1187         }
1188         if (kvString.length() != 0) {
1189           kvString.append(',');
1190         }
1191         kvString.append(e.getKey());
1192         kvString.append('=');
1193         kvString.append(e.getValue());
1194       }
1195     }
1196 
1197     // generate a coprocessor key
1198     int maxCoprocessorNumber = 0;
1199     Matcher keyMatcher;
1200     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1201         this.values.entrySet()) {
1202       keyMatcher =
1203           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1204               Bytes.toString(e.getKey().get()));
1205       if (!keyMatcher.matches()) {
1206         continue;
1207       }
1208       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)),
1209           maxCoprocessorNumber);
1210     }
1211     maxCoprocessorNumber++;
1212 
1213     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1214     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1215         "|" + className + "|" + Integer.toString(priority) + "|" +
1216         kvString.toString();
1217     setValue(key, value);
1218   }
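
  // Sketch of attaching a coprocessor and the attribute it produces. The class
  // name, jar path and parameter below are hypothetical; the value stored under
  // a "coprocessor$<N>" key follows the "path|class|priority|key=value,..."
  // layout built above:
  //
  //   htd.addCoprocessor("com.example.MyObserver", new Path("hdfs:///cp/my.jar"),
  //       Coprocessor.PRIORITY_USER, Collections.singletonMap("arg", "1"));
  //   // values now contains, roughly:
  //   //   coprocessor$1 => "hdfs:///cp/my.jar|com.example.MyObserver|<priority>|arg=1"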
1219 
1220 
1221   /**
1222    * Check if the table has an attached co-processor represented by the name className
1223    *
1224    * @param className - Class name of the co-processor
1225    * @return true if the table has a co-processor className
1226    */
1227   public boolean hasCoprocessor(String className) {
1228     Matcher keyMatcher;
1229     Matcher valueMatcher;
1230     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1231         this.values.entrySet()) {
1232       keyMatcher =
1233           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1234               Bytes.toString(e.getKey().get()));
1235       if (!keyMatcher.matches()) {
1236         continue;
1237       }
1238       valueMatcher =
1239         HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(
1240             Bytes.toString(e.getValue().get()));
1241       if (!valueMatcher.matches()) {
1242         continue;
1243       }
1244       // get className and compare
1245       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1246       if (clazz.equals(className.trim())) {
1247         return true;
1248       }
1249     }
1250     return false;
1251   }
1252 
1253   /**
1254    * Return the list of attached co-processors represented by their class names
1255    *
1256    * @return The list of co-processor class names
1257    */
1258   public List<String> getCoprocessors() {
1259     List<String> result = new ArrayList<String>();
1260     Matcher keyMatcher;
1261     Matcher valueMatcher;
1262     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values.entrySet()) {
1263       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1264       if (!keyMatcher.matches()) {
1265         continue;
1266       }
1267       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1268           .toString(e.getValue().get()));
1269       if (!valueMatcher.matches()) {
1270         continue;
1271       }
1272       result.add(valueMatcher.group(2).trim()); // classname is the 2nd field
1273     }
1274     return result;
1275   }
1276 
1277   /**
1278    * Remove a coprocessor from those set on the table
1279    * @param className Class name of the co-processor
1280    */
1281   public void removeCoprocessor(String className) {
1282     ImmutableBytesWritable match = null;
1283     Matcher keyMatcher;
1284     Matcher valueMatcher;
1285     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values
1286         .entrySet()) {
1287       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1288           .getKey().get()));
1289       if (!keyMatcher.matches()) {
1290         continue;
1291       }
1292       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1293           .toString(e.getValue().get()));
1294       if (!valueMatcher.matches()) {
1295         continue;
1296       }
1297       // get className and compare
1298       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1299       // remove the CP if it is present
1300       if (clazz.equals(className.trim())) {
1301         match = e.getKey();
1302         break;
1303       }
1304     }
1305     // if we found a match, remove it
1306     if (match != null)
1307       remove(match);
1308   }
1309 
1310   /**
1311    * Returns the {@link Path} object representing the table directory under
1312    * path rootdir
1313    *
1314    * Deprecated; use FSUtils.getTableDir() instead.
1315    *
1316    * @param rootdir qualified path of HBase root directory
1317    * @param tableName name of table
1318    * @return {@link Path} for table
1319    */
1320   @Deprecated
1321   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1322     // This is bad: we had to mirror code from FSUtils.getTableDir since
1323     // there is no module dependency between hbase-client and hbase-server.
1324     TableName name = TableName.valueOf(tableName);
1325     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1326               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1327   }
1328 
1329   /** Table descriptor for <code>hbase:meta</code> catalog table */
1330   public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
1331       TableName.META_TABLE_NAME,
1332       new HColumnDescriptor[] {
1333           new HColumnDescriptor(HConstants.CATALOG_FAMILY)
1334               // Ten is an arbitrary number.  Keep versions to help debugging.
1335               .setMaxVersions(10)
1336               .setInMemory(true)
1337               .setBlocksize(8 * 1024)
1338               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1339               // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
1340               .setBloomFilterType(BloomType.NONE)
1341       });
1342 
1343   static {
1344     try {
1345       META_TABLEDESC.addCoprocessor(
1346           "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
1347           null, Coprocessor.PRIORITY_SYSTEM, null);
1348     } catch (IOException ex) {
1349       //LOG.warn("exception in loading coprocessor for the hbase:meta table");
1350       throw new RuntimeException(ex);
1351     }
1352   }
1353 
1354   public final static String NAMESPACE_FAMILY_INFO = "info";
1355   public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
1356   public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
1357 
1358   /** Table descriptor for namespace table */
1359   public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
1360       TableName.NAMESPACE_TABLE_NAME,
1361       new HColumnDescriptor[] {
1362           new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
1363               // Ten is an arbitrary number.  Keep versions to help debugging.
1364               .setMaxVersions(10)
1365               .setInMemory(true)
1366               .setBlocksize(8 * 1024)
1367               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1368       });
1369 
1370   @Deprecated
1371   public void setOwner(User owner) {
1372     setOwnerString(owner != null ? owner.getShortName() : null);
1373   }
1374 
1375   // used by admin.rb:alter(table_name,*args) to update owner.
1376   @Deprecated
1377   public void setOwnerString(String ownerString) {
1378     if (ownerString != null) {
1379       setValue(OWNER_KEY, ownerString);
1380     } else {
1381       remove(OWNER_KEY);
1382     }
1383   }
1384 
1385   @Deprecated
1386   public String getOwnerString() {
1387     if (getValue(OWNER_KEY) != null) {
1388       return Bytes.toString(getValue(OWNER_KEY));
1389     }
1390     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1391     // hbase:meta and -ROOT- should return system user as owner, not null (see
1392     // MasterFileSystem.java:bootstrap()).
1393     return null;
1394   }
1395 
1396   /**
1397    * @return This instance serialized with pb, with the pb magic prefix
1398    * @see #parseFrom(byte[])
1399    */
1400   public byte [] toByteArray() {
1401     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1402   }
1403 
1404   /**
1405    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1406    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1407    * @throws DeserializationException
1408    * @throws IOException
1409    * @see #toByteArray()
1410    */
1411   public static HTableDescriptor parseFrom(final byte [] bytes)
1412   throws DeserializationException, IOException {
1413     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1414       return (HTableDescriptor)Writables.getWritable(bytes, new HTableDescriptor());
1415     }
1416     int pblen = ProtobufUtil.lengthOfPBMagic();
1417     TableSchema.Builder builder = TableSchema.newBuilder();
1418     TableSchema ts;
1419     try {
1420       ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
1421     } catch (InvalidProtocolBufferException e) {
1422       throw new DeserializationException(e);
1423     }
1424     return convert(ts);
1425   }
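
  // Sketch of the protobuf round trip provided by toByteArray()/parseFrom()
  // (parseFrom throws DeserializationException/IOException), useful for example
  // when persisting a descriptor:
  //
  //   byte[] bytes = htd.toByteArray();                    // pb-magic-prefixed bytes
  //   HTableDescriptor copy = HTableDescriptor.parseFrom(bytes);
  //   assert copy.equals(htd);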
1426 
1427   /**
1428    * @return The current {@link HTableDescriptor} converted into a pb TableSchema instance.
1429    */
1430   public TableSchema convert() {
1431     TableSchema.Builder builder = TableSchema.newBuilder();
1432     builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
1433     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
1434       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1435       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1436       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1437       builder.addAttributes(aBuilder.build());
1438     }
1439     for (HColumnDescriptor hcd: getColumnFamilies()) {
1440       builder.addColumnFamilies(hcd.convert());
1441     }
1442     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1443       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1444       aBuilder.setName(e.getKey());
1445       aBuilder.setValue(e.getValue());
1446       builder.addConfiguration(aBuilder.build());
1447     }
1448     return builder.build();
1449   }
1450 
1451   /**
1452    * @param ts A pb TableSchema instance.
1453    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
1454    */
1455   public static HTableDescriptor convert(final TableSchema ts) {
1456     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
1457     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
1458     int index = 0;
1459     for (ColumnFamilySchema cfs: list) {
1460       hcds[index++] = HColumnDescriptor.convert(cfs);
1461     }
1462     HTableDescriptor htd = new HTableDescriptor(
1463         ProtobufUtil.toTableName(ts.getTableName()),
1464         hcds);
1465     for (BytesBytesPair a: ts.getAttributesList()) {
1466       htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1467     }
1468     for (NameStringPair a: ts.getConfigurationList()) {
1469       htd.setConfiguration(a.getName(), a.getValue());
1470     }
1471     return htd;
1472   }
1473 
1474   /**
1475    * Getter for accessing the configuration value by key
1476    */
1477   public String getConfigurationValue(String key) {
1478     return configuration.get(key);
1479   }
1480 
1481   /**
1482    * Getter for fetching an unmodifiable {@link #configuration} map.
1483    */
1484   public Map<String, String> getConfiguration() {
1485     // shallow pointer copy
1486     return Collections.unmodifiableMap(configuration);
1487   }
1488 
1489   /**
1490    * Setter for storing a configuration setting in {@link #configuration} map.
1491    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1492    * @param value String value. If null, removes the setting.
1493    */
1494   public void setConfiguration(String key, String value) {
1495     if (value == null) {
1496       removeConfiguration(key);
1497     } else {
1498       configuration.put(key, value);
1499     }
1500   }
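
  // Sketch of a table-scoped configuration override; the key mirrors an XML
  // config key (the property name below is only an example):
  //
  //   htd.setConfiguration("hbase.hstore.compaction.min", "5");
  //   String v = htd.getConfigurationValue("hbase.hstore.compaction.min");  // "5"
  //   htd.setConfiguration("hbase.hstore.compaction.min", null);            // removes the override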
1501 
1502   /**
1503    * Remove a config setting represented by the key from the {@link #configuration} map
1504    */
1505   public void removeConfiguration(final String key) {
1506     configuration.remove(key);
1507   }
1508 }