/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.NavigableMap;
import java.util.Set;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.zookeeper.KeeperException;
/**
 * This class abstracts a bunch of operations the HMaster needs to interact with
 * the underlying file system, including splitting log files, checking file
 * system status, etc.
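 * <p>
 * A minimal usage sketch, for illustration only (in practice HMaster does this
 * wiring during startup, given a Server {@code master} and MasterServices
 * {@code services}):
 * <pre>
 *   MasterFileSystem mfs = new MasterFileSystem(master, services, false);
 *   Set&lt;ServerName&gt; failed = mfs.getFailedServersFromLogFolders();
 *   mfs.splitLog(failed); // split the non-meta WALs of the dead servers
 * </pre>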
 */
@InterfaceAudience.Private
public class MasterFileSystem {
  private static final Log LOG = LogFactory.getLog(MasterFileSystem.class.getName());
  // HBase configuration
  Configuration conf;
  // master status
  Server master;
  // metrics for master
  private final MetricsMasterFileSystem metricsMasterFilesystem = new MetricsMasterFileSystem();
  // Persisted unique cluster ID
  private ClusterId clusterId;
  // Keep around for convenience.
  private final FileSystem fs;
  // Is the filesystem ok?
  private volatile boolean fsOk = true;
  // The Path to the old logs dir
  private final Path oldLogDir;
  // root hbase directory on the FS
  private final Path rootdir;
  // hbase temp directory used for table construction and deletion
  private final Path tempdir;
  // create the split log lock
  final Lock splitLogLock = new ReentrantLock();
  final boolean distributedLogReplay;
  final SplitLogManager splitLogManager;
  private final MasterServices services;

  final static PathFilter META_FILTER = new PathFilter() {
    @Override
    public boolean accept(Path p) {
      return HLogUtil.isMetaFile(p);
    }
  };

  final static PathFilter NON_META_FILTER = new PathFilter() {
    @Override
    public boolean accept(Path p) {
      return !HLogUtil.isMetaFile(p);
    }
  };

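  /**
   * Create the master's filesystem view rooted at hbase.rootdir and lay out the
   * initial directory structure (root dir checks, temp dir, archived-logs dir).
   * @param master server whose configuration, ZooKeeper and ServerName are used
   * @param services master services, used when coordinating WAL splitting
   * @param masterRecovery whether the master is being recovered (not used here)
   * @throws IOException if the filesystem layout cannot be created or verified
   */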
  public MasterFileSystem(Server master, MasterServices services, boolean masterRecovery)
  throws IOException {
    this.conf = master.getConfiguration();
    this.master = master;
    this.services = services;
    // Set filesystem to be that of this.rootdir else we get complaints about
    // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
    // default localfs.  Presumption is that rootdir is fully-qualified before
    // we get to here with appropriate fs scheme.
    this.rootdir = FSUtils.getRootDir(conf);
    this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
    // Cover both bases, the old way of setting default fs and the new.
    // We're supposed to run on 0.20 and 0.21 anyway.
    this.fs = this.rootdir.getFileSystem(conf);
    FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
    // make sure the fs has the same conf
    fs.setConf(conf);
    // set up the archived logs path
    this.oldLogDir = createInitialFileSystemLayout();
    HFileSystem.addLocationsOrderInterceptor(conf);
    try {
      this.splitLogManager = new SplitLogManager(master.getZooKeeper(), master.getConfiguration(),
          master, services, master.getServerName());
    } catch (KeeperException e) {
      throw new IOException(e);
    }
    this.distributedLogReplay = (this.splitLogManager.getRecoveryMode() == RecoveryMode.LOG_REPLAY);
  }

  /**
   * Create initial layout in filesystem.
   * <ol>
   * <li>Check if the meta region exists and is readable; if not, create it.
   * Create the hbase.version file and the hbase:meta directory if they do not
   * already exist.</li>
   * <li>Create a log archive directory for RS to put archived logs</li>
   * </ol>
   * Idempotent.
   */
  private Path createInitialFileSystemLayout() throws IOException {
    // check if the root directory exists
    checkRootDir(this.rootdir, conf, this.fs);

    // check if temp directory exists and clean it
    checkTempDir(this.tempdir, conf, this.fs);

    Path oldLogDir = new Path(this.rootdir, HConstants.HREGION_OLDLOGDIR_NAME);

    // Make sure the region servers can archive their old logs
    if (!this.fs.exists(oldLogDir)) {
      this.fs.mkdirs(oldLogDir);
    }

    return oldLogDir;
  }

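  /**
   * @return the filesystem on which the HBase root directory lives
   */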
  public FileSystem getFileSystem() {
    return this.fs;
  }

  /**
   * Get the directory where old logs go
   * @return the dir
   */
  public Path getOldLogDir() {
    return this.oldLogDir;
  }

  /**
   * Checks to see if the file system is still accessible. If not, aborts the
   * master and marks the filesystem as unavailable.
   * @return false if the file system is not available
   */
  public boolean checkFileSystem() {
    if (this.fsOk) {
      try {
        FSUtils.checkFileSystemAvailable(this.fs);
        FSUtils.checkDfsSafeMode(this.conf);
      } catch (IOException e) {
        master.abort("Shutting down HBase cluster: file system not available", e);
        this.fsOk = false;
      }
    }
    return this.fsOk;
  }

  /**
   * @return HBase root dir.
   */
  public Path getRootDir() {
    return this.rootdir;
  }

  /**
   * @return HBase temp dir.
   */
  public Path getTempDir() {
    return this.tempdir;
  }

  /**
   * @return The unique identifier generated for this cluster
   */
  public ClusterId getClusterId() {
    return clusterId;
  }

  /**
   * Inspect the log directory to find dead servers which need recovery work
   * @return A set of ServerNames which aren't running but still have WAL files left in file system
   */
  Set<ServerName> getFailedServersFromLogFolders() {
    boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors",
      HLog.SPLIT_SKIP_ERRORS_DEFAULT);

    Set<ServerName> serverNames = new HashSet<ServerName>();
    Path logsDirPath = new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME);

    do {
      if (master.isStopped()) {
        LOG.warn("Master stopped while trying to get failed servers.");
        break;
      }
      try {
        if (!this.fs.exists(logsDirPath)) return serverNames;
        FileStatus[] logFolders = FSUtils.listStatus(this.fs, logsDirPath, null);
        // Get online servers after getting log folders to avoid log folder deletion of newly
        // checked in region servers. See HBASE-5916.
        Set<ServerName> onlineServers = ((HMaster) master).getServerManager().getOnlineServers()
            .keySet();

        if (logFolders == null || logFolders.length == 0) {
          LOG.debug("No log files to split, proceeding...");
          return serverNames;
        }
        for (FileStatus status : logFolders) {
          String sn = status.getPath().getName();
          // truncate splitting suffix if present (for ServerName parsing)
          if (sn.endsWith(HLog.SPLITTING_EXT)) {
            sn = sn.substring(0, sn.length() - HLog.SPLITTING_EXT.length());
          }
          ServerName serverName = ServerName.parseServerName(sn);
          if (!onlineServers.contains(serverName)) {
            LOG.info("Log folder " + status.getPath() + " doesn't belong "
                + "to a known region server, splitting");
            serverNames.add(serverName);
          } else {
            LOG.info("Log folder " + status.getPath() + " belongs to an existing region server");
          }
        }
        retrySplitting = false;
      } catch (IOException ioe) {
        LOG.warn("Failed getting failed servers to be recovered.", ioe);
        if (!checkFileSystem()) {
          LOG.warn("Bad Filesystem, exiting");
          Runtime.getRuntime().halt(1);
        }
        try {
          if (retrySplitting) {
            Thread.sleep(conf.getInt("hbase.hlog.split.failure.retry.interval", 30 * 1000));
          }
        } catch (InterruptedException e) {
          LOG.warn("Interrupted, aborting since cannot return w/o splitting");
          Thread.currentThread().interrupt();
          retrySplitting = false;
          Runtime.getRuntime().halt(1);
        }
      }
    } while (retrySplitting);

    return serverNames;
  }

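  /**
   * Specialized method to handle the splitting of a single server's non-meta HLogs.
   * @param serverName the server whose log directory is to be split
   * @throws IOException if the split fails
   */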
  public void splitLog(final ServerName serverName) throws IOException {
    Set<ServerName> serverNames = new HashSet<ServerName>();
    serverNames.add(serverName);
    splitLog(serverNames);
  }

  /**
   * Specialized method to handle the splitting of the meta HLog of a single server.
   * @param serverName the server whose meta HLog is to be split
   * @throws IOException if the split fails
   */
  public void splitMetaLog(final ServerName serverName) throws IOException {
    Set<ServerName> serverNames = new HashSet<ServerName>();
    serverNames.add(serverName);
    splitMetaLog(serverNames);
  }

  /**
   * Specialized method to handle the splitting of the meta HLogs of a set of servers.
   * @param serverNames the servers whose meta HLogs are to be split
   * @throws IOException if the split fails
   */
  public void splitMetaLog(final Set<ServerName> serverNames) throws IOException {
    splitLog(serverNames, META_FILTER);
  }

  private List<Path> getLogDirs(final Set<ServerName> serverNames) throws IOException {
    List<Path> logDirs = new ArrayList<Path>();
    boolean needReleaseLock = false;
    if (!this.services.isInitialized()) {
      // during master initialization, we could have multiple places splitting the same wal
      this.splitLogLock.lock();
      needReleaseLock = true;
    }
    try {
      for (ServerName serverName : serverNames) {
        Path logDir = new Path(this.rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
        Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
        // Rename the directory so a rogue RS doesn't create more HLogs
        if (fs.exists(logDir)) {
          if (!this.fs.rename(logDir, splitDir)) {
            throw new IOException("Failed fs.rename for log split: " + logDir);
          }
          logDir = splitDir;
          LOG.debug("Renamed log directory: " + splitDir);
        } else if (!fs.exists(splitDir)) {
          LOG.info("Log dir for server " + serverName + " does not exist");
          continue;
        }
        logDirs.add(splitDir);
      }
    } finally {
      if (needReleaseLock) {
        this.splitLogLock.unlock();
      }
    }
    return logDirs;
  }

  /**
   * Mark regions in recovering state when distributedLogReplay is set to true.
   * @param serverNames servers whose WALs are to be replayed in order to recover
   *          the changes they contain
   * @throws IOException if marking the regions fails
   */
  public void prepareLogReplay(Set<ServerName> serverNames) throws IOException {
    if (!this.distributedLogReplay) {
      return;
    }
    // mark regions in recovering state
    for (ServerName serverName : serverNames) {
      NavigableMap<HRegionInfo, Result> regions = this.getServerUserRegions(serverName);
      if (regions == null) {
        continue;
      }
      try {
        this.splitLogManager.markRegionsRecoveringInZK(serverName, regions.keySet());
      } catch (KeeperException e) {
        throw new IOException(e);
      }
    }
  }

  /**
   * Mark regions in recovering state when distributedLogReplay is set to true.
   * @param serverName failed region server whose WALs are to be replayed
   * @param regions set of regions to be recovered
   * @throws IOException if marking the regions fails
   */
  public void prepareLogReplay(ServerName serverName, Set<HRegionInfo> regions) throws IOException {
    if (!this.distributedLogReplay) {
      return;
    }
    // mark regions in recovering state
    if (regions == null || regions.isEmpty()) {
      return;
    }
    try {
      this.splitLogManager.markRegionsRecoveringInZK(serverName, regions);
    } catch (KeeperException e) {
      throw new IOException(e);
    }
  }

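  /**
   * Split the non-meta HLogs of the given set of servers.
   * @param serverNames the servers whose non-meta HLogs are to be split
   * @throws IOException if the split fails
   */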
  public void splitLog(final Set<ServerName> serverNames) throws IOException {
    splitLog(serverNames, NON_META_FILTER);
  }

  /**
   * Wrapper function on {@link SplitLogManager#removeStaleRecoveringRegionsFromZK(Set)}
   * @param failedServers servers whose stale recovering regions should be removed
   * @throws KeeperException if the ZooKeeper cleanup fails
   */
  void removeStaleRecoveringRegionsFromZK(final Set<ServerName> failedServers)
      throws KeeperException {
    this.splitLogManager.removeStaleRecoveringRegionsFromZK(failedServers);
  }

  /**
   * This method is the base split method that splits HLog files matching a filter. Callers should
   * pass the appropriate filter for meta and non-meta HLogs.
   * @param serverNames servers whose HLogs are to be split
   * @param filter selects which HLog files to split
   * @throws IOException if the split fails
   */
  public void splitLog(final Set<ServerName> serverNames, PathFilter filter) throws IOException {
    long splitTime = 0, splitLogSize = 0;
    List<Path> logDirs = getLogDirs(serverNames);

    splitLogManager.handleDeadWorkers(serverNames);
    splitTime = EnvironmentEdgeManager.currentTimeMillis();
    splitLogSize = splitLogManager.splitLogDistributed(serverNames, logDirs, filter);
    splitTime = EnvironmentEdgeManager.currentTimeMillis() - splitTime;

    if (this.metricsMasterFilesystem != null) {
      if (filter == META_FILTER) {
        this.metricsMasterFilesystem.addMetaWALSplit(splitTime, splitLogSize);
      } else {
        this.metricsMasterFilesystem.addSplit(splitTime, splitLogSize);
      }
    }
  }

  /**
   * Get the rootdir. Make sure it's wholesome and exists before returning.
   * @param rd the root directory path
   * @param c configuration to use
   * @param fs filesystem on which the root directory lives
   * @return hbase.rootdir (after checks for existence and, if needed, bootstrapping
   *         by populating the directory with the necessary bootup files)
   * @throws IOException if the checks or the bootstrap fail
   */
  @SuppressWarnings("deprecation")
  private Path checkRootDir(final Path rd, final Configuration c,
    final FileSystem fs)
  throws IOException {
    // If FS is in safe mode wait till out of it.
    FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
    // Filesystem is good. Go ahead and check for hbase.rootdir.
    try {
      if (!fs.exists(rd)) {
        fs.mkdirs(rd);
        // DFS leaves safe mode with 0 DNs when there are 0 blocks.
        // We used to handle this by checking the current DN count and waiting until
        // it is nonzero. With security, the check for datanode count doesn't work --
        // it is a privileged op. So instead we adopt the strategy of the jobtracker
        // and simply retry file creation during bootstrap indefinitely. As soon as
        // there is one datanode it will succeed. Permission problems should have
        // already been caught by mkdirs above.
        FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      } else {
        if (!fs.isDirectory(rd)) {
          throw new IllegalArgumentException(rd.toString() + " is not a directory");
        }
        // as above
        FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      }
    } catch (DeserializationException de) {
      LOG.fatal("Please fix invalid configuration for " + HConstants.HBASE_DIR, de);
      IOException ioe = new IOException();
      ioe.initCause(de);
      throw ioe;
    } catch (IllegalArgumentException iae) {
      LOG.fatal("Please fix invalid configuration for "
        + HConstants.HBASE_DIR + " " + rd.toString(), iae);
      throw iae;
    }
    // Make sure cluster ID exists
    if (!FSUtils.checkClusterIdExists(fs, rd, c.getInt(
        HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
      FSUtils.setClusterId(fs, rd, new ClusterId(),
        c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
    }
    clusterId = FSUtils.getClusterId(fs, rd);

    // Make sure the meta region directory exists!
    if (!FSUtils.metaRegionExists(fs, rd)) {
      bootstrap(rd, c);
    } else {
      // Migrate table descriptor files if necessary
      org.apache.hadoop.hbase.util.FSTableDescriptorMigrationToSubdir
        .migrateFSTableDescriptorsIfNecessary(fs, rd);
    }

    // Create tableinfo-s for hbase:meta if not already there.
    new FSTableDescriptors(fs, rd).createTableDescriptor(HTableDescriptor.META_TABLEDESC);

    return rd;
  }

  /**
   * Make sure the hbase temp directory exists and is empty.
   * NOTE that this method is only executed once just after the master becomes the active one.
   */
  private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
      throws IOException {
    // If the temp directory exists, clear the content (left over from the previous run)
    if (fs.exists(tmpdir)) {
      // Archive tables in temp; they may be left over from a failed deletion.
      // If not, the cleaner will take care of them.
      for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
        for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
          HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
        }
      }
      if (!fs.delete(tmpdir, true)) {
        throw new IOException("Unable to clean the temp directory: " + tmpdir);
      }
    }

    // Create the temp directory
    if (!fs.mkdirs(tmpdir)) {
      throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
    }
  }

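  /**
   * Create the initial hbase:meta region on the filesystem.
   * @param rd the hbase root directory
   * @param c configuration to use
   * @throws IOException if the meta region cannot be created
   */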
  private static void bootstrap(final Path rd, final Configuration c)
  throws IOException {
    LOG.info("BOOTSTRAP: creating hbase:meta region");
    try {
      // Bootstrapping, make sure blockcache is off.  Else, one will be
      // created here in bootstrap and it'll need to be cleaned up.  Better to
      // not make it in first place.  Turn off block caching for bootstrap.
      // Enable after.
      HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
      setInfoFamilyCachingForMeta(false);
      HRegion meta = HRegion.createHRegion(metaHRI, rd, c,
          HTableDescriptor.META_TABLEDESC);
      setInfoFamilyCachingForMeta(true);
      HRegion.closeHRegion(meta);
    } catch (IOException e) {
      e = RemoteExceptionHandler.checkIOException(e);
      LOG.error("bootstrap", e);
      throw e;
    }
  }

  /**
   * Enable or disable the block-cache and in-memory settings of the info family
   * of hbase:meta.
   * @param b true to enable caching, false to disable it
   */
  public static void setInfoFamilyCachingForMeta(final boolean b) {
    for (HColumnDescriptor hcd:
        HTableDescriptor.META_TABLEDESC.getColumnFamilies()) {
      if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
        hcd.setBlockCacheEnabled(b);
        hcd.setInMemory(b);
      }
    }
  }

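  /**
   * Archive the given region's storefiles and remove its directory from the filesystem.
   * @param region region to remove
   * @throws IOException if the region cannot be archived
   */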
  public void deleteRegion(HRegionInfo region) throws IOException {
    HFileArchiver.archiveRegion(conf, fs, region);
  }

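  /**
   * Recursively delete the given table's directory under the hbase root directory.
   * @param tableName table to delete
   * @throws IOException in case of file-system failure
   */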
  public void deleteTable(TableName tableName) throws IOException {
    fs.delete(FSUtils.getTableDir(rootdir, tableName), true);
  }

  /**
   * Move the specified table to the hbase temp directory
   * @param tableName Table name to move
   * @return The temp location of the table moved
   * @throws IOException in case of file-system failure
   */
  public Path moveTableToTemp(TableName tableName) throws IOException {
    Path srcPath = FSUtils.getTableDir(rootdir, tableName);
    Path tempPath = FSUtils.getTableDir(this.tempdir, tableName);

    // Ensure temp exists
    if (!fs.exists(tempPath.getParent()) && !fs.mkdirs(tempPath.getParent())) {
      throw new IOException("HBase temp directory '" + tempPath.getParent() + "' creation failure.");
    }

    if (!fs.rename(srcPath, tempPath)) {
      throw new IOException("Unable to move '" + srcPath + "' to temp '" + tempPath + "'");
    }

    return tempPath;
  }

  public void updateRegionInfo(HRegionInfo region) {
    // TODO: implement this. I think this is currently broken in trunk; I don't
    //       see this getting updated.
    //       @see HRegion.checkRegioninfoOnFilesystem()
  }

  public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
      throws IOException {
    // archive family store files
    Path tableDir = FSUtils.getTableDir(rootdir, region.getTable());
    HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);

    // delete the family folder
    Path familyDir = new Path(tableDir,
      new Path(region.getEncodedName(), Bytes.toString(familyName)));
    if (!fs.delete(familyDir, true)) {
      throw new IOException("Could not delete family "
          + Bytes.toString(familyName) + " from FileSystem for region "
          + region.getRegionNameAsString() + "(" + region.getEncodedName()
          + ")");
    }
  }

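  /**
   * Stop the split log manager, if it was started.
   */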
  public void stop() {
    if (splitLogManager != null) {
      this.splitLogManager.stop();
    }
  }

  /**
   * Delete a column family of a table.
   * @param tableName table whose descriptor to modify
   * @param familyName name of the family to delete
   * @return Modified HTableDescriptor with requested column deleted.
   * @throws IOException if the descriptor cannot be read or written
   */
  public HTableDescriptor deleteColumn(TableName tableName, byte[] familyName)
      throws IOException {
    LOG.info("DeleteColumn. Table = " + tableName
        + " family = " + Bytes.toString(familyName));
    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
    htd.removeFamily(familyName);
    this.services.getTableDescriptors().add(htd);
    return htd;
  }

  /**
   * Modify a column family of a table.
   * @param tableName table whose descriptor to modify
   * @param hcd HColumnDescriptor describing the modified family
   * @return Modified HTableDescriptor with the column modified.
   * @throws IOException if the descriptor cannot be read or written
   */
  public HTableDescriptor modifyColumn(TableName tableName, HColumnDescriptor hcd)
      throws IOException {
    LOG.info("AddModifyColumn. Table = " + tableName
        + " HCD = " + hcd.toString());

    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
    byte [] familyName = hcd.getName();
    if (!htd.hasFamily(familyName)) {
      throw new InvalidFamilyOperationException("Family '" +
        Bytes.toString(familyName) + "' doesn't exist, so cannot be modified");
    }
    htd.addFamily(hcd);
    this.services.getTableDescriptors().add(htd);
    return htd;
  }

  /**
   * Add a column family to a table.
   * @param tableName table whose descriptor to modify
   * @param hcd HColumnDescriptor describing the new family
   * @return Modified HTableDescriptor with new column added.
   * @throws IOException if the descriptor cannot be read or written
   */
  public HTableDescriptor addColumn(TableName tableName, HColumnDescriptor hcd)
      throws IOException {
    LOG.info("AddColumn. Table = " + tableName + " HCD = " +
      hcd.toString());
    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
    if (htd == null) {
      throw new InvalidFamilyOperationException("Family '" +
        hcd.getNameAsString() + "' cannot be added because the table descriptor is missing");
    }
    htd.addFamily(hcd);
    this.services.getTableDescriptors().add(htd);
    return htd;
  }

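  /**
   * Look up the user regions that hbase:meta lists for the given server.
   * @param serverName server whose regions to fetch
   * @return map of regions to their hbase:meta rows, or null if the master is stopping
   * @throws IOException if hbase:meta cannot be read, or the wait for it is interrupted
   */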
  private NavigableMap<HRegionInfo, Result> getServerUserRegions(ServerName serverName)
      throws IOException {
    if (!this.master.isStopped()) {
      try {
        this.master.getCatalogTracker().waitForMeta();
        return MetaReader.getServerUserRegions(this.master.getCatalogTracker(), serverName);
      } catch (InterruptedException e) {
        throw (InterruptedIOException)new InterruptedIOException().initCause(e);
      }
    }
    return null;
  }

  /**
   * Used in SSH to set the recovery mode based on configuration after all outstanding
   * log split tasks have drained.
   * @throws IOException if the recovery mode cannot be set in ZooKeeper
   */
  public void setLogRecoveryMode() throws IOException {
    try {
      this.splitLogManager.setRecoveryMode(false);
    } catch (KeeperException e) {
      throw new IOException(e);
    }
  }

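  /**
   * @return the current log recovery mode used by the split log manager
   */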
  public RecoveryMode getLogRecoveryMode() {
    return this.splitLogManager.getRecoveryMode();
  }
}