/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.migration;

import java.io.IOException;
import java.util.List;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileV1Detector;
import org.apache.hadoop.hbase.util.ZKDataMigrator;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

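/**
 * Command-line tool that upgrades an existing HBase deployment to the 0.96 layout.
 * Run with -check to look for HFileV1 files under ${hbase.rootdir} (or a supplied
 * directory), or with -execute to perform the namespace upgrade, znode migration and
 * offline log splitting. ZooKeeper and HDFS must be up and HBase must be shut down;
 * see http://hbase.apache.org/book.html#upgrade0.96 before running.
 */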
public class UpgradeTo96 extends Configured implements Tool {
  private static final Log LOG = LogFactory.getLog(UpgradeTo96.class);

  private Options options = new Options();
  /**
   * Whether to do the overall upgrade (namespace, znodes and log splitting).
   */
  private boolean upgrade;
  /**
   * Whether to check for HFileV1 files.
   */
  private boolean checkForHFileV1;
  /**
   * Path of the directory to check for HFileV1 files.
   */
  private String dirToCheckForHFileV1;

  UpgradeTo96() {
    setOptions();
  }

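  /**
   * Registers the supported command line options: -h/--help, -check, -execute and -dir.
   */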
  private void setOptions() {
    options.addOption("h", "help", false, "Help");
    options.addOption(new Option("check", false, "Run upgrade check; looks for HFileV1 "
        + "under ${hbase.rootdir} or the provided 'dir' directory."));
    options.addOption(new Option("execute", false,
        "Run upgrade; zk and hdfs must be up, hbase down"));
    Option pathOption = new Option("dir", true,
        "Relative path of the directory to check for HFileV1s.");
    pathOption.setRequired(false);
    options.addOption(pathOption);
  }

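  /**
   * Parses the command line arguments.
   * @return false if help should be printed (no arguments, or -h given), true otherwise
   * @throws ParseException if the arguments cannot be parsed
   */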
  private boolean parseOption(String[] args) throws ParseException {
    if (args.length == 0) return false; // no args shows help.

    CommandLineParser parser = new GnuParser();
    CommandLine cmd = parser.parse(options, args);
    if (cmd.hasOption("h")) {
      return false;
    }
    if (cmd.hasOption("execute")) upgrade = true;
    if (cmd.hasOption("check")) checkForHFileV1 = true;
    if (checkForHFileV1 && cmd.hasOption("dir")) {
      this.dirToCheckForHFileV1 = cmd.getOptionValue("dir");
    }
    return true;
  }

  private void printUsage() {
    HelpFormatter formatter = new HelpFormatter();
    formatter.printHelp("bin/hbase upgrade -check [-dir DIR]|-execute", options);
    System.out.println("Read http://hbase.apache.org/book.html#upgrade0.96 before attempting the upgrade");
    System.out.println();
    System.out.println("Example usage:");
    System.out.println();
    System.out.println("Run the upgrade check; looks for HFileV1s under ${hbase.rootdir}:");
    System.out.println(" $ bin/hbase upgrade -check");
    System.out.println();
    System.out.println("Run the upgrade:");
    System.out.println(" $ bin/hbase upgrade -execute");
  }

  @Override
  public int run(String[] args) throws Exception {
    if (!parseOption(args)) {
      printUsage();
      return -1;
    }
    if (checkForHFileV1) {
      int res = doHFileV1Check();
      if (res == 0) {
        LOG.info("No HFileV1 found.");
      } else {
        LOG.warn("There are some HFileV1 files, or corrupt files (files with an incorrect major version).");
      }
      return res;
    }
    // If the user wants to upgrade, check for any live HBase processes.
    // If any are found, prompt the user to stop them before upgrading.
    else if (upgrade) {
      if (isAnyHBaseProcessAlive()) {
        LOG.error("Some HBase processes are still alive, or znodes not expired yet. "
            + "Please stop them before upgrade or try after some time.");
        throw new IOException("Some HBase processes are still alive, or znodes not expired yet");
      }
      return executeUpgrade();
    }
    return -1;
  }

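  /**
   * Checks ZooKeeper for znodes left by live HBase processes: backup masters, region
   * servers and the active master. Returns false if the base znode does not exist.
   * @return true if any such znode indicates a live process (or has not yet expired)
   */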
  private boolean isAnyHBaseProcessAlive() throws IOException {
    ZooKeeperWatcher zkw = null;
    try {
      zkw = new ZooKeeperWatcher(getConf(), "Check Live Processes.", new Abortable() {
        private boolean aborted = false;

        @Override
        public void abort(String why, Throwable e) {
          LOG.warn("Got aborted with reason: " + why + ", and error: " + e);
          this.aborted = true;
        }

        @Override
        public boolean isAborted() {
          return this.aborted;
        }

      });
      boolean liveProcessesExists = false;
      if (ZKUtil.checkExists(zkw, zkw.baseZNode) == -1) {
        return false;
      }
      if (ZKUtil.checkExists(zkw, zkw.backupMasterAddressesZNode) != -1) {
        List<String> backupMasters = ZKUtil
            .listChildrenNoWatch(zkw, zkw.backupMasterAddressesZNode);
        if (!backupMasters.isEmpty()) {
          LOG.warn("Backup master(s) " + backupMasters
              + " are alive or backup-master znodes not expired.");
          liveProcessesExists = true;
        }
      }
      if (ZKUtil.checkExists(zkw, zkw.rsZNode) != -1) {
        List<String> regionServers = ZKUtil.listChildrenNoWatch(zkw, zkw.rsZNode);
        if (!regionServers.isEmpty()) {
          LOG.warn("Region server(s) " + regionServers + " are alive or rs znodes not expired.");
          liveProcessesExists = true;
        }
      }
      if (ZKUtil.checkExists(zkw, zkw.getMasterAddressZNode()) != -1) {
        byte[] data = ZKUtil.getData(zkw, zkw.getMasterAddressZNode());
        if (data != null && !Bytes.equals(data, HConstants.EMPTY_BYTE_ARRAY)) {
          LOG.warn("Active master at address " + Bytes.toString(data)
              + " is still alive or master znode not expired.");
          liveProcessesExists = true;
        }
      }
      return liveProcessesExists;
    } catch (Exception e) {
      LOG.error("Got exception while checking live hbase processes", e);
      throw new IOException(e);
    } finally {
      if (zkw != null) {
        zkw.close();
      }
    }
  }

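  /**
   * Runs HFileV1Detector over ${hbase.rootdir}, or over the directory supplied via -dir.
   * @return the detector's exit code; 0 means no HFileV1 was found
   */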
  private int doHFileV1Check() throws Exception {
    String[] args = null;
    if (dirToCheckForHFileV1 != null) args = new String[] { "-p", dirToCheckForHFileV1 };
    return ToolRunner.run(getConf(), new HFileV1Detector(), args);
  }

  /**
   * Executes the upgrade process. It involves:
   * <ul>
   * <li> Upgrading Namespace
   * <li> Upgrading Znodes
   * <li> Log splitting
   * </ul>
   * @throws Exception if any upgrade step fails
   */
  private int executeUpgrade() throws Exception {
    executeTool("Namespace upgrade", new NamespaceUpgrade(),
      new String[] { "--upgrade" }, 0);
    executeTool("Znode upgrade", new ZKDataMigrator(), null, 0);
    doOfflineLogSplitting();
    return 0;
  }

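  /**
   * Runs the given tool via ToolRunner with the supplied arguments and throws if its
   * return code does not match the expected result.
   */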
  private void executeTool(String toolMessage, Tool tool, String[] args, int expectedResult)
      throws Exception {
    LOG.info("Starting " + toolMessage);
    int res = ToolRunner.run(getConf(), tool, args);
    if (res != expectedResult) {
      LOG.error(toolMessage + " returned " + res + ", expected " + expectedResult);
      throw new Exception("Unexpected return code from " + toolMessage);
    }
    LOG.info("Successfully completed " + toolMessage);
  }

  /**
   * Performs log splitting for all region server directories.
   * @throws Exception if log splitting fails for any region server directory
   */
  private void doOfflineLogSplitting() throws Exception {
    LOG.info("Starting Log splitting");
    final Path rootDir = FSUtils.getRootDir(getConf());
    final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
    FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
    Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
    FileStatus[] regionServerLogDirs = FSUtils.listStatus(fs, logDir);
    if (regionServerLogDirs == null || regionServerLogDirs.length == 0) {
      LOG.info("No log directories to split, returning");
      return;
    }
    try {
      for (FileStatus regionServerLogDir : regionServerLogDirs) {
        // split this region server's log directory, if it exists
        HLogSplitter.split(rootDir, regionServerLogDir.getPath(), oldLogDir, fs, getConf());
      }
      LOG.info("Successfully completed Log splitting");
    } catch (Exception e) {
      LOG.error("Got exception while doing Log splitting ", e);
      throw e;
    }
  }

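  /**
   * Entry point; runs this tool via ToolRunner with the default HBase configuration
   * and exits with the tool's return code.
   */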
  public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(HBaseConfiguration.create(), new UpgradeTo96(), args));
  }
}