/**
 * The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.migration;

import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.security.access.AccessControlLists;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.util.Tool;

import com.google.common.collect.Lists;
import com.google.common.primitives.Ints;
/**
 * Upgrades the old 0.94 filesystem layout to the namespace layout.
 * Does the following:
 *
 * - creates the system namespace directory and moves the .META. table there,
 *   renaming .META. to hbase:meta; this in turn requires re-encoding the
 *   region directory name
 *
 * <p>The pre-0.96 paths and dir names are hardcoded in here.
 */
public class NamespaceUpgrade implements Tool {
  private static final Log LOG = LogFactory.getLog(NamespaceUpgrade.class);

  private Configuration conf;

  private FileSystem fs;

  private Path rootDir;
  private Path sysNsDir;
  private Path defNsDir;
  private Path baseDirs[];
  private Path backupDir;
  // First move everything to this tmp .data dir in case there is a table named 'data'
  private static final String TMP_DATA_DIR = ".data";
  // Old dir names to migrate.
  private static final String DOT_LOGS = ".logs";
  private static final String DOT_OLD_LOGS = ".oldlogs";
  private static final String DOT_CORRUPT = ".corrupt";
  private static final String DOT_SPLITLOG = "splitlog";
  private static final String DOT_ARCHIVE = ".archive";

  // The old default directory of hbase.dynamic.jars.dir (0.94.12 release).
  private static final String DOT_LIB_DIR = ".lib";

  private static final String OLD_ACL = "_acl_";
  /** Directories that are not HBase table directories */
  static final List<String> NON_USER_TABLE_DIRS = Arrays.asList(new String[] {
      DOT_LOGS,
      DOT_OLD_LOGS,
      DOT_CORRUPT,
      DOT_SPLITLOG,
      HConstants.HBCK_SIDELINEDIR_NAME,
      DOT_ARCHIVE,
      HConstants.SNAPSHOT_DIR_NAME,
      HConstants.HBASE_TEMP_DIRECTORY,
      TMP_DATA_DIR,
      OLD_ACL,
      DOT_LIB_DIR});

  public NamespaceUpgrade() throws IOException {
    super();
  }

  public void init() throws IOException {
    this.rootDir = FSUtils.getRootDir(conf);
    FSUtils.setFsDefault(getConf(), rootDir);
    this.fs = FileSystem.get(conf);
    Path tmpDataDir = new Path(rootDir, TMP_DATA_DIR);
    sysNsDir = new Path(tmpDataDir, NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
    defNsDir = new Path(tmpDataDir, NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR);
    baseDirs = new Path[]{rootDir,
        new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY),
        new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY)};
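    // Superseded data (the old -ROOT- dir and the old _acl_ table dir) is moved under this
    // backup dir by migrateMeta()/migrateACL() rather than being deleted outright.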
    backupDir = new Path(rootDir, HConstants.MIGRATION_NAME);
  }


  public void upgradeTableDirs() throws IOException, DeserializationException {
    // if new version is written then upgrade is done
    if (verifyNSUpgrade(fs, rootDir)) {
      return;
    }

    makeNamespaceDirs();

    migrateTables();

    migrateSnapshots();

    migrateDotDirs();

    migrateMeta();

    migrateACL();

    deleteRoot();

    FSUtils.setVersion(fs, rootDir);
  }

  /**
   * Remove the -ROOT- dir. No longer of use.
   * @throws IOException
   */
  public void deleteRoot() throws IOException {
    Path rootDir = new Path(this.rootDir, "-ROOT-");
    if (this.fs.exists(rootDir)) {
      if (!this.fs.delete(rootDir, true)) {
        LOG.info("Failed remove of " + rootDir);
      } else {
        LOG.info("Deleted " + rootDir);
      }
    }
  }

  /**
   * Rename all the dot dirs -- .data, .archive, etc. -- as data, archive, etc.; i.e. minus the dot.
   * @throws IOException
   */
  public void migrateDotDirs() throws IOException {
    // Dot dirs to rename.  Leave the tmp dir named '.tmp' and snapshots as .hbase-snapshot.
    final Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    Path [][] dirs = new Path[][] {
      new Path [] {new Path(rootDir, DOT_CORRUPT), new Path(rootDir, HConstants.CORRUPT_DIR_NAME)},
      new Path [] {new Path(rootDir, DOT_LOGS), new Path(rootDir, HConstants.HREGION_LOGDIR_NAME)},
      new Path [] {new Path(rootDir, DOT_OLD_LOGS),
        new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME)},
      new Path [] {new Path(rootDir, TMP_DATA_DIR),
        new Path(rootDir, HConstants.BASE_NAMESPACE_DIR)},
      new Path[] { new Path(rootDir, DOT_LIB_DIR),
        new Path(rootDir, HConstants.LIB_DIR)}};
    for (Path [] dir: dirs) {
      Path src = dir[0];
      Path tgt = dir[1];
      if (!this.fs.exists(src)) {
        LOG.info("Does not exist: " + src);
        continue;
      }
      rename(src, tgt);
    }
    // Do the .archive dir.  Need to move its subdirs to the default ns dir under data dir... so
    // from '.archive/foo', to 'archive/data/default/foo'.
    Path oldArchiveDir = new Path(rootDir, DOT_ARCHIVE);
    if (this.fs.exists(oldArchiveDir)) {
      // This is a pain doing two nn calls but portable over h1 and h2.
      mkdirs(archiveDir);
      Path archiveDataDir = new Path(archiveDir, HConstants.BASE_NAMESPACE_DIR);
      mkdirs(archiveDataDir);
      rename(oldArchiveDir, new Path(archiveDataDir,
        NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR));
    }
    // Update the system and user namespace dirs removing the dot in front of .data.
    Path dataDir = new Path(rootDir, HConstants.BASE_NAMESPACE_DIR);
    sysNsDir = new Path(dataDir, NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
    defNsDir = new Path(dataDir, NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR);
  }

  private void mkdirs(final Path p) throws IOException {
    if (!this.fs.mkdirs(p)) throw new IOException("Failed make of " + p);
  }

  private void rename(final Path src, final Path tgt) throws IOException {
    if (!fs.rename(src, tgt)) {
      throw new IOException("Failed move " + src + " to " + tgt);
    }
  }

  /**
   * Create the system and default namespace dirs
   * @throws IOException
   */
  public void makeNamespaceDirs() throws IOException {
    if (!fs.exists(sysNsDir)) {
      if (!fs.mkdirs(sysNsDir)) {
        throw new IOException("Failed to create system namespace dir: " + sysNsDir);
      }
    }
    if (!fs.exists(defNsDir)) {
      if (!fs.mkdirs(defNsDir)) {
        throw new IOException("Failed to create default namespace dir: " + defNsDir);
      }
    }
  }

  /**
   * Migrate all tables into their respective namespaces, either default or system.  We put them
   * into a temporary location, '.data', in case a user table is named 'data'.  In a later method
   * we will move stuff from .data to data.
   * @throws IOException
   */
  public void migrateTables() throws IOException {
    List<String> sysTables = Lists.newArrayList("-ROOT-", ".META.", ".META");
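    // '.META' (no trailing dot) is included because on Windows NTFS the meta dir may have
    // been written without the trailing dot; see migrateMeta() below.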

    // Migrate tables including archive and tmp
    for (Path baseDir: baseDirs) {
      if (!fs.exists(baseDir)) continue;
      List<Path> oldTableDirs = FSUtils.getLocalTableDirs(fs, baseDir);
      for (Path oldTableDir: oldTableDirs) {
        if (NON_USER_TABLE_DIRS.contains(oldTableDir.getName())) continue;
        if (sysTables.contains(oldTableDir.getName())) continue;
        // Make the new directory under the ns to which we will move the table.
        Path nsDir = new Path(this.defNsDir,
          TableName.valueOf(oldTableDir.getName()).getQualifierAsString());
        if (!fs.exists(nsDir.getParent())) {
          if (!fs.mkdirs(nsDir.getParent())) {
            throw new IOException("Failed to create namespace dir "+nsDir.getParent());
          }
        }
        if (sysTables.indexOf(oldTableDir.getName()) < 0) {
          LOG.info("Migrating table " + oldTableDir.getName() + " to " + nsDir);
          if (!fs.rename(oldTableDir, nsDir)) {
            throw new IOException("Failed to move "+oldTableDir+" to namespace dir "+nsDir);
          }
        }
      }
    }
  }

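  /**
   * Moves the old snapshot dir to its new name if it contains at least one completed
   * snapshot; in-progress snapshots are ignored when making that check.
   * @throws IOException
   */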
  public void migrateSnapshots() throws IOException {
    //migrate snapshot dir
    Path oldSnapshotDir = new Path(rootDir, HConstants.OLD_SNAPSHOT_DIR_NAME);
    Path newSnapshotDir = new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
    if (fs.exists(oldSnapshotDir)) {
      boolean foundOldSnapshotDir = false;
      // Logic to verify old snapshot dir culled from SnapshotManager
      // ignore all the snapshots in progress
      FileStatus[] snapshots = fs.listStatus(oldSnapshotDir,
        new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
      // loop through all the completed snapshots
      for (FileStatus snapshot : snapshots) {
        Path info = new Path(snapshot.getPath(), SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
        // if the snapshot info file exists this is a completed snapshot, so the old dir
        // is worth migrating
        if (fs.exists(info)) {
          foundOldSnapshotDir = true;
          break;
        }
      }
      if(foundOldSnapshotDir) {
        LOG.info("Migrating snapshot dir");
        if (!fs.rename(oldSnapshotDir, newSnapshotDir)) {
          throw new IOException("Failed to move old snapshot dir "+
              oldSnapshotDir+" to new "+newSnapshotDir);
        }
      }
    }
  }

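  /**
   * Moves the old .META. dir (or .META on Windows) under the system namespace as hbase:meta,
   * renames its region dir to the new encoding, drops the stale .tableinfo files, and moves
   * the old -ROOT- dir into the backup dir.
   * @throws IOException
   */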
  public void migrateMeta() throws IOException {
    Path newMetaDir = new Path(this.sysNsDir, TableName.META_TABLE_NAME.getQualifierAsString());
    Path newMetaRegionDir =
      new Path(newMetaDir, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
    Path oldMetaDir = new Path(rootDir, ".META.");
    if (fs.exists(oldMetaDir)) {
      LOG.info("Migrating meta table " + oldMetaDir.getName() + " to " + newMetaDir);
      if (!fs.rename(oldMetaDir, newMetaDir)) {
        throw new IOException("Failed to migrate meta table "
            + oldMetaDir.getName() + " to " + newMetaDir);
      }
    } else {
      // on windows NTFS, meta's name is .META (note the missing dot at the end)
      oldMetaDir = new Path(rootDir, ".META");
      if (fs.exists(oldMetaDir)) {
        LOG.info("Migrating meta table " + oldMetaDir.getName() + " to " + newMetaDir);
        if (!fs.rename(oldMetaDir, newMetaDir)) {
          throw new IOException("Failed to migrate meta table "
              + oldMetaDir.getName() + " to " + newMetaDir);
        }
      }
    }

    // Since the meta table name has changed, rename the meta region dir from its old encoding
    // to the new one
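    // "1028785192" is the hard-coded encoded name that the single .META. region dir had in the
    // pre-0.96 layout.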
    Path oldMetaRegionDir = HRegion.getRegionDir(rootDir,
      new Path(newMetaDir, "1028785192").toString());
    if (fs.exists(oldMetaRegionDir)) {
      LOG.info("Migrating meta region " + oldMetaRegionDir + " to " + newMetaRegionDir);
      if (!fs.rename(oldMetaRegionDir, newMetaRegionDir)) {
        throw new IOException("Failed to migrate meta region "
            + oldMetaRegionDir + " to " + newMetaRegionDir);
      }
    }
    // Remove .tableinfo files as they refer to ".META.".
    // They will be recreated by master on startup.
    removeTableInfoInPre96Format(TableName.META_TABLE_NAME);

    Path oldRootDir = new Path(rootDir, "-ROOT-");
    if(!fs.rename(oldRootDir, backupDir)) {
      throw new IllegalStateException("Failed to move old data: "+oldRootDir+" to "+backupDir);
    }
  }

  /**
   * Removes .tableinfo files that are laid out in the pre-0.96 format (i.e., the tableinfo
   * files sit directly under the table directory).
   * @param tableName
   * @throws IOException
   */
  private void removeTableInfoInPre96Format(TableName tableName) throws IOException {
    Path tableDir = FSUtils.getTableDir(rootDir, tableName);
    FileStatus[] status = FSUtils.listStatus(fs, tableDir, TABLEINFO_PATHFILTER);
    if (status == null) return;
    for (FileStatus fStatus : status) {
      FSUtils.delete(fs, fStatus.getPath(), false);
    }
  }

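  /**
   * Migrates the old _acl_ table to hbase:acl: writes a table descriptor under the new name,
   * moves each region's files under a newly encoded region dir, rewrites the rows via
   * updateAcls(), and replaces the old region entries in hbase:meta with the new ones.
   * @throws IOException
   */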
  public void migrateACL() throws IOException {

    TableName oldTableName = TableName.valueOf(OLD_ACL);
    Path oldTablePath = new Path(rootDir, oldTableName.getNameAsString());

    if(!fs.exists(oldTablePath)) {
      return;
    }

    LOG.info("Migrating ACL table");

    TableName newTableName = AccessControlLists.ACL_TABLE_NAME;
    Path newTablePath = FSUtils.getTableDir(rootDir, newTableName);
    HTableDescriptor oldDesc =
        readTableDescriptor(fs, getCurrentTableInfoStatus(fs, oldTablePath));

    if(FSTableDescriptors.getTableInfoPath(fs, newTablePath) == null) {
      LOG.info("Creating new tableDesc for ACL");
      HTableDescriptor newDesc = new HTableDescriptor(oldDesc);
      newDesc.setName(newTableName);
      new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
        newTablePath, newDesc, true);
    }


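    // Open hbase:meta offline, backed by a throwaway WAL written under a fake server name,
    // so the catalog rows for the migrated ACL regions can be rewritten without a live cluster.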
    ServerName fakeServer = ServerName.valueOf("nsupgrade", 96, 123);
    String metaLogName = HLogUtil.getHLogDirectoryName(fakeServer.toString());
    HLog metaHLog = HLogFactory.createMetaHLog(fs, rootDir,
        metaLogName, conf, null,
        fakeServer.toString());
    HRegion meta = HRegion.openHRegion(rootDir, HRegionInfo.FIRST_META_REGIONINFO,
        HTableDescriptor.META_TABLEDESC, metaHLog, conf);
    HRegion region = null;
    try {
      for(Path regionDir : FSUtils.getRegionDirs(fs, oldTablePath)) {
        LOG.info("Migrating ACL region "+regionDir.getName());
        HRegionInfo oldRegionInfo = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
        HRegionInfo newRegionInfo =
            new HRegionInfo(newTableName,
                oldRegionInfo.getStartKey(),
                oldRegionInfo.getEndKey(),
                oldRegionInfo.isSplit(),
                oldRegionInfo.getRegionId());
        newRegionInfo.setOffline(oldRegionInfo.isOffline());
        region =
            new HRegion(
                HRegionFileSystem.openRegionFromFileSystem(conf, fs, oldTablePath,
                    oldRegionInfo, false),
                metaHLog,
                conf,
                oldDesc,
                null);
        region.initialize();
        updateAcls(region);
        // closing the region would flush it so we don't need an explicit flush to save
        // acl changes.
        region.close();

        //Create new region dir
        Path newRegionDir = new Path(newTablePath, newRegionInfo.getEncodedName());
        if(!fs.exists(newRegionDir)) {
          if(!fs.mkdirs(newRegionDir)) {
            throw new IllegalStateException("Failed to create new region dir: " + newRegionDir);
          }
        }

        //create new region info file, delete in case one exists
        HRegionFileSystem.openRegionFromFileSystem(conf, fs, newTablePath, newRegionInfo, false);

        //migrate region contents
        for(FileStatus file : fs.listStatus(regionDir, new FSUtils.UserTableDirFilter(fs)))  {
          if(file.getPath().getName().equals(HRegionFileSystem.REGION_INFO_FILE))
            continue;
          if(!fs.rename(file.getPath(), newRegionDir))  {
            throw new IllegalStateException("Failed to move file "+file.getPath()+" to " +
                newRegionDir);
          }
        }
        meta.put(MetaEditor.makePutFromRegionInfo(newRegionInfo));
        meta.delete(MetaEditor.makeDeleteFromRegionInfo(oldRegionInfo));
      }
    } finally {
      meta.flushcache();
      meta.waitForFlushesAndCompactions();
      meta.close();
      metaHLog.closeAndDelete();
      if(region != null) {
        region.close();
      }
    }
    if(!fs.rename(oldTablePath, backupDir)) {
      throw new IllegalStateException("Failed to move old data: "+oldTablePath+" to "+backupDir);
    }
  }

  /**
   * Rewrites rows in the ACL region: replaces the old _acl_ entry with one keyed by hbase:acl,
   * deletes the old '-ROOT-' entry, and renames the .META. entry to hbase:meta.
   * @param region
   * @throws IOException
   */
  void updateAcls(HRegion region) throws IOException {
    byte[] rowKey = Bytes.toBytes(NamespaceUpgrade.OLD_ACL);
    // get the old _acl_ entry, if present.
    Get g = new Get(rowKey);
    Result r = region.get(g);
    if (r != null && r.size() > 0) {
      // create a put for new _acl_ entry with rowkey as hbase:acl
      Put p = new Put(AccessControlLists.ACL_GLOBAL_NAME);
      for (Cell c : r.rawCells()) {
        p.addImmutable(CellUtil.cloneFamily(c), CellUtil.cloneQualifier(c), CellUtil.cloneValue(c));
      }
      region.put(p);
      // delete the old entry
      Delete del = new Delete(rowKey);
      region.delete(del);
    }

    // delete the old entry for '-ROOT-'
    rowKey = Bytes.toBytes(TableName.OLD_ROOT_STR);
    Delete del = new Delete(rowKey);
    region.delete(del);

    // rename .META. to hbase:meta
    rowKey = Bytes.toBytes(TableName.OLD_META_STR);
    g = new Get(rowKey);
    r = region.get(g);
    if (r != null && r.size() > 0) {
      // create a put for new .META. entry with rowkey as hbase:meta
      Put p = new Put(TableName.META_TABLE_NAME.getName());
      for (Cell c : r.rawCells()) {
        p.addImmutable(CellUtil.cloneFamily(c), CellUtil.cloneQualifier(c), CellUtil.cloneValue(c));
      }
      region.put(p);
      // delete the old entry
      del = new Delete(rowKey);
      region.delete(del);
    }
  }

  //Culled from FSTableDescriptors
  private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status)
      throws IOException {
    int len = Ints.checkedCast(status.getLen());
    byte [] content = new byte[len];
    FSDataInputStream fsDataInputStream = fs.open(status.getPath());
    try {
      fsDataInputStream.readFully(content);
    } finally {
      fsDataInputStream.close();
    }
    HTableDescriptor htd = null;
    try {
      htd = HTableDescriptor.parseFrom(content);
    } catch (DeserializationException e) {
      throw new IOException("content=" + Bytes.toStringBinary(content), e);
    }
    return htd;
  }

  private static final PathFilter TABLEINFO_PATHFILTER = new PathFilter() {
    @Override
    public boolean accept(Path p) {
      // Accept any file that starts with TABLEINFO_NAME
      return p.getName().startsWith(".tableinfo");
    }
  };

  static final Comparator<FileStatus> TABLEINFO_FILESTATUS_COMPARATOR =
  new Comparator<FileStatus>() {
    @Override
    public int compare(FileStatus left, FileStatus right) {
      return right.compareTo(left);
    }};

  // logic culled from FSTableDescriptors
  static FileStatus getCurrentTableInfoStatus(FileSystem fs, Path dir)
  throws IOException {
    FileStatus [] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
    if (status == null || status.length < 1) return null;
    FileStatus mostCurrent = null;
    for (FileStatus file : status) {
      if (mostCurrent == null || TABLEINFO_FILESTATUS_COMPARATOR.compare(file, mostCurrent) < 0) {
        mostCurrent = file;
      }
    }
    return mostCurrent;
  }

  public static boolean verifyNSUpgrade(FileSystem fs, Path rootDir)
      throws IOException {
    try {
      return FSUtils.getVersion(fs, rootDir).equals(HConstants.FILE_SYSTEM_VERSION);
    } catch (DeserializationException e) {
      throw new IOException("Failed to verify namespace upgrade", e);
    }
  }


  @Override
  public int run(String[] args) throws Exception {
    if (args.length < 1 || !args[0].equals("--upgrade")) {
      System.out.println("Usage: <CMD> --upgrade");
      return 0;
    }
    init();
    upgradeTableDirs();
    return 0;
  }
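
  // Illustrative only, not part of the original class: because this class implements Hadoop's
  // Tool interface, it could be driven programmatically along these lines (a minimal sketch,
  // assuming the supplied Configuration points at the cluster's HBase root dir):
  //
  //   int exit = org.apache.hadoop.util.ToolRunner.run(
  //       org.apache.hadoop.hbase.HBaseConfiguration.create(),
  //       new NamespaceUpgrade(), new String[] {"--upgrade"});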

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }
}