1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18 package org.apache.hadoop.hbase;
19
20 import static org.junit.Assert.assertTrue;
21 import static org.junit.Assert.fail;
22
23 import java.io.File;
24 import java.io.IOException;
25 import java.io.OutputStream;
26 import java.lang.reflect.Field;
27 import java.lang.reflect.Modifier;
28 import java.net.InetAddress;
29 import java.net.ServerSocket;
30 import java.net.Socket;
31 import java.net.UnknownHostException;
32 import java.security.MessageDigest;
33 import java.util.ArrayList;
34 import java.util.Arrays;
35 import java.util.Collection;
36 import java.util.Collections;
37 import java.util.HashSet;
38 import java.util.List;
39 import java.util.Map;
40 import java.util.NavigableSet;
41 import java.util.Random;
42 import java.util.Set;
43 import java.util.UUID;
44 import java.util.concurrent.TimeUnit;
45
46 import org.apache.commons.logging.Log;
47 import org.apache.commons.logging.LogFactory;
48 import org.apache.commons.logging.impl.Jdk14Logger;
49 import org.apache.commons.logging.impl.Log4JLogger;
50 import org.apache.hadoop.classification.InterfaceAudience;
51 import org.apache.hadoop.classification.InterfaceStability;
52 import org.apache.hadoop.conf.Configuration;
53 import org.apache.hadoop.fs.FileSystem;
54 import org.apache.hadoop.fs.Path;
55 import org.apache.hadoop.hbase.Waiter.Predicate;
56 import org.apache.hadoop.hbase.catalog.MetaEditor;
57 import org.apache.hadoop.hbase.client.Delete;
58 import org.apache.hadoop.hbase.client.Durability;
59 import org.apache.hadoop.hbase.client.Get;
60 import org.apache.hadoop.hbase.client.HBaseAdmin;
61 import org.apache.hadoop.hbase.client.HConnection;
62 import org.apache.hadoop.hbase.client.HTable;
63 import org.apache.hadoop.hbase.client.Put;
64 import org.apache.hadoop.hbase.client.Result;
65 import org.apache.hadoop.hbase.client.ResultScanner;
66 import org.apache.hadoop.hbase.client.Scan;
67 import org.apache.hadoop.hbase.fs.HFileSystem;
68 import org.apache.hadoop.hbase.io.compress.Compression;
69 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
70 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
71 import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
72 import org.apache.hadoop.hbase.io.hfile.HFile;
73 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
74 import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
75 import org.apache.hadoop.hbase.master.HMaster;
76 import org.apache.hadoop.hbase.master.RegionStates;
77 import org.apache.hadoop.hbase.master.ServerManager;
78 import org.apache.hadoop.hbase.regionserver.BloomType;
79 import org.apache.hadoop.hbase.regionserver.HRegion;
80 import org.apache.hadoop.hbase.regionserver.HRegionServer;
81 import org.apache.hadoop.hbase.regionserver.HStore;
82 import org.apache.hadoop.hbase.regionserver.InternalScanner;
83 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
84 import org.apache.hadoop.hbase.regionserver.wal.HLog;
85 import org.apache.hadoop.hbase.security.User;
86 import org.apache.hadoop.hbase.tool.Canary;
87 import org.apache.hadoop.hbase.util.Bytes;
88 import org.apache.hadoop.hbase.util.FSUtils;
89 import org.apache.hadoop.hbase.util.JVMClusterUtil;
90 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
91 import org.apache.hadoop.hbase.util.RegionSplitter;
92 import org.apache.hadoop.hbase.util.RetryCounter;
93 import org.apache.hadoop.hbase.util.Threads;
94 import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
95 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
96 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
97 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
98 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
99 import org.apache.hadoop.hdfs.DFSClient;
100 import org.apache.hadoop.hdfs.DistributedFileSystem;
101 import org.apache.hadoop.hdfs.MiniDFSCluster;
102 import org.apache.hadoop.mapred.JobConf;
103 import org.apache.hadoop.mapred.MiniMRCluster;
104 import org.apache.hadoop.mapred.TaskLog;
105 import org.apache.zookeeper.KeeperException;
106 import org.apache.zookeeper.KeeperException.NodeExistsException;
107 import org.apache.zookeeper.WatchedEvent;
108 import org.apache.zookeeper.ZooKeeper;
109 import org.apache.zookeeper.ZooKeeper.States;
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125 @InterfaceAudience.Public
126 @InterfaceStability.Evolving
127 public class HBaseTestingUtility extends HBaseCommonTestingUtility {
128 private MiniZooKeeperCluster zkCluster = null;
129
130 public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
131
132
133
134
135 public static final int DEFAULT_REGIONS_PER_SERVER = 5;
136
137
138
139
140
141 private boolean passedZkCluster = false;
142 private MiniDFSCluster dfsCluster = null;
143
144 private HBaseCluster hbaseCluster = null;
145 private MiniMRCluster mrCluster = null;
146
147
148 private boolean miniClusterRunning;
149
150 private String hadoopLogDir;
151
152
153 private File clusterTestDir = null;
154
155
156
157 private Path dataTestDirOnTestFS = null;
158
159
160
161
162
163
164
165
166 @Deprecated
167 private static final String TEST_DIRECTORY_KEY = "test.build.data";
168
169
170 private static String FS_URI;
171
172
173 private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();
174
175
176 public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
177 Arrays.asList(new Object[][] {
178 { Compression.Algorithm.NONE },
179 { Compression.Algorithm.GZ }
180 });
181
182
183 public static final List<Object[]> BOOLEAN_PARAMETERIZED =
184 Arrays.asList(new Object[][] {
185 { new Boolean(false) },
186 { new Boolean(true) }
187 });
188
189
190 public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination() ;
191
192 public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS ={
193 Compression.Algorithm.NONE, Compression.Algorithm.GZ
194 };
195
196
197
198
199
200 private static List<Object[]> bloomAndCompressionCombinations() {
201 List<Object[]> configurations = new ArrayList<Object[]>();
202 for (Compression.Algorithm comprAlgo :
203 HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
204 for (BloomType bloomType : BloomType.values()) {
205 configurations.add(new Object[] { comprAlgo, bloomType });
206 }
207 }
208 return Collections.unmodifiableList(configurations);
209 }
210
211
212
213
214 private static List<Object[]> memStoreTSAndTagsCombination() {
215 List<Object[]> configurations = new ArrayList<Object[]>();
216 configurations.add(new Object[] { false, false });
217 configurations.add(new Object[] { false, true });
218 configurations.add(new Object[] { true, false });
219 configurations.add(new Object[] { true, true });
220 return Collections.unmodifiableList(configurations);
221 }
222
  /** Every bloom filter type crossed with every test compression algorithm. */
  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

  /** Creates a utility backed by a freshly created default HBase configuration. */
  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  /**
   * Creates a utility wrapping (and mutating) the supplied configuration.
   * @param conf configuration shared with all the mini-cluster helpers
   */
  public HBaseTestingUtility(Configuration conf) {
    super(conf);

    // Make HFile checksum-verification failures throw during tests so they surface early.
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }
236
237
238
239
240
241
242
243 public static HBaseTestingUtility createLocalHTU() {
244 Configuration c = HBaseConfiguration.create();
245 return createLocalHTU(c);
246 }
247
248
249
250
251
252
253
254
255 public static HBaseTestingUtility createLocalHTU(Configuration c) {
256 HBaseTestingUtility htu = new HBaseTestingUtility(c);
257 String dataTestDir = htu.getDataTestDir().toString();
258 htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
259 LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
260 return htu;
261 }
262
263
264
265
266
267
268
269
270
271
272
273
  /**
   * Returns this utility's (mutable) configuration. Kept overridden so there is a
   * single documented access point even though it only delegates to the parent.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  // Replaces the cluster reference; lets an externally created HBaseCluster
  // (e.g. a distributed one) be driven through this utility.
  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299 @Override
300 protected Path setupDataTestDir() {
301 Path testPath = super.setupDataTestDir();
302 if (null == testPath) {
303 return null;
304 }
305
306 createSubDirAndSystemProperty(
307 "hadoop.log.dir",
308 testPath, "hadoop-log-dir");
309
310
311
312 createSubDirAndSystemProperty(
313 "hadoop.tmp.dir",
314 testPath, "hadoop-tmp-dir");
315
316
317 createSubDir(
318 "mapred.local.dir",
319 testPath, "mapred-local-dir");
320
321 return testPath;
322 }
323
324 private void createSubDirAndSystemProperty(
325 String propertyName, Path parent, String subDirName){
326
327 String sysValue = System.getProperty(propertyName);
328
329 if (sysValue != null) {
330
331
332 LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
333 sysValue + " so I do NOT create it in " + parent);
334 String confValue = conf.get(propertyName);
335 if (confValue != null && !confValue.endsWith(sysValue)){
336 LOG.warn(
337 propertyName + " property value differs in configuration and system: "+
338 "Configuration="+confValue+" while System="+sysValue+
339 " Erasing configuration value by system value."
340 );
341 }
342 conf.set(propertyName, sysValue);
343 } else {
344
345 createSubDir(propertyName, parent, subDirName);
346 System.setProperty(propertyName, conf.get(propertyName));
347 }
348 }
349
350
351
352
353
354
355
356 private Path getBaseTestDirOnTestFS() throws IOException {
357 FileSystem fs = getTestFileSystem();
358 return new Path(fs.getWorkingDirectory(), "test-data");
359 }
360
361
362
363
364
365
366 Path getClusterTestDir() {
367 if (clusterTestDir == null){
368 setupClusterTestDir();
369 }
370 return new Path(clusterTestDir.getAbsolutePath());
371 }
372
373
374
375
  /**
   * Creates the directory used by the mini dfs/zk clusters, at most once per
   * instance. Also points the deprecated "test.build.data" key at it because
   * some Hadoop test code still reads that property.
   */
  private void setupClusterTestDir() {
    if (clusterTestDir != null) {
      return;
    }

    // Randomized name under the data test dir so repeated runs never collide.
    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();

    // Clean up on JVM exit unless the caller asked to keep test data around.
    boolean b = deleteOnExit();
    if (b) clusterTestDir.deleteOnExit();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
  }
391
392
393
394
395
396
397
398 public Path getDataTestDirOnTestFS() throws IOException {
399 if (dataTestDirOnTestFS == null) {
400 setupDataTestDirOnTestFS();
401 }
402
403 return dataTestDirOnTestFS;
404 }
405
406
407
408
409
410
411
412
413 public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
414 return new Path(getDataTestDirOnTestFS(), subdirName);
415 }
416
417
418
419
  /**
   * Decides where test data lives on the test filesystem. When the test FS is
   * the local filesystem the existing local data test dir is reused (and
   * registered for deleteOnExit); otherwise (e.g. HDFS) a random directory is
   * created under the FS working dir and registered with the FS's own
   * deleteOnExit mechanism, since java.io.File cannot track remote paths.
   */
  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test on test fs dir already setup in "
        + dataTestDirOnTestFS.toString());
      return;
    }

    FileSystem fs = getTestFileSystem();
    // Compare schemes to detect "local" regardless of how the FS was configured.
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      File dataTestDir = new File(getDataTestDir().toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
      dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
    } else {
      Path base = getBaseTestDirOnTestFS();
      String randomStr = UUID.randomUUID().toString();
      dataTestDirOnTestFS = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(dataTestDirOnTestFS);
    }
  }
443
444
445
446
447
448
449 public boolean cleanupDataTestDirOnTestFS() throws IOException {
450 boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
451 if (ret)
452 dataTestDirOnTestFS = null;
453 return ret;
454 }
455
456
457
458
459
460
461 public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
462 Path cpath = getDataTestDirOnTestFS(subdirName);
463 return getTestFileSystem().delete(cpath, true);
464 }
465
466
467
468
469
470
471
472
473 public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
474 return startMiniDFSCluster(servers, null);
475 }
476
477
478
479
480
481
482
483
484
485
486
487
488 public MiniDFSCluster startMiniDFSCluster(final String hosts[])
489 throws Exception {
490 if ( hosts != null && hosts.length != 0) {
491 return startMiniDFSCluster(hosts.length, hosts);
492 } else {
493 return startMiniDFSCluster(1, null);
494 }
495 }
496
497
498
499
500
501
502
503
504
505
  /**
   * Starts a mini DFS cluster, repoints the default filesystem at it, and waits
   * until it is fully up. Also resets the cached test-FS data dir so it is
   * re-resolved against the new cluster.
   * @param servers number of datanodes
   * @param hosts optional datanode hostnames; may be null
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
  throws Exception {
    createDirsAndSetProperties();

    // Quiet two metrics loggers that are very noisy under the minicluster.
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
        setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
        setLevel(org.apache.log4j.Level.ERROR);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    // Make the just-started cluster the default filesystem for this conf.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up.
    this.dfsCluster.waitClusterUp();

    // Reset the cached test directory for the test file system.
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }
532
533
  /**
   * Starts a mini DFS cluster with explicit rack and host assignments,
   * repoints the default filesystem at it, and waits until it is fully up.
   * @param servers number of datanodes
   * @param racks rack names, parallel to {@code hosts}; may be null
   * @param hosts datanode hostnames; may be null
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, racks, hosts, null);

    // Make the just-started cluster the default filesystem for this conf.
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // Wait for the cluster to be totally up.
    this.dfsCluster.waitClusterUp();

    // Reset the cached test directory for the test file system.
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }

  /**
   * Starts a 5-datanode mini DFS cluster on a fixed namenode port, as needed by
   * HLog tests that reconnect to the same namenode address. Note: unlike the
   * other starters this does NOT repoint fs.defaultFS or wait for cluster-up.
   * @param namenodePort fixed port for the namenode
   */
  public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }
559
560
  /**
   * Creates every scratch directory Hadoop/MapReduce expects and publishes each
   * one as both a JVM system property and a configuration key, so that whatever
   * mechanism Hadoop uses to look them up finds the per-test location.
   */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapred.local.dir");
    createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
    // Opt in to short-circuit reads when the test configuration asks for them.
    enableShortCircuit();

    // MapReduce job dirs live on the test filesystem, not the local one.
    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
  }
579
580
581
582
583
584
585
586 public boolean isReadShortCircuitOn(){
587 final String propName = "hbase.tests.use.shortcircuit.reads";
588 String readOnProp = System.getProperty(propName);
589 if (readOnProp != null){
590 return Boolean.parseBoolean(readOnProp);
591 } else {
592 return conf.getBoolean(propName, false);
593 }
594 }
595
596
597
598
599 private void enableShortCircuit() {
600 if (isReadShortCircuitOn()) {
601 String curUser = System.getProperty("user.name");
602 LOG.info("read short circuit is ON for user " + curUser);
603
604 conf.set("dfs.block.local-path-access.user", curUser);
605
606 conf.setBoolean("dfs.client.read.shortcircuit", true);
607
608 conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
609 } else {
610 LOG.info("read short circuit is OFF");
611 }
612 }
613
  /**
   * Creates {@code relPath} under the data test dir and publishes its absolute
   * path as both a system property and a configuration key.
   * @return the created directory's path string
   */
  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    // mkdirs() result deliberately ignored: the directory may already exist.
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }
622
623
624
625
626
627
628 public void shutdownMiniDFSCluster() throws IOException {
629 if (this.dfsCluster != null) {
630
631 this.dfsCluster.shutdown();
632 dfsCluster = null;
633 dataTestDirOnTestFS = null;
634 FSUtils.setFsDefault(this.conf, new Path("file:///"));
635 }
636 }
637
638
639
640
641
642
643
644
  /**
   * Starts a single-server mini ZooKeeper cluster under the cluster test dir.
   * Stop it with {@link #shutdownMiniZKCluster()}.
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }

  /**
   * Starts a mini ZooKeeper cluster with the given number of servers under the
   * cluster test dir (creating that dir if needed).
   * @param zooKeeperServerNum number of ZK servers
   */
  public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
      throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
  }

  // Single-server variant rooted at an explicit directory.
  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
    throws Exception {
    return startMiniZKCluster(dir,1);
  }
667
668
669
670
671
  /**
   * Starts the mini ZooKeeper cluster rooted at {@code dir}, honoring the
   * "test.hbase.zookeeper.property.clientPort" override when set, and records
   * the actual client port back into the configuration for other components.
   * @throws IOException if a ZK cluster is already running from this utility
   */
  private MiniZooKeeperCluster startMiniZKCluster(final File dir,
      int zooKeeperServerNum)
  throws Exception {
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    // We started it ourselves, so shutdownMiniZKCluster() is allowed to stop it.
    this.passedZkCluster = false;
    this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
    final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
    if (defPort > 0){
      // A test explicitly asked for a fixed client port; pass it down.
      this.zkCluster.setDefaultClientPort(defPort);
    }
    int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum);
    this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(clientPort));
    return this.zkCluster;
  }
690
691
692
693
694
695
696
697 public void shutdownMiniZKCluster() throws IOException {
698 if (this.zkCluster != null) {
699 this.zkCluster.shutdown();
700 this.zkCluster = null;
701 }
702 }
703
704
705
706
707
708
709
  /**
   * Starts a mini HBase cluster with one master and one region server
   * (plus the backing DFS and ZK mini clusters).
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }

  /**
   * Starts a mini cluster with one master and {@code numSlaves} region servers
   * (one datanode per region server).
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
  throws Exception {
    return startMiniCluster(1, numSlaves);
  }

  /**
   * Starts a mini cluster with the given master and region-server counts,
   * using default datanode hosts.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves)
  throws Exception {
    return startMiniCluster(numMasters, numSlaves, null);
  }
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
  /**
   * Starts a mini cluster where the datanode count equals the region-server
   * count (unless {@code dataNodeHosts} dictates otherwise downstream).
   * @param dataNodeHosts datanode hostnames; may be null
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts, null, null);
  }

  /**
   * Starts a mini cluster with an explicit, possibly different, datanode count.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
  /**
   * Starts a mini cluster with custom master/region-server classes; the
   * datanode count defaults to the region-server count.
   * @param masterClass master implementation to run; null for the default
   * @param regionserverClass region server implementation to run; null for the default
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
          throws Exception {
    return startMiniCluster(
        numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }
817
818
819
820
821
822
  /**
   * Starts the complete mini cluster: DFS, then ZooKeeper (unless one was
   * already supplied), then HBase itself. When {@code dataNodeHosts} is
   * non-empty the datanode count is forced to its length.
   * @throws IllegalStateException if a mini cluster is already running here
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
    Class<? extends HMaster> masterClass,
    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
  throws Exception {
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      // Hostnames win: one datanode per supplied host.
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // Guard against two concurrent mini clusters from the same utility.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up DFS first so HBase has a filesystem to write to.
    startMiniDFSCluster(numDataNodes, dataNodeHosts);

    // Start up a zk cluster, unless one was passed in externally.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Start the MiniHBaseCluster last, on top of DFS + ZK.
    return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
  }

  /** Starts the HBase portion only, with default master/region-server classes. */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException{
    return startMiniHBaseCluster(numMasters, numSlaves, null, null);
  }
861
862
863
864
865
866
867
868
869
870
871
872
873 public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
874 final int numSlaves, Class<? extends HMaster> masterClass,
875 Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
876 throws IOException, InterruptedException {
877
878 createRootDir();
879
880
881
882 if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
883 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
884 }
885 if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
886 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
887 }
888
889 Configuration c = new Configuration(this.conf);
890 this.hbaseCluster =
891 new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
892
893 HTable t = new HTable(c, TableName.META_TABLE_NAME);
894 ResultScanner s = t.getScanner(new Scan());
895 while (s.next() != null) {
896 continue;
897 }
898 s.close();
899 t.close();
900
901 getHBaseAdmin();
902 LOG.info("Minicluster is up");
903 return (MiniHBaseCluster)this.hbaseCluster;
904 }
905
906
907
908
909
910
911
912 public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
913 this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
914
915 HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
916 ResultScanner s = t.getScanner(new Scan());
917 while (s.next() != null) {
918
919 }
920 LOG.info("HBase has been restarted");
921 s.close();
922 t.close();
923 }
924
925
926
927
928
929
930 public MiniHBaseCluster getMiniHBaseCluster() {
931 if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
932 return (MiniHBaseCluster)this.hbaseCluster;
933 }
934 throw new RuntimeException(hbaseCluster + " not an instance of " +
935 MiniHBaseCluster.class.getName());
936 }
937
938
939
940
941
942
  /**
   * Stops the mini cluster top-down: HBase first, then ZK (only if we started
   * it ourselves), then DFS, and finally removes the test directories.
   */
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    if (!this.passedZkCluster){
      // Only stop ZK when this utility created it; externally supplied
      // clusters belong to the caller.
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }
955
956
957
958
959
960 @Override
961 public boolean cleanupTestDir() throws IOException {
962 boolean ret = super.cleanupTestDir();
963 if (deleteDir(this.clusterTestDir)) {
964 this.clusterTestDir = null;
965 return ret & true;
966 }
967 return false;
968 }
969
970
971
972
973
  /**
   * Shuts down the HBase portion of the mini cluster: closes the cached admin,
   * resets the region-server wait knobs that startMiniHBaseCluster() set,
   * stops the cluster, and closes the shared ZooKeeper watcher.
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    // Unset the configuration for MIN and MAX RS to start so a later start
    // recomputes them from its own numSlaves.
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shut down zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }
995
996
997
998
999
1000
1001
1002 public Path getDefaultRootDirPath() throws IOException {
1003 FileSystem fs = FileSystem.get(this.conf);
1004 return new Path(fs.makeQualified(fs.getHomeDirectory()),"hbase");
1005 }
1006
1007
1008
1009
1010
1011
1012
1013
1014
  /**
   * Creates the hbase root directory on the configured filesystem, points
   * hbase.rootdir at it, and stamps it with the FS version file.
   * @return the created root dir path
   */
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    // Write the hbase.version file so the master accepts the directory.
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }
1023
1024
1025
1026
1027
  /** Flushes the memstores of every region on every region server. */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /** Flushes the memstores of all regions belonging to {@code tableName}. */
  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /**
   * Compacts every region in the cluster.
   * @param major true for a major compaction, false for a minor one
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /**
   * Compacts all regions of {@code tableName}.
   * @param major true for a major compaction, false for a minor one
   */
  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }
1055
1056
1057
1058
1059
1060
1061
1062
  /**
   * Creates a table with a single column family.
   * @return an HTable handle to the new table
   */
  public HTable createTable(String tableName, String family)
  throws IOException{
    return createTable(TableName.valueOf(tableName), new String[]{family});
  }

  /**
   * Creates a table with a single column family.
   * @return an HTable handle to the new table
   */
  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException{
    return createTable(TableName.valueOf(tableName), new byte[][]{family});
  }
1079
1080
1081
1082
1083
1084
1085
1086
1087 public HTable createTable(TableName tableName, String[] families)
1088 throws IOException {
1089 List<byte[]> fams = new ArrayList<byte[]>(families.length);
1090 for (String family : families) {
1091 fams.add(Bytes.toBytes(family));
1092 }
1093 return createTable(tableName, fams.toArray(new byte[0][]));
1094 }
1095
1096
1097
1098
1099
1100
1101
1102
  /**
   * Creates a table with a single column family.
   * @return an HTable handle to the new table
   */
  public HTable createTable(TableName tableName, byte[] family)
  throws IOException{
    return createTable(tableName, new byte[][]{family});
  }

  /**
   * Creates a table; the returned HTable is backed by a fresh copy of this
   * utility's configuration.
   * @return an HTable handle to the new table
   */
  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

  /**
   * Creates a table; the returned HTable is backed by a fresh copy of this
   * utility's configuration.
   * @return an HTable handle to the new table
   */
  public HTable createTable(TableName tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }
1134
  /** Pre-split variant taking the table name as bytes; delegates to the TableName form. */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }

  /** Pre-split variant taking the table name as a string; delegates to the TableName form. */
  public HTable createTable(String tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions,
        startKey, endKey, numRegions);
  }
1146
1147 public HTable createTable(TableName tableName, byte[][] families,
1148 int numVersions, byte[] startKey, byte[] endKey, int numRegions)
1149 throws IOException{
1150 HTableDescriptor desc = new HTableDescriptor(tableName);
1151 for (byte[] family : families) {
1152 HColumnDescriptor hcd = new HColumnDescriptor(family)
1153 .setMaxVersions(numVersions);
1154 desc.addFamily(hcd);
1155 }
1156 getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
1157
1158 waitUntilAllRegionsAssigned(tableName);
1159 return new HTable(getConfiguration(), tableName);
1160 }
1161
1162
1163
1164
1165
1166
1167
1168
1169
  /**
   * Creates a table from an existing descriptor, adding one column family per
   * entry of {@code families}. NOTE: the passed {@code htd} is mutated (families
   * are added to it). Waits for all regions to be assigned before returning.
   * @param c configuration backing the returned HTable
   */
  public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
  throws IOException {
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms (they are on by default as of 0.95) but we disable them
      // here because tests have hard-coded counts of what to expect in block
      // cache, etc., and blooms being on is interfering.
      hcd.setBloomFilterType(BloomType.NONE);
      htd.addFamily(hcd);
    }
    getHBaseAdmin().createTable(htd);
    // Wait until all regions get assigned to region servers before returning.
    waitUntilAllRegionsAssigned(htd.getTableName());
    return new HTable(c, htd.getTableName());
  }

  /**
   * Creates a table with the given families under a fresh descriptor.
   * @param c configuration backing the returned HTable
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c)
  throws IOException {
    return createTable(new HTableDescriptor(tableName), families, c);
  }
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208 public HTable createTable(byte[] tableName, byte[][] families,
1209 final Configuration c)
1210 throws IOException {
1211 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1212 for(byte[] family : families) {
1213 HColumnDescriptor hcd = new HColumnDescriptor(family);
1214
1215
1216
1217 hcd.setBloomFilterType(BloomType.NONE);
1218 desc.addFamily(hcd);
1219 }
1220 getHBaseAdmin().createTable(desc);
1221 return new HTable(c, tableName);
1222 }
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
  /**
   * Creates a table whose families all keep {@code numVersions} versions,
   * waits for region assignment, and returns a handle backed by {@code c}.
   */
  public HTable createTable(TableName tableName, byte[][] families,
      final Configuration c, int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // Wait until all regions get assigned to region servers before returning.
    waitUntilAllRegionsAssigned(tableName);
    return new HTable(c, tableName);
  }
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257 public HTable createTable(byte[] tableName, byte[][] families,
1258 final Configuration c, int numVersions)
1259 throws IOException {
1260 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1261 for(byte[] family : families) {
1262 HColumnDescriptor hcd = new HColumnDescriptor(family)
1263 .setMaxVersions(numVersions);
1264 desc.addFamily(hcd);
1265 }
1266 getHBaseAdmin().createTable(desc);
1267 return new HTable(c, tableName);
1268 }
1269
1270
1271
1272
1273
1274
1275
1276
1277
  /** Single-family variant; delegates to the multi-family overload. */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /** Single-family variant; delegates to the multi-family overload. */
  public HTable createTable(TableName tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }

  /** Byte-name variant; delegates to the TableName overload. */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
  throws IOException {
    return createTable(TableName.valueOf(tableName), families, numVersions);
  }
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318 public HTable createTable(TableName tableName, byte[][] families,
1319 int numVersions)
1320 throws IOException {
1321 HTableDescriptor desc = new HTableDescriptor(tableName);
1322 for (byte[] family : families) {
1323 HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1324 desc.addFamily(hcd);
1325 }
1326 getHBaseAdmin().createTable(desc);
1327
1328 waitUntilAllRegionsAssigned(tableName);
1329 return new HTable(new Configuration(getConfiguration()), tableName);
1330 }
1331
1332
1333
1334
1335
1336
1337
1338
1339
  /** Byte-name variant with block size; delegates to the TableName overload. */
  public HTable createTable(byte[] tableName, byte[][] families,
    int numVersions, int blockSize) throws IOException {
    return createTable(TableName.valueOf(tableName),
        families, numVersions, blockSize);
  }
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354 public HTable createTable(TableName tableName, byte[][] families,
1355 int numVersions, int blockSize) throws IOException {
1356 HTableDescriptor desc = new HTableDescriptor(tableName);
1357 for (byte[] family : families) {
1358 HColumnDescriptor hcd = new HColumnDescriptor(family)
1359 .setMaxVersions(numVersions)
1360 .setBlocksize(blockSize);
1361 desc.addFamily(hcd);
1362 }
1363 getHBaseAdmin().createTable(desc);
1364
1365 waitUntilAllRegionsAssigned(tableName);
1366 return new HTable(new Configuration(getConfiguration()), tableName);
1367 }
1368
1369
1370
1371
1372
1373
1374
1375
1376
/**
 * Creates a table where each family gets its own max-version count
 * (families and numVersions are matched by index).
 * @param tableName table name as bytes
 * @param families column families to create
 * @param numVersions per-family maximum versions, parallel to families
 * @return an open HTable handle to the new table
 * @throws IOException if table creation fails
 */
public HTable createTable(byte[] tableName, byte[][] families,
    int[] numVersions)
throws IOException {
  return createTable(TableName.valueOf(tableName), families, numVersions);
}
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391 public HTable createTable(TableName tableName, byte[][] families,
1392 int[] numVersions)
1393 throws IOException {
1394 HTableDescriptor desc = new HTableDescriptor(tableName);
1395 int i = 0;
1396 for (byte[] family : families) {
1397 HColumnDescriptor hcd = new HColumnDescriptor(family)
1398 .setMaxVersions(numVersions[i]);
1399 desc.addFamily(hcd);
1400 i++;
1401 }
1402 getHBaseAdmin().createTable(desc);
1403
1404 waitUntilAllRegionsAssigned(tableName);
1405 return new HTable(new Configuration(getConfiguration()), tableName);
1406 }
1407
1408
1409
1410
1411
1412
1413
1414
1415
/**
 * Creates a pre-split table with a single family.
 * @param tableName table name as bytes
 * @param family the one column family to create
 * @param splitRows split points handed to HBaseAdmin.createTable
 * @return an open HTable handle to the new table
 * @throws IOException if table creation fails
 */
public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
throws IOException{
  return createTable(TableName.valueOf(tableName), family, splitRows);
}
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429 public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
1430 throws IOException {
1431 HTableDescriptor desc = new HTableDescriptor(tableName);
1432 HColumnDescriptor hcd = new HColumnDescriptor(family);
1433 desc.addFamily(hcd);
1434 getHBaseAdmin().createTable(desc, splitRows);
1435
1436 waitUntilAllRegionsAssigned(tableName);
1437 return new HTable(getConfiguration(), tableName);
1438 }
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448 public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
1449 throws IOException {
1450 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1451 for(byte[] family:families) {
1452 HColumnDescriptor hcd = new HColumnDescriptor(family);
1453 desc.addFamily(hcd);
1454 }
1455 getHBaseAdmin().createTable(desc, splitRows);
1456
1457 waitUntilAllRegionsAssigned(TableName.valueOf(tableName));
1458 return new HTable(getConfiguration(), tableName);
1459 }
1460
1461
1462
1463
1464
/**
 * Drops an existing table, disabling it first if necessary.
 * @param tableName name of the table as a String
 * @throws IOException if the delete fails
 */
public void deleteTable(String tableName) throws IOException {
  deleteTable(TableName.valueOf(tableName));
}

/**
 * Drops an existing table, disabling it first if necessary.
 * @param tableName name of the table as bytes
 * @throws IOException if the delete fails
 */
public void deleteTable(byte[] tableName) throws IOException {
  deleteTable(TableName.valueOf(tableName));
}
1476
1477
1478
1479
1480
1481 public void deleteTable(TableName tableName) throws IOException {
1482 try {
1483 getHBaseAdmin().disableTable(tableName);
1484 } catch (TableNotEnabledException e) {
1485 LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1486 }
1487 getHBaseAdmin().deleteTable(tableName);
1488 }
1489
1490
1491
1492
1493
// Shared column-family fixtures; used together by createTableDescriptor().
public final static byte [] fam1 = Bytes.toBytes("colfamily11");
public final static byte [] fam2 = Bytes.toBytes("colfamily21");
public final static byte [] fam3 = Bytes.toBytes("colfamily31");
// All three fixture families, in declaration order.
public static final byte[][] COLUMNS = {fam1, fam2, fam3};
// Default max-versions used by createTableDescriptor(String).
private static final int MAXVERSIONS = 3;

// Bounds of the three-letter key space used by ROWS / loadRegion.
public static final char FIRST_CHAR = 'a';
public static final char LAST_CHAR = 'z';
// "aaa" -- the first key of that key space.
public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1504
1505
1506
1507
1508
1509
1510
1511
1512 public HTableDescriptor createTableDescriptor(final String name,
1513 final int minVersions, final int versions, final int ttl, boolean keepDeleted) {
1514 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
1515 for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1516 htd.addFamily(new HColumnDescriptor(cfName)
1517 .setMinVersions(minVersions)
1518 .setMaxVersions(versions)
1519 .setKeepDeletedCells(keepDeleted)
1520 .setBlockCacheEnabled(false)
1521 .setTimeToLive(ttl)
1522 );
1523 }
1524 return htd;
1525 }
1526
1527
1528
1529
1530
1531
1532
1533 public HTableDescriptor createTableDescriptor(final String name) {
1534 return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1535 MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1536 }
1537
1538
1539
1540
1541
1542
1543
1544
1545
/**
 * Creates an HRegion for {@code desc} spanning [startKey, endKey), rooted
 * under the data test dir.
 * @param desc table descriptor for the region
 * @param startKey inclusive first row of the region
 * @param endKey exclusive end row of the region
 * @return the created region
 * @throws IOException if region creation fails
 */
public HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey,
    byte [] endKey)
throws IOException {
  HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
  return createLocalHRegion(hri, desc);
}

/**
 * Creates the region described by {@code info} under the data test dir
 * using this utility's configuration.
 * @throws IOException if region creation fails
 */
public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
  return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
}

/**
 * Same as {@link #createLocalHRegion(HRegionInfo, HTableDescriptor)} but the
 * region writes through the supplied HLog.
 * @throws IOException if region creation fails
 */
public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, HLog hlog) throws IOException {
  return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, hlog);
}
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589 public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
1590 String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
1591 HLog hlog, byte[]... families) throws IOException {
1592 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
1593 htd.setReadOnly(isReadOnly);
1594 for (byte[] family : families) {
1595 HColumnDescriptor hcd = new HColumnDescriptor(family);
1596
1597 hcd.setMaxVersions(Integer.MAX_VALUE);
1598 htd.addFamily(hcd);
1599 }
1600 htd.setDurability(durability);
1601 HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
1602 return createLocalHRegion(info, htd, hlog);
1603 }
1604
1605
1606
1607
1608
1609
1610
1611
1612
/**
 * Deletes every row of the given table.
 * @param tableName table to empty, as bytes
 * @return an open HTable handle to the (now empty) table
 * @throws IOException if the scan or deletes fail
 */
public HTable truncateTable(byte[] tableName) throws IOException {
  return truncateTable(TableName.valueOf(tableName));
}
1616
1617
1618
1619
1620
1621
1622
1623 public HTable truncateTable(TableName tableName) throws IOException {
1624 HTable table = new HTable(getConfiguration(), tableName);
1625 Scan scan = new Scan();
1626 ResultScanner resScan = table.getScanner(scan);
1627 for(Result res : resScan) {
1628 Delete del = new Delete(res.getRow());
1629 table.delete(del);
1630 }
1631 resScan = table.getScanner(scan);
1632 resScan.close();
1633 return table;
1634 }
1635
1636
1637
1638
1639
1640
1641
1642
/**
 * Loads every ROWS key into the table under one family, writing to the WAL.
 * @return number of rows written
 */
public int loadTable(final HTable t, final byte[] f) throws IOException {
  return loadTable(t, new byte[][] {f});
}

/**
 * Loads every ROWS key into the table under one family.
 * @param writeToWAL false to skip the WAL for speed
 * @return number of rows written
 */
public int loadTable(final HTable t, final byte[] f, boolean writeToWAL) throws IOException {
  return loadTable(t, new byte[][] {f}, null, writeToWAL);
}

/**
 * Loads every ROWS key into the table under each of the given families,
 * writing to the WAL; each cell's value is its row key.
 * @return number of rows written
 */
public int loadTable(final HTable t, final byte[][] f) throws IOException {
  return loadTable(t, f, null);
}

/**
 * Loads every ROWS key into the table under each of the given families,
 * writing to the WAL.
 * @param value cell value to store, or null to use each row key as the value
 * @return number of rows written
 */
public int loadTable(final HTable t, final byte[][] f, byte[] value) throws IOException {
  return loadTable(t, f, value, true);
}
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689 public int loadTable(final HTable t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException {
1690 t.setAutoFlush(false);
1691 int rowCount = 0;
1692 for (byte[] row : HBaseTestingUtility.ROWS) {
1693 Put put = new Put(row);
1694 put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
1695 for (int i = 0; i < f.length; i++) {
1696 put.add(f[i], null, value != null ? value : row);
1697 }
1698 t.put(put);
1699 rowCount++;
1700 }
1701 t.flushCommits();
1702 return rowCount;
1703 }
1704
1705
1706
1707
/**
 * Tracks how often each three-lower-case-letter row key ("aaa".."zzz") has
 * been observed, and can validate that exactly the keys inside
 * [startRow, stopRow) were each seen once.
 */
public static class SeenRowTracker {
  // 26 -- size of each dimension of the per-letter counting cube.
  int dim = 'z' - 'a' + 1;
  // seen count indexed by the three letters of the row key.
  int[][][] seenRows = new int[dim][dim][dim];
  byte[] startRow;  // inclusive lower bound of the expected range
  byte[] stopRow;   // exclusive upper bound of the expected range

  public SeenRowTracker(byte[] startRow, byte[] stopRow) {
    this.startRow = startRow;
    this.stopRow = stopRow;
  }

  /** Zeroes the seen count of every key in the ROWS fixture. */
  void reset() {
    for (byte[] row : ROWS) {
      seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
    }
  }

  /** Maps a letter byte ('a'..'z') to its 0-based cube index. */
  int i(byte b) {
    return b - 'a';
  }

  /** Records one observation of {@code row} (first three bytes used). */
  public void addRow(byte[] row) {
    seenRows[i(row[0])][i(row[1])][i(row[2])]++;
  }

  /**
   * Checks that every key inside [startRow, stopRow) was seen exactly once
   * and every key outside it was never seen; throws RuntimeException on the
   * first mismatch.
   */
  public void validate() {
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          int count = seenRows[i(b1)][i(b2)][i(b3)];
          int expectedCount = 0;
          if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
              && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
            expectedCount = 1;
          }
          if (count != expectedCount) {
            String row = new String(new byte[] {b1,b2,b3});
            throw new RuntimeException("Row:" + row + " has a seen count of " + count + " instead of " + expectedCount);
          }
        }
      }
    }
  }
}
1755
/**
 * Loads every three-letter key "aaa".."zzz" into the region without flushing.
 * @return number of rows written
 */
public int loadRegion(final HRegion r, final byte[] f) throws IOException {
  return loadRegion(r, f, false);
}
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768 public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
1769 throws IOException {
1770 byte[] k = new byte[3];
1771 int rowCount = 0;
1772 for (byte b1 = 'a'; b1 <= 'z'; b1++) {
1773 for (byte b2 = 'a'; b2 <= 'z'; b2++) {
1774 for (byte b3 = 'a'; b3 <= 'z'; b3++) {
1775 k[0] = b1;
1776 k[1] = b2;
1777 k[2] = b3;
1778 Put put = new Put(k);
1779 put.setDurability(Durability.SKIP_WAL);
1780 put.add(f, null, k);
1781 if (r.getLog() == null) put.setDurability(Durability.SKIP_WAL);
1782
1783 int preRowCount = rowCount;
1784 int pause = 10;
1785 int maxPause = 1000;
1786 while (rowCount == preRowCount) {
1787 try {
1788 r.put(put);
1789 rowCount++;
1790 } catch (RegionTooBusyException e) {
1791 pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
1792 Threads.sleep(pause);
1793 }
1794 }
1795 }
1796 }
1797 if (flush) {
1798 r.flushcache();
1799 }
1800 }
1801 return rowCount;
1802 }
1803
1804 public void loadNumericRows(final HTable t, final byte[] f, int startRow, int endRow) throws IOException {
1805 for (int i = startRow; i < endRow; i++) {
1806 byte[] data = Bytes.toBytes(String.valueOf(i));
1807 Put put = new Put(data);
1808 put.add(f, null, data);
1809 t.put(put);
1810 }
1811 }
1812
1813
1814
1815
1816 public int countRows(final HTable table) throws IOException {
1817 Scan scan = new Scan();
1818 ResultScanner results = table.getScanner(scan);
1819 int count = 0;
1820 for (@SuppressWarnings("unused") Result res : results) {
1821 count++;
1822 }
1823 results.close();
1824 return count;
1825 }
1826
1827 public int countRows(final HTable table, final byte[]... families) throws IOException {
1828 Scan scan = new Scan();
1829 for (byte[] family: families) {
1830 scan.addFamily(family);
1831 }
1832 ResultScanner results = table.getScanner(scan);
1833 int count = 0;
1834 for (@SuppressWarnings("unused") Result res : results) {
1835 count++;
1836 }
1837 results.close();
1838 return count;
1839 }
1840
1841
1842
1843
1844 public String checksumRows(final HTable table) throws Exception {
1845 Scan scan = new Scan();
1846 ResultScanner results = table.getScanner(scan);
1847 MessageDigest digest = MessageDigest.getInstance("MD5");
1848 for (Result res : results) {
1849 digest.update(res.getRow());
1850 }
1851 results.close();
1852 return digest.toString();
1853 }
1854
1855
1856
1857
1858
1859
1860
1861
1862
/**
 * Creates multiple regions for the table using this utility's configuration
 * and the default KEYS split points.
 * @return number of regions created
 */
public int createMultiRegions(HTable table, byte[] columnFamily)
throws IOException {
  return createMultiRegions(getConfiguration(), table, columnFamily);
}
1867
1868
// All 26^3 = 17,576 three-letter row keys "aaa".."zzz" in ascending order.
// Shared fixture consumed by loadTable() and SeenRowTracker.reset().
public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3];
static {
  // Enumerate every combination of three lower-case letters, sorted.
  int i = 0;
  for (byte b1 = 'a'; b1 <= 'z'; b1++) {
    for (byte b2 = 'a'; b2 <= 'z'; b2++) {
      for (byte b3 = 'a'; b3 <= 'z'; b3++) {
        ROWS[i][0] = b1;
        ROWS[i][1] = b2;
        ROWS[i][2] = b3;
        i++;
      }
    }
  }
}
1883
// Region start keys "bbb".."yyy" preceded by the empty start key; used by
// createMultiRegions(Configuration, HTable, byte[]) as region boundaries.
public static final byte[][] KEYS = {
  HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
  Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
  Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
  Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
  Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
  Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
  Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
  Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
  Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
};

// Variant intended for HBaseAdmin.createTable split keys: no empty key and
// an extra trailing "zzz" (presumably because split keys must not include
// the empty byte array -- TODO confirm against HBaseAdmin.createTable).
public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
};
1907
1908
1909
1910
1911
1912
1913
1914
1915
/**
 * Creates multiple regions for the table using the default KEYS split points.
 * @return number of regions created
 */
public int createMultiRegions(final Configuration c, final HTable table,
    final byte[] columnFamily)
throws IOException {
  return createMultiRegions(c, table, columnFamily, KEYS);
}
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931 public int createMultiRegions(final Configuration c, final HTable table,
1932 final byte [] family, int numRegions)
1933 throws IOException {
1934 if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1935 byte [] startKey = Bytes.toBytes("aaaaa");
1936 byte [] endKey = Bytes.toBytes("zzzzz");
1937 byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1938 byte [][] regionStartKeys = new byte[splitKeys.length+1][];
1939 for (int i=0;i<splitKeys.length;i++) {
1940 regionStartKeys[i+1] = splitKeys[i];
1941 }
1942 regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
1943 return createMultiRegions(c, table, family, regionStartKeys);
1944 }
1945
/**
 * Rewrites the table's layout in meta to one region per start key: adds a
 * region entry for each [startKeys[i], startKeys[i+1]) range (the last wraps
 * to startKeys[0]), deletes the table's pre-existing meta rows and the old
 * region's directory, clears cached locations, and re-assigns the new
 * regions if the table is enabled.
 * @param c configuration used to reach meta and the filesystem
 * @param table table whose regions are being rewritten
 * @param columnFamily family added to the descriptor if missing
 * @param startKeys sorted-in-place region start keys
 * @return number of regions written to meta
 */
@SuppressWarnings("deprecation")
public int createMultiRegions(final Configuration c, final HTable table,
    final byte[] columnFamily, byte [][] startKeys)
throws IOException {
  Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
  HTable meta = new HTable(c, TableName.META_TABLE_NAME);
  HTableDescriptor htd = table.getTableDescriptor();
  if(!htd.hasFamily(columnFamily)) {
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    htd.addFamily(hcd);
  }
  // Snapshot the existing meta rows and the encoded name of the table's
  // current region BEFORE adding the new entries; both are removed below.
  List<byte[]> rows = getMetaTableRows(htd.getTableName());
  String regionToDeleteInFS = table
      .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
      .getRegionInfo().getEncodedName();
  List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
  // Add one meta entry per start-key range.
  int count = 0;
  for (int i = 0; i < startKeys.length; i++) {
    int j = (i + 1) % startKeys.length;
    HRegionInfo hri = new HRegionInfo(table.getName(),
      startKeys[i], startKeys[j]);
    MetaEditor.addRegionToMeta(meta, hri);
    newRegions.add(hri);
    count++;
  }
  // Remove the table's pre-existing meta rows.
  for (byte[] row : rows) {
    LOG.info("createMultiRegions: deleting meta row -> " +
      Bytes.toStringBinary(row));
    meta.delete(new Delete(row));
  }
  // Remove the old region's directory from the filesystem.
  // NOTE(review): uses the deprecated FileSystem.delete(Path) overload.
  Path tableDir = new Path(getDefaultRootDirPath().toString()
      + System.getProperty("file.separator") + htd.getTableName()
      + System.getProperty("file.separator") + regionToDeleteInFS);
  FileSystem.get(c).delete(tableDir);
  // Drop cached locations so clients re-read the rewritten meta.
  HConnection conn = table.getConnection();
  conn.clearRegionCache();
  // Assign the freshly added regions if the table is enabled.
  HBaseAdmin admin = getHBaseAdmin();
  if (admin.isTableEnabled(table.getTableName())) {
    for(HRegionInfo hri : newRegions) {
      admin.assign(hri.getRegionName());
    }
  }

  meta.close();

  return count;
}
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013 public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
2014 final HTableDescriptor htd, byte [][] startKeys)
2015 throws IOException {
2016 HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
2017 Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2018 List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
2019
2020 for (int i = 0; i < startKeys.length; i++) {
2021 int j = (i + 1) % startKeys.length;
2022 HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
2023 startKeys[j]);
2024 MetaEditor.addRegionToMeta(meta, hri);
2025 newRegions.add(hri);
2026 }
2027
2028 meta.close();
2029 return newRegions;
2030 }
2031
2032
2033
2034
2035
2036
2037 public List<byte[]> getMetaTableRows() throws IOException {
2038
2039 HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2040 List<byte[]> rows = new ArrayList<byte[]>();
2041 ResultScanner s = t.getScanner(new Scan());
2042 for (Result result : s) {
2043 LOG.info("getMetaTableRows: row -> " +
2044 Bytes.toStringBinary(result.getRow()));
2045 rows.add(result.getRow());
2046 }
2047 s.close();
2048 t.close();
2049 return rows;
2050 }
2051
2052
2053
2054
2055
2056
2057 public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2058
2059 HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2060 List<byte[]> rows = new ArrayList<byte[]>();
2061 ResultScanner s = t.getScanner(new Scan());
2062 for (Result result : s) {
2063 HRegionInfo info = HRegionInfo.getHRegionInfo(result);
2064 if (info == null) {
2065 LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2066
2067 continue;
2068 }
2069
2070 if (info.getTable().equals(tableName)) {
2071 LOG.info("getMetaTableRows: row -> " +
2072 Bytes.toStringBinary(result.getRow()) + info);
2073 rows.add(result.getRow());
2074 }
2075 }
2076 s.close();
2077 t.close();
2078 return rows;
2079 }
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
/**
 * Returns the region server hosting the table's first region, or null if it
 * could not be located within the configured retries.
 * @param tableName table name as bytes
 */
public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
throws IOException, InterruptedException {
  return getRSForFirstRegionInTable(TableName.valueOf(tableName));
}
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106 public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2107 throws IOException, InterruptedException {
2108 List<byte[]> metaRows = getMetaTableRows(tableName);
2109 if (metaRows == null || metaRows.isEmpty()) {
2110 return null;
2111 }
2112 LOG.debug("Found " + metaRows.size() + " rows for table " +
2113 tableName);
2114 byte [] firstrow = metaRows.get(0);
2115 LOG.debug("FirstRow=" + Bytes.toString(firstrow));
2116 long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2117 HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2118 int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2119 HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
2120 RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MICROSECONDS);
2121 while(retrier.shouldRetry()) {
2122 int index = getMiniHBaseCluster().getServerWith(firstrow);
2123 if (index != -1) {
2124 return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2125 }
2126
2127 retrier.sleepUntilNextRetry();
2128 }
2129 return null;
2130 }
2131
2132
2133
2134
2135
2136
2137
/**
 * Starts a MiniMRCluster with two servers.
 * @return the started cluster
 * @throws IOException if startup fails
 */
public MiniMRCluster startMiniMapReduceCluster() throws IOException {
  startMiniMapReduceCluster(2);
  return mrCluster;
}
2142
2143
2144
2145
2146
/**
 * Redirects Hadoop's static final TaskLog.LOG_DIR to a "userlogs" directory
 * under this utility's hadoopLogDir, by reflectively stripping the field's
 * FINAL modifier and reassigning it.
 * NOTE(review): relies on the JDK-internal Field "modifiers" field, which is
 * inaccessible on newer JDKs -- confirm the JDK in use.
 */
private void forceChangeTaskLogDir() {
  Field logDirField;
  try {
    logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
    logDirField.setAccessible(true);

    // Clear FINAL on LOG_DIR so it can be reassigned below.
    Field modifiersField = Field.class.getDeclaredField("modifiers");
    modifiersField.setAccessible(true);
    modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);

    logDirField.set(null, new File(hadoopLogDir, "userlogs"));
  } catch (SecurityException e) {
    throw new RuntimeException(e);
  } catch (NoSuchFieldException e) {
    // TaskLog has no LOG_DIR field in this Hadoop version.
    throw new RuntimeException(e);
  } catch (IllegalArgumentException e) {
    throw new RuntimeException(e);
  } catch (IllegalAccessException e) {
    throw new RuntimeException(e);
  }
}
2169
2170
2171
2172
2173
2174
2175
/**
 * Starts a MiniMRCluster with the given number of servers and copies the
 * addresses the cluster chose back into this utility's Configuration so
 * that subsequently submitted jobs find it.
 * @param servers number of task tracker / node manager servers to run
 * @throws IllegalStateException if a cluster is already running
 * @throws IOException if startup fails
 */
private void startMiniMapReduceCluster(final int servers) throws IOException {
  if (mrCluster != null) {
    throw new IllegalStateException("MiniMRCluster is already running");
  }
  LOG.info("Starting mini mapreduce cluster...");
  setupClusterTestDir();
  createDirsAndSetProperties();

  forceChangeTaskLogDir();

  // Allow containers generous virtual memory relative to physical memory.
  conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

  // Deterministic test runs: no speculative task execution.
  conf.setBoolean("mapreduce.map.speculative", false);
  conf.setBoolean("mapreduce.reduce.speculative", false);

  // Point the MR cluster at the test filesystem (or FS_URI when overridden).
  mrCluster = new MiniMRCluster(servers,
    FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
    null, null, new JobConf(this.conf));
  JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
  if (jobConf == null) {
    jobConf = mrCluster.createJobConf();
  }

  jobConf.set("mapred.local.dir",
    conf.get("mapred.local.dir"));
  LOG.info("Mini mapreduce cluster started");

  // Copy the runtime-chosen addresses out of the cluster's JobConf into our
  // Configuration so client code using `conf` can reach the new cluster.
  conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
  // Run against YARN in mini-cluster mode.
  conf.set("mapreduce.framework.name", "yarn");
  conf.setBoolean("yarn.is.minicluster", true);
  String rmAddress = jobConf.get("yarn.resourcemanager.address");
  if (rmAddress != null) {
    conf.set("yarn.resourcemanager.address", rmAddress);
  }
  String historyAddress = jobConf.get("mapreduce.jobhistory.address");
  if (historyAddress != null) {
    conf.set("mapreduce.jobhistory.address", historyAddress);
  }
  String schedulerAddress =
    jobConf.get("yarn.resourcemanager.scheduler.address");
  if (schedulerAddress != null) {
    conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
  }
}
2231
2232
2233
2234
2235 public void shutdownMiniMapReduceCluster() {
2236 LOG.info("Stopping mini mapreduce cluster...");
2237 if (mrCluster != null) {
2238 mrCluster.shutdown();
2239 mrCluster = null;
2240 }
2241
2242 conf.set("mapred.job.tracker", "local");
2243 LOG.info("Mini mapreduce cluster stopped");
2244 }
2245
2246
2247
2248
/**
 * Creates a mock RegionServerServices with no server name.
 * @throws IOException if the ZooKeeper watcher or filesystem is unavailable
 */
public RegionServerServices createMockRegionServerService() throws IOException {
  return createMockRegionServerService((ServerName)null);
}

/**
 * Creates a mock RegionServerServices wired to the given RPC server.
 * @param rpc RPC server the mock should report
 * @throws IOException if the ZooKeeper watcher or filesystem is unavailable
 */
public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
  final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
  rss.setFileSystem(getTestFileSystem());
  rss.setRpcServer(rpc);
  return rss;
}

/**
 * Creates a mock RegionServerServices reporting the given server name.
 * @param name server name the mock should report; may be null
 * @throws IOException if the ZooKeeper watcher or filesystem is unavailable
 */
public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
  final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
  rss.setFileSystem(getTestFileSystem());
  return rss;
}
2273
2274
2275
2276
2277
2278
2279 public void enableDebug(Class<?> clazz) {
2280 Log l = LogFactory.getLog(clazz);
2281 if (l instanceof Log4JLogger) {
2282 ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2283 } else if (l instanceof Jdk14Logger) {
2284 ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2285 }
2286 }
2287
2288
2289
2290
2291
/**
 * Expires the active master's ZooKeeper session (without checking cluster
 * status afterwards).
 */
public void expireMasterSession() throws Exception {
  HMaster master = getMiniHBaseCluster().getMaster();
  expireSession(master.getZooKeeper(), false);
}

/**
 * Expires a region server's ZooKeeper session and lowers the minimum
 * region-server-count setting so the cluster doesn't wait for the lost
 * server.
 * @param index index of the region server in the mini cluster
 */
public void expireRegionServerSession(int index) throws Exception {
  HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
  expireSession(rs.getZooKeeper(), false);
  decrementMinRegionServerCount();
}
2307
/**
 * Lowers the "minimum region servers to start" setting by one in this
 * utility's configuration and in every master's own Configuration copy.
 */
private void decrementMinRegionServerCount() {
  // The utility-wide configuration...
  decrementMinRegionServerCount(getConfiguration());

  // ...and each master keeps its own Configuration instance.
  for (MasterThread master : getHBaseCluster().getMasterThreads()) {
    decrementMinRegionServerCount(master.getMaster().getConfiguration());
  }
}

/**
 * Decrements WAIT_ON_REGIONSERVERS_MINTOSTART in {@code conf}, never going
 * below 1; a no-op when the setting is absent.
 */
private void decrementMinRegionServerCount(Configuration conf) {
  int currentCount = conf.getInt(
    ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
  if (currentCount != -1) {
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
      Math.max(currentCount - 1, 1));
  }
}
2327
/**
 * Expires the given watcher's ZooKeeper session without verifying cluster
 * status afterwards.
 */
public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
  expireSession(nodeZK, false);
}

/**
 * @deprecated the {@code server} argument is ignored; use
 *             {@link #expireSession(ZooKeeperWatcher)} or the boolean
 *             overload instead.
 */
@Deprecated
public void expireSession(ZooKeeperWatcher nodeZK, Server server)
throws Exception {
  expireSession(nodeZK, false);
}
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
/**
 * Forces the ZooKeeper session behind {@code nodeZK} to expire by opening
 * and then closing a second connection that re-uses the same session id and
 * password -- closing it ends the session server-side, so the original
 * watcher receives a session-expired event.
 * @param nodeZK watcher whose underlying session will be expired
 * @param checkStatus if true, verify the cluster is still usable afterwards
 *                    by opening (and closing) a connection to the meta table
 */
public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
throws Exception {
  Configuration c = new Configuration(this.conf);
  String quorumServers = ZKConfig.getZKQuorumServersString(c);
  ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
  // Credentials needed to impersonate the existing session.
  byte[] password = zk.getSessionPasswd();
  long sessionID = zk.getSessionId();

  // A separate connection on the SAME session, kept open while the session
  // is being torn down; it only logs the events it receives.
  ZooKeeper monitor = new ZooKeeper(quorumServers,
    1000, new org.apache.zookeeper.Watcher(){
    @Override
    public void process(WatchedEvent watchedEvent) {
      LOG.info("Monitor ZKW received event="+watchedEvent);
    }
  } , sessionID, password);

  // Second connection on the same session; closing this one below is what
  // actually ends the session.
  ZooKeeper newZK = new ZooKeeper(quorumServers,
    1000, EmptyWatcher.instance, sessionID, password);

  // Wait (bounded to ~1s) for the connection to establish before closing,
  // so the close really reaches the server.
  long start = System.currentTimeMillis();
  while (newZK.getState() != States.CONNECTED
      && System.currentTimeMillis() - start < 1000) {
    Thread.sleep(1);
  }
  newZK.close();
  LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

  monitor.close();

  if (checkStatus) {
    // Sanity check: the cluster should still serve meta after the expiry.
    new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
  }
}
2393
2394
2395
2396
2397
2398
2399
/**
 * Returns the running mini HBase cluster (alias of getMiniHBaseCluster()).
 */
public MiniHBaseCluster getHBaseCluster() {
  return getMiniHBaseCluster();
}

/**
 * Returns the cluster through the generic HBaseCluster interface by reading
 * the field directly -- presumably so callers also work when the backing
 * cluster is not a mini cluster (TODO confirm against field initialization).
 */
public HBaseCluster getHBaseClusterInterface() {
  return hbaseCluster;
}
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
/**
 * Returns the shared, lazily created HBaseAdmin for this utility. The
 * returned instance ignores close() (see HBaseAdminForTests) and lives for
 * the utility's lifetime.
 * @throws IOException if the admin cannot be constructed
 */
public synchronized HBaseAdmin getHBaseAdmin()
throws IOException {
  if (hbaseAdmin == null){
    hbaseAdmin = new HBaseAdminForTests(getConfiguration());
  }
  return hbaseAdmin;
}

// Lazily created by getHBaseAdmin(); shared across all callers.
private HBaseAdminForTests hbaseAdmin = null;

/**
 * HBaseAdmin whose close() only logs a warning instead of closing, so tests
 * that defensively close the shared admin don't break later users of it.
 */
private static class HBaseAdminForTests extends HBaseAdmin {
  public HBaseAdminForTests(Configuration c) throws MasterNotRunningException,
      ZooKeeperConnectionException, IOException {
    super(c);
  }

  @Override
  public synchronized void close() throws IOException {
    // Intentionally not closing; the instance is shared (see class comment).
    LOG.warn("close() called on HBaseAdmin instance returned from HBaseTestingUtility.getHBaseAdmin()");
  }

  /** Really closes the underlying admin; for internal shutdown paths. */
  private synchronized void close0() throws IOException {
    super.close();
  }
}
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
/**
 * Returns the shared, lazily created ZooKeeperWatcher for this utility.
 * Its Abortable converts any abort into a RuntimeException so test failures
 * surface immediately.
 * @throws IOException if the watcher cannot be constructed
 */
public synchronized ZooKeeperWatcher getZooKeeperWatcher()
throws IOException {
  if (zooKeeperWatcher == null) {
    zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
      new Abortable() {
      @Override public void abort(String why, Throwable e) {
        throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
      }
      @Override public boolean isAborted() {return false;}
    });
  }
  return zooKeeperWatcher;
}
// Lazily created by getZooKeeperWatcher(); shared across all callers.
private ZooKeeperWatcher zooKeeperWatcher;
2475
2476
2477
2478
2479
2480
2481
2482
2483
/**
 * Unassigns the region with the given name via the admin.
 * @param regionName region name as a String
 */
public void closeRegion(String regionName) throws IOException {
  closeRegion(Bytes.toBytes(regionName));
}

/**
 * Unassigns the region with the given name via the admin.
 * @param regionName region name as bytes
 */
public void closeRegion(byte[] regionName) throws IOException {
  getHBaseAdmin().closeRegion(regionName, null);
}

/**
 * Unassigns the region containing the given row of the given table.
 * @param row row whose hosting region to close
 * @param table table the row belongs to
 */
public void closeRegionByRow(String row, HTable table) throws IOException {
  closeRegionByRow(Bytes.toBytes(row), table);
}

/**
 * Unassigns the region containing the given row of the given table.
 * @param row row whose hosting region to close
 * @param table table the row belongs to
 */
public void closeRegionByRow(byte[] row, HTable table) throws IOException {
  HRegionLocation hrl = table.getRegionLocation(row);
  closeRegion(hrl.getRegionInfo().getRegionName());
}
2520
2521
2522
2523
2524
2525
2526
2527
2528 public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
2529 List<HRegion> regions = getHBaseCluster().getRegions(tableName);
2530 int regCount = regions.size();
2531 Set<Integer> attempted = new HashSet<Integer>();
2532 int idx;
2533 int attempts = 0;
2534 do {
2535 regions = getHBaseCluster().getRegions(tableName);
2536 if (regCount != regions.size()) {
2537
2538 attempted.clear();
2539 }
2540 regCount = regions.size();
2541
2542
2543 if (regCount > 0) {
2544 idx = random.nextInt(regCount);
2545
2546 if (attempted.contains(idx))
2547 continue;
2548 try {
2549 regions.get(idx).checkSplit();
2550 return regions.get(idx);
2551 } catch (Exception ex) {
2552 LOG.warn("Caught exception", ex);
2553 attempted.add(idx);
2554 }
2555 }
2556 attempts++;
2557 } while (maxAttempts == -1 || attempts < maxAttempts);
2558 return null;
2559 }
2560
/** Returns the mini ZooKeeper cluster in use, if any. */
public MiniZooKeeperCluster getZkCluster() {
  return zkCluster;
}

/**
 * Adopts an externally started mini ZooKeeper cluster and points this
 * utility's configuration at its client port.
 */
public void setZkCluster(MiniZooKeeperCluster zkCluster) {
  // Mark the cluster as caller-owned so shutdown logic doesn't stop it.
  this.passedZkCluster = true;
  this.zkCluster = zkCluster;
  conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
}

/** Returns the mini DFS cluster in use, if any. */
public MiniDFSCluster getDFSCluster() {
  return dfsCluster;
}

/**
 * Adopts an externally started mini DFS cluster.
 * @throws IOException if a DFS cluster is already running
 */
public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
  if (dfsCluster != null && dfsCluster.isClusterUp()) {
    throw new IOException("DFSCluster is already running! Shut it down first.");
  }
  this.dfsCluster = cluster;
}

/** Returns an HFileSystem view of the test filesystem. */
public FileSystem getTestFileSystem() throws IOException {
  return HFileSystem.get(conf);
}
2585
2586
2587
2588
2589
2590
2591
2592
/**
 * Waits up to 30 seconds for the table to become available.
 * @param table table name as bytes
 */
public void waitTableAvailable(byte[] table)
throws InterruptedException, IOException {
  waitTableAvailable(getHBaseAdmin(), table, 30000);
}

/** Waits up to 30 seconds for the table to become available, via {@code admin}. */
public void waitTableAvailable(HBaseAdmin admin, byte[] table)
throws InterruptedException, IOException {
  waitTableAvailable(admin, table, 30000);
}

/**
 * Waits up to {@code timeoutMillis} for the table to become available.
 * @param table table name as bytes
 * @param timeoutMillis wait budget in milliseconds
 */
public void waitTableAvailable(byte[] table, long timeoutMillis)
throws InterruptedException, IOException {
  waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
}
2614
2615 public void waitTableAvailable(HBaseAdmin admin, byte[] table, long timeoutMillis)
2616 throws InterruptedException, IOException {
2617 long startWait = System.currentTimeMillis();
2618 while (!admin.isTableAvailable(table)) {
2619 assertTrue("Timed out waiting for table to become available " +
2620 Bytes.toStringBinary(table),
2621 System.currentTimeMillis() - startWait < timeoutMillis);
2622 Thread.sleep(200);
2623 }
2624 }
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635 public void waitTableEnabled(byte[] table)
2636 throws InterruptedException, IOException {
2637 waitTableEnabled(getHBaseAdmin(), table, 30000);
2638 }
2639
  /**
   * Waits for the named table to become available and enabled using the given
   * admin instance and a default timeout of 30 seconds.
   *
   * @param admin admin instance used to poll table state
   * @param table name of the table to wait for
   */
  public void waitTableEnabled(HBaseAdmin admin, byte[] table)
      throws InterruptedException, IOException {
    waitTableEnabled(admin, table, 30000);
  }
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
  /**
   * Waits for the named table to become available and enabled, using the
   * shared admin instance and the caller-supplied timeout.
   *
   * @param table name of the table to wait for
   * @param timeoutMillis maximum time to wait, in milliseconds
   */
  public void waitTableEnabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableEnabled(getHBaseAdmin(), table, timeoutMillis);
  }
2658
2659 public void waitTableEnabled(HBaseAdmin admin, byte[] table, long timeoutMillis)
2660 throws InterruptedException, IOException {
2661 long startWait = System.currentTimeMillis();
2662 waitTableAvailable(admin, table, timeoutMillis);
2663 long remainder = System.currentTimeMillis() - startWait;
2664 while (!admin.isTableEnabled(table)) {
2665 assertTrue("Timed out waiting for table to become available and enabled " +
2666 Bytes.toStringBinary(table),
2667 System.currentTimeMillis() - remainder < timeoutMillis);
2668 Thread.sleep(200);
2669 }
2670
2671
2672
2673
2674
2675 try {
2676 Canary.sniff(admin, TableName.valueOf(table));
2677 } catch (Exception e) {
2678 throw new IOException(e);
2679 }
2680 }
2681
2682
2683
2684
2685
2686
2687
2688
2689 public boolean ensureSomeRegionServersAvailable(final int num)
2690 throws IOException {
2691 boolean startedServer = false;
2692 MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
2693 for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
2694 LOG.info("Started new server=" + hbaseCluster.startRegionServer());
2695 startedServer = true;
2696 }
2697
2698 return startedServer;
2699 }
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710 public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
2711 throws IOException {
2712 boolean startedServer = ensureSomeRegionServersAvailable(num);
2713
2714 int nonStoppedServers = 0;
2715 for (JVMClusterUtil.RegionServerThread rst :
2716 getMiniHBaseCluster().getRegionServerThreads()) {
2717
2718 HRegionServer hrs = rst.getRegionServer();
2719 if (hrs.isStopping() || hrs.isStopped()) {
2720 LOG.info("A region server is stopped or stopping:"+hrs);
2721 } else {
2722 nonStoppedServers++;
2723 }
2724 }
2725 for (int i=nonStoppedServers; i<num; ++i) {
2726 LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
2727 startedServer = true;
2728 }
2729 return startedServer;
2730 }
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742 public static User getDifferentUser(final Configuration c,
2743 final String differentiatingSuffix)
2744 throws IOException {
2745 FileSystem currentfs = FileSystem.get(c);
2746 if (!(currentfs instanceof DistributedFileSystem)) {
2747 return User.getCurrent();
2748 }
2749
2750
2751 String username = User.getCurrent().getName() +
2752 differentiatingSuffix;
2753 User user = User.createUserForTesting(c, username,
2754 new String[]{"supergroup"});
2755 return user;
2756 }
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771 public static void setMaxRecoveryErrorCount(final OutputStream stream,
2772 final int max) {
2773 try {
2774 Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
2775 for (Class<?> clazz: clazzes) {
2776 String className = clazz.getSimpleName();
2777 if (className.equals("DFSOutputStream")) {
2778 if (clazz.isInstance(stream)) {
2779 Field maxRecoveryErrorCountField =
2780 stream.getClass().getDeclaredField("maxRecoveryErrorCount");
2781 maxRecoveryErrorCountField.setAccessible(true);
2782 maxRecoveryErrorCountField.setInt(stream, max);
2783 break;
2784 }
2785 }
2786 }
2787 } catch (Exception e) {
2788 LOG.info("Could not set max recovery field", e);
2789 }
2790 }
2791
2792
2793
2794
2795
2796
2797
2798
2799
  /**
   * Waits until every region of the given table has a server assignment
   * recorded in meta, with a default timeout of 60 seconds.
   *
   * @param tableName table whose regions must all be assigned
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName, 60000);
  }
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
  /**
   * Waits until every region of the given table has a non-null server column
   * in the meta table, failing if that does not happen within {@code timeout}
   * milliseconds. Polls by re-scanning meta every 200 ms.
   *
   * @param tableName table whose regions must all be assigned
   * @param timeout maximum time to wait, in milliseconds
   * @throws IOException if scanning meta fails or the wait times out
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
      throws IOException {
    final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
    try {
      waitFor(timeout, 200, true, new Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          boolean allRegionsAssigned = true;
          // Scan the catalog family of meta; each row describes one region.
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          ResultScanner s = meta.getScanner(scan);
          try {
            Result r;
            while ((r = s.next()) != null) {
              byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
              HRegionInfo info = HRegionInfo.parseFromOrNull(b);
              if (info != null && info.getTable().equals(tableName)) {
                // A region counts as assigned once meta carries a non-null
                // server column for it.
                b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                allRegionsAssigned &= (b != null);
              }
            }
          } finally {
            s.close();
          }
          return allRegionsAssigned;
        }
      });
    } finally {
      meta.close();
    }
  }
2844
2845
2846
2847
2848
2849 public static List<Cell> getFromStoreFile(HStore store,
2850 Get get) throws IOException {
2851 Scan scan = new Scan(get);
2852 InternalScanner scanner = (InternalScanner) store.getScanner(scan,
2853 scan.getFamilyMap().get(store.getFamily().getName()),
2854
2855
2856 0);
2857
2858 List<Cell> result = new ArrayList<Cell>();
2859 scanner.next(result);
2860 if (!result.isEmpty()) {
2861
2862 Cell kv = result.get(0);
2863 if (!CellUtil.matchingRow(kv, get.getRow())) {
2864 result.clear();
2865 }
2866 }
2867 scanner.close();
2868 return result;
2869 }
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879 public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
2880 assertTrue(numRegions>3);
2881 byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
2882 byte [][] result = new byte[tmpSplitKeys.length+1][];
2883 for (int i=0;i<tmpSplitKeys.length;i++) {
2884 result[i+1] = tmpSplitKeys[i];
2885 }
2886 result[0] = HConstants.EMPTY_BYTE_ARRAY;
2887 return result;
2888 }
2889
2890
2891
2892
2893
  /**
   * Performs a Get for the given row and columns directly against a single
   * store, by building a Get whose family map targets the store's family and
   * delegating to {@code getFromStoreFile(HStore, Get)}.
   *
   * @param store store to read from
   * @param row row key to fetch
   * @param columns qualifiers to fetch within the store's family
   * @return cells for the requested row, possibly empty
   * @throws IOException if the store scan fails
   */
  public static List<Cell> getFromStoreFile(HStore store,
                                byte [] row,
                                NavigableSet<byte[]> columns
                                ) throws IOException {
    Get get = new Get(row);
    // Register the store's own family with the requested columns on the Get.
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);

    return getFromStoreFile(store,get);
  }
2904
2905
2906
2907
2908
2909 public static ZooKeeperWatcher getZooKeeperWatcher(
2910 HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
2911 IOException {
2912 ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
2913 "unittest", new Abortable() {
2914 boolean aborted = false;
2915
2916 @Override
2917 public void abort(String why, Throwable e) {
2918 aborted = true;
2919 throw new RuntimeException("Fatal ZK error, why=" + why, e);
2920 }
2921
2922 @Override
2923 public boolean isAborted() {
2924 return aborted;
2925 }
2926 });
2927 return zkw;
2928 }
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
  /**
   * Drives the region's ZK assignment znode through the full
   * OFFLINE -&gt; OPENING -&gt; OPENED transition for the given server, as if
   * a region server had opened it. The transition order is significant and
   * must not be reordered.
   *
   * @param TEST_UTIL utility supplying the ZK configuration
   * @param region region whose znode is transitioned
   * @param serverName server the region is recorded as opened on
   * @return the watcher used for the transitions (caller owns/closes it)
   */
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    // transitionNodeOpening returns the znode version required to complete
    // the OPENED transition below.
    int version = ZKAssign.transitionNodeOpening(zkw, region
      .getRegionInfo(), serverName);
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
      version);
    return zkw;
  }
2953
2954 public static void assertKVListsEqual(String additionalMsg,
2955 final List<? extends Cell> expected,
2956 final List<? extends Cell> actual) {
2957 final int eLen = expected.size();
2958 final int aLen = actual.size();
2959 final int minLen = Math.min(eLen, aLen);
2960
2961 int i;
2962 for (i = 0; i < minLen
2963 && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
2964 ++i) {}
2965
2966 if (additionalMsg == null) {
2967 additionalMsg = "";
2968 }
2969 if (!additionalMsg.isEmpty()) {
2970 additionalMsg = ". " + additionalMsg;
2971 }
2972
2973 if (eLen != aLen || i != minLen) {
2974 throw new AssertionError(
2975 "Expected and actual KV arrays differ at position " + i + ": " +
2976 safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
2977 safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
2978 }
2979 }
2980
2981 private static <T> String safeGetAsStr(List<T> lst, int i) {
2982 if (0 <= i && i < lst.size()) {
2983 return lst.get(i).toString();
2984 } else {
2985 return "<out_of_range>";
2986 }
2987 }
2988
  /**
   * Returns this cluster's connection key in the form
   * "quorum:clientPort:znodeParent", built from the current configuration.
   */
  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
        + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }
2995
2996
  /**
   * Creates a pre-split table and fills it with pseudo-random puts and
   * deletes, flushing between batches, to produce a table with multiple
   * store files of mixed edits.
   *
   * @param tableName name of the table to create
   * @param families column family names to create
   * @param maxVersions max versions to keep per column
   * @param numColsPerRow columns touched per generated row
   * @param numFlushes number of put/flush rounds
   * @param numRegions number of regions to pre-split into
   * @param numRowsPerFlush rows written per round
   * @return the created, populated HTable (caller closes it)
   */
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
        " regions, " + numFlushes + " storefiles per region, " +
        numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
        "\n");

    // Seed derived from the table name so each table's data is reproducible
    // across runs.
    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    // Keys are zero-padded hex over [0, Integer.MAX_VALUE); the first and
    // last per-region key slices are trimmed off the split range so the
    // outermost regions are not empty.
    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableName, cfBytes,
        maxVersions,
        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
        numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          // Mix of edit types: ~50% puts, ~40% single-version deletes,
          // ~10% all-version deletes.
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }

    return table;
  }
3077
3078 private static final int MIN_RANDOM_PORT = 0xc000;
3079 private static final int MAX_RANDOM_PORT = 0xfffe;
3080 private static Random random = new Random();
3081
3082
3083
3084
3085
3086 public static int randomPort() {
3087 return MIN_RANDOM_PORT
3088 + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
3089 }
3090
3091
3092
3093
3094
3095 public static int randomFreePort() {
3096 int port = 0;
3097 do {
3098 port = randomPort();
3099 if (takenRandomPorts.contains(port)) {
3100 continue;
3101 }
3102 takenRandomPorts.add(port);
3103
3104 try {
3105 ServerSocket sock = new ServerSocket(port);
3106 sock.close();
3107 } catch (IOException ex) {
3108 port = 0;
3109 }
3110 } while (port == 0);
3111 return port;
3112 }
3113
3114
  /**
   * Returns a random IPv4 multicast address of the form 226.1.1.X with
   * X in [0, 253].
   */
  public static String randomMultiCastAddress() {
    return "226.1.1." + random.nextInt(254);
  }
3118
3119
3120
3121 public static void waitForHostPort(String host, int port)
3122 throws IOException {
3123 final int maxTimeMs = 10000;
3124 final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3125 IOException savedException = null;
3126 LOG.info("Waiting for server at " + host + ":" + port);
3127 for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3128 try {
3129 Socket sock = new Socket(InetAddress.getByName(host), port);
3130 sock.close();
3131 savedException = null;
3132 LOG.info("Server at " + host + ":" + port + " is available");
3133 break;
3134 } catch (UnknownHostException e) {
3135 throw new IOException("Failed to look up " + host, e);
3136 } catch (IOException e) {
3137 savedException = e;
3138 }
3139 Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3140 }
3141
3142 if (savedException != null) {
3143 throw savedException;
3144 }
3145 }
3146
3147
3148
3149
3150
3151
  /**
   * Creates a pre-split table for load testing with a single column family
   * using the given compression and data block encoding, then delegates to
   * the descriptor-based overload.
   *
   * @param conf cluster configuration
   * @param tableName table to create
   * @param columnFamily the single family to create
   * @param compression compression algorithm for the family
   * @param dataBlockEncoding data block encoding for the family
   * @return the total number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, desc, hcd);
  }
3161
3162
3163
3164
3165
3166
3167 public static int createPreSplitLoadTestTable(Configuration conf,
3168 HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
3169 if (!desc.hasFamily(hcd.getName())) {
3170 desc.addFamily(hcd);
3171 }
3172
3173 int totalNumberOfRegions = 0;
3174 HBaseAdmin admin = new HBaseAdmin(conf);
3175 try {
3176
3177
3178
3179 int numberOfServers = admin.getClusterStatus().getServers().size();
3180 if (numberOfServers == 0) {
3181 throw new IllegalStateException("No live regionservers");
3182 }
3183
3184 int regionsPerServer = conf.getInt(REGIONS_PER_SERVER_KEY, DEFAULT_REGIONS_PER_SERVER);
3185 totalNumberOfRegions = numberOfServers * regionsPerServer;
3186 LOG.info("Number of live regionservers: " + numberOfServers + ", " +
3187 "pre-splitting table into " + totalNumberOfRegions + " regions " +
3188 "(default regions per server: " + regionsPerServer + ")");
3189
3190 byte[][] splits = new RegionSplitter.HexStringSplit().split(
3191 totalNumberOfRegions);
3192
3193 admin.createTable(desc, splits);
3194 } catch (MasterNotRunningException e) {
3195 LOG.error("Master not running", e);
3196 throw new IOException(e);
3197 } catch (TableExistsException e) {
3198 LOG.warn("Table " + desc.getTableName() +
3199 " already exists, continuing");
3200 } finally {
3201 admin.close();
3202 }
3203 return totalNumberOfRegions;
3204 }
3205
3206 public static int getMetaRSPort(Configuration conf) throws IOException {
3207 HTable table = new HTable(conf, TableName.META_TABLE_NAME);
3208 HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes(""));
3209 table.close();
3210 return hloc.getPort();
3211 }
3212
3213
3214
3215
3216
3217
3218
3219 public void assertRegionOnServer(
3220 final HRegionInfo hri, final ServerName server,
3221 final long timeout) throws IOException, InterruptedException {
3222 long timeoutTime = System.currentTimeMillis() + timeout;
3223 while (true) {
3224 List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
3225 if (regions.contains(hri)) return;
3226 long now = System.currentTimeMillis();
3227 if (now > timeoutTime) break;
3228 Thread.sleep(10);
3229 }
3230 fail("Could not find region " + hri.getRegionNameAsString()
3231 + " on server " + server);
3232 }
3233
3234
3235
3236
3237
  /**
   * Asserts that the given region is online on the given server within
   * {@code timeout} milliseconds AND is not hosted by any other live region
   * server (i.e. no double assignment); fails the test otherwise.
   *
   * @param hri region expected on the server
   * @param server the only server expected to host the region
   * @param timeout maximum time to wait, in milliseconds
   */
  public void assertRegionOnlyOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) {
        // Found on the expected server; now verify no other live server also
        // hosts a region with the same region id.
        List<JVMClusterUtil.RegionServerThread> rsThreads =
            getHBaseCluster().getLiveRegionServerThreads();
        for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
          HRegionServer rs = rsThread.getRegionServer();
          if (server.equals(rs.getServerName())) {
            // Skip the expected host itself.
            continue;
          }
          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
          for (HRegion r: hrs) {
            assertTrue("Region should not be double assigned",
                r.getRegionId() != hri.getRegionId());
          }
        }
        return;
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
        + " on server " + server);
  }
3267
3268 public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
3269 throws IOException {
3270 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
3271 htd.addFamily(hcd);
3272 HRegionInfo info =
3273 new HRegionInfo(TableName.valueOf(tableName), null, null, false);
3274 HRegion region =
3275 HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
3276 return region;
3277 }
3278
  /**
   * Overrides the filesystem URI this utility will use.
   *
   * @param fsURI the filesystem URI to store in FS_URI
   */
  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }
3282
3283
3284
3285
  /**
   * Waits up to {@code timeout} for the predicate to become true, delegating
   * to {@link Waiter#waitFor} with this utility's configuration.
   *
   * @return the value returned by the underlying Waiter call
   */
  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, predicate);
  }
3290
3291
3292
3293
  /**
   * Waits up to {@code timeout}, polling every {@code interval}, for the
   * predicate to become true, delegating to {@link Waiter#waitFor} with this
   * utility's configuration.
   *
   * @return the value returned by the underlying Waiter call
   */
  public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, interval, predicate);
  }
3298
3299
3300
3301
  /**
   * Waits up to {@code timeout}, polling every {@code interval}, for the
   * predicate to become true; {@code failIfTimeout} controls whether a
   * timeout fails the test or just returns. Delegates to
   * {@link Waiter#waitFor} with this utility's configuration.
   *
   * @return the value returned by the underlying Waiter call
   */
  public <E extends Exception> long waitFor(long timeout, long interval,
      boolean failIfTimeout, Predicate<E> predicate) throws E {
    return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
  }
3306
3307
3308
3309
  /**
   * Returns a predicate that is true when the master's assignment manager
   * reports no regions in transition — for use with the waitFor methods.
   */
  public Waiter.Predicate<Exception> predicateNoRegionsInTransition() {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
            .getAssignmentManager().getRegionStates();
        return !regionStates.isRegionsInTransition();
      }
    };
  }
3320
3321
3322
3323
  /**
   * Returns a predicate that is true when the given table is enabled — for
   * use with the waitFor methods.
   *
   * @param tableName table whose enabled state is polled
   */
  public Waiter.Predicate<Exception> predicateTableEnabled(final TableName tableName) {
    return new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return getHBaseAdmin().isTableEnabled(tableName);
      }
    };
  }
3332
3333
3334
3335
3336
3337
  /**
   * Generates column descriptors covering every combination of supported
   * compression, data block encoding, and bloom type, with no name prefix.
   */
  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }
3341
3342
3343
3344
3345
3346
3347
3348 public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
3349 List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
3350 long familyId = 0;
3351 for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
3352 for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
3353 for (BloomType bloomType: BloomType.values()) {
3354 String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
3355 HColumnDescriptor htd = new HColumnDescriptor(name);
3356 htd.setCompressionType(compressionType);
3357 htd.setDataBlockEncoding(encodingType);
3358 htd.setBloomFilterType(bloomType);
3359 htds.add(htd);
3360 familyId++;
3361 }
3362 }
3363 }
3364 return htds;
3365 }
3366
3367
3368
3369
3370
3371 public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
3372 String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
3373 List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
3374 for (String algoName : allAlgos) {
3375 try {
3376 Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
3377 algo.getCompressor();
3378 supportedAlgos.add(algo);
3379 } catch (Throwable t) {
3380
3381 }
3382 }
3383 return supportedAlgos.toArray(new Compression.Algorithm[0]);
3384 }
3385 }