/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.coprocessor;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestRegionObserverInterface {
  static final Log LOG = LogFactory.getLog(TestRegionObserverInterface.class);

  public static final TableName TEST_TABLE = TableName.valueOf("TestTable");
  public final static byte[] A = Bytes.toBytes("a");
  public final static byte[] B = Bytes.toBytes("b");
  public final static byte[] C = Bytes.toBytes("c");
  public final static byte[] ROW = Bytes.toBytes("testrow");

  private static HBaseTestingUtility util = new HBaseTestingUtility();
  private static MiniHBaseCluster cluster = null;
  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    // Set the configuration to indicate which coprocessor should be loaded on every region.
    Configuration conf = util.getConfiguration();
    conf.setBoolean("hbase.master.distributed.log.replay", true);
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver");

    util.startMiniCluster();
    cluster = util.getMiniHBaseCluster();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    util.shutdownMiniCluster();
  }

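  /**
   * Exercises the basic put, get and delete paths and checks, through the flags and
   * counters exposed by SimpleRegionObserver, that the corresponding pre/post hooks
   * fire when expected and that region open/close hooks are counted across table
   * creation and deletion.
   */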
  @Test
  public void testRegionObserver() throws IOException {
    TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRegionObserver");
    // recreate table every time in order to reset the status of the coprocessor.
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    try {
      verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet",
          "hadPrePut", "hadPostPut", "hadDelete", "hadPostStartRegionOperation",
          "hadPostCloseRegionOperation", "hadPostBatchMutateIndispensably" }, tableName,
          new Boolean[] { false, false, false, false, false, false, false, false });

      Put put = new Put(ROW);
      put.add(A, A, A);
      put.add(B, B, B);
      put.add(C, C, C);
      table.put(put);

      verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet",
          "hadPrePut", "hadPostPut", "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete",
          "hadPostStartRegionOperation", "hadPostCloseRegionOperation",
          "hadPostBatchMutateIndispensably" }, tableName, new Boolean[] { false, false, true,
          true, true, true, false, true, true, true });

      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose"},
          tableName,
          new Integer[] {1, 1, 0, 0});

      Get get = new Get(ROW);
      get.addColumn(A, A);
      get.addColumn(B, B);
      get.addColumn(C, C);
      table.get(get);

      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
              "hadDelete", "hadPrePreparedDeleteTS"},
          tableName,
          new Boolean[] {true, true, true, true, false, false});

      Delete delete = new Delete(ROW);
      delete.deleteColumn(A, A);
      delete.deleteColumn(B, B);
      delete.deleteColumn(C, C);
      table.delete(delete);

      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
              "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete", "hadPrePreparedDeleteTS"},
          tableName,
          new Boolean[] {true, true, true, true, true, true, true, true});
    } finally {
      util.deleteTable(tableName);
      table.close();
    }
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose"},
        tableName,
        new Integer[] {1, 1, 1, 1});
  }

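  /**
   * Verifies that an atomic RowMutations (a Put plus a Delete on the same row) goes
   * through the put and delete observer hooks without touching the get hooks.
   */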
  @Test
  public void testRowMutation() throws IOException {
    TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRowMutation");
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    try {
      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDeleted"},
          tableName,
          new Boolean[] {false, false, false, false, false});

      Put put = new Put(ROW);
      put.add(A, A, A);
      put.add(B, B, B);
      put.add(C, C, C);

      Delete delete = new Delete(ROW);
      delete.deleteColumn(A, A);
      delete.deleteColumn(B, B);
      delete.deleteColumn(C, C);

      RowMutations arm = new RowMutations(ROW);
      arm.add(put);
      arm.add(delete);
      table.mutateRow(arm);

      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDeleted"},
          tableName,
          new Boolean[] {false, false, true, true, true});
    } finally {
      util.deleteTable(tableName);
      table.close();
    }
  }

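  /**
   * Verifies that Increment triggers the preIncrement, preIncrementAfterRowLock and
   * postIncrement hooks on the region observer.
   */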
  @Test
  public void testIncrementHook() throws IOException {
    TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testIncrementHook");
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    try {
      Increment inc = new Increment(Bytes.toBytes(0));
      inc.addColumn(A, A, 1);

      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
          tableName,
          new Boolean[] {false, false, false});

      table.increment(inc);

      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
          tableName,
          new Boolean[] {true, true, true});
    } finally {
      util.deleteTable(tableName);
      table.close();
    }
  }

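  /**
   * Verifies that checkAndPut triggers the preCheckAndPut, preCheckAndPutAfterRowLock and
   * postCheckAndPut hooks on the region observer.
   */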
  @Test
  public void testCheckAndPutHooks() throws IOException {
    TableName tableName =
        TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndPutHooks");
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    try {
      Put p = new Put(Bytes.toBytes(0));
      p.add(A, A, A);
      table.put(p);
      table.flushCommits();
      p = new Put(Bytes.toBytes(0));
      p.add(A, A, A);
      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreCheckAndPut",
              "hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut"},
          tableName,
          new Boolean[] {false, false, false});
      table.checkAndPut(Bytes.toBytes(0), A, A, A, p);
      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreCheckAndPut",
              "hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut"},
          tableName,
          new Boolean[] {true, true, true});
    } finally {
      util.deleteTable(tableName);
      table.close();
    }
  }

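  /**
   * Verifies that checkAndDelete triggers the preCheckAndDelete, preCheckAndDeleteAfterRowLock
   * and postCheckAndDelete hooks on the region observer.
   */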
  @Test
  public void testCheckAndDeleteHooks() throws IOException {
    TableName tableName =
        TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndDeleteHooks");
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    try {
      Put p = new Put(Bytes.toBytes(0));
      p.add(A, A, A);
      table.put(p);
      table.flushCommits();
      Delete d = new Delete(Bytes.toBytes(0));
      table.delete(d);
      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreCheckAndDelete",
              "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"},
          tableName,
          new Boolean[] {false, false, false});
      table.checkAndDelete(Bytes.toBytes(0), A, A, A, d);
      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreCheckAndDelete",
              "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"},
          tableName,
          new Boolean[] {true, true, true});
    } finally {
      util.deleteTable(tableName);
      table.close();
    }
  }

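  /**
   * Verifies that Append triggers the preAppend, preAppendAfterRowLock and postAppend
   * hooks on the region observer.
   */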
  @Test
  public void testAppendHook() throws IOException {
    TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testAppendHook");
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    try {
      Append app = new Append(Bytes.toBytes(0));
      app.add(A, A, A);

      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock"},
          tableName,
          new Boolean[] {false, false, false});

      table.append(app);

      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock"},
          tableName,
          new Boolean[] {true, true, true});
    } finally {
      util.deleteTable(tableName);
      table.close();
    }
  }

  @Test
  // HBase-3583
  public void testHBase3583() throws IOException {
    TableName tableName = TableName.valueOf("testHBase3583");
    util.createTable(tableName, new byte[][] {A, B, C});
    util.waitUntilAllRegionsAssigned(tableName);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "wasScannerNextCalled",
            "wasScannerCloseCalled"},
        tableName,
        new Boolean[] {false, false, false, false});

    HTable table = new HTable(util.getConfiguration(), tableName);
    Put put = new Put(ROW);
    put.add(A, A, A);
    table.put(put);

    Get get = new Get(ROW);
    get.addColumn(A, A);
    table.get(get);

    // verify that scannerNext and scannerClose upcalls won't be invoked
    // when we perform get().
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "wasScannerNextCalled",
            "wasScannerCloseCalled"},
        tableName,
        new Boolean[] {true, true, false, false});

    Scan s = new Scan();
    ResultScanner scanner = table.getScanner(s);
    try {
      for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
        // drain the scanner; the results themselves are not inspected
      }
    } finally {
      scanner.close();
    }

    // now scanner hooks should be invoked.
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"wasScannerNextCalled", "wasScannerCloseCalled"},
        tableName,
        new Boolean[] {true, true});
    util.deleteTable(tableName);
    table.close();
  }

  @Test
  // HBase-3758
  public void testHBase3758() throws IOException {
    TableName tableName = TableName.valueOf("testHBase3758");
    util.createTable(tableName, new byte[][] {A, B, C});

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadDeleted", "wasScannerOpenCalled"},
        tableName,
        new Boolean[] {false, false});

    HTable table = new HTable(util.getConfiguration(), tableName);
    Put put = new Put(ROW);
    put.add(A, A, A);
    table.put(put);

    Delete delete = new Delete(ROW);
    table.delete(delete);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadDeleted", "wasScannerOpenCalled"},
        tableName,
        new Boolean[] {true, false});

    Scan s = new Scan();
    ResultScanner scanner = table.getScanner(s);
    try {
      for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
        // drain the scanner; the results themselves are not inspected
      }
    } finally {
      scanner.close();
    }

    // now scanner hooks should be invoked.
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"wasScannerOpenCalled"},
        tableName,
        new Boolean[] {true});
    util.deleteTable(tableName);
    table.close();
  }

  /* Overrides compaction to only output rows with keys that are even numbers */
  public static class EvenOnlyCompactor extends BaseRegionObserver {
    long lastCompaction;
    long lastFlush;

    @Override
    public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
        Store store, final InternalScanner scanner, final ScanType scanType) {
      return new InternalScanner() {
        @Override
        public boolean next(List<Cell> results) throws IOException {
          return next(results, -1);
        }

        @Override
        public boolean next(List<Cell> results, int limit) throws IOException {
          List<Cell> internalResults = new ArrayList<Cell>();
          boolean hasMore;
          do {
            hasMore = scanner.next(internalResults, limit);
            if (!internalResults.isEmpty()) {
              long row = Bytes.toLong(CellUtil.cloneValue(internalResults.get(0)));
              if (row % 2 == 0) {
                // return this row
                break;
              }
              // clear and continue
              internalResults.clear();
            }
          } while (hasMore);

          if (!internalResults.isEmpty()) {
            results.addAll(internalResults);
          }
          return hasMore;
        }

        @Override
        public void close() throws IOException {
          scanner.close();
        }
      };
    }

    @Override
    public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e,
        Store store, StoreFile resultFile) {
      lastCompaction = EnvironmentEdgeManager.currentTimeMillis();
    }

    @Override
    public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e) {
      lastFlush = EnvironmentEdgeManager.currentTimeMillis();
    }
  }

  /**
   * Tests overriding compaction handling via coprocessor hooks.
   * @throws Exception
   */
  @Test
  public void testCompactionOverride() throws Exception {
    byte[] compactTable = Bytes.toBytes("TestCompactionOverride");
    HBaseAdmin admin = util.getHBaseAdmin();
    if (admin.tableExists(compactTable)) {
      admin.disableTable(compactTable);
      admin.deleteTable(compactTable);
    }

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(compactTable));
    htd.addFamily(new HColumnDescriptor(A));
    htd.addCoprocessor(EvenOnlyCompactor.class.getName());
    admin.createTable(htd);

    HTable table = new HTable(util.getConfiguration(), compactTable);
    for (long i = 1; i <= 10; i++) {
      byte[] iBytes = Bytes.toBytes(i);
      Put put = new Put(iBytes);
      put.setDurability(Durability.SKIP_WAL);
      put.add(A, A, iBytes);
      table.put(put);
    }

    HRegion firstRegion = cluster.getRegions(compactTable).get(0);
    Coprocessor cp = firstRegion.getCoprocessorHost().findCoprocessor(
        EvenOnlyCompactor.class.getName());
    assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp);
    EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp;

    // force a compaction
    long ts = System.currentTimeMillis();
    admin.flush(compactTable);
    // wait for flush
    for (int i = 0; i < 10; i++) {
      if (compactor.lastFlush >= ts) {
        break;
      }
      Thread.sleep(1000);
    }
    assertTrue("Flush didn't complete", compactor.lastFlush >= ts);
    LOG.debug("Flush complete");

    ts = compactor.lastFlush;
    admin.majorCompact(compactTable);
    // wait for compaction
    for (int i = 0; i < 30; i++) {
      if (compactor.lastCompaction >= ts) {
        break;
      }
      Thread.sleep(1000);
    }
    LOG.debug("Last compaction was at " + compactor.lastCompaction);
    assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts);

    // only even rows should remain
    ResultScanner scanner = table.getScanner(new Scan());
    try {
      for (long i = 2; i <= 10; i += 2) {
        Result r = scanner.next();
        assertNotNull(r);
        assertFalse(r.isEmpty());
        byte[] iBytes = Bytes.toBytes(i);
        assertArrayEquals("Row should be " + i, iBytes, r.getRow());
        assertArrayEquals("Value should be " + i, iBytes, r.getValue(A, A));
      }
    } finally {
      scanner.close();
    }
    table.close();
  }

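  /**
   * Writes an HFile directly to the test filesystem and bulk loads it with
   * LoadIncrementalHFiles, verifying that the pre/postBulkLoadHFile hooks fire.
   */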
  @Test
  public void bulkLoadHFileTest() throws Exception {
    String testName = TestRegionObserverInterface.class.getName() + ".bulkLoadHFileTest";
    TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".bulkLoadHFileTest");
    Configuration conf = util.getConfiguration();
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    try {
      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
          tableName,
          new Boolean[] {false, false});

      FileSystem fs = util.getTestFileSystem();
      final Path dir = util.getDataTestDirOnTestFS(testName).makeQualified(fs);
      Path familyDir = new Path(dir, Bytes.toString(A));

      createHFile(util.getConfiguration(), fs, new Path(familyDir, Bytes.toString(A)), A, A);

      // Bulk load
      new LoadIncrementalHFiles(conf).doBulkLoad(dir, new HTable(conf, tableName));

      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
          tableName,
          new Boolean[] {true, true});
    } finally {
      util.deleteTable(tableName);
      table.close();
    }
  }

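  /**
   * Moves a region to a newly started region server, kills that server, and verifies
   * that after reassignment the recovered region reports zero pre/postPut upcalls,
   * i.e. log recovery does not retrigger the put hooks.
   */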
  @Test
  public void testRecovery() throws Exception {
    LOG.info(TestRegionObserverInterface.class.getName() + ".testRecovery");
    TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRecovery");
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    try {
      JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
      ServerName sn2 = rs1.getRegionServer().getServerName();
      String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();

      util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
      while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
        Thread.sleep(100);
      }

      Put put = new Put(ROW);
      put.add(A, A, A);
      put.add(B, B, B);
      put.add(C, C, C);
      table.put(put);

      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
              "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete"},
          tableName,
          new Boolean[] {false, false, true, true, true, true, false});

      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"getCtPreWALRestore", "getCtPostWALRestore", "getCtPrePut", "getCtPostPut"},
          tableName,
          new Integer[] {0, 0, 1, 1});

      cluster.killRegionServer(rs1.getRegionServer().getServerName());
      Threads.sleep(1000); // Let the kill soak in.
      util.waitUntilAllRegionsAssigned(tableName);
      LOG.info("All regions assigned");

      verifyMethodResult(SimpleRegionObserver.class,
          new String[] {"getCtPrePut", "getCtPostPut"},
          tableName,
          new Integer[] {0, 0});
    } finally {
      util.deleteTable(tableName);
      table.close();
    }
  }

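  /**
   * Recovery scenario for the special SimpleRegionObserver.TABLE_SKIPPED table: after the
   * hosting region server is killed and the region is reassigned, the pre/postWALRestore
   * counters are expected to stay at zero.
   */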
  @Test
  public void testPreWALRestoreSkip() throws Exception {
    LOG.info(TestRegionObserverInterface.class.getName() + ".testPreWALRestoreSkip");
    TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED);
    HTable table = util.createTable(tableName, new byte[][] { A, B, C });

    JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
    ServerName sn2 = rs1.getRegionServer().getServerName();
    String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();

    util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
    while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
      Thread.sleep(100);
    }

    Put put = new Put(ROW);
    put.add(A, A, A);
    put.add(B, B, B);
    put.add(C, C, C);
    table.put(put);
    table.flushCommits();

    cluster.killRegionServer(rs1.getRegionServer().getServerName());
    Threads.sleep(20000); // just to be sure that the kill has fully started.
    util.waitUntilAllRegionsAssigned(tableName);

    verifyMethodResult(SimpleRegionObserver.class, new String[] { "getCtPreWALRestore",
        "getCtPostWALRestore" }, tableName, new Integer[] { 0, 0 });

    util.deleteTable(tableName);
    table.close();
  }

  // Check, for each online region of the given table, that the named coprocessor accessor
  // methods return the expected values, i.e. that the corresponding upcalls were (or were
  // not) invoked.
  private void verifyMethodResult(Class<?> c, String[] methodName, TableName tableName,
                                  Object[] value) throws IOException {
    try {
      for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
        if (!t.isAlive() || t.getRegionServer().isAborted() || t.getRegionServer().isStopping()) {
          continue;
        }
        for (HRegionInfo r : ProtobufUtil.getOnlineRegions(t.getRegionServer())) {
          if (!r.getTable().equals(tableName)) {
            continue;
          }
          RegionCoprocessorHost cph = t.getRegionServer().getOnlineRegion(r.getRegionName()).
              getCoprocessorHost();

          Coprocessor cp = cph.findCoprocessor(c.getName());
          assertNotNull(cp);
          for (int i = 0; i < methodName.length; ++i) {
            Method m = c.getMethod(methodName[i]);
            Object o = m.invoke(cp);
            assertTrue("Result of " + c.getName() + "." + methodName[i]
                + " is expected to be " + value[i].toString()
                + ", while we get " + o.toString(), o.equals(value[i]));
          }
        }
      }
    } catch (Exception e) {
      throw new IOException(e);
    }
  }

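  /**
   * Writes a small HFile with rows "1" through "9" in the given family and qualifier at the
   * given path; used by bulkLoadHFileTest as input for LoadIncrementalHFiles.
   */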
  private static void createHFile(
      Configuration conf,
      FileSystem fs, Path path,
      byte[] family, byte[] qualifier) throws IOException {
    HFileContext context = new HFileContextBuilder().build();
    HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withFileContext(context)
        .create();
    long now = System.currentTimeMillis();
    try {
      for (int i = 1; i <= 9; i++) {
        KeyValue kv = new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now,
            Bytes.toBytes(i + ""));
        writer.append(kv);
      }
    } finally {
      writer.close();
    }
  }

}