/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Map;

/**
 * Used to communicate with a single HBase table.
 * Obtain an instance from an {@link HConnection}.
 *
 * @since 0.21.0
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface HTableInterface extends Closeable {

  /**
   * Gets the name of this table.
   *
   * @return the table name.
   */
  byte[] getTableName();

  /**
   * Gets the fully qualified table name instance of this table.
   */
  TableName getName();

  /**
   * Returns the {@link Configuration} object used by this instance.
   * <p>
   * The reference returned is not a copy, so any change made to it will
   * affect this instance.
   */
  Configuration getConfiguration();

  /**
   * Gets the {@link HTableDescriptor table descriptor} for this table.
   * @throws IOException if a remote or network exception occurs.
   */
  HTableDescriptor getTableDescriptor() throws IOException;

  /**
   * Test for the existence of columns in the table, as specified by the Get.
   * <p>
   * This will return true if the Get matches one or more keys, false if not.
   * <p>
   * This is a server-side call, so no data is transferred to the client.
   *
   * @param get the Get
   * @return true if the specified Get matches one or more keys, false if not
   * @throws IOException e
   */
  boolean exists(Get get) throws IOException;
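  /*
   * Illustrative sketch (not part of the interface): probing for a row without
   * pulling its data back to the client. The table handle "table" and the
   * family/qualifier names are assumptions for the example; Bytes is
   * org.apache.hadoop.hbase.util.Bytes.
   *
   *   Get probe = new Get(Bytes.toBytes("row-1"));
   *   probe.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
   *   if (table.exists(probe)) {
   *     // the column is present; only now pay the cost of a full get(Get)
   *   }
   */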
  /**
   * Test for the existence of columns in the table, as specified by the Gets.
   * <p>
   * This will return an array of booleans. Each value will be true if the related Get matches
   * one or more keys, false if not.
   * <p>
   * This is a server-side call, so no data is transferred to the client.
   *
   * @param gets the Gets
   * @return Array of Booleans; each element is true if the corresponding Get matches
   *         one or more keys, false if not
   * @throws IOException e
   */
  Boolean[] exists(List<Get> gets) throws IOException;

  /**
   * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends and RowMutations.
   * The ordering of execution of the actions is not defined. Meaning if you do a Put and a
   * Get in the same {@link #batch} call, you will not necessarily be
   * guaranteed that the Get returns what the Put had put.
   *
   * @param actions list of Get, Put, Delete, Increment, Append, RowMutations objects
   * @param results Empty Object[], same size as actions. Provides access to partial
   *                results, in case an exception is thrown. A null in the result array means that
   *                the call for that action failed, even after retries
   * @throws IOException
   * @since 0.90.0
   */
  void batch(final List<? extends Row> actions, final Object[] results)
      throws IOException, InterruptedException;

  /**
   * Same as {@link #batch(List, Object[])}, but returns an array of
   * results instead of using a results parameter reference.
   *
   * @param actions list of Get, Put, Delete, Increment, Append, RowMutations objects
   * @return the results from the actions. A null in the return array means that
   *         the call for that action failed, even after retries
   * @throws IOException
   * @since 0.90.0
   * @deprecated If any exception is thrown by one of the actions, there is no way to
   * retrieve the partially executed results. Use {@link #batch(List, Object[])} instead.
   */
  Object[] batch(final List<? extends Row> actions) throws IOException, InterruptedException;

  /**
   * Same as {@link #batch(List, Object[])}, but with a callback.
   * @since 0.96.0
   */
  <R> void batchCallback(
    final List<? extends Row> actions, final Object[] results, final Batch.Callback<R> callback
  ) throws IOException, InterruptedException;

  /**
   * Same as {@link #batch(List)}, but with a callback.
   * @since 0.96.0
   * @deprecated If any exception is thrown by one of the actions, there is no way to
   * retrieve the partially executed results. Use
   * {@link #batchCallback(List, Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
   * instead.
   */
  <R> Object[] batchCallback(
    List<? extends Row> actions, Batch.Callback<R> callback
  ) throws IOException, InterruptedException;

  /**
   * Extracts certain cells from a given row.
   * @param get The object that specifies what data to fetch and from which row.
   * @return The data coming from the specified row, if it exists.  If the row
   * specified doesn't exist, the {@link Result} instance returned won't
   * contain any {@link KeyValue}, as indicated by {@link Result#isEmpty()}.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  Result get(Get get) throws IOException;
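  /*
   * Illustrative sketch (not part of the interface): a mixed batch call. The
   * results array is sized to match the actions list; a null slot marks an
   * action that still failed after retries. The handle "table" and the
   * family/qualifier names are assumptions for the example.
   *
   *   List<Row> actions = new ArrayList<Row>();
   *   Put p = new Put(Bytes.toBytes("row-1"));
   *   p.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
   *   actions.add(p);
   *   actions.add(new Get(Bytes.toBytes("row-2")));
   *   Object[] results = new Object[actions.size()];
   *   table.batch(actions, results);
   *   // results[1] is a Result for the Get (possibly empty), results[0] the Put outcome
   */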
  /**
   * Extracts certain cells from the given rows, in batch.
   *
   * @param gets The objects that specify what data to fetch and from which rows.
   *
   * @return The data coming from the specified rows, if it exists.  If the row
   *         specified doesn't exist, the {@link Result} instance returned won't
   *         contain any {@link KeyValue}, as indicated by {@link Result#isEmpty()}.
   *         If there are any failures even after retries, there will be a null in
   *         the results array for those Gets, AND an exception will be thrown.
   * @throws IOException if a remote or network exception occurs.
   *
   * @since 0.90.0
   */
  Result[] get(List<Get> gets) throws IOException;

  /**
   * Return the row that matches <i>row</i> exactly,
   * or the one that immediately precedes it.
   *
   * @param row A row key.
   * @param family Column family to include in the {@link Result}.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   *
   * @deprecated As of version 0.92 this method is deprecated without
   * replacement.
   * getRowOrBefore is used internally to find entries in hbase:meta and makes
   * various assumptions about the table (which are true for hbase:meta but not
   * in general) to be efficient.
   */
  Result getRowOrBefore(byte[] row, byte[] family) throws IOException;

  /**
   * Returns a scanner on the current table as specified by the {@link Scan}
   * object.
   * Note that the passed {@link Scan}'s start row and caching properties
   * may be changed.
   *
   * @param scan A configured {@link Scan} object.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(Scan scan) throws IOException;

  /**
   * Gets a scanner on the current table for the given family.
   *
   * @param family The column family to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(byte[] family) throws IOException;

  /**
   * Gets a scanner on the current table for the given family and qualifier.
   *
   * @param family The column family to scan.
   * @param qualifier The column qualifier to scan.
   * @return A scanner.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException;

  /**
   * Puts some data in the table.
   * <p>
   * If {@link #isAutoFlush isAutoFlush} is false, the update is buffered
   * until the internal buffer is full.
   * @param put The data to put.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void put(Put put) throws IOException;

  /**
   * Puts some data in the table, in batch.
   * <p>
   * If {@link #isAutoFlush isAutoFlush} is false, the update is buffered
   * until the internal buffer is full.
   * <p>
   * This can be used for group commit, or for submitting user defined
   * batches. The writeBuffer will be periodically inspected while the List
   * is processed, so depending on the List size the writeBuffer may not be
   * flushed at all, or may be flushed more than once.
   * @param puts The list of mutations to apply. The Puts are aggregated in the
   *             client-side write buffer and applied with a single RPC call.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void put(List<Put> puts) throws IOException;
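  /*
   * Illustrative sketch (not part of the interface): scanning one column family
   * and releasing the scanner when done. The family name "cf" is an assumption.
   *
   *   Scan scan = new Scan();
   *   scan.addFamily(Bytes.toBytes("cf"));
   *   ResultScanner scanner = table.getScanner(scan);
   *   try {
   *     for (Result r : scanner) {
   *       // process each Result as it streams back from the region servers
   *     }
   *   } finally {
   *     scanner.close();  // always release the scanner's server-side resources
   *   }
   */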
  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the put.  If the passed value is null, the check
   * is for the lack of column (i.e. non-existence)
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param value the expected value
   * @param put data to put if check succeeds
   * @throws IOException e
   * @return true if the new put was executed, false otherwise
   */
  boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
      byte[] value, Put put) throws IOException;

  /**
   * Deletes the specified cells/row.
   *
   * @param delete The object that specifies what to delete.
   * @throws IOException if a remote or network exception occurs.
   * @since 0.20.0
   */
  void delete(Delete delete) throws IOException;

  /**
   * Deletes the specified cells/rows in bulk.
   * @param deletes List of things to delete.  List gets modified by this
   * method (in particular it gets re-ordered, so the order in which the elements
   * are inserted in the list gives no guarantee as to the order in which the
   * {@link Delete}s are executed).
   * @throws IOException if a remote or network exception occurs. In that case
   * the {@code deletes} argument will contain the {@link Delete} instances
   * that have not been successfully applied.
   * @since 0.20.1
   */
  void delete(List<Delete> deletes) throws IOException;

  /**
   * Atomically checks if a row/family/qualifier value matches the expected
   * value. If it does, it adds the delete.  If the passed value is null, the
   * check is for the lack of column (i.e. non-existence)
   *
   * @param row to check
   * @param family column family to check
   * @param qualifier column qualifier to check
   * @param value the expected value
   * @param delete data to delete if check succeeds
   * @throws IOException e
   * @return true if the new delete was executed, false otherwise
   */
  boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
      byte[] value, Delete delete) throws IOException;

  /**
   * Performs multiple mutations atomically on a single row. Currently
   * {@link Put} and {@link Delete} are supported.
   *
   * @param rm object that specifies the set of mutations to perform atomically
   * @throws IOException
   */
  void mutateRow(final RowMutations rm) throws IOException;

  /**
   * Appends values to one or more columns within a single row.
   * <p>
   * This operation does not appear atomic to readers.  Appends are done
   * under a single row lock, so write operations to a row are synchronized, but
   * readers do not take row locks so get and scan operations can see this
   * operation partially completed.
   *
   * @param append object that specifies the columns and values to be appended
   * @throws IOException e
   * @return values of columns after the append operation (may be null)
   */
  Result append(final Append append) throws IOException;
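  /*
   * Illustrative sketch (not part of the interface): compare-and-set on a single
   * row. The Put is applied only if cf:q currently holds the expected bytes;
   * passing null as the expected value asserts the column does not exist yet.
   * Row, family and qualifier names are assumptions for the example.
   *
   *   Put update = new Put(Bytes.toBytes("row-1"));
   *   update.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("new"));
   *   boolean applied = table.checkAndPut(Bytes.toBytes("row-1"),
   *       Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("old"), update);
   *   // applied == false means another writer changed cf:q first
   */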
  /**
   * Increments one or more columns within a single row.
   * <p>
   * This operation does not appear atomic to readers.  Increments are done
   * under a single row lock, so write operations to a row are synchronized, but
   * readers do not take row locks so get and scan operations can see this
   * operation partially completed.
   *
   * @param increment object that specifies the columns and amounts to be used
   *                  for the increment operations
   * @throws IOException e
   * @return values of columns after the increment
   */
  Result increment(final Increment increment) throws IOException;

  /**
   * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
   * <p>
   * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}.
   * @param row The row that contains the cell to increment.
   * @param family The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount The amount to increment the cell with (or decrement, if the
   * amount is negative).
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
      long amount) throws IOException;

  /**
   * Atomically increments a column value. If the column value already exists
   * and is not a big-endian long, this could throw an exception. If the column
   * value does not yet exist it is initialized to <code>amount</code> and
   * written to the specified column.
   *
   * <p>Setting durability to {@link Durability#SKIP_WAL} means that in a fail
   * scenario you will lose any increments that have not been flushed.
   * @param row The row that contains the cell to increment.
   * @param family The column family of the cell to increment.
   * @param qualifier The column qualifier of the cell to increment.
   * @param amount The amount to increment the cell with (or decrement, if the
   * amount is negative).
   * @param durability The persistence guarantee for this increment.
   * @return The new value, post increment.
   * @throws IOException if a remote or network exception occurs.
   */
  long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
      long amount, Durability durability) throws IOException;

  /**
   * @deprecated Use {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
   */
  @Deprecated
  long incrementColumnValue(final byte [] row, final byte [] family,
      final byte [] qualifier, final long amount, final boolean writeToWAL)
      throws IOException;

  /**
   * Tells whether or not 'auto-flush' is turned on.
   *
   * @return {@code true} if 'auto-flush' is enabled (default), meaning
   * {@link Put} operations don't get buffered/delayed and are immediately
   * executed.
   */
  boolean isAutoFlush();

  /**
   * Executes all the buffered {@link Put} operations.
   * <p>
   * This method gets called once automatically for every {@link Put} or batch
   * of {@link Put}s (when <code>put(List&lt;Put&gt;)</code> is used) when
   * {@link #isAutoFlush} is {@code true}.
   * @throws IOException if a remote or network exception occurs.
   */
  void flushCommits() throws IOException;

  /**
   * Flushes any pending changes in internal buffers and releases any resources held.
   *
   * @throws IOException if a remote or network exception occurs.
   */
  void close() throws IOException;
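  /*
   * Illustrative sketch (not part of the interface): maintaining a counter cell.
   * The column is created with the given amount if it does not exist yet, and
   * the post-increment value is returned. Names are assumptions for the example.
   *
   *   long hits = table.incrementColumnValue(Bytes.toBytes("row-1"),
   *       Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
   *
   *   // The Increment object can bump several columns of the row in one call:
   *   Increment inc = new Increment(Bytes.toBytes("row-1"));
   *   inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
   *   inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("bytes"), 512L);
   *   Result after = table.increment(inc);
   */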
  /**
   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the
   * table region containing the specified row.  The row given does not actually have
   * to exist.  Whichever region would contain the row based on start and end keys will
   * be used.  Note that the {@code row} parameter is also not passed to the
   * coprocessor handler registered for this protocol, unless the {@code row}
   * is separately passed as an argument in the service request.  The parameter
   * here is only used to locate the region used to handle the call.
   *
   * <p>
   * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
   * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
   * </p>
   *
   * <div style="background-color: #cccccc; padding: 2px">
   * <blockquote><pre>
   * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
   * MyCallRequest request = MyCallRequest.newBuilder()
   *     ...
   *     .build();
   * MyCallResponse response = service.myCall(null, request);
   * </pre></blockquote></div>
   *
   * @param row The row key used to identify the remote region location
   * @return A CoprocessorRpcChannel instance
   */
  @InterfaceAudience.Private // TODO add coproc audience level
  CoprocessorRpcChannel coprocessorService(byte[] row);

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive),
   * and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
   * method with each {@link Service} instance.
   *
   * @param service the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row.  If {@code null}, the
   *                 selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row.
   *               If {@code null}, selection will continue through the last table region.
   * @param callable this instance's
   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
   *                 method will be invoked once per table region, using the {@link Service}
   *                 instance connected to that region.
   * @param <T> the {@link Service} subclass to connect to
   * @param <R> Return type for the {@code callable} parameter's
   *            {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   * @return a map of result values keyed by region name
   */
  @InterfaceAudience.Private // TODO add coproc audience level
  <T extends Service, R> Map<byte[], R> coprocessorService(final Class<T> service,
      byte[] startKey, byte[] endKey, final Batch.Call<T, R> callable)
      throws ServiceException, Throwable;
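  /*
   * Illustrative sketch (not part of the interface): invoking a coprocessor
   * endpoint on every region of the table. "MyService", "MyCountRequest" and
   * "MyCountResponse" stand in for hypothetical generated protobuf classes;
   * ServerRpcController and BlockingRpcCallback are in org.apache.hadoop.hbase.ipc.
   *
   *   Map<byte[], Long> perRegion = table.coprocessorService(MyService.class,
   *       null, null,  // null start/end keys select all regions
   *       new Batch.Call<MyService, Long>() {
   *         public Long call(MyService instance) throws IOException {
   *           ServerRpcController controller = new ServerRpcController();
   *           BlockingRpcCallback<MyCountResponse> done =
   *               new BlockingRpcCallback<MyCountResponse>();
   *           instance.count(controller, MyCountRequest.getDefaultInstance(), done);
   *           return done.get().getCount();
   *         }
   *       });
   *   // perRegion maps each region name to that region's count
   */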
  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive),
   * and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
   * method with each {@link Service} instance.
   *
   * <p>
   * The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)}
   * method will be called with the return value from each region's
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.
   * </p>
   *
   * @param service the protocol buffer {@code Service} implementation to call
   * @param startKey start region selection with region containing this row.  If {@code null}, the
   *                 selection will start with the first table region.
   * @param endKey select regions up to and including the region containing this row.
   *               If {@code null}, selection will continue through the last table region.
   * @param callable this instance's
   *                 {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   *                 will be invoked once per table region, using the {@link Service} instance
   *                 connected to that region.
   * @param callback the callback whose {@code update} method is invoked with each region's result
   * @param <T> the {@link Service} subclass to connect to
   * @param <R> Return type for the {@code callable} parameter's
   *            {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
   */
  @InterfaceAudience.Private // TODO add coproc audience level
  <T extends Service, R> void coprocessorService(final Class<T> service,
      byte[] startKey, byte[] endKey, final Batch.Call<T, R> callable,
      final Batch.Callback<R> callback) throws ServiceException, Throwable;
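  /*
   * Illustrative sketch (not part of the interface): the callback variant lets
   * results be consumed as each region responds instead of waiting for the full
   * result map. The hypothetical service and messages from the previous sketch
   * are reused; countForRegion is a hypothetical helper doing that per-region call.
   *
   *   final AtomicLong total = new AtomicLong();
   *   table.coprocessorService(MyService.class, null, null,
   *       new Batch.Call<MyService, Long>() {
   *         public Long call(MyService instance) throws IOException {
   *           return countForRegion(instance);
   *         }
   *       },
   *       new Batch.Callback<Long>() {
   *         public void update(byte[] region, byte[] row, Long regionCount) {
   *           total.addAndGet(regionCount);  // runs once per region result
   *         }
   *       });
   */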
  /**
   * See {@link #setAutoFlush(boolean, boolean)}
   *
   * @param autoFlush
   *          Whether or not to enable 'auto-flush'.
   * @deprecated in 0.96. When called with setAutoFlush(false), this method also
   * sets clearBufferOnFail to true, which is unexpected but kept for historical reasons.
   * Replace it with setAutoFlush(false, false) if this is exactly what you want, or with
   * {@link #setAutoFlushTo(boolean)} for all other cases.
   */
  @Deprecated
  void setAutoFlush(boolean autoFlush);

  /**
   * Turns 'auto-flush' on or off.
   * <p>
   * When enabled (default), {@link Put} operations don't get buffered/delayed
   * and are immediately executed. Failed operations are not retried. This is
   * slower but safer.
   * <p>
   * Turning off {@code #autoFlush} means that multiple {@link Put}s will be
   * accepted before any RPC is actually sent to do the write operations. If the
   * application dies before pending writes get flushed to HBase, data will be
   * lost.
   * <p>
   * When you turn {@code #autoFlush} off, you should also consider the
   * {@code #clearBufferOnFail} option. By default, asynchronous {@link Put}
   * requests will be retried on failure until successful. However, this can
   * pollute the writeBuffer and slow down batching performance. Additionally,
   * you may want to issue a number of Put requests and call
   * {@link #flushCommits()} as a barrier. In both use cases, consider setting
   * clearBufferOnFail to true to erase the buffer after {@link #flushCommits()}
   * has been called, regardless of success.
   * <p>
   * In other words, if you call {@code #setAutoFlush(false)}, HBase will retry N times for each
   * flushCommits, including the last one when closing the table. This is NOT recommended;
   * most of the time you want to call {@code #setAutoFlush(false, true)}.
   *
   * @param autoFlush
   *          Whether or not to enable 'auto-flush'.
   * @param clearBufferOnFail
   *          Whether to clear the write buffer after {@link #flushCommits()} even if the
   *          flush did not succeed. If autoFlush is true, the value of this parameter is
   *          ignored and clearBufferOnFail is set to true.
   *          Setting clearBufferOnFail to false is deprecated since 0.96.
   * @see #flushCommits
   */
  void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail);

  /**
   * Set the autoFlush behavior, without changing the value of {@code clearBufferOnFail}.
   */
  void setAutoFlushTo(boolean autoFlush);

  /**
   * Returns the maximum size in bytes of the write buffer for this HTable.
   * <p>
   * The default value comes from the configuration parameter
   * {@code hbase.client.write.buffer}.
   * @return The size of the write buffer in bytes.
   */
  long getWriteBufferSize();

  /**
   * Sets the size of the buffer in bytes.
   * <p>
   * If the new size is less than the current amount of data in the
   * write buffer, the buffer gets flushed.
   * @param writeBufferSize The new write buffer size, in bytes.
   * @throws IOException if a remote or network exception occurs.
   */
  void setWriteBufferSize(long writeBufferSize) throws IOException;

  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive);
   * all invocations to the same region server are batched into one call. The coprocessor
   * service is invoked according to the service instance, method name and parameters.
   *
   * @param methodDescriptor
   *          the descriptor for the protobuf service method to call.
   * @param request
   *          the method call parameters
   * @param startKey
   *          start region selection with region containing this row. If {@code null}, the
   *          selection will start with the first table region.
   * @param endKey
   *          select regions up to and including the region containing this row. If {@code null},
   *          selection will continue through the last table region.
   * @param responsePrototype
   *          the prototype of the response message for the Service method.
   * @param <R>
   *          the response type for the coprocessor Service method
   * @throws ServiceException
   * @throws Throwable
   * @return a map of result values keyed by region name
   */
  @InterfaceAudience.Private
  <R extends Message> Map<byte[], R> batchCoprocessorService(
      Descriptors.MethodDescriptor methodDescriptor, Message request,
      byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable;
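  /*
   * Illustrative sketch (not part of the interface): buffered writes. With
   * auto-flush off, Puts accumulate in the client-side write buffer and are
   * sent when the buffer fills, when flushCommits() is called, or when the
   * table is closed. The 4 MB buffer size and the loop body are assumptions.
   *
   *   table.setAutoFlush(false, true);
   *   table.setWriteBufferSize(4 * 1024 * 1024);
   *   for (int i = 0; i < 10000; i++) {
   *     Put p = new Put(Bytes.toBytes("row-" + i));
   *     p.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v" + i));
   *     table.put(p);  // buffered, not yet an RPC
   *   }
   *   table.flushCommits();  // barrier: push everything still buffered
   */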
  /**
   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive);
   * all invocations to the same region server are batched into one call. The coprocessor
   * service is invoked according to the service instance, method name and parameters.
   *
   * <p>
   * The given
   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
   * method will be called with the return value from each region's invocation.
   * </p>
   *
   * @param methodDescriptor
   *          the descriptor for the protobuf service method to call.
   * @param request
   *          the method call parameters
   * @param startKey
   *          start region selection with region containing this row. If {@code null}, the
   *          selection will start with the first table region.
   * @param endKey
   *          select regions up to and including the region containing this row. If {@code null},
   *          selection will continue through the last table region.
   * @param responsePrototype
   *          the prototype of the response message for the Service method.
   * @param callback
   *          callback to invoke with the response for each region
   * @param <R>
   *          the response type for the coprocessor Service method
   * @throws ServiceException
   * @throws Throwable
   */
  @InterfaceAudience.Private
  <R extends Message> void batchCoprocessorService(Descriptors.MethodDescriptor methodDescriptor,
      Message request, byte[] startKey, byte[] endKey, R responsePrototype,
      Batch.Callback<R> callback) throws ServiceException, Throwable;
}
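// Illustrative sketch (not part of the interface): one common way to obtain and
// release an HTableInterface in this generation of the client API. The table
// name "mytable" and the configuration are assumptions for the example.
//
//   Configuration conf = HBaseConfiguration.create();
//   HConnection connection = HConnectionManager.createConnection(conf);
//   HTableInterface table = connection.getTable("mytable");
//   try {
//     // issue gets, puts, scans, ... against the table
//   } finally {
//     table.close();       // flushes pending writes and frees table resources
//     connection.close();  // release the shared connection when done with it
//   }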