1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.client;
20  
21  import com.google.protobuf.Service;
22  import com.google.protobuf.ServiceException;
23  import org.apache.commons.logging.Log;
24  import org.apache.commons.logging.LogFactory;
25  import org.apache.hadoop.classification.InterfaceAudience;
26  import org.apache.hadoop.classification.InterfaceStability;
27  import org.apache.hadoop.conf.Configuration;
28  import org.apache.hadoop.hbase.Cell;
29  import org.apache.hadoop.hbase.HBaseConfiguration;
30  import org.apache.hadoop.hbase.HConstants;
31  import org.apache.hadoop.hbase.HRegionInfo;
32  import org.apache.hadoop.hbase.HRegionLocation;
33  import org.apache.hadoop.hbase.HTableDescriptor;
34  import org.apache.hadoop.hbase.KeyValue;
35  import org.apache.hadoop.hbase.KeyValueUtil;
36  import org.apache.hadoop.hbase.ServerName;
37  import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable;
38  import org.apache.hadoop.hbase.client.coprocessor.Batch;
39  import org.apache.hadoop.hbase.filter.BinaryComparator;
40  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
41  import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
42  import org.apache.hadoop.hbase.ipc.RegionCoprocessorRpcChannel;
43  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
44  import org.apache.hadoop.hbase.protobuf.RequestConverter;
45  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
46  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
47  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiGetRequest;
48  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiGetResponse;
49  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
50  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
51  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
52  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType;
53  import org.apache.hadoop.hbase.util.Bytes;
54  import org.apache.hadoop.hbase.util.Pair;
55  import org.apache.hadoop.hbase.util.Threads;
56  
57  import java.io.Closeable;
58  import java.io.IOException;
59  import java.io.InterruptedIOException;
60  import java.util.ArrayList;
61  import java.util.Collections;
62  import java.util.HashMap;
63  import java.util.List;
64  import java.util.Map;
65  import java.util.NavigableMap;
66  import java.util.TreeMap;
67  import java.util.concurrent.Callable;
68  import java.util.concurrent.ExecutionException;
69  import java.util.concurrent.ExecutorService;
70  import java.util.concurrent.Future;
71  import java.util.concurrent.SynchronousQueue;
72  import java.util.concurrent.ThreadPoolExecutor;
73  import java.util.concurrent.TimeUnit;
74  
75  /**
76   * <p>Used to communicate with a single HBase table.
77   *
78   * <p>This class is not thread safe for reads or writes.
79   *
80   * <p>In case of writes (Put, Delete), the underlying write buffer can
81   * be corrupted if multiple threads contend over a single HTable instance.
82   *
83   * <p>In case of reads, some fields used by a Scan are shared among all threads,
84   * so the HTable implementation cannot be guaranteed to be safe even for concurrent Gets.
85   *
86   * <p>To access a table in a multi-threaded environment, please consider
87   * using the {@link HTablePool} class to create your HTable instances.
88   *
89   * <p>Instances of HTable passed the same {@link Configuration} instance will
90   * share connections to servers out on the cluster and to the zookeeper ensemble
91   * as well as caches of region locations.  This is usually a *good* thing and it
92   * is recommended to reuse the same configuration object for all your tables.
93   * This happens because they will all share the same underlying
94   * {@link HConnection} instance. See {@link HConnectionManager} for more on
95   * how this mechanism works.
96   *
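 * <p>For example (a sketch; "t1" and "t2" are placeholder table names that are assumed
 * to exist):
 * <pre>
 *   Configuration conf = HBaseConfiguration.create();
 *   HTable table1 = new HTable(conf, "t1");
 *   HTable table2 = new HTable(conf, "t2"); // shares the underlying HConnection with table1
 * </pre>
 *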
97   * <p>{@link HConnection} will read most of the
98   * configuration it needs from the passed {@link Configuration} on initial
99   * construction.  Thereafter, for settings such as
100  * <code>hbase.client.pause</code>, <code>hbase.client.retries.number</code>,
101  * and <code>hbase.client.rpc.maxattempts</code> updating their values in the
102  * passed {@link Configuration} subsequent to {@link HConnection} construction
103  * will go unnoticed.  To run with changed values, make a new
104  * {@link HTable} passing a new {@link Configuration} instance that has the
105  * new configuration.
106  *
107  * <p>Note that this class implements the {@link Closeable} interface. When an
108  * HTable instance is no longer required, it *should* be closed in order to ensure
109  * that the underlying resources are promptly released. Please note that the close
110  * method can throw java.io.IOException that must be handled.
111  *
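 * <p>A minimal usage sketch follows. It is illustrative only: the table name "myTable",
 * column family "cf", qualifier "qual" and row key are placeholders assumed to already
 * exist in your cluster.
 * <pre>
 *   Configuration conf = HBaseConfiguration.create();
 *   HTable table = new HTable(conf, "myTable");
 *   try {
 *     Put put = new Put(Bytes.toBytes("row1"));
 *     put.add(Bytes.toBytes("cf"), Bytes.toBytes("qual"), Bytes.toBytes("value1"));
 *     table.put(put);
 *     Result result = table.get(new Get(Bytes.toBytes("row1")));
 *   } finally {
 *     table.close();
 *   }
 * </pre>
 *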
112  * @see HBaseAdmin for create, drop, list, enable and disable of tables.
113  * @see HConnection
114  * @see HConnectionManager
115  */
116 @InterfaceAudience.Public
117 @InterfaceStability.Stable
118 public class HTable implements HTableInterface {
119   private static final Log LOG = LogFactory.getLog(HTable.class);
120   private HConnection connection;
121   private final byte [] tableName;
122   private volatile Configuration configuration;
123   private final ArrayList<Put> writeBuffer = new ArrayList<Put>();
124   private long writeBufferSize;
125   private boolean clearBufferOnFail;
126   private boolean autoFlush;
127   private long currentWriteBufferSize;
128   protected int scannerCaching;
129   private int maxKeyValueSize;
130   private ExecutorService pool;  // For Multi
131   private boolean closed;
132   private int operationTimeout;
133   private final boolean cleanupPoolOnClose; // shutdown the pool in close()
134   private final boolean cleanupConnectionOnClose; // close the connection in close()
135 
136   /**
137    * Creates an object to access an HBase table.
138    * Shares zookeeper connection and other resources with other HTable instances
139    * created with the same <code>conf</code> instance.  Uses already-populated
140    * region cache if one is available, populated by any other HTable instances
141    * sharing this <code>conf</code> instance.  Recommended.
142    * @param conf Configuration object to use.
143    * @param tableName Name of the table.
144    * @throws IOException if a remote or network exception occurs
145    */
146   public HTable(Configuration conf, final String tableName)
147   throws IOException {
148     this(conf, Bytes.toBytes(tableName));
149   }
150 
151 
152   /**
153    * Creates an object to access an HBase table.
154    * Shares zookeeper connection and other resources with other HTable instances
155    * created with the same <code>conf</code> instance.  Uses already-populated
156    * region cache if one is available, populated by any other HTable instances
157    * sharing this <code>conf</code> instance.  Recommended.
158    * @param conf Configuration object to use.
159    * @param tableName Name of the table.
160    * @throws IOException if a remote or network exception occurs
161    */
162   public HTable(Configuration conf, final byte [] tableName)
163   throws IOException {
164     this.tableName = tableName;
165     this.cleanupPoolOnClose = this.cleanupConnectionOnClose = true;
166     if (conf == null) {
167       this.connection = null;
168       return;
169     }
170     this.connection = HConnectionManager.getConnection(conf);
171     this.configuration = conf;
172 
173     int maxThreads = conf.getInt("hbase.htable.threads.max", Integer.MAX_VALUE);
174     if (maxThreads == 0) {
175       maxThreads = 1; // is there a better default?
176     }
177     long keepAliveTime = conf.getLong("hbase.htable.threads.keepalivetime", 60);
178 
179     // Using the "direct handoff" approach, new threads will only be created
180     // if it is necessary and will grow unbounded. This could be bad but in HCM
181     // we only create as many Runnables as there are region servers. It means
182     // it also scales when new region servers are added.
183     this.pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS,
184         new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("hbase-table"));
185     ((ThreadPoolExecutor) this.pool).allowCoreThreadTimeOut(true);
186 
187     this.finishSetup();
188   }
189 
190   /**
191    * Creates an object to access an HBase table.
192    * Shares zookeeper connection and other resources with other HTable instances
193    * created with the same <code>conf</code> instance.  Uses already-populated
194    * region cache if one is available, populated by any other HTable instances
195    * sharing this <code>conf</code> instance.
196    * Use this constructor when the ExecutorService is externally managed.
197    * @param conf Configuration object to use.
198    * @param tableName Name of the table.
199    * @param pool ExecutorService to be used.
200    * @throws IOException if a remote or network exception occurs
201    */
202   public HTable(Configuration conf, final byte[] tableName, final ExecutorService pool)
203       throws IOException {
204     this.connection = HConnectionManager.getConnection(conf);
205     this.configuration = conf;
206     this.pool = pool;
207     this.tableName = tableName;
208     this.cleanupPoolOnClose = false;
209     this.cleanupConnectionOnClose = true;
210 
211     this.finishSetup();
212   }
213 
214   /**
215    * Creates an object to access an HBase table.
216    * Shares zookeeper connection and other resources with other HTable instances
217    * created with the same <code>connection</code> instance.
218    * Use this constructor when the ExecutorService and HConnection instance are
219    * externally managed.
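   * <p>A sketch of this usage; {@code connection}, {@code pool} and the table name "myTable"
   * are placeholders for resources and names your application already manages:
   * <pre>
   *   // connection and pool are created and shut down by the surrounding application
   *   HTable table = new HTable(Bytes.toBytes("myTable"), connection, pool);
   *   try {
   *     // ... use the table ...
   *   } finally {
   *     table.close(); // does not shut down the pool or close the connection
   *   }
   * </pre>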
220    * @param tableName Name of the table.
221    * @param connection HConnection to be used.
222    * @param pool ExecutorService to be used.
223    * @throws IOException if a remote or network exception occurs
224    */
225   public HTable(final byte[] tableName, final HConnection connection,
226       final ExecutorService pool) throws IOException {
227     if (pool == null || pool.isShutdown()) {
228       throw new IllegalArgumentException("Pool is null or shut down.");
229     }
230     if (connection == null || connection.isClosed()) {
231       throw new IllegalArgumentException("Connection is null or closed.");
232     }
233     this.tableName = tableName;
234     this.cleanupPoolOnClose = this.cleanupConnectionOnClose = false;
235     this.connection = connection;
236     this.configuration = connection.getConfiguration();
237     this.pool = pool;
238 
239     this.finishSetup();
240   }
241 
242   /**
243    * Sets up this HTable's parameters based on the passed configuration.
244    */
245   private void finishSetup() throws IOException {
246     this.connection.locateRegion(tableName, HConstants.EMPTY_START_ROW);
247     this.operationTimeout = HTableDescriptor.isMetaTable(tableName) ? HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT
248         : this.configuration.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
249             HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
250     this.writeBufferSize = this.configuration.getLong(
251         "hbase.client.write.buffer", 2097152);
252     this.clearBufferOnFail = true;
253     this.autoFlush = true;
254     this.currentWriteBufferSize = 0;
255     this.scannerCaching = this.configuration.getInt(
256         HConstants.HBASE_CLIENT_SCANNER_CACHING,
257         HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING);
258 
259     this.maxKeyValueSize = this.configuration.getInt(
260         "hbase.client.keyvalue.maxsize", -1);
261     this.closed = false;
262   }
263 
264   /**
265    * {@inheritDoc}
266    */
267   @Override
268   public Configuration getConfiguration() {
269     return configuration;
270   }
271 
272   /**
273    * Tells whether or not a table is enabled. This method creates a
274    * new HBase configuration, so it might make your unit tests fail due to
275    * incorrect ZK client port.
276    * @param tableName Name of table to check.
277    * @return {@code true} if table is online.
278    * @throws IOException if a remote or network exception occurs
279    * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])}
280    */
281   @Deprecated
282   public static boolean isTableEnabled(String tableName) throws IOException {
283     return isTableEnabled(Bytes.toBytes(tableName));
284   }
285 
286   /**
287    * Tells whether or not a table is enabled. This method creates a
288    * new HBase configuration, so it might make your unit tests fail due to
289    * incorrect ZK client port.
290    * @param tableName Name of table to check.
291    * @return {@code true} if table is online.
292    * @throws IOException if a remote or network exception occurs
293    * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])}
294    */
295   @Deprecated
296   public static boolean isTableEnabled(byte[] tableName) throws IOException {
297     return isTableEnabled(HBaseConfiguration.create(), tableName);
298   }
299 
300   /**
301    * Tells whether or not a table is enabled.
302    * @param conf The Configuration object to use.
303    * @param tableName Name of table to check.
304    * @return {@code true} if table is online.
305    * @throws IOException if a remote or network exception occurs
306    * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])}
307    */
308   @Deprecated
309   public static boolean isTableEnabled(Configuration conf, String tableName)
310   throws IOException {
311     return isTableEnabled(conf, Bytes.toBytes(tableName));
312   }
313 
314   /**
315    * Tells whether or not a table is enabled.
316    * @param conf The Configuration object to use.
317    * @param tableName Name of table to check.
318    * @return {@code true} if table is online.
319    * @throws IOException if a remote or network exception occurs
320    * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[] tableName)}
321    */
322   @Deprecated
323   public static boolean isTableEnabled(Configuration conf,
324       final byte[] tableName) throws IOException {
325     return HConnectionManager.execute(new HConnectable<Boolean>(conf) {
326       @Override
327       public Boolean connect(HConnection connection) throws IOException {
328         return connection.isTableEnabled(tableName);
329       }
330     });
331   }
332 
333   /**
334    * Finds the region location hosting the passed row, using cached information.
335    * @param row Row to find.
336    * @return The location of the given row.
337    * @throws IOException if a remote or network exception occurs
338    */
339   public HRegionLocation getRegionLocation(final String row)
340   throws IOException {
341     return connection.getRegionLocation(tableName, Bytes.toBytes(row), false);
342   }
343 
344   /**
345    * Finds the region on which the given row is being served. Does not reload the cache.
346    * @param row Row to find.
347    * @return Location of the row.
348    * @throws IOException if a remote or network exception occurs
349    */
350   public HRegionLocation getRegionLocation(final byte [] row)
351   throws IOException {
352     return connection.getRegionLocation(tableName, row, false);
353   }
354 
355   /**
356    * Finds the region on which the given row is being served.
357    * @param row Row to find.
358    * @param reload true to reload information or false to use cached information
359    * @return Location of the row.
360    * @throws IOException if a remote or network exception occurs
361    */
362   public HRegionLocation getRegionLocation(final byte [] row, boolean reload)
363   throws IOException {
364     return connection.getRegionLocation(tableName, row, reload);
365   }
366 
367   /**
368    * {@inheritDoc}
369    */
370   @Override
371   public byte [] getTableName() {
372     return this.tableName;
373   }
374 
375   /**
376    * <em>INTERNAL</em> Used by unit tests and tools to do low-level
377    * manipulations.
378    * @return An HConnection instance.
379    * @deprecated This method will be changed from public to package protected.
380    */
381   // TODO(tsuna): Remove this.  Unit tests shouldn't require public helpers.
382   @Deprecated
383   public HConnection getConnection() {
384     return this.connection;
385   }
386 
387   /**
388    * Gets the number of rows that a scanner will fetch at once.
389    * <p>
390    * The default value comes from {@code hbase.client.scanner.caching}.
391    * @deprecated Use {@link Scan#setCaching(int)} and {@link Scan#getCaching()}
392    */
393   @Deprecated
394   public int getScannerCaching() {
395     return scannerCaching;
396   }
397 
398   /**
399    * Sets the number of rows that a scanner will fetch at once.
400    * <p>
401    * This will override the value specified by
402    * {@code hbase.client.scanner.caching}.
403    * Increasing this value will reduce the amount of work needed each time
404    * {@code next()} is called on a scanner, at the expense of memory use
405    * (since more rows will need to be maintained in memory by the scanners).
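   * <p>A sketch of the recommended per-scan alternative; {@code table} stands for an open
   * HTable instance, and the family name "cf" and caching value of 500 are illustrative
   * placeholders:
   * <pre>
   *   Scan scan = new Scan();
   *   scan.addFamily(Bytes.toBytes("cf"));
   *   scan.setCaching(500); // fetch 500 rows per RPC instead of the table-wide default
   *   ResultScanner scanner = table.getScanner(scan);
   *   try {
   *     for (Result r : scanner) {
   *       // process r
   *     }
   *   } finally {
   *     scanner.close();
   *   }
   * </pre>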
406    * @param scannerCaching the number of rows a scanner will fetch at once.
407    * @deprecated Use {@link Scan#setCaching(int)}
408    */
409   @Deprecated
410   public void setScannerCaching(int scannerCaching) {
411     this.scannerCaching = scannerCaching;
412   }
413 
414   /**
415    * {@inheritDoc}
416    */
417   @Override
418   public HTableDescriptor getTableDescriptor() throws IOException {
419     return new UnmodifyableHTableDescriptor(
420       this.connection.getHTableDescriptor(this.tableName));
421   }
422 
423   /**
424    * Gets the starting row key for every region in the currently open table.
425    * <p>
426    * This is mainly useful for the MapReduce integration.
427    * @return Array of region starting row keys
428    * @throws IOException if a remote or network exception occurs
429    */
430   public byte [][] getStartKeys() throws IOException {
431     return getStartEndKeys().getFirst();
432   }
433 
434   /**
435    * Gets the ending row key for every region in the currently open table.
436    * <p>
437    * This is mainly useful for the MapReduce integration.
438    * @return Array of region ending row keys
439    * @throws IOException if a remote or network exception occurs
440    */
441   public byte[][] getEndKeys() throws IOException {
442     return getStartEndKeys().getSecond();
443   }
444 
445   /**
446    * Gets the starting and ending row keys for every region in the currently
447    * open table.
448    * <p>
449    * This is mainly useful for the MapReduce integration.
450    * @return Pair of arrays of region starting and ending row keys
451    * @throws IOException if a remote or network exception occurs
452    */
453   public Pair<byte[][],byte[][]> getStartEndKeys() throws IOException {
454     NavigableMap<HRegionInfo, ServerName> regions = getRegionLocations();
455     final List<byte[]> startKeyList = new ArrayList<byte[]>(regions.size());
456     final List<byte[]> endKeyList = new ArrayList<byte[]>(regions.size());
457 
458     for (HRegionInfo region : regions.keySet()) {
459       startKeyList.add(region.getStartKey());
460       endKeyList.add(region.getEndKey());
461     }
462 
463     return new Pair<byte [][], byte [][]>(
464       startKeyList.toArray(new byte[startKeyList.size()][]),
465       endKeyList.toArray(new byte[endKeyList.size()][]));
466   }
467 
468   /**
469    * Gets all the regions and their address for this table.
470    * <p>
471    * This is mainly useful for the MapReduce integration.
472    * @return A map of HRegionInfo with its server address
473    * @throws IOException if a remote or network exception occurs
474    */
475   public NavigableMap<HRegionInfo, ServerName> getRegionLocations() throws IOException {
476     // TODO: Odd that this returns a Map of HRI to SN whereas getRegionLocation, singular, returns an HRegionLocation.
477     return MetaScanner.allTableRegions(getConfiguration(), getTableName(), false);
478   }
479 
480   /**
481    * Get the corresponding regions for an arbitrary range of keys.
482    * <p>
483    * @param startKey Starting row in range, inclusive
484    * @param endKey Ending row in range, exclusive
485    * @return A list of HRegionLocations corresponding to the regions that
486    * contain the specified range
487    * @throws IOException if a remote or network exception occurs
488    */
489   public List<HRegionLocation> getRegionsInRange(final byte [] startKey,
490     final byte [] endKey) throws IOException {
491     final boolean endKeyIsEndOfTable = Bytes.equals(endKey,
492                                                     HConstants.EMPTY_END_ROW);
493     if ((Bytes.compareTo(startKey, endKey) > 0) && !endKeyIsEndOfTable) {
494       throw new IllegalArgumentException(
495         "Invalid range: " + Bytes.toStringBinary(startKey) +
496         " > " + Bytes.toStringBinary(endKey));
497     }
498     final List<HRegionLocation> regionList = new ArrayList<HRegionLocation>();
499     byte [] currentKey = startKey;
500     do {
501       HRegionLocation regionLocation = getRegionLocation(currentKey, false);
502       regionList.add(regionLocation);
503       currentKey = regionLocation.getRegionInfo().getEndKey();
504     } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) &&
505              (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0));
506     return regionList;
507   }
508 
509   /**
510    * {@inheritDoc}
511    */
512    @Override
513    public Result getRowOrBefore(final byte[] row, final byte[] family)
514    throws IOException {
515      return new ServerCallable<Result>(connection, tableName, row, operationTimeout) {
516        public Result call() throws IOException {
517          return ProtobufUtil.getRowOrBefore(server,
518            location.getRegionInfo().getRegionName(), row, family);
519        }
520      }.withRetries();
521    }
522 
523    /**
524     * {@inheritDoc}
525     */
526   @Override
527   public ResultScanner getScanner(final Scan scan) throws IOException {
528     if (scan.getCaching() <= 0) {
529       scan.setCaching(getScannerCaching());
530     }
531     return new ClientScanner(getConfiguration(), scan, getTableName(),
532         this.connection);
533   }
534 
535   /**
536    * {@inheritDoc}
537    */
538   @Override
539   public ResultScanner getScanner(byte [] family) throws IOException {
540     Scan scan = new Scan();
541     scan.addFamily(family);
542     return getScanner(scan);
543   }
544 
545   /**
546    * {@inheritDoc}
547    */
548   @Override
549   public ResultScanner getScanner(byte [] family, byte [] qualifier)
550   throws IOException {
551     Scan scan = new Scan();
552     scan.addColumn(family, qualifier);
553     return getScanner(scan);
554   }
555 
556   /**
557    * {@inheritDoc}
558    */
559   @Override
560   public Result get(final Get get) throws IOException {
561     return new ServerCallable<Result>(connection, tableName, get.getRow(), operationTimeout) {
562           public Result call() throws IOException {
563             return ProtobufUtil.get(server,
564               location.getRegionInfo().getRegionName(), get);
565           }
566         }.withRetries();
567   }
568 
569   /**
570    * {@inheritDoc}
571    */
572   @Override
573   public Result[] get(List<Get> gets) throws IOException {
574     try {
575       Object [] r1 = batch((List)gets);
576 
577       // translate.
578       Result [] results = new Result[r1.length];
579       int i=0;
580       for (Object o : r1) {
581         // batch ensures if there is a failure we get an exception instead
582         results[i++] = (Result) o;
583       }
584 
585       return results;
586     } catch (InterruptedException e) {
587       throw new IOException(e);
588     }
589   }
590 
591   @Override
592   public void batch(final List<? extends Row> actions, final Object[] results)
593       throws InterruptedException, IOException {
594     connection.processBatchCallback(actions, tableName, pool, results, null);
595   }
596 
597   @Override
598   public Object[] batch(final List<? extends Row> actions)
599      throws InterruptedException, IOException {
600     Object[] results = new Object[actions.size()];
601     connection.processBatchCallback(actions, tableName, pool, results, null);
602     return results;
603   }
604 
605   @Override
606   public <R> void batchCallback(
607     final List<? extends Row> actions, final Object[] results, final Batch.Callback<R> callback)
608     throws IOException, InterruptedException {
609     connection.processBatchCallback(actions, tableName, pool, results, callback);
610   }
611 
612   @Override
613   public <R> Object[] batchCallback(
614     final List<? extends Row> actions, final Batch.Callback<R> callback) throws IOException,
615       InterruptedException {
616     Object[] results = new Object[actions.size()];
617     connection.processBatchCallback(actions, tableName, pool, results, callback);
618     return results;
619   }
620 
621   /**
622    * {@inheritDoc}
623    */
624   @Override
625   public void delete(final Delete delete)
626   throws IOException {
627     new ServerCallable<Boolean>(connection, tableName, delete.getRow(), operationTimeout) {
628           public Boolean call() throws IOException {
629             try {
630               MutateRequest request = RequestConverter.buildMutateRequest(
631                 location.getRegionInfo().getRegionName(), delete);
632               MutateResponse response = server.mutate(null, request);
633               return Boolean.valueOf(response.getProcessed());
634             } catch (ServiceException se) {
635               throw ProtobufUtil.getRemoteException(se);
636             }
637           }
638         }.withRetries();
639   }
640 
641   /**
642    * {@inheritDoc}
643    */
644   @Override
645   public void delete(final List<Delete> deletes)
646   throws IOException {
647     Object[] results = new Object[deletes.size()];
648     try {
649       connection.processBatch((List) deletes, tableName, pool, results);
650     } catch (InterruptedException e) {
651       throw new IOException(e);
652     } finally {
653       // mutate list so that it is empty for complete success, or contains only failed records
654       // results are returned in the same order as the requests in list
655       // walk the list backwards, so we can remove from list without impacting the indexes of earlier members
656       for (int i = results.length - 1; i>=0; i--) {
657         // if result is not null, it succeeded
658         if (results[i] instanceof Result) {
659           deletes.remove(i);
660         }
661       }
662     }
663   }
664 
665   /**
666    * {@inheritDoc}
667    */
668   @Override
669   public void put(final Put put) throws IOException {
670     doPut(put);
671     if (autoFlush) {
672       flushCommits();
673     }
674   }
675 
676   /**
677    * {@inheritDoc}
678    */
679   @Override
680   public void put(final List<Put> puts) throws IOException {
681     for (Put put : puts) {
682       doPut(put);
683     }
684     if (autoFlush) {
685       flushCommits();
686     }
687   }
688 
689   private void doPut(Put put) throws IOException{
690     validatePut(put);
691     writeBuffer.add(put);
692     currentWriteBufferSize += put.heapSize();
693     if (currentWriteBufferSize > writeBufferSize) {
694       flushCommits();
695     }
696   }
697 
698   /**
699    * {@inheritDoc}
700    */
701   @Override
702   public void mutateRow(final RowMutations rm) throws IOException {
703     new ServerCallable<Void>(connection, tableName, rm.getRow(),
704         operationTimeout) {
705       public Void call() throws IOException {
706         try {
707           MultiRequest request = RequestConverter.buildMultiRequest(
708             location.getRegionInfo().getRegionName(), rm);
709           server.multi(null, request);
710         } catch (ServiceException se) {
711           throw ProtobufUtil.getRemoteException(se);
712         }
713         return null;
714       }
715     }.withRetries();
716   }
717 
718   /**
719    * {@inheritDoc}
720    */
721   @Override
722   public Result append(final Append append) throws IOException {
723     if (append.numFamilies() == 0) {
724       throw new IOException(
725           "Invalid arguments to append, no columns specified");
726     }
727     return new ServerCallable<Result>(connection, tableName, append.getRow(), operationTimeout) {
728           public Result call() throws IOException {
729             try {
730               MutateRequest request = RequestConverter.buildMutateRequest(
731                 location.getRegionInfo().getRegionName(), append);
732               PayloadCarryingRpcController rpcController =
733                 new PayloadCarryingRpcController();
734               MutateResponse response = server.mutate(rpcController, request);
735               if (!response.hasResult()) return null;
736               return ProtobufUtil.toResult(response.getResult(), rpcController.cellScanner());
737             } catch (ServiceException se) {
738               throw ProtobufUtil.getRemoteException(se);
739             }
740           }
741         }.withRetries();
742   }
743 
744   /**
745    * {@inheritDoc}
746    */
747   @Override
748   public Result increment(final Increment increment) throws IOException {
749     if (!increment.hasFamilies()) {
750       throw new IOException(
751           "Invalid arguments to increment, no columns specified");
752     }
753     return new ServerCallable<Result>(connection, tableName, increment.getRow(), operationTimeout) {
754           public Result call() throws IOException {
755             try {
756               MutateRequest request = RequestConverter.buildMutateRequest(
757                 location.getRegionInfo().getRegionName(), increment);
758               PayloadCarryingRpcController rpcController = new PayloadCarryingRpcController();
759               MutateResponse response = server.mutate(rpcController, request);
760               return ProtobufUtil.toResult(response.getResult(), rpcController.cellScanner());
761             } catch (ServiceException se) {
762               throw ProtobufUtil.getRemoteException(se);
763             }
764           }
765         }.withRetries();
766   }
767 
768   /**
769    * {@inheritDoc}
770    */
771   @Override
772   public long incrementColumnValue(final byte [] row, final byte [] family,
773       final byte [] qualifier, final long amount)
774   throws IOException {
775     return incrementColumnValue(row, family, qualifier, amount, true);
776   }
777 
778   /**
779    * {@inheritDoc}
780    */
781   @Override
782   public long incrementColumnValue(final byte [] row, final byte [] family,
783       final byte [] qualifier, final long amount, final boolean writeToWAL)
784   throws IOException {
785     NullPointerException npe = null;
786     if (row == null) {
787       npe = new NullPointerException("row is null");
788     } else if (family == null) {
789       npe = new NullPointerException("family is null");
790     } else if (qualifier == null) {
791       npe = new NullPointerException("qualifier is null");
792     }
793     if (npe != null) {
794       throw new IOException(
795           "Invalid arguments to incrementColumnValue", npe);
796     }
797     return new ServerCallable<Long>(connection, tableName, row, operationTimeout) {
798           public Long call() throws IOException {
799             try {
800               MutateRequest request = RequestConverter.buildMutateRequest(
801                 location.getRegionInfo().getRegionName(), row, family,
802                 qualifier, amount, writeToWAL);
803               PayloadCarryingRpcController rpcController = new PayloadCarryingRpcController();
804               MutateResponse response = server.mutate(rpcController, request);
805               Result result =
806                 ProtobufUtil.toResult(response.getResult(), rpcController.cellScanner());
807               return Long.valueOf(Bytes.toLong(result.getValue(family, qualifier)));
808             } catch (ServiceException se) {
809               throw ProtobufUtil.getRemoteException(se);
810             }
811           }
812         }.withRetries();
813   }
814 
815   /**
816    * {@inheritDoc}
817    */
818   @Override
819   public boolean checkAndPut(final byte [] row,
820       final byte [] family, final byte [] qualifier, final byte [] value,
821       final Put put)
822   throws IOException {
823     return new ServerCallable<Boolean>(connection, tableName, row, operationTimeout) {
824           public Boolean call() throws IOException {
825             try {
826               MutateRequest request = RequestConverter.buildMutateRequest(
827                 location.getRegionInfo().getRegionName(), row, family, qualifier,
828                 new BinaryComparator(value), CompareType.EQUAL, put);
829               MutateResponse response = server.mutate(null, request);
830               return Boolean.valueOf(response.getProcessed());
831             } catch (ServiceException se) {
832               throw ProtobufUtil.getRemoteException(se);
833             }
834           }
835         }.withRetries();
836   }
837 
838 
839   /**
840    * {@inheritDoc}
841    */
842   @Override
843   public boolean checkAndDelete(final byte [] row,
844       final byte [] family, final byte [] qualifier, final byte [] value,
845       final Delete delete)
846   throws IOException {
847     return new ServerCallable<Boolean>(connection, tableName, row, operationTimeout) {
848           public Boolean call() throws IOException {
849             try {
850               MutateRequest request = RequestConverter.buildMutateRequest(
851                 location.getRegionInfo().getRegionName(), row, family, qualifier,
852                 new BinaryComparator(value), CompareType.EQUAL, delete);
853               MutateResponse response = server.mutate(null, request);
854               return Boolean.valueOf(response.getProcessed());
855             } catch (ServiceException se) {
856               throw ProtobufUtil.getRemoteException(se);
857             }
858           }
859         }.withRetries();
860   }
861 
862   /**
863    * {@inheritDoc}
864    */
865   @Override
866   public boolean exists(final Get get) throws IOException {
867     return new ServerCallable<Boolean>(connection, tableName, get.getRow(), operationTimeout) {
868           public Boolean call() throws IOException {
869             try {
870               GetRequest request = RequestConverter.buildGetRequest(
871                   location.getRegionInfo().getRegionName(), get, true);
872               GetResponse response = server.get(null, request);
873               return response.getExists();
874             } catch (ServiceException se) {
875               throw ProtobufUtil.getRemoteException(se);
876             }
877           }
878         }.withRetries();
879   }
880 
881   /**
882    * Goal of this inner class is to keep track of the initial position of a Get in a list before
883    * sorting it. This is used to send back results in the same order in which the Gets were
884    * received, before they were sorted.
885    */
886   private static class SortedGet implements Comparable<SortedGet> {
887     protected int initialIndex = -1; // Used to store the get initial index in a list.
888     protected Get get; // Encapsulated Get instance.
889 
890     public SortedGet (Get get, int initialIndex) {
891       this.get = get;
892       this.initialIndex = initialIndex;
893     }
894 
895     public int getInitialIndex() {
896       return initialIndex;
897     }
898 
899     @Override
900     public int compareTo(SortedGet o) {
901       return get.compareTo(o.get);
902     }
903 
904     public Get getGet() {
905       return get;
906     }
907 
908     @Override
909     public int hashCode() {
910       return get.hashCode();
911     }
912 
913     @Override
914     public boolean equals(Object obj) {
915       if (obj instanceof SortedGet)
916         return get.equals(((SortedGet)obj).get);
917       else
918         return false;
919     }
920   }
921 
922   /**
923    * {@inheritDoc}
924    */
925   @Override
926   public Boolean[] exists(final List<Get> gets) throws IOException {
927     // Prepare the sorted list of gets. Take the list of gets received, and encapsulate them into
928     // a list of SortedGet instances. Simple list parsing, so complexity here is O(n)
929     // The list is later used to recreate the response order based on the order in which the
930     // Gets were received.
931     ArrayList<SortedGet> sortedGetsList = new ArrayList<HTable.SortedGet>();
932     for (int indexGet = 0; indexGet < gets.size(); indexGet++) {
933       sortedGetsList.add(new SortedGet (gets.get(indexGet), indexGet));
934     }
935 
936     // Sorting the list to get the Gets ordered based on the key.
937     Collections.sort(sortedGetsList); // O(n log n)
938 
939     // step 1: sort the requests by regions to send them bundled.
940     // Map key is startKey index. Map value is the list of Gets related to the region starting
941     // with the startKey.
942     Map<Integer, List<Get>> getsByRegion = new HashMap<Integer, List<Get>>();
943 
944     // Reference map to quickly find back in which region a get belongs.
945     Map<Get, Integer> getToRegionIndexMap = new HashMap<Get, Integer>();
946     Pair<byte[][], byte[][]> startEndKeys = getStartEndKeys();
947 
948     int regionIndex = 0;
949     for (final SortedGet get : sortedGetsList) {
950       // Progress on the regions until we find the one the current get resides in.
951       while ((regionIndex < startEndKeys.getSecond().length) && ((Bytes.compareTo(startEndKeys.getSecond()[regionIndex], get.getGet().getRow()) <= 0))) {
952         regionIndex++;
953       }
954       List<Get> regionGets = getsByRegion.get(regionIndex);
955       if (regionGets == null) {
956         regionGets = new ArrayList<Get>();
957         getsByRegion.put(regionIndex, regionGets);
958       }
959       regionGets.add(get.getGet());
960       getToRegionIndexMap.put(get.getGet(), regionIndex);
961     }
962 
963     // step 2: make the requests
964     Map<Integer, Future<List<Boolean>>> futures =
965         new HashMap<Integer, Future<List<Boolean>>>(sortedGetsList.size());
966     for (final Map.Entry<Integer, List<Get>> getsByRegionEntry : getsByRegion.entrySet()) {
967       Callable<List<Boolean>> callable = new Callable<List<Boolean>>() {
968         public List<Boolean> call() throws Exception {
969           return new ServerCallable<List<Boolean>>(connection, tableName, getsByRegionEntry.getValue()
970               .get(0).getRow(), operationTimeout) {
971             public List<Boolean> call() throws IOException {
972               try {
973                 MultiGetRequest requests = RequestConverter.buildMultiGetRequest(location
974                     .getRegionInfo().getRegionName(), getsByRegionEntry.getValue(), true, false);
975                 MultiGetResponse responses = server.multiGet(null, requests);
976                 return responses.getExistsList();
977               } catch (ServiceException se) {
978                 throw ProtobufUtil.getRemoteException(se);
979               }
980             }
981           }.withRetries();
982         }
983       };
984       futures.put(getsByRegionEntry.getKey(), pool.submit(callable));
985     }
986 
987     // step 3: collect the failures and successes
988     Map<Integer, List<Boolean>> responses = new HashMap<Integer, List<Boolean>>();
989     for (final Map.Entry<Integer, List<Get>> sortedGetEntry : getsByRegion.entrySet()) {
990       try {
991         Future<List<Boolean>> future = futures.get(sortedGetEntry.getKey());
992         List<Boolean> resp = future.get();
993 
994         if (resp == null) {
995           LOG.warn("Failed for gets on region: " + sortedGetEntry.getKey());
996         }
997         responses.put(sortedGetEntry.getKey(), resp);
998       } catch (ExecutionException e) {
999         LOG.warn("Failed for gets on region: " + sortedGetEntry.getKey());
1000       } catch (InterruptedException e) {
1001         LOG.warn("Failed for gets on region: " + sortedGetEntry.getKey());
1002         Thread.currentThread().interrupt();
1003       }
1004     }
1005     Boolean[] results = new Boolean[sortedGetsList.size()];
1006 
1007     // step 4: build the response.
1008     Map<Integer, Integer> indexes = new HashMap<Integer, Integer>();
1009     for (int i = 0; i < sortedGetsList.size(); i++) {
1010       Integer regionInfoIndex = getToRegionIndexMap.get(sortedGetsList.get(i).getGet());
1011       Integer index = indexes.get(regionInfoIndex);
1012       if (index == null) {
1013         index = 0;
1014       }
1015       results[sortedGetsList.get(i).getInitialIndex()] = responses.get(regionInfoIndex).get(index);
1016       indexes.put(regionInfoIndex, index + 1);
1017     }
1018 
1019     return results;
1020   }
1021 
1022   /**
1023    * {@inheritDoc}
1024    */
1025   @Override
1026   public void flushCommits() throws IOException {
1027     if (writeBuffer.isEmpty()){
1028       // Early exit: we can be called on empty buffers.
1029       return;
1030     }
1031 
1032     Object[] results = new Object[writeBuffer.size()];
1033     boolean success = false;
1034     try {
1035       this.connection.processBatch(writeBuffer, tableName, pool, results);
1036       success = true;
1037     } catch (InterruptedException e) {
1038       throw new InterruptedIOException(e.getMessage());
1039     } finally {
1040       // mutate list so that it is empty for complete success, or contains
1041       // only failed records. Results are returned in the same order as the
1042       // requests in list. Walk the list backwards, so we can remove from list
1043       // without impacting the indexes of earlier members
1044       currentWriteBufferSize = 0;
1045       if (success || clearBufferOnFail) {
1046         writeBuffer.clear();
1047       } else {
1048         for (int i = results.length - 1; i >= 0; i--) {
1049           if (results[i] instanceof Result) {
1050             writeBuffer.remove(i);
1051           } else {
1052             currentWriteBufferSize += writeBuffer.get(i).heapSize();
1053           }
1054         }
1055       }
1056     }
1057   }
1058 
1059   /**
1060    * Process a mixed batch of Get, Put and Delete actions. All actions for a
1061    * RegionServer are forwarded in one RPC call. Queries are executed in parallel.
1062    *
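   * <p>A sketch of a mixed batch, shown here with the equivalent
   * {@link #batch(List, Object[])} call (i.e. without a callback); {@code table} stands for
   * an open HTable instance and the rows and column family "cf" are placeholders:
   * <pre>
   *   List&lt;Row&gt; actions = new ArrayList&lt;Row&gt;();
   *   actions.add(new Get(Bytes.toBytes("row1")));
   *   Put put = new Put(Bytes.toBytes("row2"));
   *   put.add(Bytes.toBytes("cf"), Bytes.toBytes("qual"), Bytes.toBytes("value"));
   *   actions.add(put);
   *   actions.add(new Delete(Bytes.toBytes("row3")));
   *   Object[] results = new Object[actions.size()];
   *   table.batch(actions, results);
   * </pre>
   *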
1063    * @param list The collection of actions.
1064    * @param results An empty array, same size as list. If an exception is thrown,
1065    * you can test here for partial results and determine which actions were
1066    * processed successfully.
1067    * @throws IOException if there are problems talking to META. Per-item
1068    * exceptions are stored in the results array.
1069    */
1070   public <R> void processBatchCallback(
1071     final List<? extends Row> list, final Object[] results, final Batch.Callback<R> callback)
1072     throws IOException, InterruptedException {
1073     connection.processBatchCallback(list, tableName, pool, results, callback);
1074   }
1075 
1076 
1077   /**
1078    * Parameterized batch processing, allowing varying return types for different
1079    * {@link Row} implementations.
1080    */
1081   public void processBatch(final List<? extends Row> list, final Object[] results)
1082     throws IOException, InterruptedException {
1083 
1084     this.processBatchCallback(list, results, null);
1085   }
1086 
1087 
1088   @Override
1089   public void close() throws IOException {
1090     if (this.closed) {
1091       return;
1092     }
1093     flushCommits();
1094     if (cleanupPoolOnClose) {
1095       this.pool.shutdown();
1096     }
1097     if (cleanupConnectionOnClose) {
1098       if (this.connection != null) {
1099         this.connection.close();
1100       }
1101     }
1102     this.closed = true;
1103   }
1104 
1105   // validate for well-formedness
1106   public void validatePut(final Put put) throws IllegalArgumentException{
1107     if (put.isEmpty()) {
1108       throw new IllegalArgumentException("No columns to insert");
1109     }
1110     if (maxKeyValueSize > 0) {
1111       for (List<? extends Cell> list : put.getFamilyMap().values()) {
1112         for (Cell cell : list) {
1113           // KeyValue v1 expectation.  Cast for now.
1114           KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
1115           if (kv.getLength() > maxKeyValueSize) {
1116             throw new IllegalArgumentException("KeyValue size too large");
1117           }
1118         }
1119       }
1120     }
1121   }
1122 
1123   /**
1124    * {@inheritDoc}
1125    */
1126   @Override
1127   public boolean isAutoFlush() {
1128     return autoFlush;
1129   }
1130 
1131   /**
1132    * See {@link #setAutoFlush(boolean, boolean)}
1133    *
1134    * @param autoFlush
1135    *          Whether or not to enable 'auto-flush'.
1136    */
1137   public void setAutoFlush(boolean autoFlush) {
1138     setAutoFlush(autoFlush, autoFlush);
1139   }
1140 
1141   /**
1142    * Turns 'auto-flush' on or off.
1143    * <p>
1144    * When enabled (default), {@link Put} operations don't get buffered/delayed
1145    * and are immediately executed. Failed operations are not retried. This is
1146    * slower but safer.
1147    * <p>
1148    * Turning off {@link #autoFlush} means that multiple {@link Put}s will be
1149    * accepted before any RPC is actually sent to do the write operations. If the
1150    * application dies before pending writes get flushed to HBase, data will be
1151    * lost.
1152    * <p>
1153    * When you turn {@link #autoFlush} off, you should also consider the
1154    * {@link #clearBufferOnFail} option. By default, asynchronous {@link Put}
1155    * requests will be retried on failure until successful. However, this can
1156    * pollute the writeBuffer and slow down batching performance. Additionally,
1157    * you may want to issue a number of Put requests and call
1158    * {@link #flushCommits()} as a barrier. In both use cases, consider setting
1159    * clearBufferOnFail to true to erase the buffer after {@link #flushCommits()}
1160    * has been called, regardless of success.
1161    *
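   * <p>A sketch of buffered writing with an explicit flush; it assumes {@code table} is an
   * open HTable for an existing table with a column family "cf":
   * <pre>
   *   table.setAutoFlush(false, true);
   *   for (int i = 0; i &lt; 100; i++) {
   *     Put put = new Put(Bytes.toBytes("row-" + i));
   *     put.add(Bytes.toBytes("cf"), Bytes.toBytes("qual"), Bytes.toBytes("value-" + i));
   *     table.put(put); // buffered locally until the write buffer size is exceeded
   *   }
   *   table.flushCommits(); // barrier: pushes any remaining buffered Puts to the servers
   * </pre>
   *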
1162    * @param autoFlush
1163    *          Whether or not to enable 'auto-flush'.
1164    * @param clearBufferOnFail
1165    *          Whether to clear the write buffer after a failed flush; if false, failed Puts remain buffered for retry
1166    * @see #flushCommits
1167    */
1168   public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
1169     this.autoFlush = autoFlush;
1170     this.clearBufferOnFail = autoFlush || clearBufferOnFail;
1171   }
1172 
1173   /**
1174    * Returns the maximum size in bytes of the write buffer for this HTable.
1175    * <p>
1176    * The default value comes from the configuration parameter
1177    * {@code hbase.client.write.buffer}.
1178    * @return The size of the write buffer in bytes.
1179    */
1180   public long getWriteBufferSize() {
1181     return writeBufferSize;
1182   }
1183 
1184   /**
1185    * Sets the maximum size of the write buffer in bytes.
1186    * <p>
1187    * If the new size is less than the current amount of data in the
1188    * write buffer, the buffer gets flushed.
1189    * @param writeBufferSize The new write buffer size, in bytes.
1190    * @throws IOException if a remote or network exception occurs.
1191    */
1192   public void setWriteBufferSize(long writeBufferSize) throws IOException {
1193     this.writeBufferSize = writeBufferSize;
1194     if(currentWriteBufferSize > writeBufferSize) {
1195       flushCommits();
1196     }
1197   }
1198 
1199   /**
1200    * Returns the write buffer.
1201    * @return The current write buffer.
1202    */
1203   public ArrayList<Put> getWriteBuffer() {
1204     return writeBuffer;
1205   }
1206 
1207   /**
1208    * The pool used for multi requests on this HTable.
1209    * @return the pool used for multi requests
1210    */
1211   ExecutorService getPool() {
1212     return this.pool;
1213   }
1214 
1215   /**
1216    * Enable or disable region cache prefetch for the table. The setting applies
1217    * to all HTable instances of the given table that share the same
1218    * connection. By default, the cache prefetch is enabled.
1219    * @param tableName name of table to configure.
1220    * @param enable Set to true to enable region cache prefetch. Or set to
1221    * false to disable it.
1222    * @throws IOException
1223    */
1224   public static void setRegionCachePrefetch(final byte[] tableName,
1225       final boolean enable) throws IOException {
1226     HConnectionManager.execute(new HConnectable<Void>(HBaseConfiguration
1227         .create()) {
1228       @Override
1229       public Void connect(HConnection connection) throws IOException {
1230         connection.setRegionCachePrefetch(tableName, enable);
1231         return null;
1232       }
1233     });
1234   }
1235 
1236   /**
1237    * Enable or disable region cache prefetch for the table. The setting applies
1238    * to all HTable instances of the given table that share the same
1239    * connection. By default, the cache prefetch is enabled.
1240    * @param conf The Configuration object to use.
1241    * @param tableName name of table to configure.
1242    * @param enable Set to true to enable region cache prefetch. Or set to
1243    * false to disable it.
1244    * @throws IOException
1245    */
1246   public static void setRegionCachePrefetch(final Configuration conf,
1247       final byte[] tableName, final boolean enable) throws IOException {
1248     HConnectionManager.execute(new HConnectable<Void>(conf) {
1249       @Override
1250       public Void connect(HConnection connection) throws IOException {
1251         connection.setRegionCachePrefetch(tableName, enable);
1252         return null;
1253       }
1254     });
1255   }
1256 
1257   /**
1258    * Check whether region cache prefetch is enabled or not for the table.
1259    * @param conf The Configuration object to use.
1260    * @param tableName name of table to check
1261    * @return true if the table's region cache prefetch is enabled, false
1262    * otherwise.
1263    * @throws IOException
1264    */
1265   public static boolean getRegionCachePrefetch(final Configuration conf,
1266       final byte[] tableName) throws IOException {
1267     return HConnectionManager.execute(new HConnectable<Boolean>(conf) {
1268       @Override
1269       public Boolean connect(HConnection connection) throws IOException {
1270         return connection.getRegionCachePrefetch(tableName);
1271       }
1272     });
1273   }
1274 
1275   /**
1276    * Check whether region cache prefetch is enabled or not for the table.
1277    * @param tableName name of table to check
1278    * @return true if the table's region cache prefetch is enabled, false
1279    * otherwise.
1280    * @throws IOException
1281    */
1282   public static boolean getRegionCachePrefetch(final byte[] tableName) throws IOException {
1283     return HConnectionManager.execute(new HConnectable<Boolean>(
1284         HBaseConfiguration.create()) {
1285       @Override
1286       public Boolean connect(HConnection connection) throws IOException {
1287         return connection.getRegionCachePrefetch(tableName);
1288       }
1289     });
1290   }
1291 
1292   /**
1293    * Explicitly clears the region cache to fetch the latest value from META.
1294    * This is a power user function: avoid unless you know the ramifications.
1295    */
1296   public void clearRegionCache() {
1297     this.connection.clearRegionCache();
1298   }
1299 
1300   /**
1301    * {@inheritDoc}
1302    */
1303   public CoprocessorRpcChannel coprocessorService(byte[] row) {
1304     return new RegionCoprocessorRpcChannel(connection, tableName, row);
1305   }
1306 
1307   /**
1308    * {@inheritDoc}
1309    */
1310   @Override
1311   public <T extends Service, R> Map<byte[],R> coprocessorService(final Class<T> service,
1312       byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable)
1313       throws ServiceException, Throwable {
1314     final Map<byte[],R> results =  Collections.synchronizedMap(
1315         new TreeMap<byte[], R>(Bytes.BYTES_COMPARATOR));
1316     coprocessorService(service, startKey, endKey, callable, new Batch.Callback<R>() {
1317       public void update(byte[] region, byte[] row, R value) {
1318         results.put(region, value);
1319       }
1320     });
1321     return results;
1322   }
1323 
1324   /**
1325    * {@inheritDoc}
1326    */
1327   @Override
1328   public <T extends Service, R> void coprocessorService(final Class<T> service,
1329       byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable,
1330       final Batch.Callback<R> callback) throws ServiceException, Throwable {
1331 
1332     // get regions covered by the row range
1333     List<byte[]> keys = getStartKeysInRange(startKey, endKey);
1334 
1335     Map<byte[],Future<R>> futures =
1336         new TreeMap<byte[],Future<R>>(Bytes.BYTES_COMPARATOR);
1337     for (final byte[] r : keys) {
1338       final RegionCoprocessorRpcChannel channel =
1339           new RegionCoprocessorRpcChannel(connection, tableName, r);
1340       Future<R> future = pool.submit(
1341           new Callable<R>() {
1342             public R call() throws Exception {
1343               T instance = ProtobufUtil.newServiceStub(service, channel);
1344               R result = callable.call(instance);
1345               byte[] region = channel.getLastRegion();
1346               if (callback != null) {
1347                 callback.update(region, r, result);
1348               }
1349               return result;
1350             }
1351           });
1352       futures.put(r, future);
1353     }
1354     for (Map.Entry<byte[],Future<R>> e : futures.entrySet()) {
1355       try {
1356         e.getValue().get();
1357       } catch (ExecutionException ee) {
1358         LOG.warn("Error calling coprocessor service " + service.getName() + " for row "
1359             + Bytes.toStringBinary(e.getKey()), ee);
1360         throw ee.getCause();
1361       } catch (InterruptedException ie) {
1362         Thread.currentThread().interrupt();
1363         throw new InterruptedIOException("Interrupted calling coprocessor service " + service.getName()
1364             + " for row " + Bytes.toStringBinary(e.getKey()))
1365             .initCause(ie);
1366       }
1367     }
1368   }
1369 
1370   private List<byte[]> getStartKeysInRange(byte[] start, byte[] end)
1371   throws IOException {
1372     Pair<byte[][],byte[][]> startEndKeys = getStartEndKeys();
1373     byte[][] startKeys = startEndKeys.getFirst();
1374     byte[][] endKeys = startEndKeys.getSecond();
1375 
1376     if (start == null) {
1377       start = HConstants.EMPTY_START_ROW;
1378     }
1379     if (end == null) {
1380       end = HConstants.EMPTY_END_ROW;
1381     }
1382 
1383     List<byte[]> rangeKeys = new ArrayList<byte[]>();
1384     for (int i=0; i<startKeys.length; i++) {
1385       if (Bytes.compareTo(start, startKeys[i]) >= 0 ) {
1386         if (Bytes.equals(endKeys[i], HConstants.EMPTY_END_ROW) ||
1387             Bytes.compareTo(start, endKeys[i]) < 0) {
1388           rangeKeys.add(start);
1389         }
1390       } else if (Bytes.equals(end, HConstants.EMPTY_END_ROW) ||
1391           Bytes.compareTo(startKeys[i], end) <= 0) {
1392         rangeKeys.add(startKeys[i]);
1393       } else {
1394         break; // past stop
1395       }
1396     }
1397 
1398     return rangeKeys;
1399   }
1400 
1401   public void setOperationTimeout(int operationTimeout) {
1402     this.operationTimeout = operationTimeout;
1403   }
1404 
1405   public int getOperationTimeout() {
1406     return operationTimeout;
1407   }
1408 
1409 }