
1   /**
2    * Copyright 2010 The Apache Software Foundation
3    *
4    * Licensed to the Apache Software Foundation (ASF) under one
5    * or more contributor license agreements.  See the NOTICE file
6    * distributed with this work for additional information
7    * regarding copyright ownership.  The ASF licenses this file
8    * to you under the Apache License, Version 2.0 (the
9    * "License"); you may not use this file except in compliance
10   * with the License.  You may obtain a copy of the License at
11   *
12   *     http://www.apache.org/licenses/LICENSE-2.0
13   *
14   * Unless required by applicable law or agreed to in writing, software
15   * distributed under the License is distributed on an "AS IS" BASIS,
16   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17   * See the License for the specific language governing permissions and
18   * limitations under the License.
19   */
20  package org.apache.hadoop.hbase.client;
21  
22  import java.io.DataInput;
23  import java.io.DataOutput;
24  import java.io.IOException;
25  import java.util.ArrayList;
26  import java.util.Arrays;
27  import java.util.Iterator;
28  import java.util.LinkedList;
29  import java.util.List;
30  import java.util.Map;
31  import java.util.TreeMap;
32  import java.util.concurrent.ExecutorService;
33  import java.util.concurrent.SynchronousQueue;
34  import java.util.concurrent.ThreadFactory;
35  import java.util.concurrent.ThreadPoolExecutor;
36  import java.util.concurrent.TimeUnit;
37  import java.util.concurrent.atomic.AtomicInteger;
38  
39  import org.apache.commons.logging.Log;
40  import org.apache.commons.logging.LogFactory;
41  import org.apache.hadoop.conf.Configuration;
42  import org.apache.hadoop.hbase.DoNotRetryIOException;
43  import org.apache.hadoop.hbase.HBaseConfiguration;
44  import org.apache.hadoop.hbase.HConstants;
45  import org.apache.hadoop.hbase.HRegionInfo;
46  import org.apache.hadoop.hbase.HRegionLocation;
47  import org.apache.hadoop.hbase.HServerAddress;
48  import org.apache.hadoop.hbase.HTableDescriptor;
49  import org.apache.hadoop.hbase.KeyValue;
50  import org.apache.hadoop.hbase.NotServingRegionException;
51  import org.apache.hadoop.hbase.UnknownScannerException;
52  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
53  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
54  import org.apache.hadoop.hbase.util.Bytes;
55  import org.apache.hadoop.hbase.util.Pair;
56  import org.apache.hadoop.hbase.util.Writables;
57  import org.apache.hadoop.hbase.zookeeper.ZKUtil;
58  import org.apache.zookeeper.KeeperException;
59  
60  /**
61   * Used to communicate with a single HBase table.
62   *
63   * This class is not thread safe for updates; the underlying write buffer can
64   * be corrupted if multiple threads contend over a single HTable instance.
65   *
66   * <p>Instances of HTable passed the same {@link Configuration} instance will
67   * share connections to servers out on the cluster and to the zookeeper ensemble
68   * as well as caches of region locations.  This is usually a *good* thing and it
69   * is recommended to reuse the same configuration object for all your tables.
70   * This happens because they will all share the same underlying
71   * {@link HConnection} instance. See {@link HConnectionManager} for more on
72   * how this mechanism works.
73   *
74   * <p>{@link HConnection} will read most of the
75   * configuration it needs from the passed {@link Configuration} on initial
76   * construction.  Thereafter, for settings such as
77   * <code>hbase.client.pause</code>, <code>hbase.client.retries.number</code>,
78   * and <code>hbase.client.rpc.maxattempts</code> updating their values in the
79   * passed {@link Configuration} subsequent to {@link HConnection} construction
80   * will go unnoticed.  To run with changed values, make a new
81   * {@link HTable} passing a new {@link Configuration} instance that has the
82   * new configuration.
83   *
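 * <p>A minimal usage sketch (the table name and column coordinates below are
 * hypothetical examples, not part of any API):
 * <pre>
 * {@code
 * Configuration conf = HBaseConfiguration.create();
 * // HTables created with the same conf share one HConnection and region cache
 * HTable table = new HTable(conf, "myTable");
 * Put put = new Put(Bytes.toBytes("row1"));
 * put.add(Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("value1"));
 * table.put(put);
 * Result result = table.get(new Get(Bytes.toBytes("row1")));
 * byte [] value = result.getValue(Bytes.toBytes("f1"), Bytes.toBytes("q1"));
 * }
 * </pre>
 *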
84   * @see HBaseAdmin for create, drop, list, enable and disable of tables.
85   * @see HConnection
86   * @see HConnectionManager
87   */
88  public class HTable implements HTableInterface {
89    private static final Log LOG = LogFactory.getLog(HTable.class);
90    private final HConnection connection;
91    private final byte [] tableName;
92    protected final int scannerTimeout;
93    private volatile Configuration configuration;
94    private final ArrayList<Put> writeBuffer = new ArrayList<Put>();
95    private long writeBufferSize;
96    private boolean clearBufferOnFail;
97    private boolean autoFlush;
98    private long currentWriteBufferSize;
99    protected int scannerCaching;
100   private int maxKeyValueSize;
101   private ExecutorService pool;  // For Multi
102   private long maxScannerResultSize;
103 
104   /**
105    * Creates an object to access a HBase table.
106    * Internally it creates a new instance of {@link Configuration} and a new
107    * client to zookeeper as well as other resources.  It also comes up with
108    * a fresh view of the cluster and must do discovery from scratch of region
109    * locations; i.e. it will not make use of already-cached region locations if
110    * available. Use only when being quick and dirty.
111    * @throws IOException if a remote or network exception occurs
112    * @see #HTable(Configuration, String)
113    */
114   public HTable(final String tableName)
115   throws IOException {
116     this(HBaseConfiguration.create(), Bytes.toBytes(tableName));
117   }
118 
119   /**
120    * Creates an object to access a HBase table.
121    * Internally it creates a new instance of {@link Configuration} and a new
122    * client to zookeeper as well as other resources.  It also comes up with
123    * a fresh view of the cluster and must do discovery from scratch of region
124    * locations; i.e. it will not make use of already-cached region locations if
125    * available. Use only when being quick and dirty.
126    * @param tableName Name of the table.
127    * @throws IOException if a remote or network exception occurs
128    * @see #HTable(Configuration, String)
129    */
130   public HTable(final byte [] tableName)
131   throws IOException {
132     this(HBaseConfiguration.create(), tableName);
133   }
134 
135   /**
136    * Creates an object to access a HBase table.
137    * Shares zookeeper connection and other resources with other HTable instances
138    * created with the same <code>conf</code> instance.  Uses already-populated
139    * region cache if one is available, populated by any other HTable instances
140    * sharing this <code>conf</code> instance.  Recommended.
141    * @param conf Configuration object to use.
142    * @param tableName Name of the table.
143    * @throws IOException if a remote or network exception occurs
144    */
145   public HTable(Configuration conf, final String tableName)
146   throws IOException {
147     this(conf, Bytes.toBytes(tableName));
148   }
149 
150 
151   /**
152    * Creates an object to access a HBase table.
153    * Shares zookeeper connection and other resources with other HTable instances
154    * created with the same <code>conf</code> instance.  Uses already-populated
155    * region cache if one is available, populated by any other HTable instances
156    * sharing this <code>conf</code> instance.  Recommended.
157    * @param conf Configuration object to use.
158    * @param tableName Name of the table.
159    * @throws IOException if a remote or network exception occurs
160    */
161   public HTable(Configuration conf, final byte [] tableName)
162   throws IOException {
163     this.tableName = tableName;
164     if (conf == null) {
165       this.scannerTimeout = 0;
166       this.connection = null;
167       return;
168     }
169     this.connection = HConnectionManager.getConnection(conf);
170     this.scannerTimeout =
171       (int) conf.getLong(HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY, HConstants.DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD);
172     this.configuration = conf;
173     this.connection.locateRegion(tableName, HConstants.EMPTY_START_ROW);
174     this.writeBufferSize = conf.getLong("hbase.client.write.buffer", 2097152);
175     this.clearBufferOnFail = true;
176     this.autoFlush = true;
177     this.currentWriteBufferSize = 0;
178     this.scannerCaching = conf.getInt("hbase.client.scanner.caching", 1);
179 
180     this.maxScannerResultSize = conf.getLong(
181       HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
182       HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
183     this.maxKeyValueSize = conf.getInt("hbase.client.keyvalue.maxsize", -1);
184 
185     int maxThreads = conf.getInt("hbase.htable.threads.max", Integer.MAX_VALUE);
186     if (maxThreads == 0) {
187       maxThreads = 1; // is there a better default?
188     }
189 
190     // Using the "direct handoff" approach, new threads are created only when
191     // necessary, so the pool can grow unbounded. This could be bad, but in HCM
192     // we only create as many Runnables as there are region servers, so the pool
193     // also scales when new region servers are added.
194     this.pool = new ThreadPoolExecutor(1, maxThreads,
195         60, TimeUnit.SECONDS,
196         new SynchronousQueue<Runnable>(),
197         new DaemonThreadFactory());
198     ((ThreadPoolExecutor)this.pool).allowCoreThreadTimeOut(true);
199   }
200 
201   /**
202    * @return the number of region servers that are currently running
203    * @throws IOException if a remote or network exception occurs
204    */
205   public int getCurrentNrHRS() throws IOException {
206     try {
207       // We go to zk rather than to master to get a count of region servers
208       // to avoid HTable having a Master dependency.  See HBase-2828
209       return ZKUtil.getNumberOfChildren(this.connection.getZooKeeperWatcher(),
210           this.connection.getZooKeeperWatcher().rsZNode);
211     } catch (KeeperException ke) {
212       throw new IOException("Unexpected ZooKeeper exception", ke);
213     }
214   }
215 
216   public Configuration getConfiguration() {
217     return configuration;
218   }
219 
220   /**
221    * Tells whether or not a table is enabled.
222    * Warning: use {@link HBaseAdmin#isTableEnabled(byte[])} instead.
223    * @param tableName Name of table to check.
224    * @return {@code true} if table is online.
225    * @throws IOException if a remote or network exception occurs
226    */
227   public static boolean isTableEnabled(String tableName) throws IOException {
228     return isTableEnabled(Bytes.toBytes(tableName));
229   }
230 
231   /**
232    * Tells whether or not a table is enabled.
233    * Warning: use {@link HBaseAdmin#isTableEnabled(byte[])} instead.
234    * @param tableName Name of table to check.
235    * @return {@code true} if table is online.
236    * @throws IOException if a remote or network exception occurs
237    */
238   public static boolean isTableEnabled(byte[] tableName) throws IOException {
239     return isTableEnabled(HBaseConfiguration.create(), tableName);
240   }
241 
242   /**
243    * Tells whether or not a table is enabled.
244    * Warning: use {@link HBaseAdmin#isTableEnabled(byte[])} instead.
245    * @param conf The Configuration object to use.
246    * @param tableName Name of table to check.
247    * @return {@code true} if table is online.
248    * @throws IOException if a remote or network exception occurs
249    */
250   public static boolean isTableEnabled(Configuration conf, String tableName)
251   throws IOException {
252     return isTableEnabled(conf, Bytes.toBytes(tableName));
253   }
254 
255   /**
256    * Tells whether or not a table is enabled.
257    * @param conf The Configuration object to use.
258    * @param tableName Name of table to check.
259    * @return {@code true} if table is online.
260    * @throws IOException if a remote or network exception occurs
261    */
262   public static boolean isTableEnabled(Configuration conf, byte[] tableName)
263   throws IOException {
264     return HConnectionManager.getConnection(conf).isTableEnabled(tableName);
265   }
266 
267   /**
268    * Finds the region location hosting the passed row, using cached info.
269    * @param row Row to find.
270    * @return The location of the given row.
271    * @throws IOException if a remote or network exception occurs
272    */
273   public HRegionLocation getRegionLocation(final String row)
274   throws IOException {
275     return connection.getRegionLocation(tableName, Bytes.toBytes(row), false);
276   }
277 
278   /**
279    * Finds the region on which the given row is being served.
280    * @param row Row to find.
281    * @return Location of the row.
282    * @throws IOException if a remote or network exception occurs
283    */
284   public HRegionLocation getRegionLocation(final byte [] row)
285   throws IOException {
286     return connection.getRegionLocation(tableName, row, false);
287   }
288 
289   @Override
290   public byte [] getTableName() {
291     return this.tableName;
292   }
293 
294   /**
295    * <em>INTERNAL</em> Used by unit tests and tools to do low-level
296    * manipulations.
297    * @return An HConnection instance.
298    */
299   // TODO(tsuna): Remove this.  Unit tests shouldn't require public helpers.
300   public HConnection getConnection() {
301     return this.connection;
302   }
303 
304   /**
305    * Gets the number of rows that a scanner will fetch at once.
306    * <p>
307    * The default value comes from {@code hbase.client.scanner.caching}.
308    */
309   public int getScannerCaching() {
310     return scannerCaching;
311   }
312 
313   /**
314    * Sets the number of rows that a scanner will fetch at once.
315    * <p>
316    * This will override the value specified by
317    * {@code hbase.client.scanner.caching}.
318    * Increasing this value will reduce the amount of work needed each time
319    * {@code next()} is called on a scanner, at the expense of memory use
320    * (since more rows will need to be maintained in memory by the scanners).
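   * <p>An illustrative sketch (the family name is hypothetical); note that a
   * caching value set on the {@link Scan} itself takes precedence over the
   * table-level setting:
   * <pre>
   * {@code
   * Scan scan = new Scan();
   * scan.addFamily(Bytes.toBytes("f1"));
   * scan.setCaching(100);  // fetch up to 100 rows per RPC for this scan only
   * ResultScanner scanner = table.getScanner(scan);
   * try {
   *   for (Result r : scanner) {
   *     // process each row
   *   }
   * } finally {
   *   scanner.close();
   * }
   * }
   * </pre>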
321    * @param scannerCaching the number of rows a scanner will fetch at once.
322    */
323   public void setScannerCaching(int scannerCaching) {
324     this.scannerCaching = scannerCaching;
325   }
326 
327   @Override
328   public HTableDescriptor getTableDescriptor() throws IOException {
329     return new UnmodifyableHTableDescriptor(
330       this.connection.getHTableDescriptor(this.tableName));
331   }
332 
333   /**
334    * Gets the starting row key for every region in the currently open table.
335    * <p>
336    * This is mainly useful for the MapReduce integration.
337    * @return Array of region starting row keys
338    * @throws IOException if a remote or network exception occurs
339    */
340   public byte [][] getStartKeys() throws IOException {
341     return getStartEndKeys().getFirst();
342   }
343 
344   /**
345    * Gets the ending row key for every region in the currently open table.
346    * <p>
347    * This is mainly useful for the MapReduce integration.
348    * @return Array of region ending row keys
349    * @throws IOException if a remote or network exception occurs
350    */
351   public byte[][] getEndKeys() throws IOException {
352     return getStartEndKeys().getSecond();
353   }
354 
355   /**
356    * Gets the starting and ending row keys for every region in the currently
357    * open table.
358    * <p>
359    * This is mainly useful for the MapReduce integration.
360    * @return Pair of arrays of region starting and ending row keys
361    * @throws IOException if a remote or network exception occurs
362    */
363   @SuppressWarnings("unchecked")
364   public Pair<byte[][],byte[][]> getStartEndKeys() throws IOException {
365     final List<byte[]> startKeyList = new ArrayList<byte[]>();
366     final List<byte[]> endKeyList = new ArrayList<byte[]>();
367     MetaScannerVisitor visitor = new MetaScannerVisitor() {
368       public boolean processRow(Result rowResult) throws IOException {
369         byte [] bytes = rowResult.getValue(HConstants.CATALOG_FAMILY,
370           HConstants.REGIONINFO_QUALIFIER);
371         if (bytes == null) {
372           LOG.warn("Null " + HConstants.REGIONINFO_QUALIFIER + " cell in " +
373             rowResult);
374           return true;
375         }
376         HRegionInfo info = Writables.getHRegionInfo(bytes);
377         if (Bytes.equals(info.getTableDesc().getName(), getTableName())) {
378           if (!(info.isOffline() || info.isSplit())) {
379             startKeyList.add(info.getStartKey());
380             endKeyList.add(info.getEndKey());
381           }
382         }
383         return true;
384       }
385     };
386     MetaScanner.metaScan(configuration, visitor, this.tableName);
387     return new Pair(startKeyList.toArray(new byte[startKeyList.size()][]),
388                 endKeyList.toArray(new byte[endKeyList.size()][]));
389   }
390 
391   /**
392    * Gets all the regions and their addresses for this table.
393    * <p>
394    * This is mainly useful for the MapReduce integration.
395    * @return A map of HRegionInfo with its server address
396    * @throws IOException if a remote or network exception occurs
397    */
398   public Map<HRegionInfo, HServerAddress> getRegionsInfo() throws IOException {
399     final Map<HRegionInfo, HServerAddress> regionMap =
400       new TreeMap<HRegionInfo, HServerAddress>();
401 
402     MetaScannerVisitor visitor = new MetaScannerVisitor() {
403       public boolean processRow(Result rowResult) throws IOException {
404         HRegionInfo info = Writables.getHRegionInfo(
405             rowResult.getValue(HConstants.CATALOG_FAMILY,
406                 HConstants.REGIONINFO_QUALIFIER));
407 
408         if (!(Bytes.equals(info.getTableDesc().getName(), getTableName()))) {
409           return false;
410         }
411 
412         HServerAddress server = new HServerAddress();
413         byte [] value = rowResult.getValue(HConstants.CATALOG_FAMILY,
414             HConstants.SERVER_QUALIFIER);
415         if (value != null && value.length > 0) {
416           String address = Bytes.toString(value);
417           server = new HServerAddress(address);
418         }
419 
420         if (!(info.isOffline() || info.isSplit())) {
421           regionMap.put(new UnmodifyableHRegionInfo(info), server);
422         }
423         return true;
424       }
425 
426     };
427     MetaScanner.metaScan(configuration, visitor, tableName);
428     return regionMap;
429   }
430 
431   /**
432    * Saves the passed region information into the table's region
433    * cache.
434    * <p>
435    * This is mainly useful for the MapReduce integration. You can call
436    * {@link #deserializeRegionInfo deserializeRegionInfo}
437    * to deserialize regions information from a
438    * {@link DataInput}, then call this method to load them to cache.
439    *
440    * <pre>
441    * {@code
442    * HTable t1 = new HTable("foo");
443    * FileInputStream fis = new FileInputStream("regions.dat");
444    * DataInputStream dis = new DataInputStream(fis);
445    *
446    * Map<HRegionInfo, HServerAddress> hm = t1.deserializeRegionInfo(dis);
447    * t1.prewarmRegionCache(hm);
448    * }
449    * </pre>
450    * @param regionMap Region information to be loaded into the
451    * region cache.
452    */
453   public void prewarmRegionCache(Map<HRegionInfo, HServerAddress> regionMap) {
454     this.connection.prewarmRegionCache(this.getTableName(), regionMap);
455   }
456 
457   /**
458    * Serializes the region information of this table and writes it
459    * to <code>out</code>.
460    * <p>
461    * This is mainly useful for the MapReduce integration. A client could
462    * perform a large scan over all the regions of the table and serialize the
463    * region info to a file; an MR job can then ship a copy of the table's meta
464    * in the DistributedCache.
465    * <pre>
466    * {@code
467    * FileOutputStream fos = new FileOutputStream("regions.dat");
468    * DataOutputStream dos = new DataOutputStream(fos);
469    * table.serializeRegionInfo(dos);
470    * dos.flush();
471    * dos.close();
472    * }
473    * </pre>
474    * @param out {@link DataOutput} to serialize this object into.
475    * @throws IOException if a remote or network exception occurs
476    */
477   public void serializeRegionInfo(DataOutput out) throws IOException {
478     Map<HRegionInfo, HServerAddress> allRegions = this.getRegionsInfo();
479     // first, write number of regions
480     out.writeInt(allRegions.size());
481     for (Map.Entry<HRegionInfo, HServerAddress> es : allRegions.entrySet()) {
482       es.getKey().write(out);
483       es.getValue().write(out);
484     }
485   }
486 
487   /**
488    * Reads from <code>in</code> and deserializes the region information.
489    *
490    * <p>It behaves similarly to {@link #getRegionsInfo getRegionsInfo}, except
491    * that it loads the region map from a {@link DataInput} object.
492    *
493    * <p>It is supposed to be followed immediately by {@link
494    * #prewarmRegionCache prewarmRegionCache}.
495    *
496    * <p>
497    * Please refer to {@link #prewarmRegionCache prewarmRegionCache} for usage.
498    *
499    * @param in {@link DataInput} object.
500    * @return A map of HRegionInfo with its server address.
501    * @throws IOException if an I/O exception occurs.
502    */
503   public Map<HRegionInfo, HServerAddress> deserializeRegionInfo(DataInput in)
504   throws IOException {
505     final Map<HRegionInfo, HServerAddress> allRegions =
506       new TreeMap<HRegionInfo, HServerAddress>();
507 
508     // the first integer is expected to be the size of records
509     int regionsCount = in.readInt();
510     for (int i = 0; i < regionsCount; ++i) {
511       HRegionInfo hri = new HRegionInfo();
512       hri.readFields(in);
513       HServerAddress hsa = new HServerAddress();
514       hsa.readFields(in);
515       allRegions.put(hri, hsa);
516     }
517     return allRegions;
518   }
519 
520    @Override
521    public Result getRowOrBefore(final byte[] row, final byte[] family)
522    throws IOException {
523      return connection.getRegionServerWithRetries(
524          new ServerCallable<Result>(connection, tableName, row) {
525        public Result call() throws IOException {
526          return server.getClosestRowBefore(location.getRegionInfo().getRegionName(),
527            row, family);
528        }
529      });
530    }
531 
532   @Override
533   public ResultScanner getScanner(final Scan scan) throws IOException {
534     ClientScanner s = new ClientScanner(scan);
535     s.initialize();
536     return s;
537   }
538 
539   @Override
540   public ResultScanner getScanner(byte [] family) throws IOException {
541     Scan scan = new Scan();
542     scan.addFamily(family);
543     return getScanner(scan);
544   }
545 
546   @Override
547   public ResultScanner getScanner(byte [] family, byte [] qualifier)
548   throws IOException {
549     Scan scan = new Scan();
550     scan.addColumn(family, qualifier);
551     return getScanner(scan);
552   }
553 
554   public Result get(final Get get) throws IOException {
555     return connection.getRegionServerWithRetries(
556         new ServerCallable<Result>(connection, tableName, get.getRow()) {
557           public Result call() throws IOException {
558             return server.get(location.getRegionInfo().getRegionName(), get);
559           }
560         }
561     );
562   }
563 
564    public Result[] get(List<Get> gets) throws IOException {
565      try {
566        Object [] r1 = batch((List)gets);
567 
568        // translate.
569        Result [] results = new Result[r1.length];
570        int i=0;
571        for (Object o : r1) {
572          // batch ensures if there is a failure we get an exception instead
573          results[i++] = (Result) o;
574        }
575 
576        return results;
577      } catch (InterruptedException e) {
578        throw new IOException(e);
579      }
580    }
581 
582   /**
583    * Method that does a batch call on Deletes, Gets and Puts.  The ordering of
584    * execution of the actions is not defined; if you do a Put and a
585    * Get in the same {@link #batch} call, you are not necessarily
586    * guaranteed that the Get returns what the Put had put.
587    *
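   * <p>A usage sketch (row, family and qualifier names are made up for
   * illustration); per the contract above, Gets, Puts and Deletes may be mixed:
   * <pre>
   * {@code
   * List<Row> actions = new ArrayList<Row>();
   * actions.add(new Get(Bytes.toBytes("row1")));
   * Put put = new Put(Bytes.toBytes("row2"));
   * put.add(Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
   * actions.add(put);
   * actions.add(new Delete(Bytes.toBytes("row3")));
   * Object [] results = new Object[actions.size()];
   * table.batch(actions, results);  // also throws InterruptedException
   * }
   * </pre>
   *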
588    * @param actions list of Get, Put, Delete objects
589    * @param results Empty Object[], same size as actions. Provides access to
590    * partial results, in case an exception is thrown. If there are any failures,
591    * the corresponding entry in this array will be null or a Throwable, AND an
592    * exception will be thrown.
593    * @throws IOException
594    */
595   @Override
596   public synchronized void batch(final List<Row> actions, final Object[] results)
597       throws InterruptedException, IOException {
598     connection.processBatch(actions, tableName, pool, results);
599   }
600 
601   /**
602    * Method that does a batch call on Deletes, Gets and Puts.
603    *
604    * @param actions list of Get, Put, Delete objects
605    * @return the results from the actions. A null in the return array means that
606    * the call for that action failed, even after retries
607    * @throws IOException
608    */
609   @Override
610   public synchronized Object[] batch(final List<Row> actions) throws InterruptedException, IOException {
611     Object[] results = new Object[actions.size()];
612     connection.processBatch(actions, tableName, pool, results);
613     return results;
614   }
615 
616   /**
617    * Deletes the specified cells/row.
618    *
619    * @param delete The object that specifies what to delete.
620    * @throws IOException if a remote or network exception occurs.
621    * @since 0.20.0
622    */
623   @Override
624   public void delete(final Delete delete)
625   throws IOException {
626     connection.getRegionServerWithRetries(
627         new ServerCallable<Boolean>(connection, tableName, delete.getRow()) {
628           public Boolean call() throws IOException {
629             server.delete(location.getRegionInfo().getRegionName(), delete);
630             return null; // FindBugs NP_BOOLEAN_RETURN_NULL
631           }
632         }
633     );
634   }
635 
636   /**
637    * Deletes the specified cells/rows in bulk.
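   * <p>A sketch of inspecting the leftover {@link Delete}s after a failure
   * (the row keys are illustrative):
   * <pre>
   * {@code
   * List<Delete> deletes = new ArrayList<Delete>();
   * deletes.add(new Delete(Bytes.toBytes("row1")));
   * deletes.add(new Delete(Bytes.toBytes("row2")));
   * try {
   *   table.delete(deletes);
   * } catch (IOException e) {
   *   // deletes now holds only the Deletes that were not applied
   * }
   * }
   * </pre>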
638    * @param deletes List of things to delete. As a side effect, it will be modified:
639    * successful {@link Delete}s are removed. The ordering of the list will not change.
640    * @throws IOException if a remote or network exception occurs. In that case
641    * the {@code deletes} argument will contain the {@link Delete} instances
642    * that have not been successfully applied.
643    * @since 0.20.1
644    * @see #batch(java.util.List, Object[])
645    */
646   @Override
647   public void delete(final List<Delete> deletes)
648   throws IOException {
649     Object[] results = new Object[deletes.size()];
650     try {
651       connection.processBatch((List) deletes, tableName, pool, results);
652     } catch (InterruptedException e) {
653       throw new IOException(e);
654     } finally {
655       // mutate list so that it is empty for complete success, or contains only failed records
656       // results are returned in the same order as the requests in list
657       // walk the list backwards, so we can remove from list without impacting the indexes of earlier members
658       for (int i = results.length - 1; i>=0; i--) {
659         // if result is not null, it succeeded
660         if (results[i] instanceof Result) {
661           deletes.remove(i);
662         }
663       }
664     }
665   }
666 
667   @Override
668   public void put(final Put put) throws IOException {
669     doPut(Arrays.asList(put));
670   }
671 
672   @Override
673   public void put(final List<Put> puts) throws IOException {
674     doPut(puts);
675   }
676 
677   private void doPut(final List<Put> puts) throws IOException {
678     for (Put put : puts) {
679       validatePut(put);
680       writeBuffer.add(put);
681       currentWriteBufferSize += put.heapSize();
682     }
683     if (autoFlush || currentWriteBufferSize > writeBufferSize) {
684       flushCommits();
685     }
686   }
687 
688   @Override
689   public Result increment(final Increment increment) throws IOException {
690     if (!increment.hasFamilies()) {
691       throw new IOException(
692           "Invalid arguments to increment, no columns specified");
693     }
694     return connection.getRegionServerWithRetries(
695         new ServerCallable<Result>(connection, tableName, increment.getRow()) {
696           public Result call() throws IOException {
697             return server.increment(
698                 location.getRegionInfo().getRegionName(), increment);
699           }
700         }
701     );
702   }
703 
704   @Override
705   public long incrementColumnValue(final byte [] row, final byte [] family,
706       final byte [] qualifier, final long amount)
707   throws IOException {
708     return incrementColumnValue(row, family, qualifier, amount, true);
709   }
710 
711   @Override
712   public long incrementColumnValue(final byte [] row, final byte [] family,
713       final byte [] qualifier, final long amount, final boolean writeToWAL)
714   throws IOException {
715     NullPointerException npe = null;
716     if (row == null) {
717       npe = new NullPointerException("row is null");
718     } else if (family == null) {
719       npe = new NullPointerException("column is null");
720     }
721     if (npe != null) {
722       throw new IOException(
723           "Invalid arguments to incrementColumnValue", npe);
724     }
725     return connection.getRegionServerWithRetries(
726         new ServerCallable<Long>(connection, tableName, row) {
727           public Long call() throws IOException {
728             return server.incrementColumnValue(
729                 location.getRegionInfo().getRegionName(), row, family,
730                 qualifier, amount, writeToWAL);
731           }
732         }
733     );
734   }
735 
736   /**
737    * Atomically checks if a row/family/qualifier value matches the expected value.
738    * If it does, it adds the put.  If value == null, checks for non-existence
739    * of the value.
740    *
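   * <p>For example (a sketch with made-up coordinates), write a new value only
   * if the cell still holds the expected old value:
   * <pre>
   * {@code
   * Put put = new Put(row);
   * put.add(family, qualifier, Bytes.toBytes("newValue"));
   * boolean applied = table.checkAndPut(row, family, qualifier,
   *     Bytes.toBytes("oldValue"), put);
   * }
   * </pre>
   *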
741    * @param row to check
742    * @param family column family
743    * @param qualifier column qualifier
744    * @param value the expected value
745    * @param put put to execute if value matches.
746    * @throws IOException
747    * @return true if the new put was executed, false otherwise
748    */
749   @Override
750   public boolean checkAndPut(final byte [] row,
751       final byte [] family, final byte [] qualifier, final byte [] value,
752       final Put put)
753   throws IOException {
754     return connection.getRegionServerWithRetries(
755         new ServerCallable<Boolean>(connection, tableName, row) {
756           public Boolean call() throws IOException {
757             return server.checkAndPut(location.getRegionInfo().getRegionName(),
758                 row, family, qualifier, value, put) ? Boolean.TRUE : Boolean.FALSE;
759           }
760         }
761     );
762   }
763 
764   /**
765    * Atomically checks if a row/family/qualifier value matches the expected value.
766    * If it does, it adds the delete.  If value == null, checks for non-existence
767    * of the value.
768    *
769    * @param row to check
770    * @param family column family
771    * @param qualifier column qualifier
772    * @param value the expected value
773    * @param delete delete to execute if value matches.
774    * @throws IOException
775    * @return true if the new delete was executed, false otherwise
776    */
777   @Override
778   public boolean checkAndDelete(final byte [] row,
779       final byte [] family, final byte [] qualifier, final byte [] value,
780       final Delete delete)
781   throws IOException {
782     return connection.getRegionServerWithRetries(
783         new ServerCallable<Boolean>(connection, tableName, row) {
784           public Boolean call() throws IOException {
785             return server.checkAndDelete(
786                 location.getRegionInfo().getRegionName(),
787                 row, family, qualifier, value, delete)
788             ? Boolean.TRUE : Boolean.FALSE;
789           }
790         }
791     );
792   }
793 
794   /**
795    * Test for the existence of columns in the table, as specified in the Get.<p>
796    *
797    * This will return true if the Get matches one or more keys, false if not.<p>
798    *
799    * This is a server-side call so it prevents any data from being transferred
800    * to the client.
801    * @param get param to check for
802    * @return true if the specified Get matches one or more keys, false if not
803    * @throws IOException
804    */
805   @Override
806   public boolean exists(final Get get) throws IOException {
807     return connection.getRegionServerWithRetries(
808         new ServerCallable<Boolean>(connection, tableName, get.getRow()) {
809           public Boolean call() throws IOException {
810             return server.
811                 exists(location.getRegionInfo().getRegionName(), get);
812           }
813         }
814     );
815   }
816 
817   /**
818    * Executes all the buffered {@link Put} operations.
819    * <p>
820    * This method gets called once automatically for every {@link Put} or batch
821    * of {@link Put}s (when {@link #put(List)} is used) when
822    * {@link #isAutoFlush()} is {@code true}.
823    * @throws IOException if a remote or network exception occurs.
824    */
825   @Override
826   public void flushCommits() throws IOException {
827     try {
828       connection.processBatchOfPuts(writeBuffer, tableName, pool);
829     } finally {
830       if (clearBufferOnFail) {
831         writeBuffer.clear();
832         currentWriteBufferSize = 0;
833       } else {
834         // the write buffer was adjusted by processBatchOfPuts
835         currentWriteBufferSize = 0;
836         for (Put aPut : writeBuffer) {
837           currentWriteBufferSize += aPut.heapSize();
838         }
839       }
840     }
841   }
842 
843   @Override
844   public void close() throws IOException {
845     flushCommits();
846     this.pool.shutdown();
847   }
848 
849   // validate for well-formedness
850   private void validatePut(final Put put) throws IllegalArgumentException{
851     if (put.isEmpty()) {
852       throw new IllegalArgumentException("No columns to insert");
853     }
854     if (maxKeyValueSize > 0) {
855       for (List<KeyValue> list : put.getFamilyMap().values()) {
856         for (KeyValue kv : list) {
857           if (kv.getLength() > maxKeyValueSize) {
858             throw new IllegalArgumentException("KeyValue size too large");
859           }
860         }
861       }
862     }
863   }
864 
865   @Override
866   public RowLock lockRow(final byte [] row)
867   throws IOException {
868     return connection.getRegionServerWithRetries(
869       new ServerCallable<RowLock>(connection, tableName, row) {
870         public RowLock call() throws IOException {
871           long lockId =
872               server.lockRow(location.getRegionInfo().getRegionName(), row);
873           return new RowLock(row,lockId);
874         }
875       }
876     );
877   }
878 
879   @Override
880   public void unlockRow(final RowLock rl)
881   throws IOException {
882     connection.getRegionServerWithRetries(
883       new ServerCallable<Boolean>(connection, tableName, rl.getRow()) {
884         public Boolean call() throws IOException {
885           server.unlockRow(location.getRegionInfo().getRegionName(),
886               rl.getLockId());
887           return null; // FindBugs NP_BOOLEAN_RETURN_NULL
888         }
889       }
890     );
891   }
892 
893   /**
894    * Explicitly clears the region cache to fetch the latest value from META.
895    * This is a power user function: avoid unless you know the ramifications.
896    */
897   public void clearRegionCache() {
898     this.connection.clearRegionCache();
899   }
900 
901   @Override
902   public boolean isAutoFlush() {
903     return autoFlush;
904   }
905 
906   /**
907    * See {@link #setAutoFlush(boolean, boolean)}
908    *
909    * @param autoFlush
910    *          Whether or not to enable 'auto-flush'.
911    */
912   public void setAutoFlush(boolean autoFlush) {
913     setAutoFlush(autoFlush, autoFlush);
914   }
915 
916   /**
917    * Turns 'auto-flush' on or off.
918    * <p>
919    * When enabled (default), {@link Put} operations don't get buffered/delayed
920    * and are immediately executed. Failed operations are not retried. This is
921    * slower but safer.
922    * <p>
923    * Turning off {@link #autoFlush} means that multiple {@link Put}s will be
924    * accepted before any RPC is actually sent to do the write operations. If the
925    * application dies before pending writes get flushed to HBase, data will be
926    * lost.
927    * <p>
928    * When you turn {@link #autoFlush} off, you should also consider the
929    * {@link #clearBufferOnFail} option. By default, asynchronous {@link Put}
930    * requests will be retried on failure until successful. However, this can
931    * pollute the writeBuffer and slow down batching performance. Additionally,
932    * you may want to issue a number of Put requests and call
933    * {@link #flushCommits()} as a barrier. In both use cases, consider setting
934    * clearBufferOnFail to true to erase the buffer after {@link #flushCommits()}
935    * has been called, regardless of success.
936    *
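   * <p>A typical buffered-write sketch (the table layout below is illustrative):
   * <pre>
   * {@code
   * table.setAutoFlush(false);
   * for (int i = 0; i < 1000; i++) {
   *   Put put = new Put(Bytes.toBytes("row-" + i));
   *   put.add(Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("v-" + i));
   *   table.put(put);  // buffered; flushed automatically when the buffer fills
   * }
   * table.flushCommits();  // explicit barrier for any remaining buffered Puts
   * }
   * </pre>
   *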
937    * @param autoFlush
938    *          Whether or not to enable 'auto-flush'.
939    * @param clearBufferOnFail
940    *          Whether to clear the writeBuffer even after a failed flush; if false, failed Puts are kept in the writeBuffer
941    * @see #flushCommits
942    */
943   public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
944     this.autoFlush = autoFlush;
945     this.clearBufferOnFail = autoFlush || clearBufferOnFail;
946   }
947 
948   /**
949    * Returns the maximum size in bytes of the write buffer for this HTable.
950    * <p>
951    * The default value comes from the configuration parameter
952    * {@code hbase.client.write.buffer}.
953    * @return The size of the write buffer in bytes.
954    */
955   public long getWriteBufferSize() {
956     return writeBufferSize;
957   }
958 
959   /**
960    * Sets the maximum size of the write buffer, in bytes.
961    * <p>
962    * If the new size is less than the current amount of data in the
963    * write buffer, the buffer gets flushed.
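   * <p>For example, to buffer up to roughly 8 MB of Puts before an automatic
   * flush (an arbitrary illustrative value):
   * <pre>
   * {@code
   * table.setAutoFlush(false);
   * table.setWriteBufferSize(8 * 1024 * 1024);
   * }
   * </pre>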
964    * @param writeBufferSize The new write buffer size, in bytes.
965    * @throws IOException if a remote or network exception occurs.
966    */
967   public void setWriteBufferSize(long writeBufferSize) throws IOException {
968     this.writeBufferSize = writeBufferSize;
969     if(currentWriteBufferSize > writeBufferSize) {
970       flushCommits();
971     }
972   }
973 
974   /**
975    * Returns the write buffer.
976    * @return The current write buffer.
977    */
978   public ArrayList<Put> getWriteBuffer() {
979     return writeBuffer;
980   }
981 
982   /**
983    * Implements the scanner interface for the HBase client.
984    * If there are multiple regions in a table, this scanner will iterate
985    * through them all.
986    */
987   protected class ClientScanner implements ResultScanner {
988     private final Log CLIENT_LOG = LogFactory.getLog(this.getClass());
989     // HEADSUP: The scan's internal start row can change as we move through the table.
990     private Scan scan;
991     private boolean closed = false;
992     // Current region the scanner is against.  Gets cleared if the current
993     // region goes wonky: e.g. if it splits on us.
994     private HRegionInfo currentRegion = null;
995     private ScannerCallable callable = null;
996     private final LinkedList<Result> cache = new LinkedList<Result>();
997     private final int caching;
998     private long lastNext;
999     // Keep lastResult returned successfully in case we have to reset scanner.
1000     private Result lastResult = null;
1001 
1002     protected ClientScanner(final Scan scan) {
1003       if (CLIENT_LOG.isDebugEnabled()) {
1004         CLIENT_LOG.debug("Creating scanner over "
1005             + Bytes.toString(getTableName())
1006             + " starting at key '" + Bytes.toStringBinary(scan.getStartRow()) + "'");
1007       }
1008       this.scan = scan;
1009       this.lastNext = System.currentTimeMillis();
1010 
1011       // Use the caching from the Scan.  If not set, use the default cache setting for this table.
1012       if (this.scan.getCaching() > 0) {
1013         this.caching = this.scan.getCaching();
1014       } else {
1015         this.caching = HTable.this.scannerCaching;
1016       }
1017 
1018       // Removed filter validation.  We have a new format now, only one of all
1019       // the current filters has a validate() method.  We can add it back,
1020       // need to decide on what we're going to do re: filter redesign.
1021       // Need, at the least, to break up family from qualifier as separate
1022       // checks, I think it's important server-side filters are optimal in that
1023       // respect.
1024     }
1025 
1026     public void initialize() throws IOException {
1027       nextScanner(this.caching, false);
1028     }
1029 
1030     protected Scan getScan() {
1031       return scan;
1032     }
1033 
1034     protected long getTimestamp() {
1035       return lastNext;
1036     }
1037 
1038     // returns true if the passed region endKey is equal to or beyond the scan's stop row, i.e. the scan should stop at this region
1039     private boolean checkScanStopRow(final byte [] endKey) {
1040       if (this.scan.getStopRow().length > 0) {
1041         // there is a stop row, check to see if we are past it.
1042         byte [] stopRow = scan.getStopRow();
1043         int cmp = Bytes.compareTo(stopRow, 0, stopRow.length,
1044           endKey, 0, endKey.length);
1045         if (cmp <= 0) {
1046           // stopRow <= endKey (endKey is equals to or larger than stopRow)
1047           // This is a stop.
1048           return true;
1049         }
1050       }
1051       return false; //unlikely.
1052     }
1053 
1054     /*
1055      * Gets a scanner for the next region.  If this.currentRegion != null, then
1056      * we will move to the endrow of this.currentRegion.  Else we will get
1057      * scanner at the scan.getStartRow().  We will go no further, just tidy
1058      * up outstanding scanners, if <code>currentRegion != null</code> and
1059      * <code>done</code> is true.
1060      * @param nbRows
1061      * @param done Server-side says we're done scanning.
1062      */
1063     private boolean nextScanner(int nbRows, final boolean done)
1064     throws IOException {
1065       // Close the previous scanner if it's open
1066       if (this.callable != null) {
1067         this.callable.setClose();
1068         getConnection().getRegionServerWithRetries(callable);
1069         this.callable = null;
1070       }
1071 
1072       // Where to start the next scanner
1073       byte [] localStartKey;
1074 
1075       // if we're at end of table, close and return false to stop iterating
1076       if (this.currentRegion != null) {
1077         byte [] endKey = this.currentRegion.getEndKey();
1078         if (endKey == null ||
1079             Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY) ||
1080             checkScanStopRow(endKey) ||
1081             done) {
1082           close();
1083           if (CLIENT_LOG.isDebugEnabled()) {
1084             CLIENT_LOG.debug("Finished with scanning at " + this.currentRegion);
1085           }
1086           return false;
1087         }
1088         localStartKey = endKey;
1089         if (CLIENT_LOG.isDebugEnabled()) {
1090           CLIENT_LOG.debug("Finished with region " + this.currentRegion);
1091         }
1092       } else {
1093         localStartKey = this.scan.getStartRow();
1094       }
1095 
1096       if (CLIENT_LOG.isDebugEnabled()) {
1097         CLIENT_LOG.debug("Advancing internal scanner to startKey at '" +
1098           Bytes.toStringBinary(localStartKey) + "'");
1099       }
1100       try {
1101         callable = getScannerCallable(localStartKey, nbRows);
1102         // Open a scanner on the region server starting at the
1103         // beginning of the region
1104         getConnection().getRegionServerWithRetries(callable);
1105         this.currentRegion = callable.getHRegionInfo();
1106       } catch (IOException e) {
1107         close();
1108         throw e;
1109       }
1110       return true;
1111     }
1112 
1113     protected ScannerCallable getScannerCallable(byte [] localStartKey,
1114         int nbRows) {
1115       scan.setStartRow(localStartKey);
1116       ScannerCallable s = new ScannerCallable(getConnection(),
1117         getTableName(), scan);
1118       s.setCaching(nbRows);
1119       return s;
1120     }
1121 
1122     public Result next() throws IOException {
1123       // If the scanner is closed but there are still rows left in the cache,
1124       // it will first empty the cache before returning null
1125       if (cache.size() == 0 && this.closed) {
1126         return null;
1127       }
1128       if (cache.size() == 0) {
1129         Result [] values = null;
1130         long remainingResultSize = maxScannerResultSize;
1131         int countdown = this.caching;
1132         // We need to reset it if it's a new callable that was created
1133         // with a countdown in nextScanner
1134         callable.setCaching(this.caching);
1135         // This flag is set when we want to skip the result returned.  We do
1136         // this when we reset scanner because it split under us.
1137         boolean skipFirst = false;
1138         do {
1139           try {
1140             if (skipFirst) {
1141               // Skip only the first row (which was the last row of the last
1142               // already-processed batch).
1143               callable.setCaching(1);
1144               values = getConnection().getRegionServerWithRetries(callable);
1145               callable.setCaching(this.caching);
1146               skipFirst = false;
1147             }
1148             // Server returns null if scanning is to stop.  Else, it
1149             // returns an empty array if scanning is to go on and we've just
1150             // exhausted the current region.
1151             values = getConnection().getRegionServerWithRetries(callable);
1152           } catch (DoNotRetryIOException e) {
1153             if (e instanceof UnknownScannerException) {
1154               long timeout = lastNext + scannerTimeout;
1155               // If we are over the timeout, throw this exception to the client
1156               // Else, it's because the region moved and we used the old id
1157               // against the new region server; reset the scanner.
1158               if (timeout < System.currentTimeMillis()) {
1159                 long elapsed = System.currentTimeMillis() - lastNext;
1160                 ScannerTimeoutException ex = new ScannerTimeoutException(
1161                     elapsed + "ms passed since the last invocation, " +
1162                         "timeout is currently set to " + scannerTimeout);
1163                 ex.initCause(e);
1164                 throw ex;
1165               }
1166             } else {
1167               Throwable cause = e.getCause();
1168               if (cause == null || !(cause instanceof NotServingRegionException)) {
1169                 throw e;
1170               }
1171             }
1172             // Else, it's a signal from the depths of ScannerCallable that we got an
1173             // NSRE on a next and that we need to reset the scanner.
1174             if (this.lastResult != null) {
1175               this.scan.setStartRow(this.lastResult.getRow());
1176               // Skip first row returned.  We already let it out on previous
1177               // invocation.
1178               skipFirst = true;
1179             }
1180             // Clear region
1181             this.currentRegion = null;
1182             continue;
1183           }
1184           lastNext = System.currentTimeMillis();
1185           if (values != null && values.length > 0) {
1186             for (Result rs : values) {
1187               cache.add(rs);
1188               for (KeyValue kv : rs.raw()) {
1189                   remainingResultSize -= kv.heapSize();
1190               }
1191               countdown--;
1192               this.lastResult = rs;
1193             }
1194           }
1195           // Values == null means server-side filter has determined we must STOP
1196         } while (remainingResultSize > 0 && countdown > 0 && nextScanner(countdown, values == null));
1197       }
1198 
1199       if (cache.size() > 0) {
1200         return cache.poll();
1201       }
1202       return null;
1203     }
1204 
1205     /**
1206      * Get {@code nbRows} rows.
1207      * How many RPCs are made is determined by the {@link Scan#setCaching(int)}
1208      * setting (or hbase.client.scanner.caching in hbase-site.xml).
1209      * @param nbRows number of rows to return
1210      * @return Between zero and {@code nbRows} Results.  Scan is done
1211      * if the returned array is of zero length (we never return null).
1212      * @throws IOException
1213      */
1214     public Result [] next(int nbRows) throws IOException {
1215       // Collect values to be returned here
1216       ArrayList<Result> resultSets = new ArrayList<Result>(nbRows);
1217       for(int i = 0; i < nbRows; i++) {
1218         Result next = next();
1219         if (next != null) {
1220           resultSets.add(next);
1221         } else {
1222           break;
1223         }
1224       }
1225       return resultSets.toArray(new Result[resultSets.size()]);
1226     }
1227 
1228     public void close() {
1229       if (callable != null) {
1230         callable.setClose();
1231         try {
1232           getConnection().getRegionServerWithRetries(callable);
1233         } catch (IOException e) {
1234           // We used to catch this error, interpret, and rethrow. However, we
1235           // have since decided that it's not nice for a scanner's close to
1236           // throw exceptions. Chances are it was just an UnknownScanner
1237           // exception due to lease time out.
1238         }
1239         callable = null;
1240       }
1241       closed = true;
1242     }
1243 
1244     public Iterator<Result> iterator() {
1245       return new Iterator<Result>() {
1246         // The next Result, possibly pre-read
1247         Result next = null;
1248 
1249         // return true if there is another item pending, false if there isn't.
1250         // this method is where the actual advancing takes place, but you need
1251         // to call next() to consume it. hasNext() will only advance if there
1252         // isn't a pending next().
1253         public boolean hasNext() {
1254           if (next == null) {
1255             try {
1256               next = ClientScanner.this.next();
1257               return next != null;
1258             } catch (IOException e) {
1259               throw new RuntimeException(e);
1260             }
1261           }
1262           return true;
1263         }
1264 
1265         // get the pending next item and advance the iterator. returns null if
1266         // there is no next item.
1267         public Result next() {
1268           // since hasNext() does the real advancing, we call this to determine
1269           // if there is a next before proceeding.
1270           if (!hasNext()) {
1271             return null;
1272           }
1273 
1274           // if we get to here, then hasNext() has given us an item to return.
1275           // we want to return the item and then null out the next pointer, so
1276           // we use a temporary variable.
1277           Result temp = next;
1278           next = null;
1279           return temp;
1280         }
1281 
1282         public void remove() {
1283           throw new UnsupportedOperationException();
1284         }
1285       };
1286     }
1287   }
1288 
1289   /**
1290    * The pool is used for multi requests for this HTable
1291    * @return the pool used for multi requests
1292    */
1293   ExecutorService getPool() {
1294     return this.pool;
1295   }
1296 
1297   static class DaemonThreadFactory implements ThreadFactory {
1298     static final AtomicInteger poolNumber = new AtomicInteger(1);
1299     final ThreadGroup group;
1300     final AtomicInteger threadNumber = new AtomicInteger(1);
1301     final String namePrefix;
1302 
1303     DaemonThreadFactory() {
1304       SecurityManager s = System.getSecurityManager();
1305       group = (s != null)? s.getThreadGroup() :
1306                            Thread.currentThread().getThreadGroup();
1307       namePrefix = "pool-" +
1308                     poolNumber.getAndIncrement() +
1309                    "-thread-";
1310     }
1311 
1312     public Thread newThread(Runnable r) {
1313       Thread t = new Thread(group, r,
1314                             namePrefix + threadNumber.getAndIncrement(),
1315                             0);
1316       if (!t.isDaemon()) {
1317         t.setDaemon(true);
1318       }
1319       if (t.getPriority() != Thread.NORM_PRIORITY) {
1320         t.setPriority(Thread.NORM_PRIORITY);
1321       }
1322       return t;
1323     }
1324   }
1325 
1326   /**
1327    * Enable or disable region cache prefetch for the table. It will be
1328    * applied to all HTable instances of the given table that share the same
1329    * connection. By default, the cache prefetch is enabled.
1330    * @param tableName name of table to configure.
1331    * @param enable Set to true to enable region cache prefetch. Or set to
1332    * false to disable it.
1333    * @throws ZooKeeperConnectionException
1334    */
1335   public static void setRegionCachePrefetch(final byte[] tableName,
1336       boolean enable) throws ZooKeeperConnectionException {
1337     HConnectionManager.getConnection(HBaseConfiguration.create()).
1338     setRegionCachePrefetch(tableName, enable);
1339   }
1340 
1341   /**
1342    * Enable or disable region cache prefetch for the table. It will be
1343    * applied to all HTable instances of the given table that share the same
1344    * connection. By default, the cache prefetch is enabled.
1345    * @param conf The Configuration object to use.
1346    * @param tableName name of table to configure.
1347    * @param enable Set to true to enable region cache prefetch. Or set to
1348    * false to disable it.
1349    * @throws ZooKeeperConnectionException
1350    */
1351   public static void setRegionCachePrefetch(final Configuration conf,
1352       final byte[] tableName, boolean enable) throws ZooKeeperConnectionException {
1353     HConnectionManager.getConnection(conf).setRegionCachePrefetch(
1354         tableName, enable);
1355   }
1356 
1357   /**
1358    * Check whether region cache prefetch is enabled or not for the table.
1359    * @param conf The Configuration object to use.
1360    * @param tableName name of table to check
1361    * @return true if the table's region cache prefetch is enabled, false
1362    * otherwise.
1363    * @throws ZooKeeperConnectionException
1364    */
1365   public static boolean getRegionCachePrefetch(final Configuration conf,
1366       final byte[] tableName) throws ZooKeeperConnectionException {
1367     return HConnectionManager.getConnection(conf).getRegionCachePrefetch(
1368         tableName);
1369   }
1370 
1371   /**
1372    * Check whether region cache prefetch is enabled or not for the table.
1373    * @param tableName name of table to check
1374    * @return true if the table's region cache prefetch is enabled, false
1375    * otherwise.
1376    * @throws ZooKeeperConnectionException
1377    */
1378   public static boolean getRegionCachePrefetch(final byte[] tableName) throws ZooKeeperConnectionException {
1379     return HConnectionManager.getConnection(HBaseConfiguration.create()).
1380     getRegionCachePrefetch(tableName);
1381   }
1382 }