
1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  
19  package org.apache.hadoop.hbase.thrift;
20  
21  import static org.apache.hadoop.hbase.util.Bytes.getBytes;
22  
23  import java.io.IOException;
24  import java.net.InetAddress;
25  import java.net.InetSocketAddress;
26  import java.net.UnknownHostException;
27  import java.nio.ByteBuffer;
28  import java.util.ArrayList;
29  import java.util.Arrays;
30  import java.util.Collections;
31  import java.util.HashMap;
32  import java.util.List;
33  import java.util.Map;
34  import java.util.TreeMap;
35  import java.util.concurrent.BlockingQueue;
36  import java.util.concurrent.ExecutorService;
37  import java.util.concurrent.LinkedBlockingQueue;
38  import java.util.concurrent.ThreadPoolExecutor;
39  import java.util.concurrent.TimeUnit;
40  
41  import org.apache.commons.cli.CommandLine;
42  import org.apache.commons.cli.Option;
43  import org.apache.commons.cli.OptionGroup;
44  import org.apache.commons.logging.Log;
45  import org.apache.commons.logging.LogFactory;
46  import org.apache.hadoop.conf.Configuration;
47  import org.apache.hadoop.hbase.HBaseConfiguration;
48  import org.apache.hadoop.hbase.HColumnDescriptor;
49  import org.apache.hadoop.hbase.HConstants;
50  import org.apache.hadoop.hbase.HRegionInfo;
51  import org.apache.hadoop.hbase.HTableDescriptor;
52  import org.apache.hadoop.hbase.KeyValue;
53  import org.apache.hadoop.hbase.ServerName;
54  import org.apache.hadoop.hbase.TableNotFoundException;
55  import org.apache.hadoop.hbase.client.Delete;
56  import org.apache.hadoop.hbase.client.Get;
57  import org.apache.hadoop.hbase.client.HBaseAdmin;
58  import org.apache.hadoop.hbase.client.HTable;
59  import org.apache.hadoop.hbase.client.Increment;
60  import org.apache.hadoop.hbase.client.OperationWithAttributes;
61  import org.apache.hadoop.hbase.client.Put;
62  import org.apache.hadoop.hbase.client.Result;
63  import org.apache.hadoop.hbase.client.ResultScanner;
64  import org.apache.hadoop.hbase.client.Scan;
65  import org.apache.hadoop.hbase.filter.Filter;
66  import org.apache.hadoop.hbase.filter.ParseFilter;
67  import org.apache.hadoop.hbase.filter.PrefixFilter;
68  import org.apache.hadoop.hbase.filter.WhileMatchFilter;
69  import org.apache.hadoop.hbase.thrift.CallQueue.Call;
70  import org.apache.hadoop.hbase.thrift.generated.AlreadyExists;
71  import org.apache.hadoop.hbase.thrift.generated.BatchMutation;
72  import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
73  import org.apache.hadoop.hbase.thrift.generated.Hbase;
74  import org.apache.hadoop.hbase.thrift.generated.IOError;
75  import org.apache.hadoop.hbase.thrift.generated.IllegalArgument;
76  import org.apache.hadoop.hbase.thrift.generated.Mutation;
77  import org.apache.hadoop.hbase.thrift.generated.TCell;
78  import org.apache.hadoop.hbase.thrift.generated.TIncrement;
79  import org.apache.hadoop.hbase.thrift.generated.TRegionInfo;
80  import org.apache.hadoop.hbase.thrift.generated.TRowResult;
81  import org.apache.hadoop.hbase.thrift.generated.TScan;
82  import org.apache.hadoop.hbase.util.Addressing;
83  import org.apache.hadoop.hbase.util.Bytes;
84  import org.apache.hadoop.hbase.util.Writables;
85  import org.apache.thrift.TException;
86  import org.apache.thrift.protocol.TBinaryProtocol;
87  import org.apache.thrift.protocol.TCompactProtocol;
88  import org.apache.thrift.protocol.TProtocolFactory;
89  import org.apache.thrift.server.THsHaServer;
90  import org.apache.thrift.server.TNonblockingServer;
91  import org.apache.thrift.server.TServer;
92  import org.apache.thrift.server.TThreadedSelectorServer;
93  import org.apache.thrift.transport.TFramedTransport;
94  import org.apache.thrift.transport.TNonblockingServerSocket;
95  import org.apache.thrift.transport.TNonblockingServerTransport;
96  import org.apache.thrift.transport.TServerSocket;
97  import org.apache.thrift.transport.TServerTransport;
98  import org.apache.thrift.transport.TTransportFactory;
99  
100 import com.google.common.base.Joiner;
101 import com.google.common.util.concurrent.ThreadFactoryBuilder;
102 
103 /**
104  * ThriftServerRunner - this class starts up a Thrift server which implements
105  * the Hbase API specified in the Hbase.thrift IDL file.
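 *
 * <p>A minimal usage sketch (hypothetical driver code, not part of this file;
 * exception handling omitted): the runner is handed a Configuration and
 * started on its own thread, because {@link #run()} blocks inside
 * {@code TServer.serve()}.
 *
 * <pre>
 *   Configuration conf = HBaseConfiguration.create();
 *   conf.setInt("hbase.regionserver.thrift.port", 9090);
 *   ThriftServerRunner runner = new ThriftServerRunner(conf);
 *   Thread serverThread = new Thread(runner, "thrift-server");
 *   serverThread.setDaemon(true);
 *   serverThread.start();
 *   // ... later, to stop serving:
 *   runner.shutdown();
 * </pre>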
106  */
107 public class ThriftServerRunner implements Runnable {
108 
109   private static final Log LOG = LogFactory.getLog(ThriftServerRunner.class);
110 
111   static final String SERVER_TYPE_CONF_KEY =
112       "hbase.regionserver.thrift.server.type";
113 
114   static final String BIND_CONF_KEY = "hbase.regionserver.thrift.ipaddress";
115   static final String COMPACT_CONF_KEY = "hbase.regionserver.thrift.compact";
116   static final String FRAMED_CONF_KEY = "hbase.regionserver.thrift.framed";
117   static final String PORT_CONF_KEY = "hbase.regionserver.thrift.port";
118   static final String COALESCE_INC_KEY = "hbase.regionserver.thrift.coalesceIncrement";
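  // These keys are read from the server Configuration (hbase-site.xml or -D
  // overrides): COMPACT/FRAMED select the Thrift protocol and transport,
  // BIND/PORT control the listen address, SERVER_TYPE picks the TServer
  // implementation, and COALESCE_INC_KEY routes increments through the
  // IncrementCoalescer (see HBaseHandler.increment()).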
119 
120   private static final String DEFAULT_BIND_ADDR = "0.0.0.0";
121   public static final int DEFAULT_LISTEN_PORT = 9090;
122   private final int listenPort;
123 
124   private Configuration conf;
125   volatile TServer tserver;
126   private final Hbase.Iface handler;
127   private final ThriftMetrics metrics;
128 
129   /** An enum of server implementation selections */
130   enum ImplType {
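    // Each constant is declared as (command-line option name, always-framed
    // transport?, backing TServer class, supports an explicit bind address?).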
131     HS_HA("hsha", true, THsHaServer.class, false),
132     NONBLOCKING("nonblocking", true, TNonblockingServer.class, false),
133     THREAD_POOL("threadpool", false, TBoundedThreadPoolServer.class, true),
134     THREADED_SELECTOR(
135         "threadedselector", true, TThreadedSelectorServer.class, false);
136 
137     public static final ImplType DEFAULT = THREAD_POOL;
138 
139     final String option;
140     final boolean isAlwaysFramed;
141     final Class<? extends TServer> serverClass;
142     final boolean canSpecifyBindIP;
143 
144     ImplType(String option, boolean isAlwaysFramed,
145         Class<? extends TServer> serverClass, boolean canSpecifyBindIP) {
146       this.option = option;
147       this.isAlwaysFramed = isAlwaysFramed;
148       this.serverClass = serverClass;
149       this.canSpecifyBindIP = canSpecifyBindIP;
150     }
151 
152     /**
153      * @return <code>-option</code> so we can get the list of options from
154      *         {@link #values()}
155      */
156     @Override
157     public String toString() {
158       return "-" + option;
159     }
160 
161     String getDescription() {
162       StringBuilder sb = new StringBuilder("Use the " +
163           serverClass.getSimpleName() + ".");
164       if (isAlwaysFramed) {
165         sb.append(" This implies the framed transport.");
166       }
167       if (this == DEFAULT) {
168         sb.append("This is the default.");
169       }
170       return sb.toString();
171     }
172 
173     static OptionGroup createOptionGroup() {
174       OptionGroup group = new OptionGroup();
175       for (ImplType t : values()) {
176         group.addOption(new Option(t.option, t.getDescription()));
177       }
178       return group;
179     }
180 
181     static ImplType getServerImpl(Configuration conf) {
182       String confType = conf.get(SERVER_TYPE_CONF_KEY, THREAD_POOL.option);
183       for (ImplType t : values()) {
184         if (confType.equals(t.option)) {
185           return t;
186         }
187       }
188       throw new AssertionError("Unknown server ImplType.option:" + confType);
189     }
190 
191     static void setServerImpl(CommandLine cmd, Configuration conf) {
192       ImplType chosenType = null;
193       int numChosen = 0;
194       for (ImplType t : values()) {
195         if (cmd.hasOption(t.option)) {
196           chosenType = t;
197           ++numChosen;
198         }
199       }
200       if (numChosen < 1) {
201         LOG.info("Using default thrift server type");
202         chosenType = DEFAULT;
203       } else if (numChosen > 1) {
204         throw new AssertionError("Exactly one option out of " +
205           Arrays.toString(values()) + " has to be specified");
206       }
207       LOG.info("Using thrift server type " + chosenType.option);
208       conf.set(SERVER_TYPE_CONF_KEY, chosenType.option);
209     }
210 
211     public String simpleClassName() {
212       return serverClass.getSimpleName();
213     }
214 
215     public static List<String> serversThatCannotSpecifyBindIP() {
216       List<String> l = new ArrayList<String>();
217       for (ImplType t : values()) {
218         if (!t.canSpecifyBindIP) {
219           l.add(t.simpleClassName());
220         }
221       }
222       return l;
223     }
224 
225   }
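  // How ImplType is typically wired up (assumed caller, e.g. a thrift server
  // main class): options built by ImplType.createOptionGroup() are parsed from
  // the command line, setServerImpl(cmd, conf) records the choice under
  // SERVER_TYPE_CONF_KEY, and setupServer() later reads it back through
  // ImplType.getServerImpl(conf).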
226 
227   public ThriftServerRunner(Configuration conf) throws IOException {
228     this(conf, new ThriftServerRunner.HBaseHandler(conf));
229   }
230 
231   public ThriftServerRunner(Configuration conf, HBaseHandler handler) {
232     this.conf = HBaseConfiguration.create(conf);
233     this.listenPort = conf.getInt(PORT_CONF_KEY, DEFAULT_LISTEN_PORT);
234     this.metrics = new ThriftMetrics(listenPort, conf, Hbase.Iface.class);
235     handler.initMetrics(metrics);
236     this.handler = HbaseHandlerMetricsProxy.newInstance(handler, metrics, conf);
237   }
238 
239   /*
240    * Runs the Thrift server; blocks until the server is stopped.
241    */
242   @Override
243   public void run() {
244     try {
245       setupServer();
246       tserver.serve();
247     } catch (Exception e) {
248       LOG.fatal("Cannot run ThriftServer", e);
249       // Crash the process if the ThriftServer is not running
250       System.exit(-1);
251     }
252   }
253 
254   public void shutdown() {
255     if (tserver != null) {
256       tserver.stop();
257       tserver = null;
258     }
259   }
260 
261   /**
262    * Sets up the Thrift TServer according to the configured server type, protocol, and transport.
263    */
264   private void setupServer() throws Exception {
265     // Construct correct ProtocolFactory
266     TProtocolFactory protocolFactory;
267     if (conf.getBoolean(COMPACT_CONF_KEY, false)) {
268       LOG.debug("Using compact protocol");
269       protocolFactory = new TCompactProtocol.Factory();
270     } else {
271       LOG.debug("Using binary protocol");
272       protocolFactory = new TBinaryProtocol.Factory();
273     }
274 
275     Hbase.Processor<Hbase.Iface> processor =
276         new Hbase.Processor<Hbase.Iface>(handler);
277     ImplType implType = ImplType.getServerImpl(conf);
278 
279     // Construct correct TransportFactory
280     TTransportFactory transportFactory;
281     if (conf.getBoolean(FRAMED_CONF_KEY, false) || implType.isAlwaysFramed) {
282       transportFactory = new TFramedTransport.Factory();
283       LOG.debug("Using framed transport");
284     } else {
285       transportFactory = new TTransportFactory();
286     }
287 
288     if (conf.get(BIND_CONF_KEY) != null && !implType.canSpecifyBindIP) {
289       LOG.error("Server types " + Joiner.on(", ").join(
290           ImplType.serversThatCannotSpecifyBindIP()) + " don't support IP " +
291           "address binding at the moment. See " +
292           "https://issues.apache.org/jira/browse/HBASE-2155 for details.");
293       throw new RuntimeException(
294           "-" + BIND_CONF_KEY + " not supported with " + implType);
295     }
296 
297     if (implType == ImplType.HS_HA || implType == ImplType.NONBLOCKING ||
298         implType == ImplType.THREADED_SELECTOR) {
299 
300       TNonblockingServerTransport serverTransport =
301           new TNonblockingServerSocket(listenPort);
302 
303       if (implType == ImplType.NONBLOCKING) {
304         TNonblockingServer.Args serverArgs =
305             new TNonblockingServer.Args(serverTransport);
306         serverArgs.processor(processor)
307                   .transportFactory(transportFactory)
308                   .protocolFactory(protocolFactory);
309         tserver = new TNonblockingServer(serverArgs);
310       } else if (implType == ImplType.HS_HA) {
311         THsHaServer.Args serverArgs = new THsHaServer.Args(serverTransport);
312         CallQueue callQueue =
313             new CallQueue(new LinkedBlockingQueue<Call>(), metrics);
314         ExecutorService executorService = createExecutor(
315             callQueue, serverArgs.getWorkerThreads());
316         serverArgs.executorService(executorService)
317                   .processor(processor)
318                   .transportFactory(transportFactory)
319                   .protocolFactory(protocolFactory);
320         tserver = new THsHaServer(serverArgs);
321       } else { // THREADED_SELECTOR
322         TThreadedSelectorServer.Args serverArgs =
323             new HThreadedSelectorServerArgs(serverTransport, conf);
324         CallQueue callQueue =
325             new CallQueue(new LinkedBlockingQueue<Call>(), metrics);
326         ExecutorService executorService = createExecutor(
327             callQueue, serverArgs.getWorkerThreads());
328         serverArgs.executorService(executorService)
329                   .processor(processor)
330                   .transportFactory(transportFactory)
331                   .protocolFactory(protocolFactory);
332         tserver = new TThreadedSelectorServer(serverArgs);
333       }
334       LOG.info("starting HBase " + implType.simpleClassName() +
335           " server on " + Integer.toString(listenPort));
336     } else if (implType == ImplType.THREAD_POOL) {
337       // Thread pool server. Get the IP address to bind to.
338       InetAddress listenAddress = getBindAddress(conf);
339 
340       TServerTransport serverTransport = new TServerSocket(
341           new InetSocketAddress(listenAddress, listenPort));
342 
343       TBoundedThreadPoolServer.Args serverArgs =
344           new TBoundedThreadPoolServer.Args(serverTransport, conf);
345       serverArgs.processor(processor)
346                 .transportFactory(transportFactory)
347                 .protocolFactory(protocolFactory);
348       LOG.info("starting " + ImplType.THREAD_POOL.simpleClassName() + " on "
349           + listenAddress + ":" + Integer.toString(listenPort)
350           + "; " + serverArgs);
351       TBoundedThreadPoolServer tserver =
352           new TBoundedThreadPoolServer(serverArgs, metrics);
353       this.tserver = tserver;
354     } else {
355       throw new AssertionError("Unsupported Thrift server implementation: " +
356           implType.simpleClassName());
357     }
358 
359     // A sanity check that we instantiated the right type of server.
360     if (tserver.getClass() != implType.serverClass) {
361       throw new AssertionError("Expected to create Thrift server class " +
362           implType.serverClass.getName() + " but got " +
363           tserver.getClass().getName());
364     }
365 
366     registerFilters(conf);
367   }
368 
369   ExecutorService createExecutor(BlockingQueue<Runnable> callQueue,
370                                  int workerThreads) {
371     ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
372     tfb.setDaemon(true);
373     tfb.setNameFormat("thrift-worker-%d");
374     return new ThreadPoolExecutor(workerThreads, workerThreads,
375             Long.MAX_VALUE, TimeUnit.SECONDS, callQueue, tfb.build());
376   }
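  // Note on the executor above: core == max == workerThreads with an
  // effectively infinite keep-alive, so the pool stays at a fixed size and
  // pending calls wait in the supplied queue (a CallQueue constructed with the
  // server's ThriftMetrics at the call sites in setupServer()).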
377 
378   private InetAddress getBindAddress(Configuration conf)
379       throws UnknownHostException {
380     String bindAddressStr = conf.get(BIND_CONF_KEY, DEFAULT_BIND_ADDR);
381     return InetAddress.getByName(bindAddressStr);
382   }
383 
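  /**
   * Pairs an open ResultScanner with the sortColumns flag requested when the
   * scanner was opened, so scannerGetList() knows whether to sort the columns
   * of each row it returns.
   */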
384   protected static class ResultScannerWrapper {
385 
386     private final ResultScanner scanner;
387     private final boolean sortColumns;
388     public ResultScannerWrapper(ResultScanner resultScanner,
389                                 boolean sortResultColumns) {
390       scanner = resultScanner;
391       sortColumns = sortResultColumns;
392     }
393 
394     public ResultScanner getScanner() {
395       return scanner; 
396     }
397 
398     public boolean isColumnSorted() {
399       return sortColumns; 
400     }
401   }
402 
403   /**
404    * The HBaseHandler is a glue object that connects Thrift RPC calls to the
405    * HBase client API primarily defined in the HBaseAdmin and HTable objects.
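 *
 * <p>The handler keeps one HTable per table name per thread (see
 * getTable(byte[])) and hands out integer scanner IDs backed by an internal
 * map, so stateless Thrift clients can page through a scan across multiple
 * calls; clients should release scanners with scannerClose(int).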
406    */
407   public static class HBaseHandler implements Hbase.Iface {
408     protected Configuration conf;
409     protected HBaseAdmin admin = null;
410     protected final Log LOG = LogFactory.getLog(this.getClass().getName());
411 
412     // nextScannerId and scannerMap are used to manage scanner state
413     protected int nextScannerId = 0;
414     protected HashMap<Integer, ResultScannerWrapper> scannerMap = null;
415     private ThriftMetrics metrics = null;
416 
417     private static ThreadLocal<Map<String, HTable>> threadLocalTables =
418         new ThreadLocal<Map<String, HTable>>() {
419       @Override
420       protected Map<String, HTable> initialValue() {
421         return new TreeMap<String, HTable>();
422       }
423     };
424 
425     IncrementCoalescer coalescer = null;
426 
427     /**
428      * Returns a list of all the column families for a given htable.
429      *
430      * @param table open HTable whose descriptor is read
431      * @return the table's column family names, each with the family delimiter appended
432      * @throws IOException if the table descriptor cannot be retrieved
433      */
434     byte[][] getAllColumns(HTable table) throws IOException {
435       HColumnDescriptor[] cds = table.getTableDescriptor().getColumnFamilies();
436       byte[][] columns = new byte[cds.length][];
437       for (int i = 0; i < cds.length; i++) {
438         columns[i] = Bytes.add(cds[i].getName(),
439             KeyValue.COLUMN_FAMILY_DELIM_ARRAY);
440       }
441       return columns;
442     }
443 
444     /**
445      * Creates and returns an HTable instance for the given table name, caching it in a thread-local map so each handler thread reuses its own instance.
446      *
447      * @param tableName
448      *          name of table
449      * @return HTable object
450      * @throws IOException if the table cannot be opened
452      */
453     public HTable getTable(final byte[] tableName) throws
454         IOException {
455       String table = new String(tableName);
456       Map<String, HTable> tables = threadLocalTables.get();
457       if (!tables.containsKey(table)) {
458         tables.put(table, new HTable(conf, tableName));
459       }
460       return tables.get(table);
461     }
462 
463     public HTable getTable(final ByteBuffer tableName) throws IOException {
464       return getTable(getBytes(tableName));
465     }
466 
467     /**
468      * Assigns a unique ID to the scanner and adds the mapping to an internal
469      * hash-map.
470      *
471      * @param scanner the open ResultScanner to register
472      * @return integer scanner id assigned to the scanner
473      */
474     protected synchronized int addScanner(ResultScanner scanner,boolean sortColumns) {
475       int id = nextScannerId++;
476       ResultScannerWrapper resultScannerWrapper = new ResultScannerWrapper(scanner, sortColumns);
477       scannerMap.put(id, resultScannerWrapper);
478       return id;
479     }
480 
481     /**
482      * Returns the scanner associated with the specified ID.
483      *
484      * @param id
485      * @return a Scanner, or null if ID was invalid.
486      */
487     protected synchronized ResultScannerWrapper getScanner(int id) {
488       return scannerMap.get(id);
489     }
490 
491     /**
492      * Removes the scanner associated with the specified ID from the internal
493      * id->scanner hash-map.
494      *
495      * @param id
496      * @return a Scanner, or null if ID was invalid.
497      */
498     protected synchronized ResultScannerWrapper removeScanner(int id) {
499       return scannerMap.remove(id);
500     }
501 
502     /**
503      * Constructs an HBaseHandler object.
504      * @throws IOException
505      */
506     protected HBaseHandler()
507     throws IOException {
508       this(HBaseConfiguration.create());
509     }
510 
511     protected HBaseHandler(final Configuration c) throws IOException {
512       this.conf = c;
513       admin = new HBaseAdmin(conf);
514       scannerMap = new HashMap<Integer, ResultScannerWrapper>();
515       this.coalescer = new IncrementCoalescer(this);
516     }
517 
518     @Override
519     public void enableTable(ByteBuffer tableName) throws IOError {
520       try{
521         admin.enableTable(getBytes(tableName));
522       } catch (IOException e) {
523         LOG.warn(e.getMessage(), e);
524         throw new IOError(e.getMessage());
525       }
526     }
527 
528     @Override
529     public void disableTable(ByteBuffer tableName) throws IOError{
530       try{
531         admin.disableTable(getBytes(tableName));
532       } catch (IOException e) {
533         LOG.warn(e.getMessage(), e);
534         throw new IOError(e.getMessage());
535       }
536     }
537 
538     @Override
539     public boolean isTableEnabled(ByteBuffer tableName) throws IOError {
540       try {
541         return HTable.isTableEnabled(this.conf, getBytes(tableName));
542       } catch (IOException e) {
543         LOG.warn(e.getMessage(), e);
544         throw new IOError(e.getMessage());
545       }
546     }
547 
548     @Override
549     public void compact(ByteBuffer tableNameOrRegionName) throws IOError {
550       try{
551         admin.compact(getBytes(tableNameOrRegionName));
552       } catch (InterruptedException e) {
553         throw new IOError(e.getMessage());
554       } catch (IOException e) {
555         LOG.warn(e.getMessage(), e);
556         throw new IOError(e.getMessage());
557       }
558     }
559 
560     @Override
561     public void majorCompact(ByteBuffer tableNameOrRegionName) throws IOError {
562       try{
563         admin.majorCompact(getBytes(tableNameOrRegionName));
564       } catch (InterruptedException e) {
565         LOG.warn(e.getMessage(), e);
566         throw new IOError(e.getMessage());
567       } catch (IOException e) {
568         LOG.warn(e.getMessage(), e);
569         throw new IOError(e.getMessage());
570       }
571     }
572 
573     @Override
574     public List<ByteBuffer> getTableNames() throws IOError {
575       try {
576         String[] tableNames = this.admin.getTableNames();
577         ArrayList<ByteBuffer> list = new ArrayList<ByteBuffer>(tableNames.length);
578         for (int i = 0; i < tableNames.length; i++) {
579           list.add(ByteBuffer.wrap(Bytes.toBytes(tableNames[i])));
580         }
581         return list;
582       } catch (IOException e) {
583         LOG.warn(e.getMessage(), e);
584         throw new IOError(e.getMessage());
585       }
586     }
587 
588     @Override
589     public List<TRegionInfo> getTableRegions(ByteBuffer tableName)
590     throws IOError {
591       try {
592         HTable table = getTable(tableName);
593         Map<HRegionInfo, ServerName> regionLocations =
594             table.getRegionLocations();
595         List<TRegionInfo> results = new ArrayList<TRegionInfo>();
596         for (Map.Entry<HRegionInfo, ServerName> entry :
597             regionLocations.entrySet()) {
598           HRegionInfo info = entry.getKey();
599           ServerName serverName = entry.getValue();
600           TRegionInfo region = new TRegionInfo();
601           region.serverName = ByteBuffer.wrap(
602               Bytes.toBytes(serverName.getHostname()));
603           region.port = serverName.getPort();
604           region.startKey = ByteBuffer.wrap(info.getStartKey());
605           region.endKey = ByteBuffer.wrap(info.getEndKey());
606           region.id = info.getRegionId();
607           region.name = ByteBuffer.wrap(info.getRegionName());
608           region.version = info.getVersion();
609           results.add(region);
610         }
611         return results;
612       } catch (TableNotFoundException e) {
613         // Return empty list for non-existing table
614         return Collections.emptyList();
615       } catch (IOException e){
616         LOG.warn(e.getMessage(), e);
617         throw new IOError(e.getMessage());
618       }
619     }
620 
621     /**
622      * Convert ByteBuffer to byte array. Note that this cannot be replaced by
623      * Bytes.toBytes().
624      */
625     public static byte[] toBytes(ByteBuffer bb) {
626       byte[] result = new byte[bb.remaining()];
627       // Make a duplicate so the position doesn't change
628       ByteBuffer dup = bb.duplicate();
629       dup.get(result, 0, result.length);
630       return result;
631     }
632 
633     @Deprecated
634     @Override
635     public List<TCell> get(
636         ByteBuffer tableName, ByteBuffer row, ByteBuffer column,
637         Map<ByteBuffer, ByteBuffer> attributes)
638         throws IOError {
639       byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
640       if(famAndQf.length == 1) {
641         return get(tableName, row, famAndQf[0], new byte[0], attributes);
642       }
643       return get(tableName, row, famAndQf[0], famAndQf[1], attributes);
644     }
645 
646     protected List<TCell> get(ByteBuffer tableName,
647                               ByteBuffer row,
648                               byte[] family,
649                               byte[] qualifier,
650                               Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
651       try {
652         HTable table = getTable(tableName);
653         Get get = new Get(getBytes(row));
654         addAttributes(get, attributes);
655         if (qualifier == null || qualifier.length == 0) {
656           get.addFamily(family);
657         } else {
658           get.addColumn(family, qualifier);
659         }
660         Result result = table.get(get);
661         return ThriftUtilities.cellFromHBase(result.raw());
662       } catch (IOException e) {
663         LOG.warn(e.getMessage(), e);
664         throw new IOError(e.getMessage());
665       }
666     }
667 
668     @Deprecated
669     @Override
670     public List<TCell> getVer(ByteBuffer tableName, ByteBuffer row,
671         ByteBuffer column, int numVersions,
672         Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
673       byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
674       if(famAndQf.length == 1) {
675         return getVer(tableName, row, famAndQf[0],
676             new byte[0], numVersions, attributes);
677       }
678       return getVer(tableName, row,
679           famAndQf[0], famAndQf[1], numVersions, attributes);
680     }
681 
682     public List<TCell> getVer(ByteBuffer tableName, ByteBuffer row,
683                               byte[] family,
684         byte[] qualifier, int numVersions,
685         Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
686       try {
687         HTable table = getTable(tableName);
688         Get get = new Get(getBytes(row));
689         addAttributes(get, attributes);
690         get.addColumn(family, qualifier);
691         get.setMaxVersions(numVersions);
692         Result result = table.get(get);
693         return ThriftUtilities.cellFromHBase(result.raw());
694       } catch (IOException e) {
695         LOG.warn(e.getMessage(), e);
696         throw new IOError(e.getMessage());
697       }
698     }
699 
700     @Deprecated
701     @Override
702     public List<TCell> getVerTs(ByteBuffer tableName,
703                                    ByteBuffer row,
704         ByteBuffer column,
705         long timestamp,
706         int numVersions,
707         Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
708       byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
709       if(famAndQf.length == 1) {
710         return getVerTs(tableName, row, famAndQf[0], new byte[0], timestamp,
711             numVersions, attributes);
712       }
713       return getVerTs(tableName, row, famAndQf[0], famAndQf[1], timestamp,
714           numVersions, attributes);
715     }
716 
717     protected List<TCell> getVerTs(ByteBuffer tableName,
718                                    ByteBuffer row, byte [] family,
719         byte [] qualifier, long timestamp, int numVersions,
720         Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
721       try {
722         HTable table = getTable(tableName);
723         Get get = new Get(getBytes(row));
724         addAttributes(get, attributes);
725         get.addColumn(family, qualifier);
726         get.setTimeRange(Long.MIN_VALUE, timestamp);
727         get.setMaxVersions(numVersions);
728         Result result = table.get(get);
729         return ThriftUtilities.cellFromHBase(result.raw());
730       } catch (IOException e) {
731         LOG.warn(e.getMessage(), e);
732         throw new IOError(e.getMessage());
733       }
734     }
735 
736     @Override
737     public List<TRowResult> getRow(ByteBuffer tableName, ByteBuffer row,
738         Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
739       return getRowWithColumnsTs(tableName, row, null,
740                                  HConstants.LATEST_TIMESTAMP,
741                                  attributes);
742     }
743 
744     @Override
745     public List<TRowResult> getRowWithColumns(ByteBuffer tableName,
746                                               ByteBuffer row,
747         List<ByteBuffer> columns,
748         Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
749       return getRowWithColumnsTs(tableName, row, columns,
750                                  HConstants.LATEST_TIMESTAMP,
751                                  attributes);
752     }
753 
754     @Override
755     public List<TRowResult> getRowTs(ByteBuffer tableName, ByteBuffer row,
756         long timestamp, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
757       return getRowWithColumnsTs(tableName, row, null,
758                                  timestamp, attributes);
759     }
760 
761     @Override
762     public List<TRowResult> getRowWithColumnsTs(
763         ByteBuffer tableName, ByteBuffer row, List<ByteBuffer> columns,
764         long timestamp, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
765       try {
766         HTable table = getTable(tableName);
767         if (columns == null) {
768           Get get = new Get(getBytes(row));
769           addAttributes(get, attributes);
770           get.setTimeRange(Long.MIN_VALUE, timestamp);
771           Result result = table.get(get);
772           return ThriftUtilities.rowResultFromHBase(result);
773         }
774         Get get = new Get(getBytes(row));
775         addAttributes(get, attributes);
776         for(ByteBuffer column : columns) {
777           byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
778           if (famAndQf.length == 1) {
779               get.addFamily(famAndQf[0]);
780           } else {
781               get.addColumn(famAndQf[0], famAndQf[1]);
782           }
783         }
784         get.setTimeRange(Long.MIN_VALUE, timestamp);
785         Result result = table.get(get);
786         return ThriftUtilities.rowResultFromHBase(result);
787       } catch (IOException e) {
788         LOG.warn(e.getMessage(), e);
789         throw new IOError(e.getMessage());
790       }
791     }
792 
793     @Override
794     public List<TRowResult> getRows(ByteBuffer tableName,
795                                     List<ByteBuffer> rows,
796         Map<ByteBuffer, ByteBuffer> attributes)
797         throws IOError {
798       return getRowsWithColumnsTs(tableName, rows, null,
799                                   HConstants.LATEST_TIMESTAMP,
800                                   attributes);
801     }
802 
803     @Override
804     public List<TRowResult> getRowsWithColumns(ByteBuffer tableName,
805                                                List<ByteBuffer> rows,
806         List<ByteBuffer> columns,
807         Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
808       return getRowsWithColumnsTs(tableName, rows, columns,
809                                   HConstants.LATEST_TIMESTAMP,
810                                   attributes);
811     }
812 
813     @Override
814     public List<TRowResult> getRowsTs(ByteBuffer tableName,
815                                       List<ByteBuffer> rows,
816         long timestamp,
817         Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
818       return getRowsWithColumnsTs(tableName, rows, null,
819                                   timestamp, attributes);
820     }
821 
822     @Override
823     public List<TRowResult> getRowsWithColumnsTs(ByteBuffer tableName,
824                                                  List<ByteBuffer> rows,
825         List<ByteBuffer> columns, long timestamp,
826         Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
827       try {
828         List<Get> gets = new ArrayList<Get>(rows.size());
829         HTable table = getTable(tableName);
830         if (metrics != null) {
831           metrics.incNumRowKeysInBatchGet(rows.size());
832         }
833         for (ByteBuffer row : rows) {
834           Get get = new Get(getBytes(row));
835           addAttributes(get, attributes);
836           if (columns != null) {
837 
838             for(ByteBuffer column : columns) {
839               byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
840               if (famAndQf.length == 1) {
841                 get.addFamily(famAndQf[0]);
842               } else {
843                 get.addColumn(famAndQf[0], famAndQf[1]);
844               }
845             }
846           }
847           get.setTimeRange(Long.MIN_VALUE, timestamp);
848           gets.add(get);
849         }
850         Result[] result = table.get(gets);
851         return ThriftUtilities.rowResultFromHBase(result);
852       } catch (IOException e) {
853         LOG.warn(e.getMessage(), e);
854         throw new IOError(e.getMessage());
855       }
856     }
857 
858     @Override
859     public void deleteAll(
860         ByteBuffer tableName, ByteBuffer row, ByteBuffer column,
861         Map<ByteBuffer, ByteBuffer> attributes)
862         throws IOError {
863       deleteAllTs(tableName, row, column, HConstants.LATEST_TIMESTAMP,
864                   attributes);
865     }
866 
867     @Override
868     public void deleteAllTs(ByteBuffer tableName,
869                             ByteBuffer row,
870                             ByteBuffer column,
871         long timestamp, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
872       try {
873         HTable table = getTable(tableName);
874         Delete delete  = new Delete(getBytes(row));
875         addAttributes(delete, attributes);
876         byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
877         if (famAndQf.length == 1) {
878           delete.deleteFamily(famAndQf[0], timestamp);
879         } else {
880           delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
881         }
882         table.delete(delete);
883 
884       } catch (IOException e) {
885         LOG.warn(e.getMessage(), e);
886         throw new IOError(e.getMessage());
887       }
888     }
889 
890     @Override
891     public void deleteAllRow(
892         ByteBuffer tableName, ByteBuffer row,
893         Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
894       deleteAllRowTs(tableName, row, HConstants.LATEST_TIMESTAMP, attributes);
895     }
896 
897     @Override
898     public void deleteAllRowTs(
899         ByteBuffer tableName, ByteBuffer row, long timestamp,
900         Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
901       try {
902         HTable table = getTable(tableName);
903         Delete delete  = new Delete(getBytes(row), timestamp, null);
904         addAttributes(delete, attributes);
905         table.delete(delete);
906       } catch (IOException e) {
907         LOG.warn(e.getMessage(), e);
908         throw new IOError(e.getMessage());
909       }
910     }
911 
912     @Override
913     public void createTable(ByteBuffer in_tableName,
914         List<ColumnDescriptor> columnFamilies) throws IOError,
915         IllegalArgument, AlreadyExists {
916       byte [] tableName = getBytes(in_tableName);
917       try {
918         if (admin.tableExists(tableName)) {
919           throw new AlreadyExists("table name already in use");
920         }
921         HTableDescriptor desc = new HTableDescriptor(tableName);
922         for (ColumnDescriptor col : columnFamilies) {
923           HColumnDescriptor colDesc = ThriftUtilities.colDescFromThrift(col);
924           desc.addFamily(colDesc);
925         }
926         admin.createTable(desc);
927       } catch (IOException e) {
928         LOG.warn(e.getMessage(), e);
929         throw new IOError(e.getMessage());
930       } catch (IllegalArgumentException e) {
931         LOG.warn(e.getMessage(), e);
932         throw new IllegalArgument(e.getMessage());
933       }
934     }
935 
936     @Override
937     public void deleteTable(ByteBuffer in_tableName) throws IOError {
938       byte [] tableName = getBytes(in_tableName);
939       if (LOG.isDebugEnabled()) {
940         LOG.debug("deleteTable: table=" + Bytes.toString(tableName));
941       }
942       try {
943         if (!admin.tableExists(tableName)) {
944           throw new IOException("table does not exist");
945         }
946         admin.deleteTable(tableName);
947       } catch (IOException e) {
948         LOG.warn(e.getMessage(), e);
949         throw new IOError(e.getMessage());
950       }
951     }
952 
953     @Override
954     public void mutateRow(ByteBuffer tableName, ByteBuffer row,
955         List<Mutation> mutations, Map<ByteBuffer, ByteBuffer> attributes)
956         throws IOError, IllegalArgument {
957       mutateRowTs(tableName, row, mutations, HConstants.LATEST_TIMESTAMP,
958                   attributes);
959     }
960 
961     @Override
962     public void mutateRowTs(ByteBuffer tableName, ByteBuffer row,
963         List<Mutation> mutations, long timestamp,
964         Map<ByteBuffer, ByteBuffer> attributes)
965         throws IOError, IllegalArgument {
966       HTable table = null;
967       try {
968         table = getTable(tableName);
969         Put put = new Put(getBytes(row), timestamp, null);
970         addAttributes(put, attributes);
971 
972         Delete delete = new Delete(getBytes(row));
973         addAttributes(delete, attributes);
974         if (metrics != null) {
975           metrics.incNumRowKeysInBatchMutate(mutations.size());
976         }
977 
978         // I apologize for all this mess :)
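        // Each Mutation becomes either a cell in the single Put or a marker in
        // the single Delete for this row; a column of the form "family:" (no
        // qualifier) addresses the whole family.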
979         for (Mutation m : mutations) {
980           byte[][] famAndQf = KeyValue.parseColumn(getBytes(m.column));
981           if (m.isDelete) {
982             if (famAndQf.length == 1) {
983               delete.deleteFamily(famAndQf[0], timestamp);
984             } else {
985               delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
986             }
987             delete.setWriteToWAL(m.writeToWAL);
988           } else {
989             if(famAndQf.length == 1) {
990               put.add(famAndQf[0], HConstants.EMPTY_BYTE_ARRAY,
991                   m.value != null ? getBytes(m.value)
992                       : HConstants.EMPTY_BYTE_ARRAY);
993             } else {
994               put.add(famAndQf[0], famAndQf[1],
995                   m.value != null ? getBytes(m.value)
996                       : HConstants.EMPTY_BYTE_ARRAY);
997             }
998             put.setWriteToWAL(m.writeToWAL);
999           }
1000         }
1001         if (!delete.isEmpty())
1002           table.delete(delete);
1003         if (!put.isEmpty())
1004           table.put(put);
1005       } catch (IOException e) {
1006         LOG.warn(e.getMessage(), e);
1007         throw new IOError(e.getMessage());
1008       } catch (IllegalArgumentException e) {
1009         LOG.warn(e.getMessage(), e);
1010         throw new IllegalArgument(e.getMessage());
1011       }
1012     }
1013 
1014     @Override
1015     public void mutateRows(ByteBuffer tableName, List<BatchMutation> rowBatches,
1016         Map<ByteBuffer, ByteBuffer> attributes)
1017         throws IOError, IllegalArgument, TException {
1018       mutateRowsTs(tableName, rowBatches, HConstants.LATEST_TIMESTAMP, attributes);
1019     }
1020 
1021     @Override
1022     public void mutateRowsTs(
1023         ByteBuffer tableName, List<BatchMutation> rowBatches, long timestamp,
1024         Map<ByteBuffer, ByteBuffer> attributes)
1025         throws IOError, IllegalArgument, TException {
1026       List<Put> puts = new ArrayList<Put>();
1027       List<Delete> deletes = new ArrayList<Delete>();
1028 
1029       for (BatchMutation batch : rowBatches) {
1030         byte[] row = getBytes(batch.row);
1031         List<Mutation> mutations = batch.mutations;
1032         Delete delete = new Delete(row);
1033         addAttributes(delete, attributes);
1034         Put put = new Put(row, timestamp, null);
1035         addAttributes(put, attributes);
1036         for (Mutation m : mutations) {
1037           byte[][] famAndQf = KeyValue.parseColumn(getBytes(m.column));
1038           if (m.isDelete) {
1039             // no qualifier, family only.
1040             if (famAndQf.length == 1) {
1041               delete.deleteFamily(famAndQf[0], timestamp);
1042             } else {
1043               delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
1044             }
1045             delete.setWriteToWAL(m.writeToWAL);
1046           } else {
1047             if(famAndQf.length == 1) {
1048               put.add(famAndQf[0], HConstants.EMPTY_BYTE_ARRAY,
1049                   m.value != null ? getBytes(m.value)
1050                       : HConstants.EMPTY_BYTE_ARRAY);
1051             } else {
1052               put.add(famAndQf[0], famAndQf[1],
1053                   m.value != null ? getBytes(m.value)
1054                       : HConstants.EMPTY_BYTE_ARRAY);
1055             }
1056             put.setWriteToWAL(m.writeToWAL);
1057           }
1058         }
1059         if (!delete.isEmpty())
1060           deletes.add(delete);
1061         if (!put.isEmpty())
1062           puts.add(put);
1063       }
1064 
1065       HTable table = null;
1066       try {
1067         table = getTable(tableName);
1068         if (!puts.isEmpty())
1069           table.put(puts);
1070         if (!deletes.isEmpty()) table.delete(deletes);
1071       } catch (IOException e) {
1072         LOG.warn(e.getMessage(), e);
1073         throw new IOError(e.getMessage());
1074       } catch (IllegalArgumentException e) {
1075         LOG.warn(e.getMessage(), e);
1076         throw new IllegalArgument(e.getMessage());
1077       }
1078     }
1079 
1080     @Deprecated
1081     @Override
1082     public long atomicIncrement(
1083         ByteBuffer tableName, ByteBuffer row, ByteBuffer column, long amount)
1084             throws IOError, IllegalArgument, TException {
1085       byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
1086       if(famAndQf.length == 1) {
1087         return atomicIncrement(tableName, row, famAndQf[0], new byte[0],
1088             amount);
1089       }
1090       return atomicIncrement(tableName, row, famAndQf[0], famAndQf[1], amount);
1091     }
1092 
1093     protected long atomicIncrement(ByteBuffer tableName, ByteBuffer row,
1094         byte [] family, byte [] qualifier, long amount)
1095         throws IOError, IllegalArgument, TException {
1096       HTable table;
1097       try {
1098         table = getTable(tableName);
1099         return table.incrementColumnValue(
1100             getBytes(row), family, qualifier, amount);
1101       } catch (IOException e) {
1102         LOG.warn(e.getMessage(), e);
1103         throw new IOError(e.getMessage());
1104       }
1105     }
1106 
1107     public void scannerClose(int id) throws IOError, IllegalArgument {
1108       LOG.debug("scannerClose: id=" + id);
1109       ResultScannerWrapper resultScannerWrapper = getScanner(id);
1110       if (resultScannerWrapper == null) {
1111         String message = "scanner ID is invalid";
1112         LOG.warn(message);
1113         throw new IllegalArgument("scanner ID is invalid");
1114       }
1115       resultScannerWrapper.getScanner().close();
1116       removeScanner(id);
1117     }
1118 
1119     @Override
1120     public List<TRowResult> scannerGetList(int id,int nbRows)
1121         throws IllegalArgument, IOError {
1122       LOG.debug("scannerGetList: id=" + id);
1123       ResultScannerWrapper resultScannerWrapper = getScanner(id);
1124       if (null == resultScannerWrapper) {
1125         String message = "scanner ID is invalid";
1126         LOG.warn(message);
1127         throw new IllegalArgument("scanner ID is invalid");
1128       }
1129 
1130       Result [] results = null;
1131       try {
1132         results = resultScannerWrapper.getScanner().next(nbRows);
1133         if (null == results) {
1134           return new ArrayList<TRowResult>();
1135         }
1136       } catch (IOException e) {
1137         LOG.warn(e.getMessage(), e);
1138         throw new IOError(e.getMessage());
1139       }
1140       return ThriftUtilities.rowResultFromHBase(results, resultScannerWrapper.isColumnSorted());
1141     }
1142 
1143     @Override
1144     public List<TRowResult> scannerGet(int id) throws IllegalArgument, IOError {
1145       return scannerGetList(id,1);
1146     }
1147 
1148     public int scannerOpenWithScan(ByteBuffer tableName, TScan tScan,
1149         Map<ByteBuffer, ByteBuffer> attributes)
1150         throws IOError {
1151       try {
1152         HTable table = getTable(tableName);
1153         Scan scan = new Scan();
1154         addAttributes(scan, attributes);
1155         if (tScan.isSetStartRow()) {
1156           scan.setStartRow(tScan.getStartRow());
1157         }
1158         if (tScan.isSetStopRow()) {
1159           scan.setStopRow(tScan.getStopRow());
1160         }
1161         if (tScan.isSetTimestamp()) {
1162           scan.setTimeRange(Long.MIN_VALUE, tScan.getTimestamp());
1163         }
1164         if (tScan.isSetCaching()) {
1165           scan.setCaching(tScan.getCaching());
1166         }
1167         if (tScan.isSetColumns() && tScan.getColumns().size() != 0) {
1168           for(ByteBuffer column : tScan.getColumns()) {
1169             byte [][] famQf = KeyValue.parseColumn(getBytes(column));
1170             if(famQf.length == 1) {
1171               scan.addFamily(famQf[0]);
1172             } else {
1173               scan.addColumn(famQf[0], famQf[1]);
1174             }
1175           }
1176         }
1177         if (tScan.isSetFilterString()) {
1178           ParseFilter parseFilter = new ParseFilter();
1179           scan.setFilter(
1180               parseFilter.parseFilterString(tScan.getFilterString()));
1181         }
1182         return addScanner(table.getScanner(scan), tScan.sortColumns);
1183       } catch (IOException e) {
1184         LOG.warn(e.getMessage(), e);
1185         throw new IOError(e.getMessage());
1186       }
1187     }
1188 
1189     @Override
1190     public int scannerOpen(ByteBuffer tableName, ByteBuffer startRow,
1191         List<ByteBuffer> columns,
1192         Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
1193       try {
1194         HTable table = getTable(tableName);
1195         Scan scan = new Scan(getBytes(startRow));
1196         addAttributes(scan, attributes);
1197         if(columns != null && columns.size() != 0) {
1198           for(ByteBuffer column : columns) {
1199             byte [][] famQf = KeyValue.parseColumn(getBytes(column));
1200             if(famQf.length == 1) {
1201               scan.addFamily(famQf[0]);
1202             } else {
1203               scan.addColumn(famQf[0], famQf[1]);
1204             }
1205           }
1206         }
1207         return addScanner(table.getScanner(scan), false);
1208       } catch (IOException e) {
1209         LOG.warn(e.getMessage(), e);
1210         throw new IOError(e.getMessage());
1211       }
1212     }
1213 
1214     @Override
1215     public int scannerOpenWithStop(ByteBuffer tableName, ByteBuffer startRow,
1216         ByteBuffer stopRow, List<ByteBuffer> columns,
1217         Map<ByteBuffer, ByteBuffer> attributes)
1218         throws IOError, TException {
1219       try {
1220         HTable table = getTable(tableName);
1221         Scan scan = new Scan(getBytes(startRow), getBytes(stopRow));
1222         addAttributes(scan, attributes);
1223         if(columns != null && columns.size() != 0) {
1224           for(ByteBuffer column : columns) {
1225             byte [][] famQf = KeyValue.parseColumn(getBytes(column));
1226             if(famQf.length == 1) {
1227               scan.addFamily(famQf[0]);
1228             } else {
1229               scan.addColumn(famQf[0], famQf[1]);
1230             }
1231           }
1232         }
1233         return addScanner(table.getScanner(scan), false);
1234       } catch (IOException e) {
1235         LOG.warn(e.getMessage(), e);
1236         throw new IOError(e.getMessage());
1237       }
1238     }
1239 
1240     @Override
1241     public int scannerOpenWithPrefix(ByteBuffer tableName,
1242                                      ByteBuffer startAndPrefix,
1243                                      List<ByteBuffer> columns,
1244         Map<ByteBuffer, ByteBuffer> attributes)
1245         throws IOError, TException {
1246       try {
1247         HTable table = getTable(tableName);
1248         Scan scan = new Scan(getBytes(startAndPrefix));
1249         addAttributes(scan, attributes);
1250         Filter f = new WhileMatchFilter(
1251             new PrefixFilter(getBytes(startAndPrefix)));
1252         scan.setFilter(f);
1253         if (columns != null && columns.size() != 0) {
1254           for(ByteBuffer column : columns) {
1255             byte [][] famQf = KeyValue.parseColumn(getBytes(column));
1256             if(famQf.length == 1) {
1257               scan.addFamily(famQf[0]);
1258             } else {
1259               scan.addColumn(famQf[0], famQf[1]);
1260             }
1261           }
1262         }
1263         return addScanner(table.getScanner(scan), false);
1264       } catch (IOException e) {
1265         LOG.warn(e.getMessage(), e);
1266         throw new IOError(e.getMessage());
1267       }
1268     }
1269 
1270     @Override
1271     public int scannerOpenTs(ByteBuffer tableName, ByteBuffer startRow,
1272         List<ByteBuffer> columns, long timestamp,
1273         Map<ByteBuffer, ByteBuffer> attributes) throws IOError, TException {
1274       try {
1275         HTable table = getTable(tableName);
1276         Scan scan = new Scan(getBytes(startRow));
1277         addAttributes(scan, attributes);
1278         scan.setTimeRange(Long.MIN_VALUE, timestamp);
1279         if (columns != null && columns.size() != 0) {
1280           for (ByteBuffer column : columns) {
1281             byte [][] famQf = KeyValue.parseColumn(getBytes(column));
1282             if(famQf.length == 1) {
1283               scan.addFamily(famQf[0]);
1284             } else {
1285               scan.addColumn(famQf[0], famQf[1]);
1286             }
1287           }
1288         }
1289         return addScanner(table.getScanner(scan), false);
1290       } catch (IOException e) {
1291         LOG.warn(e.getMessage(), e);
1292         throw new IOError(e.getMessage());
1293       }
1294     }
1295 
1296     @Override
1297     public int scannerOpenWithStopTs(ByteBuffer tableName, ByteBuffer startRow,
1298         ByteBuffer stopRow, List<ByteBuffer> columns, long timestamp,
1299         Map<ByteBuffer, ByteBuffer> attributes)
1300         throws IOError, TException {
1301       try {
1302         HTable table = getTable(tableName);
1303         Scan scan = new Scan(getBytes(startRow), getBytes(stopRow));
1304         addAttributes(scan, attributes);
1305         scan.setTimeRange(Long.MIN_VALUE, timestamp);
1306         if (columns != null && columns.size() != 0) {
1307           for (ByteBuffer column : columns) {
1308             byte [][] famQf = KeyValue.parseColumn(getBytes(column));
1309             if(famQf.length == 1) {
1310               scan.addFamily(famQf[0]);
1311             } else {
1312               scan.addColumn(famQf[0], famQf[1]);
1313             }
1314           }
1315         }
1317         return addScanner(table.getScanner(scan), false);
1318       } catch (IOException e) {
1319         LOG.warn(e.getMessage(), e);
1320         throw new IOError(e.getMessage());
1321       }
1322     }
1323 
1324     @Override
1325     public Map<ByteBuffer, ColumnDescriptor> getColumnDescriptors(
1326         ByteBuffer tableName) throws IOError, TException {
1327       try {
1328         TreeMap<ByteBuffer, ColumnDescriptor> columns =
1329           new TreeMap<ByteBuffer, ColumnDescriptor>();
1330 
1331         HTable table = getTable(tableName);
1332         HTableDescriptor desc = table.getTableDescriptor();
1333 
1334         for (HColumnDescriptor e : desc.getFamilies()) {
1335           ColumnDescriptor col = ThriftUtilities.colDescFromHbase(e);
1336           columns.put(col.name, col);
1337         }
1338         return columns;
1339       } catch (IOException e) {
1340         LOG.warn(e.getMessage(), e);
1341         throw new IOError(e.getMessage());
1342       }
1343     }
1344 
1345     @Override
1346     public List<TCell> getRowOrBefore(ByteBuffer tableName, ByteBuffer row,
1347         ByteBuffer family) throws IOError {
1348       try {
1349         HTable table = getTable(getBytes(tableName));
1350         Result result = table.getRowOrBefore(getBytes(row), getBytes(family));
1351         return ThriftUtilities.cellFromHBase(result.raw());
1352       } catch (IOException e) {
1353         LOG.warn(e.getMessage(), e);
1354         throw new IOError(e.getMessage());
1355       }
1356     }
1357 
1358     @Override
1359     public TRegionInfo getRegionInfo(ByteBuffer searchRow) throws IOError {
1360       try {
1361         HTable table = getTable(HConstants.META_TABLE_NAME);
1362         byte[] row = toBytes(searchRow);
1363         Result startRowResult = table.getRowOrBefore(
1364           row, HConstants.CATALOG_FAMILY);
1365 
1366         if (startRowResult == null) {
1367           throw new IOException("Cannot find row in .META., row="
1368                                 + Bytes.toString(searchRow.array()));
1369         }
1370 
1371         // find region start and end keys
1372         byte[] value = startRowResult.getValue(HConstants.CATALOG_FAMILY,
1373                                                HConstants.REGIONINFO_QUALIFIER);
1374         if (value == null || value.length == 0) {
1375           throw new IOException("HRegionInfo REGIONINFO was null or " +
1376                                 " empty in Meta for row="
1377                                 + Bytes.toString(row));
1378         }
1379         HRegionInfo regionInfo = Writables.getHRegionInfo(value);
1380         TRegionInfo region = new TRegionInfo();
1381         region.setStartKey(regionInfo.getStartKey());
1382         region.setEndKey(regionInfo.getEndKey());
1383         region.id = regionInfo.getRegionId();
1384         region.setName(regionInfo.getRegionName());
1385         region.version = regionInfo.getVersion();
1386 
1387         // find region assignment to server
1388         value = startRowResult.getValue(HConstants.CATALOG_FAMILY,
1389                                         HConstants.SERVER_QUALIFIER);
1390         if (value != null && value.length > 0) {
1391           String hostAndPort = Bytes.toString(value);
1392           region.setServerName(Bytes.toBytes(
1393               Addressing.parseHostname(hostAndPort)));
1394           region.port = Addressing.parsePort(hostAndPort);
1395         }
1396         return region;
1397       } catch (IOException e) {
1398         LOG.warn(e.getMessage(), e);
1399         throw new IOError(e.getMessage());
1400       }
1401     }
1402 
1403     private void initMetrics(ThriftMetrics metrics) {
1404       this.metrics = metrics;
1405     }
1406 
1407     @Override
1408     public void increment(TIncrement tincrement) throws IOError, TException {
1409 
1410       if (tincrement.getRow().length == 0 || tincrement.getTable().length == 0) {
1411         throw new TException("Must supply a table and a row key; can't increment");
1412       }
1413 
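      // With coalescing enabled, the increment is queued on the
      // IncrementCoalescer, which applies it later, instead of being sent to
      // the region server synchronously on this handler thread.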
1414       if (conf.getBoolean(COALESCE_INC_KEY, false)) {
1415         this.coalescer.queueIncrement(tincrement);
1416         return;
1417       }
1418 
1419       try {
1420         HTable table = getTable(tincrement.getTable());
1421         Increment inc = ThriftUtilities.incrementFromThrift(tincrement);
1422         table.increment(inc);
1423       } catch (IOException e) {
1424         LOG.warn(e.getMessage(), e);
1425         throw new IOError(e.getMessage());
1426       }
1427     }
1428 
1429     @Override
1430     public void incrementRows(List<TIncrement> tincrements) throws IOError, TException {
1431       if (conf.getBoolean(COALESCE_INC_KEY, false)) {
1432         this.coalescer.queueIncrements(tincrements);
1433         return;
1434       }
1435       for (TIncrement tinc : tincrements) {
1436         increment(tinc);
1437       }
1438     }
1439   }
1440 
1441 
1442 
1443   /**
1444    * Adds all the attributes into the Operation object
1445    */
1446   private static void addAttributes(OperationWithAttributes op,
1447     Map<ByteBuffer, ByteBuffer> attributes) {
1448     if (attributes == null || attributes.size() == 0) {
1449       return;
1450     }
1451     for (Map.Entry<ByteBuffer, ByteBuffer> entry : attributes.entrySet()) {
1452       String name = Bytes.toStringBinary(getBytes(entry.getKey()));
1453       byte[] value =  getBytes(entry.getValue());
1454       op.setAttribute(name, value);
1455     }
1456   }
1457 
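  /**
   * Registers user-supplied filters with ParseFilter so they can be named in
   * scan filter strings. "hbase.thrift.filters" is a comma-separated list of
   * FilterName:full.class.Name entries, e.g. (hypothetical)
   * "MyFilter:org.example.MyFilter".
   */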
1458   public static void registerFilters(Configuration conf) {
1459     String[] filters = conf.getStrings("hbase.thrift.filters");
1460     if(filters != null) {
1461       for(String filterClass: filters) {
1462         String[] filterPart = filterClass.split(":");
1463         if(filterPart.length != 2) {
1464           LOG.warn("Invalid filter specification " + filterClass + " - skipping");
1465         } else {
1466           ParseFilter.registerFilter(filterPart[0], filterPart[1]);
1467         }
1468       }
1469     }
1470   }
1471 }