/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.ipc;

import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.codec.Codec;
import org.apache.hadoop.hbase.codec.KeyValueCodec;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PoolMap;
import org.apache.hadoop.io.compress.CompressionCodec;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;

/**
 * Provides the basics for an RpcClient implementation, such as configuration and logging.
 */
@InterfaceAudience.Private
public abstract class AbstractRpcClient implements RpcClient {
  public static final Log LOG = LogFactory.getLog(AbstractRpcClient.class);

  protected final Configuration conf;
  protected String clusterId;
  protected final SocketAddress localAddr;

  protected UserProvider userProvider;
  protected final IPCUtil ipcUtil;

  protected final int minIdleTimeBeforeClose; // if the connection is idle for more than this
                                              // time (in ms), it may be closed at any moment
  protected final int maxRetries; // the maximum number of retries for socket connections
  protected final long failureSleep; // time to sleep before retrying on failure
  protected final boolean tcpNoDelay; // if true, disable Nagle's algorithm
  protected final boolean tcpKeepAlive; // if true, use TCP keepalives
  protected final Codec codec;
  protected final CompressionCodec compressor;
  protected final boolean fallbackAllowed;

  protected final int connectTO;
  protected final int readTO;
  protected final int writeTO;

  /**
   * Construct an IPC client for the cluster <code>clusterId</code>.
   *
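   * <p>A minimal, illustrative sketch of tuning the socket-related keys this constructor
   * reads (the values shown are hypothetical, not recommendations):
   * <pre>{@code
   * Configuration conf = HBaseConfiguration.create();
   * conf.setBoolean("hbase.ipc.client.tcpnodelay", true);   // disable Nagle's algorithm
   * conf.setBoolean("hbase.ipc.client.tcpkeepalive", true); // use TCP keepalives
   * conf.setInt("hbase.ipc.client.connect.max.retries", 3); // socket connect retries
   * }</pre>
   *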
   * @param conf configuration
   * @param clusterId the cluster id
   * @param localAddr client socket bind address
   */
  public AbstractRpcClient(Configuration conf, String clusterId, SocketAddress localAddr) {
    this.userProvider = UserProvider.instantiate(conf);
    this.localAddr = localAddr;
    this.tcpKeepAlive = conf.getBoolean("hbase.ipc.client.tcpkeepalive", true);
    this.clusterId = clusterId != null ? clusterId : HConstants.CLUSTER_ID_DEFAULT;
    this.failureSleep = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
        HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    this.maxRetries = conf.getInt("hbase.ipc.client.connect.max.retries", 0);
    this.tcpNoDelay = conf.getBoolean("hbase.ipc.client.tcpnodelay", true);
    this.ipcUtil = new IPCUtil(conf);

    this.minIdleTimeBeforeClose = conf.getInt(IDLE_TIME, 120000); // 2 minutes
    this.conf = conf;
    this.codec = getCodec();
    this.compressor = getCompressor(conf);
    this.fallbackAllowed = conf.getBoolean(IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
        IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
    this.connectTO = conf.getInt(SOCKET_TIMEOUT_CONNECT, DEFAULT_SOCKET_TIMEOUT_CONNECT);
    this.readTO = conf.getInt(SOCKET_TIMEOUT_READ, DEFAULT_SOCKET_TIMEOUT_READ);
    this.writeTO = conf.getInt(SOCKET_TIMEOUT_WRITE, DEFAULT_SOCKET_TIMEOUT_WRITE);

    // Dump the effective client configuration when debug logging is enabled.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Codec=" + this.codec + ", compressor=" + this.compressor +
          ", tcpKeepAlive=" + this.tcpKeepAlive +
          ", tcpNoDelay=" + this.tcpNoDelay +
          ", connectTO=" + this.connectTO +
          ", readTO=" + this.readTO +
          ", writeTO=" + this.writeTO +
          ", minIdleTimeBeforeClose=" + this.minIdleTimeBeforeClose +
          ", maxRetries=" + this.maxRetries +
          ", fallbackAllowed=" + this.fallbackAllowed +
          ", bind address=" + (this.localAddr != null ? this.localAddr : "null"));
    }
  }

  @VisibleForTesting
  public static String getDefaultCodec(final Configuration c) {
    // If "hbase.client.default.rpc.codec" is the empty string -- you can't set it to null
    // because Configuration will complain -- then there is no default codec (and we'll pb
    // everything). Otherwise, the default is KeyValueCodec.
    return c.get(DEFAULT_CODEC_CLASS, KeyValueCodec.class.getCanonicalName());
  }

  /**
   * Encapsulate the ugly casting and RuntimeException conversion in private method.
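   *
   * <p>A hedged sketch of disabling cell block encoding entirely; as the method body notes,
   * both keys must be set to the empty string:
   * <pre>{@code
   * conf.set("hbase.client.rpc.codec", "");
   * conf.set("hbase.client.default.rpc.codec", "");
   * }</pre>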
   * @return Codec to use on this client.
   */
  Codec getCodec() {
    // For NO CODEC, "hbase.client.rpc.codec" must be configured with the empty string AND
    // "hbase.client.default.rpc.codec" also -- because the default is to do cell block encoding.
    String className = conf.get(HConstants.RPC_CODEC_CONF_KEY, getDefaultCodec(this.conf));
    if (className == null || className.length() == 0) return null;
    try {
      return (Codec) Class.forName(className).newInstance();
    } catch (Exception e) {
      throw new RuntimeException("Failed getting codec " + className, e);
    }
  }

  /**
   * Encapsulate the ugly casting and RuntimeException conversion in private method.
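   *
   * <p>Illustrative only: any Hadoop CompressionCodec class name can be supplied, for example:
   * <pre>{@code
   * conf.set("hbase.client.rpc.compressor", "org.apache.hadoop.io.compress.GzipCodec");
   * }</pre>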
   * @param conf configuration
   * @return The compressor to use on this client.
   */
  private static CompressionCodec getCompressor(final Configuration conf) {
    String className = conf.get("hbase.client.rpc.compressor", null);
    if (className == null || className.isEmpty()) return null;
    try {
      return (CompressionCodec) Class.forName(className).newInstance();
    } catch (Exception e) {
      throw new RuntimeException("Failed getting compressor " + className, e);
    }
  }

  /**
   * Return the pool type specified in the configuration, which must be set to
   * either {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#RoundRobin} or
   * {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#ThreadLocal};
   * otherwise, default to the former.
   *
   * <p>For applications with many user threads, use a small round-robin pool. For
   * applications with few user threads, you may want to try using a
   * thread-local pool. In any case, the number of {@link org.apache.hadoop.hbase.ipc.RpcClient}
   * instances should not exceed the operating system's hard limit on the number of
   * connections.
   *
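   * <p>A hedged sketch of selecting a round-robin pool of five connections per server
   * (the values are hypothetical, not recommendations):
   * <pre>{@code
   * conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "RoundRobin");
   * conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 5);
   * }</pre>
   *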
   * @param config configuration
   * @return either a {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#RoundRobin} or
   *         {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#ThreadLocal}
   */
  protected static PoolMap.PoolType getPoolType(Configuration config) {
    return PoolMap.PoolType
        .valueOf(config.get(HConstants.HBASE_CLIENT_IPC_POOL_TYPE), PoolMap.PoolType.RoundRobin,
            PoolMap.PoolType.ThreadLocal);
  }

  /**
   * Return the pool size specified in the configuration, which is applicable only if
   * the pool type is {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#RoundRobin}.
   *
   * @param config configuration
   * @return the maximum pool size
   */
  protected static int getPoolSize(Configuration config) {
    return config.getInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 1);
  }

  /**
   * Make a blocking call. Throws exceptions if there are network problems or if the remote code
   * threw an exception.
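   *
   * <p>Hedged sketch: reuse a single User instance so that calls share one connection, rather
   * than fetching a fresh ticket per call (illustrative, not taken from this class):
   * <pre>{@code
   * User ticket = userProvider.getCurrent(); // resolve once
   * // ... pass the same ticket to every call made through this client ...
   * }</pre>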
   * @param ticket Be careful which ticket you pass. A new user will mean a new Connection.
   *          {@link UserProvider#getCurrent()} makes a new instance of User each time, so each
   *          call will get a new Connection.
   * @return A pair with the Message response and the Cell data (if any).
   */
  Message callBlockingMethod(Descriptors.MethodDescriptor md, PayloadCarryingRpcController pcrc,
      Message param, Message returnType, final User ticket, final InetSocketAddress isa)
      throws ServiceException {
    long startTime = 0;
    if (LOG.isTraceEnabled()) {
      startTime = EnvironmentEdgeManager.currentTime();
    }
    int callTimeout = 0;
    CellScanner cells = null;
    if (pcrc != null) {
      callTimeout = pcrc.getCallTimeout();
      cells = pcrc.cellScanner();
      // Clear it here so we don't mistakenly treat these request cells as results.
      pcrc.setCellScanner(null);
    }
    Pair<Message, CellScanner> val;
    try {
      val = call(pcrc, md, param, cells, returnType, ticket, isa, callTimeout,
          pcrc != null ? pcrc.getPriority() : HConstants.NORMAL_QOS);
      if (pcrc != null) {
        // Shove the results into the controller so they can be carried across the proxy/pb
        // service void.
        if (val.getSecond() != null) pcrc.setCellScanner(val.getSecond());
      } else if (val.getSecond() != null) {
        throw new ServiceException("Client dropping data on the floor!");
      }

      if (LOG.isTraceEnabled()) {
        long callTime = EnvironmentEdgeManager.currentTime() - startTime;
        LOG.trace("Call: " + md.getName() + ", callTime: " + callTime + "ms");
      }
      return val.getFirst();
    } catch (Throwable e) {
      throw new ServiceException(e);
    }
  }

  /**
   * Make a call, passing <code>param</code>, to the IPC server running at
   * <code>address</code> which is servicing the <code>protocol</code> protocol,
   * with the <code>ticket</code> credentials, returning the value.
   * Throws exceptions if there are network problems or if the remote code
   * threw an exception.
   * @param ticket Be careful which ticket you pass. A new user will mean a new Connection.
   *          {@link UserProvider#getCurrent()} makes a new instance of User each time, so each
   *          call will get a new Connection.
   * @return A pair with the Message response and the Cell data (if any).
   * @throws InterruptedException
   * @throws java.io.IOException
   */
  protected abstract Pair<Message, CellScanner> call(PayloadCarryingRpcController pcrc,
      Descriptors.MethodDescriptor md, Message param, CellScanner cells,
      Message returnType, User ticket, InetSocketAddress addr, int callTimeout, int priority)
      throws IOException, InterruptedException;

  /**
   * Creates a "channel" that can be used by a blocking protobuf service. Useful for setting up
   * protobuf blocking stubs.
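   *
   * <p>A hedged sketch of wiring a generated protobuf blocking stub through this channel
   * ({@code ClientService} is illustrative; any protobuf blocking service is set up the
   * same way):
   * <pre>{@code
   * BlockingRpcChannel channel =
   *     rpcClient.createBlockingRpcChannel(serverName, user, operationTimeout);
   * ClientProtos.ClientService.BlockingInterface stub =
   *     ClientProtos.ClientService.newBlockingStub(channel);
   * }</pre>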
   * @return A blocking rpc channel that goes via this rpc client instance.
   */
  @Override
  public BlockingRpcChannel createBlockingRpcChannel(final ServerName sn,
      final User ticket, int defaultOperationTimeout) {
    return new BlockingRpcChannelImplementation(this, sn, ticket, defaultOperationTimeout);
  }

  /**
   * Blocking rpc channel that goes via hbase rpc.
   */
  @VisibleForTesting
  public static class BlockingRpcChannelImplementation implements BlockingRpcChannel {
    private final InetSocketAddress isa;
    private final AbstractRpcClient rpcClient;
    private final User ticket;
    private final int defaultOperationTimeout;

    /**
     * @param defaultOperationTimeout the default timeout when no timeout is given
     *          by the caller.
     */
    protected BlockingRpcChannelImplementation(final AbstractRpcClient rpcClient,
        final ServerName sn, final User ticket, int defaultOperationTimeout) {
      this.isa = new InetSocketAddress(sn.getHostname(), sn.getPort());
      this.rpcClient = rpcClient;
      this.ticket = ticket;
      this.defaultOperationTimeout = defaultOperationTimeout;
    }

    @Override
    public Message callBlockingMethod(Descriptors.MethodDescriptor md, RpcController controller,
        Message param, Message returnType) throws ServiceException {
      // Reuse the caller's controller if one was passed, making sure a call timeout is
      // always set; otherwise create a fresh controller with the default timeout.
      PayloadCarryingRpcController pcrc;
      if (controller != null) {
        pcrc = (PayloadCarryingRpcController) controller;
        if (!pcrc.hasCallTimeout()) {
          pcrc.setCallTimeout(defaultOperationTimeout);
        }
      } else {
        pcrc = new PayloadCarryingRpcController();
        pcrc.setCallTimeout(defaultOperationTimeout);
      }

      return this.rpcClient.callBlockingMethod(md, pcrc, param, returnType, this.ticket, this.isa);
    }
  }
}