/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.ipc;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;

import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.codec.Codec;
import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;

import com.google.common.base.Preconditions;
import com.google.protobuf.CodedInputStream;
import com.google.protobuf.CodedOutputStream;
import com.google.protobuf.Message;
/**
 * Utility to help ipc'ing: building cell blocks, decoding them back into {@link CellScanner}s,
 * and framing length-delimited protobuf request/response messages.
 */
class IPCUtil {
  public static final Log LOG = LogFactory.getLog(IPCUtil.class);
  private final int cellBlockBuildingInitialBufferSize;
  /**
   * How much we think the decompressor will expand the original compressed content.
   */
  private final int cellBlockDecompressionMultiplier;
  private final Configuration conf;

  IPCUtil(final Configuration conf) {
    super();
    this.conf = conf;
    this.cellBlockBuildingInitialBufferSize =
      conf.getInt("hbase.ipc.cellblock.building.initial.buffersize", 16 * 1024);
    this.cellBlockDecompressionMultiplier =
        conf.getInt("hbase.ipc.cellblock.decompression.buffersize.multiplier", 3);
  }
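
  // Tuning sketch (illustrative only): both buffer sizes above come from the Configuration
  // passed to the constructor, so a caller could override the defaults like this.
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   conf.setInt("hbase.ipc.cellblock.building.initial.buffersize", 64 * 1024);
  //   conf.setInt("hbase.ipc.cellblock.decompression.buffersize.multiplier", 2);
  //   IPCUtil util = new IPCUtil(conf);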

  /**
   * Build a cell block using the passed in <code>codec</code>.
   * @param codec codec used to encode the Cells
   * @param compressor optional compression codec applied to the encoded cells; may be null
   * @param cells the Cells to encode
   * @return Null or byte buffer filled with the passed-in Cells encoded using the passed in
   * <code>codec</code>; the returned buffer has been flipped and is ready for
   * reading.  Use limit to find total size.
   * @throws IOException
   */
  @SuppressWarnings("resource")
  ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
      final CellScanner cells)
  throws IOException {
    if (cells == null) return null;
    // TODO: Reuse buffers?
    // Presizing doesn't work because we can't tell what size the serialized result will be;
    // BBOS will resize itself as needed.
    ByteBufferOutputStream baos =
      new ByteBufferOutputStream(this.cellBlockBuildingInitialBufferSize);
    OutputStream os = baos;
    Compressor poolCompressor = null;
    try {
      if (compressor != null) {
        if (compressor instanceof Configurable) ((Configurable)compressor).setConf(this.conf);
        poolCompressor = CodecPool.getCompressor(compressor);
        os = compressor.createOutputStream(os, poolCompressor);
      }
      Codec.Encoder encoder = codec.getEncoder(os);
      while (cells.advance()) {
        encoder.write(cells.current());
      }
      encoder.flush();
    } finally {
      os.close();
      if (poolCompressor != null) CodecPool.returnCompressor(poolCompressor);
    }
    if (LOG.isTraceEnabled()) {
      if (this.cellBlockBuildingInitialBufferSize < baos.size()) {
        LOG.trace("Buffer grew from " + this.cellBlockBuildingInitialBufferSize +
          " to " + baos.size());
      }
    }
    return baos.getByteBuffer();
  }
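
  // Usage sketch (illustrative only): encoding the cells behind a CellScanner with a Codec
  // implementation such as KeyValueCodec and no compression.  The codec choice and variable
  // names are assumptions, not something this class mandates.
  //
  //   IPCUtil util = new IPCUtil(conf);
  //   ByteBuffer cellBlock = util.buildCellBlock(new KeyValueCodec(), null, cells);
  //   if (cellBlock != null) {
  //     int encodedLength = cellBlock.limit();  // buffer is flipped, ready for reading
  //   }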

  /**
   * @param codec codec used to decode the cell block
   * @param compressor optional compression codec the cell block was compressed with; may be null
   * @param cellBlock the encoded (and possibly compressed) cell block
   * @return CellScanner to work against the content of <code>cellBlock</code>
   * @throws IOException
   */
  CellScanner createCellScanner(final Codec codec, final CompressionCodec compressor,
      final byte [] cellBlock)
  throws IOException {
    return createCellScanner(codec, compressor, cellBlock, 0, cellBlock.length);
  }

  /**
   * @param codec codec used to decode the cell block
   * @param compressor optional compression codec the cell block was compressed with; may be null
   * @param cellBlock the encoded (and possibly compressed) cell block
   * @param offset offset into <code>cellBlock</code> at which the block starts
   * @param length length of the block in bytes
   * @return CellScanner to work against the content of <code>cellBlock</code>
   * @throws IOException
   */
  CellScanner createCellScanner(final Codec codec, final CompressionCodec compressor,
      final byte [] cellBlock, final int offset, final int length)
  throws IOException {
    // If compressed, decompress it first before passing it on else we will leak compression
    // resources if the stream is not closed properly after we let it out.
    InputStream is = null;
    if (compressor != null) {
      // GZIPCodec fails w/ NPE if no configuration.
      if (compressor instanceof Configurable) ((Configurable)compressor).setConf(this.conf);
      Decompressor poolDecompressor = CodecPool.getDecompressor(compressor);
      CompressionInputStream cis =
        compressor.createInputStream(new ByteArrayInputStream(cellBlock, offset, length),
        poolDecompressor);
      try {
        // TODO: This is ugly.  The buffer will be resized on us if we guess wrong.
        // TODO: Reuse buffers.
        ByteBufferOutputStream bbos = new ByteBufferOutputStream(length *
          this.cellBlockDecompressionMultiplier);
        IOUtils.copy(cis, bbos);
        bbos.close();
        ByteBuffer bb = bbos.getByteBuffer();
        is = new ByteArrayInputStream(bb.array(), 0, bb.limit());
      } finally {
        // Close the compression stream we opened, not the plain ByteArrayInputStream we hand
        // back; closing cis is what releases the codec's resources.
        cis.close();
        CodecPool.returnDecompressor(poolDecompressor);
      }
    } else {
      is = new ByteArrayInputStream(cellBlock, offset, length);
    }
    return codec.getDecoder(is);
  }
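
  // Round-trip sketch (illustrative only): reading back a block produced by buildCellBlock
  // above.  The codec and compressor must match the ones the block was built with.
  //
  //   ByteBuffer cellBlock = util.buildCellBlock(codec, compressor, cells);
  //   CellScanner scanner =
  //     util.createCellScanner(codec, compressor, cellBlock.array(), 0, cellBlock.limit());
  //   while (scanner.advance()) {
  //     Cell cell = scanner.current();
  //   }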

  /**
   * Write out header, param, and cell block if there is one, to a {@link ByteBufferOutputStream}
   * sized to hold these elements.
   * @param header the call header
   * @param param the call parameter; may be null
   * @param cellBlock the encoded cell block; may be null
   * @return A {@link ByteBufferOutputStream} filled with the content of the passed in
   * <code>header</code>, <code>param</code>, and <code>cellBlock</code>.
   * @throws IOException
   */
  static ByteBufferOutputStream write(final Message header, final Message param,
      final ByteBuffer cellBlock)
  throws IOException {
    int totalSize = getTotalSizeWhenWrittenDelimited(header, param);
    if (cellBlock != null) totalSize += cellBlock.limit();
    ByteBufferOutputStream bbos = new ByteBufferOutputStream(totalSize);
    write(bbos, header, param, cellBlock, totalSize);
    bbos.close();
    return bbos;
  }
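
  // Usage sketch (illustrative only; the requestHeader/request message instances are assumptions):
  //
  //   ByteBufferOutputStream encoded = IPCUtil.write(requestHeader, request, cellBlock);
  //   ByteBuffer toSend = encoded.getByteBuffer();  // flipped, ready to push to the connection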

  /**
   * Write out header, param, and cell block if there is one.
   * @param dos the stream to write to
   * @param header the call header
   * @param param the call parameter; may be null
   * @param cellBlock the encoded cell block; may be null
   * @return Total number of bytes written.
   * @throws IOException
   */
  static int write(final OutputStream dos, final Message header, final Message param,
      final ByteBuffer cellBlock)
  throws IOException {
    // Must calculate total size and write that first so the other side can read it all in one
    // swoop.  This is dictated by how the server is currently written.  Server needs to change
    // if we are to be able to write without the length prefixing.
    int totalSize = IPCUtil.getTotalSizeWhenWrittenDelimited(header, param);
    if (cellBlock != null) totalSize += cellBlock.remaining();
    return write(dos, header, param, cellBlock, totalSize);
  }

  private static int write(final OutputStream dos, final Message header, final Message param,
    final ByteBuffer cellBlock, final int totalSize)
  throws IOException {
    // Bytes.toBytes writes the int in the same big-endian layout as DataOutputStream#writeInt.
    dos.write(Bytes.toBytes(totalSize));
    header.writeDelimitedTo(dos);
    if (param != null) param.writeDelimitedTo(dos);
    if (cellBlock != null) dos.write(cellBlock.array(), 0, cellBlock.remaining());
    dos.flush();
    return totalSize;
  }
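
  // For reference, the frame the private write() above produces looks like this (the leading
  // 4-byte length does not count itself):
  //
  //   +-------------+---------------------------+---------------------------+----------------+
  //   | int32 total | varint len + header bytes | varint len + param bytes  | raw cell block |
  //   | size        | (writeDelimitedTo)        | (optional)                | (optional)     |
  //   +-------------+---------------------------+---------------------------+----------------+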

  /**
   * @param in Stream cued up just before a delimited message
   * @return The bytes that make up the delimited message read from <code>in</code>; the varint
   * length prefix is consumed but not included in the returned array
   * @throws IOException
   */
  static byte [] getDelimitedMessageBytes(final DataInputStream in) throws IOException {
    byte b = in.readByte();
    int size = CodedInputStream.readRawVarint32(b, in);
    // Allocate right-sized buffer rather than let pb allocate its default minimum 4k.
    byte [] bytes = new byte[size];
    IOUtils.readFully(in, bytes);
    return bytes;
  }
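
  // Usage sketch (illustrative only; RPCProtos.RequestHeader is just an example of a generated
  // protobuf type, not something this method requires):
  //
  //   byte [] headerBytes = IPCUtil.getDelimitedMessageBytes(in);
  //   RPCProtos.RequestHeader header = RPCProtos.RequestHeader.parseFrom(headerBytes);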

  /**
   * @param messages the messages to be written out with <code>writeDelimitedTo</code>
   * @return Size on the wire when the messages are each written delimited, i.e. preceded by a
   * varint holding the message's serialized size
   */
  static int getTotalSizeWhenWrittenDelimited(Message ... messages) {
    // Sum in a long so the overflow check below can actually trip.
    long totalSize = 0;
    for (Message m: messages) {
      if (m == null) continue;
      totalSize += m.getSerializedSize();
      totalSize += CodedOutputStream.computeRawVarint32Size(m.getSerializedSize());
    }
    Preconditions.checkArgument(totalSize < Integer.MAX_VALUE);
    return (int)totalSize;
  }
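
  // Worked example (sizes are illustrative): a header that serializes to 100 bytes and a param
  // that serializes to 300 bytes come out as 100 + 1 (varint for 100) + 300 + 2 (varint for 300)
  // = 403 bytes on the wire, before any cell block is appended.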
}