View Javadoc

1   /**
2    * Copyright 2009 The Apache Software Foundation
3    *
4    * Licensed to the Apache Software Foundation (ASF) under one
5    * or more contributor license agreements.  See the NOTICE file
6    * distributed with this work for additional information
7    * regarding copyright ownership.  The ASF licenses this file
8    * to you under the Apache License, Version 2.0 (the
9    * "License"); you may not use this file except in compliance
10   * with the License.  You may obtain a copy of the License at
11   *
12   *     http://www.apache.org/licenses/LICENSE-2.0
13   *
14   * Unless required by applicable law or agreed to in writing, software
15   * distributed under the License is distributed on an "AS IS" BASIS,
16   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17   * See the License for the specific language governing permissions and
18   * limitations under the License.
19   */
20  package org.apache.hadoop.hbase.regionserver.wal;
21  
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.codec.Decoder;
import org.apache.hadoop.hbase.codec.Encoder;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.io.Writable;
39  
40  /**
41   * WALEdit: Used in HBase's transaction log (WAL) to represent
42   * the collection of edits (KeyValue objects) corresponding to a
43   * single transaction. The class implements "Writable" interface
44   * for serializing/deserializing a set of KeyValue items.
45   *
46   * Previously, if a transaction contains 3 edits to c1, c2, c3 for a row R,
47   * the HLog would have three log entries as follows:
48   *
49   *    <logseq1-for-edit1>:<KeyValue-for-edit-c1>
50   *    <logseq2-for-edit2>:<KeyValue-for-edit-c2>
51   *    <logseq3-for-edit3>:<KeyValue-for-edit-c3>
52   *
53   * This presents problems because row level atomicity of transactions
54   * was not guaranteed. If we crash after few of the above appends make
55   * it, then recovery will restore a partial transaction.
56   *
57   * In the new world, all the edits for a given transaction are written
58   * out as a single record, for example:
59   *
60   *   <logseq#-for-entire-txn>:<WALEdit-for-entire-txn>
61   *
62   * where, the WALEdit is serialized as:
63   *   <-1, # of edits, <KeyValue>, <KeyValue>, ... >
64   * For example:
 65   *   <-1, 3, <KeyValue-for-edit-c1>, <KeyValue-for-edit-c2>, <KeyValue-for-edit-c3>>
66   *
67   * The -1 marker is just a special way of being backward compatible with
68   * an old HLog which would have contained a single <KeyValue>.
69   *
70   * The deserializer for WALEdit backward compatibly detects if the record
71   * is an old style KeyValue or the new style WALEdit.
72   *
73   */
74  public class WALEdit implements Writable, HeapSize {
75  
76    private final int VERSION_2 = -1;
77  
78    private final ArrayList<KeyValue> kvs = new ArrayList<KeyValue>();
79    private NavigableMap<byte[], Integer> scopes;
80  
81    // default to decoding uncompressed data - needed for replication, which enforces that
82    // uncompressed edits are sent across the wire. In the regular case (reading/writing WAL), the
83    // codec will be setup by the reader/writer class, not here.
84    private WALEditCodec codec = new WALEditCodec();
85  
86    public WALEdit() {
87    }
88  
89    /**
90     * {@link #setCodec(WALEditCodec)} must be called before calling this method.
91     * @param compression the {@link CompressionContext} for the underlying codec.
92     */
93    @SuppressWarnings("javadoc")
94    public void setCompressionContext(final CompressionContext compression) {
95      this.codec.setCompression(compression);
96    }
97  
98    public void setCodec(WALEditCodec codec) {
99      this.codec = codec;
100   }
101 
102 
103   public void add(KeyValue kv) {
104     this.kvs.add(kv);
105   }
106 
107   public boolean isEmpty() {
108     return kvs.isEmpty();
109   }
110 
111   public int size() {
112     return kvs.size();
113   }
114 
115   public List<KeyValue> getKeyValues() {
116     return kvs;
117   }
118 
119   public NavigableMap<byte[], Integer> getScopes() {
120     return scopes;
121   }
122 
123 
124   public void setScopes (NavigableMap<byte[], Integer> scopes) {
125     // We currently process the map outside of WALEdit,
126     // TODO revisit when replication is part of core
127     this.scopes = scopes;
128   }
129 
130   public void readFields(DataInput in) throws IOException {
131     kvs.clear();
132     if (scopes != null) {
133       scopes.clear();
134     }
135     Decoder decoder = this.codec.getDecoder((DataInputStream) in);
136     int versionOrLength = in.readInt();
137     int length = versionOrLength;
138 
139     // make sure we get the real length
140     if (versionOrLength == VERSION_2) {
141       length = in.readInt();
142     }
143 
144     // read in all the key values
145     for(int i=0; i< length && decoder.advance(); i++) {
146       kvs.add(decoder.current());
147     }
148 
149     //its a new style WAL, so we need replication scopes too
150     if (versionOrLength == VERSION_2) {
151       int numFamilies = in.readInt();
152       if (numFamilies > 0) {
153         if (scopes == null) {
154           scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
155         }
156         for (int i = 0; i < numFamilies; i++) {
157           byte[] fam = Bytes.readByteArray(in);
158           int scope = in.readInt();
159           scopes.put(fam, scope);
160         }
161       }
162     }
163   }
164 
165   public void write(DataOutput out) throws IOException {
166     Encoder kvEncoder = codec.getEncoder((DataOutputStream) out);
167     out.writeInt(VERSION_2);
168 
169     //write out the keyvalues
170     out.writeInt(kvs.size());
171     for(KeyValue kv: kvs){
172       kvEncoder.write(kv);
173     }
174     kvEncoder.flush();
175 
176     if (scopes == null) {
177       out.writeInt(0);
178     } else {
179       out.writeInt(scopes.size());
180       for (byte[] key : scopes.keySet()) {
181         Bytes.writeByteArray(out, key);
182         out.writeInt(scopes.get(key));
183       }
184     }
185   }
186 
187   public long heapSize() {
188     long ret = 0;
189     for (KeyValue kv : kvs) {
190       ret += kv.heapSize();
191     }
192     if (scopes != null) {
193       ret += ClassSize.TREEMAP;
194       ret += ClassSize.align(scopes.size() * ClassSize.MAP_ENTRY);
195       // TODO this isn't quite right, need help here
196     }
197     return ret;
198   }
199 
200   public String toString() {
201     StringBuilder sb = new StringBuilder();
202 
203     sb.append("[#edits: " + kvs.size() + " = <");
204     for (KeyValue kv : kvs) {
205       sb.append(kv.toString());
206       sb.append("; ");
207     }
208     if (scopes != null) {
209       sb.append(" scopes: " + scopes.toString());
210     }
211     sb.append(">]");
212     return sb.toString();
213   }
214 }