View Javadoc

1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase.coprocessor;
19  
20  import java.io.IOException;
21  import java.util.ArrayList;
22  import java.util.List;
23  import java.util.SortedSet;
24  import java.util.TreeSet;
25  
26  import org.apache.hadoop.classification.InterfaceAudience;
27  import org.apache.hadoop.classification.InterfaceStability;
28  import org.apache.hadoop.hbase.Coprocessor;
29  import org.apache.hadoop.hbase.CoprocessorEnvironment;
30  import org.apache.hadoop.hbase.HRegionInfo;
31  import org.apache.hadoop.hbase.client.Mutation;
32  import org.apache.hadoop.hbase.exceptions.CoprocessorException;
33  import org.apache.hadoop.hbase.regionserver.HRegion;
34  import org.apache.hadoop.hbase.exceptions.WrongRegionException;
35  import org.apache.hadoop.hbase.util.Bytes;
36  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
37  import org.apache.hadoop.hbase.protobuf.ResponseConverter;
38  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
39  import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest;
40  import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateResponse;
41  import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiRowMutationService;
42  
43  import com.google.protobuf.RpcCallback;
44  import com.google.protobuf.RpcController;
45  import com.google.protobuf.Service;
46  
/**
 * This class demonstrates how to implement atomic multi row transactions using
 * {@link HRegion#mutateRowsWithLocks(java.util.Collection, java.util.Collection)}
 * and Coprocessor endpoints.
 * <p>
 * Defines a protocol to perform multi row transactions.
 * See {@link MultiRowMutationEndpoint} for the implementation.
 * <br/>
 * See
 * {@link HRegion#mutateRowsWithLocks(java.util.Collection, java.util.Collection)}
 * for details and limitations.
 * <br/>
 * Example:
 * <pre>{@code
 * List<Mutation> mutations = ...;
 * Put p1 = new Put(row1);
 * Put p2 = new Put(row2);
 * ...
 * Mutate m1 = ProtobufUtil.toMutate(MutateType.PUT, p1);
 * Mutate m2 = ProtobufUtil.toMutate(MutateType.PUT, p2);
 * MultiMutateRequest.Builder mrmBuilder = MultiMutateRequest.newBuilder();
 * mrmBuilder.addMutationRequest(m1);
 * mrmBuilder.addMutationRequest(m2);
 * CoprocessorRpcChannel channel = t.coprocessorService(ROW);
 * MultiRowMutationService.BlockingInterface service =
 *    MultiRowMutationService.newBlockingStub(channel);
 * MultiMutateRequest mrm = mrmBuilder.build();
 * service.mutateRows(null, mrm);
 * }</pre>
 */
77  @InterfaceAudience.Public
78  @InterfaceStability.Evolving
79  public class MultiRowMutationEndpoint extends MultiRowMutationService implements
80  CoprocessorService, Coprocessor {
81    private RegionCoprocessorEnvironment env;
82    @Override
83    public void mutateRows(RpcController controller, MultiMutateRequest request, 
84        RpcCallback<MultiMutateResponse> done) {
85      MultiMutateResponse response = MultiMutateResponse.getDefaultInstance();
86      try {
87        // set of rows to lock, sorted to avoid deadlocks
88        SortedSet<byte[]> rowsToLock = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
89        List<MutationProto> mutateRequestList = request.getMutationRequestList();
90        List<Mutation> mutations = new ArrayList<Mutation>(mutateRequestList.size());
91        for (MutationProto m : mutateRequestList) {
92          mutations.add(ProtobufUtil.toMutation(m));
93        }
94  
95        HRegionInfo regionInfo = env.getRegion().getRegionInfo();
96        for (Mutation m : mutations) {
97          // check whether rows are in range for this region
98          if (!HRegion.rowIsInRange(regionInfo, m.getRow())) {
99            String msg = "Requested row out of range '"
100               + Bytes.toStringBinary(m.getRow()) + "'";
101           if (rowsToLock.isEmpty()) {
102             // if this is the first row, region might have moved,
103             // allow client to retry
104             throw new WrongRegionException(msg);
105           } else {
106             // rows are split between regions, do not retry
107             throw new org.apache.hadoop.hbase.exceptions.DoNotRetryIOException(msg);
108           }
109         }
110         rowsToLock.add(m.getRow());
111       }
112       // call utility method on region
113       env.getRegion().mutateRowsWithLocks(mutations, rowsToLock);
114     } catch (IOException e) {
115       ResponseConverter.setControllerException(controller, e);
116     }
117     done.run(response);
118   }
119 
120 
121   @Override
122   public Service getService() {
123     return this;
124   }
125 
126   /**
127    * Stores a reference to the coprocessor environment provided by the
128    * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this
129    * coprocessor is loaded.  Since this is a coprocessor endpoint, it always expects to be loaded
130    * on a table region, so always expects this to be an instance of
131    * {@link RegionCoprocessorEnvironment}.
132    * @param env the environment provided by the coprocessor host
133    * @throws IOException if the provided environment is not an instance of
134    * {@code RegionCoprocessorEnvironment}
135    */
136   @Override
137   public void start(CoprocessorEnvironment env) throws IOException {
138     if (env instanceof RegionCoprocessorEnvironment) {
139       this.env = (RegionCoprocessorEnvironment)env;
140     } else {
141       throw new CoprocessorException("Must be loaded on a table region!");
142     }
143   }
144 
145   @Override
146   public void stop(CoprocessorEnvironment env) throws IOException {
147     // nothing to do
148   }
149 }