/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.util;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;

/**
 * Similar to {@link RegionReplicaUtil} but for the server side
 */
public class ServerRegionReplicaUtil extends RegionReplicaUtil {

  /**
   * Returns the regionInfo object to use for interacting with the file system.
   * Secondary replicas do not have region directories of their own, so this resolves to the
   * default (primary) replica.
   * @param regionInfo the HRegionInfo of any replica of the region
   * @return the HRegionInfo of the default replica to use for filesystem interaction
   */
  public static HRegionInfo getRegionInfoForFs(HRegionInfo regionInfo) {
    if (regionInfo == null) {
      return null;
    }
    return RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo);
  }
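
  // --- Illustrative usage sketch (not part of the original class) --------------
  // Only the default replica (replicaId == 0) has a region directory on disk, so
  // path lookups for a secondary replica should first go through
  // getRegionInfoForFs(). The helper name below is hypothetical.
  private static String exampleEncodedDirNameOnDisk(HRegionInfo regionInfo) {
    HRegionInfo onDisk = getRegionInfoForFs(regionInfo);
    // For a secondary replica this is the primary's encoded name, i.e. the
    // directory that actually exists under the table directory.
    return onDisk == null ? null : onDisk.getEncodedName();
  }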

  /**
   * Returns whether the given region is read only, i.e. whether it cannot accept writes.
   * A region is read only if its table is configured read only or if it is not the
   * default (primary) replica.
   * @param region the HRegion object
   * @return true if the region replica is read only
   */
  public static boolean isReadOnly(HRegion region) {
    return region.getTableDesc().isReadOnly()
      || !isDefaultReplica(region.getRegionInfo());
  }
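
  // --- Illustrative usage sketch (not part of the original class) --------------
  // A write path could consult isReadOnly() before applying a mutation, so that
  // writes against secondary replicas or read-only tables fail fast. The helper
  // name below is hypothetical.
  private static void exampleRejectWritesIfReadOnly(HRegion region) throws IOException {
    if (isReadOnly(region)) {
      throw new IOException("Region " + region.getRegionInfo().getRegionNameAsString()
        + " is read only and cannot accept writes");
    }
  }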

  /**
   * Returns whether to replay the recovered edits to flush the results.
   * Currently secondary region replicas do not replay the edits, since doing so would
   * cause flushes that might affect the primary region. Primary regions, even when opened
   * in read-only mode, should replay the edits.
   * @param region the HRegion object
   * @return whether recovered edits should be replayed
   */
  public static boolean shouldReplayRecoveredEdits(HRegion region) {
    return isDefaultReplica(region.getRegionInfo());
  }

  /**
   * Returns a StoreFileInfo from the given FileStatus. Secondary replicas refer to the
   * files of the primary region, so an HFileLink is used to construct the StoreFileInfo.
   * This ensures that the secondary can continue reading the store files even if they are
   * moved to the archive after a compaction.
   * @throws IOException if the StoreFileInfo cannot be constructed
   */
  public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs,
      HRegionInfo regionInfo, HRegionInfo regionInfoForFs, String familyName, FileStatus status)
      throws IOException {

    // if this is a primary region, just return the StoreFileInfo constructed from the path
    if (regionInfo.equals(regionInfoForFs)) {
      return new StoreFileInfo(conf, fs, status);
    }

    // else create a store file link. The link file does not exist on the filesystem though.
    HFileLink link = new HFileLink(conf,
      HFileLink.createPath(regionInfoForFs.getTable(), regionInfoForFs.getEncodedName(),
        familyName, status.getPath().getName()));
    return new StoreFileInfo(conf, fs, status, link);
  }
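
  // --- Illustrative usage sketch (not part of the original class) --------------
  // Ties the pieces together for a single store file: resolve the filesystem-facing
  // region info first, then let getStoreFileInfo() pick between a plain StoreFileInfo
  // (primary) and an HFileLink-backed one (secondary). The helper name below is
  // hypothetical.
  private static StoreFileInfo exampleOpenStoreFile(Configuration conf, FileSystem fs,
      HRegionInfo regionInfo, String familyName, FileStatus status) throws IOException {
    HRegionInfo regionInfoForFs = getRegionInfoForFs(regionInfo);
    return getStoreFileInfo(conf, fs, regionInfo, regionInfoForFs, familyName, status);
  }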

}