View Javadoc

1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase;
19  
20  import com.google.common.annotations.VisibleForTesting;
21  import com.google.protobuf.ServiceException;
22  
23  import org.apache.commons.logging.Log;
24  import org.apache.commons.logging.LogFactory;
25  import org.apache.hadoop.conf.Configuration;
26  import org.apache.hadoop.hbase.classification.InterfaceAudience;
27  import org.apache.hadoop.hbase.client.ClusterConnection;
28  import org.apache.hadoop.hbase.client.Connection;
29  import org.apache.hadoop.hbase.client.ConnectionFactory;
30  import org.apache.hadoop.hbase.client.Delete;
31  import org.apache.hadoop.hbase.client.Get;
32  import org.apache.hadoop.hbase.client.HTable;
33  import org.apache.hadoop.hbase.client.Mutation;
34  import org.apache.hadoop.hbase.client.Put;
35  import org.apache.hadoop.hbase.client.RegionLocator;
36  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
37  import org.apache.hadoop.hbase.client.Result;
38  import org.apache.hadoop.hbase.client.ResultScanner;
39  import org.apache.hadoop.hbase.client.Scan;
40  import org.apache.hadoop.hbase.client.Table;
41  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
42  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
43  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
44  import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
45  import org.apache.hadoop.hbase.util.Bytes;
46  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
47  import org.apache.hadoop.hbase.util.Pair;
48  import org.apache.hadoop.hbase.util.PairOfSameType;
49  import org.apache.hadoop.hbase.util.Threads;
50  import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
51  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
52  
53  import java.io.IOException;
54  import java.io.InterruptedIOException;
55  import java.util.ArrayList;
56  import java.util.List;
57  import java.util.Map;
58  import java.util.NavigableMap;
59  import java.util.Set;
60  import java.util.SortedMap;
61  import java.util.TreeMap;
62  import java.util.regex.Matcher;
63  import java.util.regex.Pattern;
64  
65  /**
66   * Read/write operations on region and assignment information stored in
67   * <code>hbase:meta</code>.
68   *
69   * Some of the methods of this class take ZooKeeperWatcher as a param. The only reason
70   * for this is because when used on client-side (like from HBaseAdmin), we want to use
71   * short-living connection (opened before each operation, closed right after), while
72   * when used on HM or HRS (like in AssignmentManager) we want permanent connection.
73   */
74  @InterfaceAudience.Private
75  public class MetaTableAccessor {
76  
77    /*
78     * HBASE-10070 adds a replicaId to HRI, meaning more than one HRI can be defined for the
79     * same table range (table, startKey, endKey). For every range, there will be at least one
80     * HRI defined which is called default replica.
81     *
82     * Meta layout (as of 0.98 + HBASE-10070) is like:
83     * For each table range, there is a single row, formatted like:
84     * <tableName>,<startKey>,<regionId>,<encodedRegionName>. This row corresponds to the regionName
85     * of the default region replica.
86     * Columns are:
87     * info:regioninfo         => contains serialized HRI for the default region replica
88     * info:server             => contains hostname:port (in string form) for the server hosting
89     *                            the default regionInfo replica
90     * info:server_<replicaId> => contains hostname:port (in string form) for the server hosting the
91     *                            regionInfo replica with replicaId
92     * info:serverstartcode    => contains server start code (in binary long form) for the server
93     *                            hosting the default regionInfo replica
94     * info:serverstartcode_<replicaId> => contains server start code (in binary long form) for the
95     *                                     server hosting the regionInfo replica with replicaId
96     * info:seqnumDuringOpen    => contains seqNum (in binary long form) for the region at the time
97     *                             the server opened the region with default replicaId
98     * info:seqnumDuringOpen_<replicaId> => contains seqNum (in binary long form) for the region at
99     *                             the time the server opened the region with replicaId
100    * info:splitA              => contains a serialized HRI for the first daughter region if the
101    *                             region is split
102    * info:splitB              => contains a serialized HRI for the second daughter region if the
103    *                             region is split
104    * info:mergeA              => contains a serialized HRI for the first parent region if the
105    *                             region is the result of a merge
106    * info:mergeB              => contains a serialized HRI for the second parent region if the
107    *                             region is the result of a merge
108    *
109    * The actual layout of meta should be encapsulated inside MetaTableAccessor methods,
110    * and should not leak out of it (through Result objects, etc)
111    */
112 
  private static final Log LOG = LogFactory.getLog(MetaTableAccessor.class);

  // Row-key prefix shared by all hbase:meta region rows, i.e. "hbase:meta,".
  static final byte [] META_REGION_PREFIX;
  static {
    // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX.
    // FIRST_META_REGIONINFO == 'hbase:meta,,1'.  META_REGION_PREFIX == 'hbase:meta,'
    // Dropping the last two bytes (",1" == delimiter + regionId) leaves the
    // prefix ending in the trailing delimiter.
    int len = HRegionInfo.FIRST_META_REGIONINFO.getRegionName().length - 2;
    META_REGION_PREFIX = new byte [len];
    System.arraycopy(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), 0,
      META_REGION_PREFIX, 0, len);
  }

  /** The delimiter for meta columns for replicaIds &gt; 0 */
  protected static final char META_REPLICA_ID_DELIMITER = '_';

  /** A regex for parsing server columns from meta. See above javadoc for meta layout */
  // Matches the bare "server" qualifier (default replica) or "server_<4 hex digits>";
  // group(1) captures the "_<replicaId>" suffix when present.
  private static final Pattern SERVER_COLUMN_PATTERN
    = Pattern.compile("^server(_[0-9a-fA-F]{4})?$");
131 
132   ////////////////////////
133   // Reading operations //
134   ////////////////////////
135 
136  /**
137    * Performs a full scan of a <code>hbase:meta</code> table.
138    * @return List of {@link org.apache.hadoop.hbase.client.Result}
139    * @throws IOException
140    */
141   public static List<Result> fullScanOfMeta(Connection connection)
142   throws IOException {
143     CollectAllVisitor v = new CollectAllVisitor();
144     fullScan(connection, v, null);
145     return v.getResults();
146   }
147 
148   /**
149    * Performs a full scan of <code>hbase:meta</code>.
150    * @param connection connection we're using
151    * @param visitor Visitor invoked against each row.
152    * @throws IOException
153    */
154   public static void fullScan(Connection connection,
155       final Visitor visitor)
156   throws IOException {
157     fullScan(connection, visitor, null);
158   }
159 
160   /**
161    * Performs a full scan of <code>hbase:meta</code>.
162    * @param connection connection we're using
163    * @return List of {@link Result}
164    * @throws IOException
165    */
166   public static List<Result> fullScan(Connection connection)
167     throws IOException {
168     CollectAllVisitor v = new CollectAllVisitor();
169     fullScan(connection, v, null);
170     return v.getResults();
171   }
172 
173   /**
174    * Callers should call close on the returned {@link Table} instance.
175    * @param connection connection we're using to access Meta
176    * @return An {@link Table} for <code>hbase:meta</code>
177    * @throws IOException
178    */
179   static Table getMetaHTable(final Connection connection)
180   throws IOException {
181     // We used to pass whole CatalogTracker in here, now we just pass in Connection
182     if (connection == null || connection.isClosed()) {
183       throw new NullPointerException("No connection");
184     }
185     // If the passed in 'connection' is 'managed' -- i.e. every second test uses
186     // a Table or an HBaseAdmin with managed connections -- then doing
187     // connection.getTable will throw an exception saying you are NOT to use
188     // managed connections getting tables.  Leaving this as it is for now. Will
189     // revisit when inclined to change all tests.  User code probaby makes use of
190     // managed connections too so don't change it till post hbase 1.0.
191     //
192     // There should still be a way to use this method with an unmanaged connection.
193     if (connection instanceof ClusterConnection) {
194       if (((ClusterConnection) connection).isManaged()) {
195         return new HTable(TableName.META_TABLE_NAME, (ClusterConnection) connection);
196       }
197     }
198     return connection.getTable(TableName.META_TABLE_NAME);
199   }
200 
201   /**
202    * @param t Table to use (will be closed when done).
203    * @param g Get to run
204    * @throws IOException
205    */
206   private static Result get(final Table t, final Get g) throws IOException {
207     try {
208       return t.get(g);
209     } finally {
210       t.close();
211     }
212   }
213 
214   /**
215    * Gets the region info and assignment for the specified region.
216    * @param connection connection we're using
217    * @param regionName Region to lookup.
218    * @return Location and HRegionInfo for <code>regionName</code>
219    * @throws IOException
220    * @deprecated use {@link #getRegionLocation(Connection, byte[])} instead
221    */
222   @Deprecated
223   public static Pair<HRegionInfo, ServerName> getRegion(Connection connection, byte [] regionName)
224     throws IOException {
225     HRegionLocation location = getRegionLocation(connection, regionName);
226     return location == null
227       ? null
228       : new Pair<HRegionInfo, ServerName>(location.getRegionInfo(), location.getServerName());
229   }
230 
231   /**
232    * Returns the HRegionLocation from meta for the given region
233    * @param connection connection we're using
234    * @param regionName region we're looking for
235    * @return HRegionLocation for the given region
236    * @throws IOException
237    */
238   public static HRegionLocation getRegionLocation(Connection connection,
239                                                   byte[] regionName) throws IOException {
240     byte[] row = regionName;
241     HRegionInfo parsedInfo = null;
242     try {
243       parsedInfo = parseRegionInfoFromRegionName(regionName);
244       row = getMetaKeyForRegion(parsedInfo);
245     } catch (Exception parseEx) {
246       // Ignore. This is used with tableName passed as regionName.
247     }
248     Get get = new Get(row);
249     get.addFamily(HConstants.CATALOG_FAMILY);
250     Result r = get(getMetaHTable(connection), get);
251     RegionLocations locations = getRegionLocations(r);
252     return locations == null
253       ? null
254       : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
255   }
256 
257   /**
258    * Returns the HRegionLocation from meta for the given region
259    * @param connection connection we're using
260    * @param regionInfo region information
261    * @return HRegionLocation for the given region
262    * @throws IOException
263    */
264   public static HRegionLocation getRegionLocation(Connection connection,
265                                                   HRegionInfo regionInfo) throws IOException {
266     byte[] row = getMetaKeyForRegion(regionInfo);
267     Get get = new Get(row);
268     get.addFamily(HConstants.CATALOG_FAMILY);
269     Result r = get(getMetaHTable(connection), get);
270     return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
271   }
272 
273   /** Returns the row key to use for this regionInfo */
274   public static byte[] getMetaKeyForRegion(HRegionInfo regionInfo) {
275     return RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo).getRegionName();
276   }
277 
  /** Returns an HRI parsed from this regionName. Not all the fields of the HRI
   * is stored in the name, so the returned object should only be used for the fields
   * in the regionName.
   */
  protected static HRegionInfo parseRegionInfoFromRegionName(byte[] regionName)
    throws IOException {
    byte[][] fields = HRegionInfo.parseRegionName(regionName);
    // fields[0] = table name, fields[1] = start key, fields[2] = regionId (decimal string).
    long regionId =  Long.parseLong(Bytes.toString(fields[2]));
    // Optional fields[3] is the replicaId encoded in hex; absent means default replica (0).
    int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
    // The end key is not encoded in a region name, so the start key is passed
    // for both start and end -- do NOT rely on getEndKey() of the result.
    return new HRegionInfo(
      TableName.valueOf(fields[0]), fields[1], fields[1], false, regionId, replicaId);
  }
290 
291   /**
292    * Gets the result in hbase:meta for the specified region.
293    * @param connection connection we're using
294    * @param regionName region we're looking for
295    * @return result of the specified region
296    * @throws IOException
297    */
298   public static Result getRegionResult(Connection connection,
299       byte[] regionName) throws IOException {
300     Get get = new Get(regionName);
301     get.addFamily(HConstants.CATALOG_FAMILY);
302     return get(getMetaHTable(connection), get);
303   }
304 
305   /**
306    * Get regions from the merge qualifier of the specified merged region
307    * @return null if it doesn't contain merge qualifier, else two merge regions
308    * @throws IOException
309    */
310   public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
311       Connection connection, byte[] regionName) throws IOException {
312     Result result = getRegionResult(connection, regionName);
313     HRegionInfo mergeA = getHRegionInfo(result, HConstants.MERGEA_QUALIFIER);
314     HRegionInfo mergeB = getHRegionInfo(result, HConstants.MERGEB_QUALIFIER);
315     if (mergeA == null && mergeB == null) {
316       return null;
317     }
318     return new Pair<HRegionInfo, HRegionInfo>(mergeA, mergeB);
319  }
320 
321   /**
322    * Checks if the specified table exists.  Looks at the hbase:meta table hosted on
323    * the specified server.
324    * @param connection connection we're using
325    * @param tableName table to check
326    * @return true if the table exists in meta, false if not
327    * @throws IOException
328    */
329   public static boolean tableExists(Connection connection,
330       final TableName tableName)
331   throws IOException {
332     if (tableName.equals(TableName.META_TABLE_NAME)) {
333       // Catalog tables always exist.
334       return true;
335     }
336     // Make a version of ResultCollectingVisitor that only collects the first
337     CollectingVisitor<HRegionInfo> visitor = new CollectingVisitor<HRegionInfo>() {
338       private HRegionInfo current = null;
339 
340       @Override
341       public boolean visit(Result r) throws IOException {
342         RegionLocations locations = getRegionLocations(r);
343         if (locations == null || locations.getRegionLocation().getRegionInfo() == null) {
344           LOG.warn("No serialized HRegionInfo in " + r);
345           return true;
346         }
347         this.current = locations.getRegionLocation().getRegionInfo();
348         if (this.current == null) {
349           LOG.warn("No serialized HRegionInfo in " + r);
350           return true;
351         }
352         if (!isInsideTable(this.current, tableName)) return false;
353         // Else call super and add this Result to the collection.
354         super.visit(r);
355         // Stop collecting regions from table after we get one.
356         return false;
357       }
358 
359       @Override
360       void add(Result r) {
361         // Add the current HRI.
362         this.results.add(this.current);
363       }
364     };
365     fullScan(connection, visitor, getTableStartRowForMeta(tableName));
366     // If visitor has results >= 1 then table exists.
367     return visitor.getResults().size() >= 1;
368   }
369 
370   /**
371    * Gets all of the regions of the specified table.
372    * @param zkw zookeeper connection to access meta table
373    * @param connection connection we're using
374    * @param tableName table we're looking for
375    * @return Ordered list of {@link HRegionInfo}.
376    * @throws IOException
377    */
378   public static List<HRegionInfo> getTableRegions(ZooKeeperWatcher zkw,
379       Connection connection, TableName tableName)
380   throws IOException {
381     return getTableRegions(zkw, connection, tableName, false);
382   }
383 
384   /**
385    * Gets all of the regions of the specified table.
386    * @param zkw zookeeper connection to access meta table
387    * @param connection connection we're using
388    * @param tableName table we're looking for
389    * @param excludeOfflinedSplitParents If true, do not include offlined split
390    * parents in the return.
391    * @return Ordered list of {@link HRegionInfo}.
392    * @throws IOException
393    */
394   public static List<HRegionInfo> getTableRegions(ZooKeeperWatcher zkw,
395       Connection connection, TableName tableName, final boolean excludeOfflinedSplitParents)
396         throws IOException {
397     List<Pair<HRegionInfo, ServerName>> result = null;
398       result = getTableRegionsAndLocations(zkw, connection, tableName,
399         excludeOfflinedSplitParents);
400     return getListOfHRegionInfos(result);
401   }
402 
403   static List<HRegionInfo> getListOfHRegionInfos(final List<Pair<HRegionInfo, ServerName>> pairs) {
404     if (pairs == null || pairs.isEmpty()) return null;
405     List<HRegionInfo> result = new ArrayList<HRegionInfo>(pairs.size());
406     for (Pair<HRegionInfo, ServerName> pair: pairs) {
407       result.add(pair.getFirst());
408     }
409     return result;
410   }
411 
412   /**
413    * @param current region of current table we're working with
414    * @param tableName table we're checking against
415    * @return True if <code>current</code> tablename is equal to
416    * <code>tableName</code>
417    */
418   static boolean isInsideTable(final HRegionInfo current, final TableName tableName) {
419     return tableName.equals(current.getTable());
420   }
421 
422   /**
423    * @param tableName table we're working with
424    * @return Place to start Scan in <code>hbase:meta</code> when passed a
425    * <code>tableName</code>; returns &lt;tableName&rt; &lt;,&rt; &lt;,&rt;
426    */
427   static byte [] getTableStartRowForMeta(TableName tableName) {
428     byte [] startRow = new byte[tableName.getName().length + 2];
429     System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length);
430     startRow[startRow.length - 2] = HConstants.DELIMITER;
431     startRow[startRow.length - 1] = HConstants.DELIMITER;
432     return startRow;
433   }
434 
435   /**
436    * This method creates a Scan object that will only scan catalog rows that
437    * belong to the specified table. It doesn't specify any columns.
438    * This is a better alternative to just using a start row and scan until
439    * it hits a new table since that requires parsing the HRI to get the table
440    * name.
441    * @param tableName bytes of table's name
442    * @return configured Scan object
443    */
444   public static Scan getScanForTableName(TableName tableName) {
445     String strName = tableName.getNameAsString();
446     // Start key is just the table name with delimiters
447     byte[] startKey = Bytes.toBytes(strName + ",,");
448     // Stop key appends the smallest possible char to the table name
449     byte[] stopKey = Bytes.toBytes(strName + " ,,");
450 
451     Scan scan = new Scan(startKey);
452     scan.setStopRow(stopKey);
453     return scan;
454   }
455 
456   /**
457    * @param zkw zookeeper connection to access meta table
458    * @param connection connection we're using
459    * @param tableName table we're looking for
460    * @return Return list of regioninfos and server.
461    * @throws IOException
462    */
463   public static List<Pair<HRegionInfo, ServerName>>
464   getTableRegionsAndLocations(ZooKeeperWatcher zkw,
465                               Connection connection, TableName tableName)
466   throws IOException {
467     return getTableRegionsAndLocations(zkw, connection, tableName, true);
468   }
469 
  /**
   * @param zkw ZooKeeperWatcher instance we're using to get hbase:meta location
   * @param connection connection we're using
   * @param tableName table to work with
   * @param excludeOfflinedSplitParents if true, skip regions that are offlined split parents
   * @return Return list of regioninfos and server addresses.
   * @throws IOException
   */
  public static List<Pair<HRegionInfo, ServerName>> getTableRegionsAndLocations(
      ZooKeeperWatcher zkw, Connection connection, final TableName tableName,
      final boolean excludeOfflinedSplitParents) throws IOException {

    if (tableName.equals(TableName.META_TABLE_NAME)) {
      // If meta, do a bit of special handling.
      // Meta's own region is not listed in meta; its location comes from ZK.
      ServerName serverName = new MetaTableLocator().getMetaRegionLocation(zkw);
      List<Pair<HRegionInfo, ServerName>> list =
        new ArrayList<Pair<HRegionInfo, ServerName>>();
      list.add(new Pair<HRegionInfo, ServerName>(HRegionInfo.FIRST_META_REGIONINFO,
        serverName));
      return list;
    }
    // Make a version of CollectingVisitor that collects HRegionInfo and ServerAddress
    CollectingVisitor<Pair<HRegionInfo, ServerName>> visitor =
      new CollectingVisitor<Pair<HRegionInfo, ServerName>>() {
        // Locations of the row being visited; visit() must set this before
        // super.visit(r) fires add(r), which reads it. Order-sensitive.
        private RegionLocations current = null;

        @Override
        public boolean visit(Result r) throws IOException {
          current = getRegionLocations(r);
          if (current == null || current.getRegionLocation().getRegionInfo() == null) {
            LOG.warn("No serialized HRegionInfo in " + r);
            return true;
          }
          HRegionInfo hri = current.getRegionLocation().getRegionInfo();
          // Meta rows are sorted: once past the table's range, stop the scan.
          if (!isInsideTable(hri, tableName)) return false;
          if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
          // Else call super and add this Result to the collection.
          return super.visit(r);
        }

        @Override
        void add(Result r) {
          if (current == null) {
            return;
          }
          // One pair per non-null replica location present in the row.
          for (HRegionLocation loc : current.getRegionLocations()) {
            if (loc != null) {
              this.results.add(new Pair<HRegionInfo, ServerName>(
                loc.getRegionInfo(), loc.getServerName()));
            }
          }
        }
      };
    fullScan(connection, visitor, getTableStartRowForMeta(tableName));
    return visitor.getResults();
  }
525 
526   /**
527    * @param connection connection we're using
528    * @param serverName server whose regions we're interested in
529    * @return List of user regions installed on this server (does not include
530    * catalog regions).
531    * @throws IOException
532    */
533   public static NavigableMap<HRegionInfo, Result>
534   getServerUserRegions(Connection connection, final ServerName serverName)
535     throws IOException {
536     final NavigableMap<HRegionInfo, Result> hris = new TreeMap<HRegionInfo, Result>();
537     // Fill the above hris map with entries from hbase:meta that have the passed
538     // servername.
539     CollectingVisitor<Result> v = new CollectingVisitor<Result>() {
540       @Override
541       void add(Result r) {
542         if (r == null || r.isEmpty()) return;
543         RegionLocations locations = getRegionLocations(r);
544         if (locations == null) return;
545         for (HRegionLocation loc : locations.getRegionLocations()) {
546           if (loc != null) {
547             if (loc.getServerName() != null && loc.getServerName().equals(serverName)) {
548               hris.put(loc.getRegionInfo(), r);
549             }
550           }
551         }
552       }
553     };
554     fullScan(connection, v);
555     return hris;
556   }
557 
558   public static void fullScanMetaAndPrint(Connection connection)
559     throws IOException {
560     Visitor v = new Visitor() {
561       @Override
562       public boolean visit(Result r) throws IOException {
563         if (r ==  null || r.isEmpty()) return true;
564         LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r);
565         RegionLocations locations = getRegionLocations(r);
566         if (locations == null) return true;
567         for (HRegionLocation loc : locations.getRegionLocations()) {
568           if (loc != null) {
569             LOG.info("fullScanMetaAndPrint.HRI Print= " + loc.getRegionInfo());
570           }
571         }
572         return true;
573       }
574     };
575     fullScan(connection, v);
576   }
577 
578   /**
579    * Performs a full scan of a catalog table.
580    * @param connection connection we're using
581    * @param visitor Visitor invoked against each row.
582    * @param startrow Where to start the scan. Pass null if want to begin scan
583    * at first row.
584    * <code>hbase:meta</code>, the default (pass false to scan hbase:meta)
585    * @throws IOException
586    */
587   public static void fullScan(Connection connection,
588     final Visitor visitor, final byte [] startrow)
589   throws IOException {
590     Scan scan = new Scan();
591     if (startrow != null) scan.setStartRow(startrow);
592     if (startrow == null) {
593       int caching = connection.getConfiguration()
594           .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
595       scan.setCaching(caching);
596     }
597     scan.addFamily(HConstants.CATALOG_FAMILY);
598     Table metaTable = getMetaHTable(connection);
599     ResultScanner scanner = null;
600     try {
601       scanner = metaTable.getScanner(scan);
602       Result data;
603       while((data = scanner.next()) != null) {
604         if (data.isEmpty()) continue;
605         // Break if visit returns false.
606         if (!visitor.visit(data)) break;
607       }
608     } finally {
609       if (scanner != null) scanner.close();
610       metaTable.close();
611     }
612   }
613 
  /**
   * Returns the column family used for meta columns.
   * All catalog state described in the class comment (regioninfo, server,
   * startcode, seqnum, split/merge qualifiers) lives in this single family.
   * @return HConstants.CATALOG_FAMILY.
   */
  protected static byte[] getFamily() {
    return HConstants.CATALOG_FAMILY;
  }
621 
  /**
   * Returns the column qualifier for serialized region info
   * (the "info:regioninfo" column holding the default replica's HRI).
   * @return HConstants.REGIONINFO_QUALIFIER
   */
  protected static byte[] getRegionInfoColumn() {
    return HConstants.REGIONINFO_QUALIFIER;
  }
629 
630   /**
631    * Returns the column qualifier for server column for replicaId
632    * @param replicaId the replicaId of the region
633    * @return a byte[] for server column qualifier
634    */
635   @VisibleForTesting
636   public static byte[] getServerColumn(int replicaId) {
637     return replicaId == 0
638       ? HConstants.SERVER_QUALIFIER
639       : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
640       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
641   }
642 
643   /**
644    * Returns the column qualifier for server start code column for replicaId
645    * @param replicaId the replicaId of the region
646    * @return a byte[] for server start code column qualifier
647    */
648   @VisibleForTesting
649   public static byte[] getStartCodeColumn(int replicaId) {
650     return replicaId == 0
651       ? HConstants.STARTCODE_QUALIFIER
652       : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
653       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
654   }
655 
656   /**
657    * Returns the column qualifier for seqNum column for replicaId
658    * @param replicaId the replicaId of the region
659    * @return a byte[] for seqNum column qualifier
660    */
661   @VisibleForTesting
662   public static byte[] getSeqNumColumn(int replicaId) {
663     return replicaId == 0
664       ? HConstants.SEQNUM_QUALIFIER
665       : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
666       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
667   }
668 
669   /**
670    * Parses the replicaId from the server column qualifier. See top of the class javadoc
671    * for the actual meta layout
672    * @param serverColumn the column qualifier
673    * @return an int for the replicaId
674    */
675   @VisibleForTesting
676   static int parseReplicaIdFromServerColumn(byte[] serverColumn) {
677     String serverStr = Bytes.toString(serverColumn);
678 
679     Matcher matcher = SERVER_COLUMN_PATTERN.matcher(serverStr);
680     if (matcher.matches() && matcher.groupCount() > 0) {
681       String group = matcher.group(1);
682       if (group != null && group.length() > 0) {
683         return Integer.parseInt(group.substring(1), 16);
684       } else {
685         return 0;
686       }
687     }
688     return -1;
689   }
690 
691   /**
692    * Returns a {@link ServerName} from catalog table {@link Result}.
693    * @param r Result to pull from
694    * @return A ServerName instance or null if necessary fields not found or empty.
695    */
696   private static ServerName getServerName(final Result r, final int replicaId) {
697     byte[] serverColumn = getServerColumn(replicaId);
698     Cell cell = r.getColumnLatestCell(getFamily(), serverColumn);
699     if (cell == null || cell.getValueLength() == 0) return null;
700     String hostAndPort = Bytes.toString(
701       cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
702     byte[] startcodeColumn = getStartCodeColumn(replicaId);
703     cell = r.getColumnLatestCell(getFamily(), startcodeColumn);
704     if (cell == null || cell.getValueLength() == 0) return null;
705     return ServerName.valueOf(hostAndPort,
706       Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
707   }
708 
709   /**
710    * The latest seqnum that the server writing to meta observed when opening the region.
711    * E.g. the seqNum when the result of {@link #getServerName(Result, int)} was written.
712    * @param r Result to pull the seqNum from
713    * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
714    */
715   private static long getSeqNumDuringOpen(final Result r, final int replicaId) {
716     Cell cell = r.getColumnLatestCell(getFamily(), getSeqNumColumn(replicaId));
717     if (cell == null || cell.getValueLength() == 0) return HConstants.NO_SEQNUM;
718     return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
719   }
720 
  /**
   * Returns an HRegionLocationList extracted from the result.
   * @param r a row read from the catalog table
   * @return an HRegionLocationList containing all locations for the region range or null if
   *  we can't deserialize the result.
   */
  public static RegionLocations getRegionLocations(final Result r) {
    if (r == null) return null;
    // The default replica's serialized HRI must be present; without it the row is unusable.
    HRegionInfo regionInfo = getHRegionInfo(r, getRegionInfoColumn());
    if (regionInfo == null) return null;

    List<HRegionLocation> locations = new ArrayList<HRegionLocation>(1);
    NavigableMap<byte[],NavigableMap<byte[],byte[]>> familyMap = r.getNoVersionMap();

    // The default replica's (replicaId 0) location always goes first.
    locations.add(getRegionLocation(r, regionInfo, 0));

    NavigableMap<byte[], byte[]> infoMap = familyMap.get(getFamily());
    if (infoMap == null) return new RegionLocations(locations);

    // iterate until all serverName columns are seen
    int replicaId = 0;
    byte[] serverColumn = getServerColumn(replicaId);
    // Qualifiers are sorted, so the "server_<id>" columns immediately follow
    // the bare "server" column; tailMap (exclusive) starts just past it.
    SortedMap<byte[], byte[]> serverMap = infoMap.tailMap(serverColumn, false);
    if (serverMap.isEmpty()) return new RegionLocations(locations);

    for (Map.Entry<byte[], byte[]> entry : serverMap.entrySet()) {
      replicaId = parseReplicaIdFromServerColumn(entry.getKey());
      if (replicaId < 0) {
        // First non-server qualifier ends the run of server columns
        // (the map is sorted), so we can stop here.
        break;
      }

      locations.add(getRegionLocation(r, regionInfo, replicaId));
    }

    return new RegionLocations(locations);
  }
756 
757   /**
758    * Returns the HRegionLocation parsed from the given meta row Result
759    * for the given regionInfo and replicaId. The regionInfo can be the default region info
760    * for the replica.
761    * @param r the meta row result
762    * @param regionInfo RegionInfo for default replica
763    * @param replicaId the replicaId for the HRegionLocation
764    * @return HRegionLocation parsed from the given meta row Result for the given replicaId
765    */
766   private static HRegionLocation getRegionLocation(final Result r, final HRegionInfo regionInfo,
767                                                    final int replicaId) {
768     ServerName serverName = getServerName(r, replicaId);
769     long seqNum = getSeqNumDuringOpen(r, replicaId);
770     HRegionInfo replicaInfo = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, replicaId);
771     return new HRegionLocation(replicaInfo, serverName, seqNum);
772   }
773 
774   /**
775    * Returns HRegionInfo object from the column
776    * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog
777    * table Result.
778    * @param data a Result object from the catalog table scan
779    * @return HRegionInfo or null
780    */
781   public static HRegionInfo getHRegionInfo(Result data) {
782     return getHRegionInfo(data, HConstants.REGIONINFO_QUALIFIER);
783   }
784 
785   /**
786    * Returns the HRegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and
787    * <code>qualifier</code> of the catalog table result.
788    * @param r a Result object from the catalog table scan
789    * @param qualifier Column family qualifier
790    * @return An HRegionInfo instance or null.
791    */
792   private static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier) {
793     Cell cell = r.getColumnLatestCell(getFamily(), qualifier);
794     if (cell == null) return null;
795     return HRegionInfo.parseFromOrNull(cell.getValueArray(),
796       cell.getValueOffset(), cell.getValueLength());
797   }
798 
799   /**
800    * Returns the daughter regions by reading the corresponding columns of the catalog table
801    * Result.
802    * @param data a Result object from the catalog table scan
803    * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split
804    * parent
805    */
806   public static PairOfSameType<HRegionInfo> getDaughterRegions(Result data) {
807     HRegionInfo splitA = getHRegionInfo(data, HConstants.SPLITA_QUALIFIER);
808     HRegionInfo splitB = getHRegionInfo(data, HConstants.SPLITB_QUALIFIER);
809 
810     return new PairOfSameType<HRegionInfo>(splitA, splitB);
811   }
812 
813   /**
814    * Returns the merge regions by reading the corresponding columns of the catalog table
815    * Result.
816    * @param data a Result object from the catalog table scan
817    * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split
818    * parent
819    */
820   public static PairOfSameType<HRegionInfo> getMergeRegions(Result data) {
821     HRegionInfo mergeA = getHRegionInfo(data, HConstants.MERGEA_QUALIFIER);
822     HRegionInfo mergeB = getHRegionInfo(data, HConstants.MERGEB_QUALIFIER);
823 
824     return new PairOfSameType<HRegionInfo>(mergeA, mergeB);
825   }
826 
827   /**
828    * Implementations 'visit' a catalog table row.
829    */
830   public interface Visitor {
831     /**
832      * Visit the catalog table row.
833      * @param r A row from catalog table
834      * @return True if we are to proceed scanning the table, else false if
835      * we are to stop now.
836      */
837     boolean visit(final Result r) throws IOException;
838   }
839 
840   /**
841    * A {@link Visitor} that collects content out of passed {@link Result}.
842    */
843   static abstract class CollectingVisitor<T> implements Visitor {
844     final List<T> results = new ArrayList<T>();
845     @Override
846     public boolean visit(Result r) throws IOException {
847       if (r ==  null || r.isEmpty()) return true;
848       add(r);
849       return true;
850     }
851 
852     abstract void add(Result r);
853 
854     /**
855      * @return Collected results; wait till visits complete to collect all
856      * possible results
857      */
858     List<T> getResults() {
859       return this.results;
860     }
861   }
862 
863   /**
864    * Collects all returned.
865    */
866   static class CollectAllVisitor extends CollectingVisitor<Result> {
867     @Override
868     void add(Result r) {
869       this.results.add(r);
870     }
871   }
872 
873   /**
874    * Count regions in <code>hbase:meta</code> for passed table.
875    * @param c Configuration object
876    * @param tableName table name to count regions for
877    * @return Count or regions in table <code>tableName</code>
878    * @throws IOException
879    */
880   @Deprecated
881   public static int getRegionCount(final Configuration c, final String tableName)
882       throws IOException {
883     return getRegionCount(c, TableName.valueOf(tableName));
884   }
885 
886   /**
887    * Count regions in <code>hbase:meta</code> for passed table.
888    * @param c Configuration object
889    * @param tableName table name to count regions for
890    * @return Count or regions in table <code>tableName</code>
891    * @throws IOException
892    */
893   public static int getRegionCount(final Configuration c, final TableName tableName)
894   throws IOException {
895     try (Connection connection = ConnectionFactory.createConnection(c)) {
896       return getRegionCount(connection, tableName);
897     }
898   }
899 
900   /**
901    * Count regions in <code>hbase:meta</code> for passed table.
902    * @param connection Connection object
903    * @param tableName table name to count regions for
904    * @return Count or regions in table <code>tableName</code>
905    * @throws IOException
906    */
907   public static int getRegionCount(final Connection connection, final TableName tableName)
908   throws IOException {
909     try (RegionLocator locator = connection.getRegionLocator(tableName)) {
910       List<HRegionLocation> locations = locator.getAllRegionLocations();
911       return locations == null? 0: locations.size();
912     }
913   }
914 
915   ////////////////////////
916   // Editing operations //
917   ////////////////////////
918 
919   /**
920    * Generates and returns a Put containing the region into for the catalog table
921    */
922   public static Put makePutFromRegionInfo(HRegionInfo regionInfo)
923     throws IOException {
924     Put put = new Put(regionInfo.getRegionName());
925     addRegionInfo(put, regionInfo);
926     return put;
927   }
928 
929   /**
930    * Generates and returns a Delete containing the region info for the catalog
931    * table
932    */
933   public static Delete makeDeleteFromRegionInfo(HRegionInfo regionInfo) {
934     if (regionInfo == null) {
935       throw new IllegalArgumentException("Can't make a delete for null region");
936     }
937     Delete delete = new Delete(regionInfo.getRegionName());
938     return delete;
939   }
940 
941   /**
942    * Adds split daughters to the Put
943    */
944   public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
945     if (splitA != null) {
946       put.addImmutable(
947         HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, splitA.toByteArray());
948     }
949     if (splitB != null) {
950       put.addImmutable(
951         HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, splitB.toByteArray());
952     }
953     return put;
954   }
955 
956   /**
957    * Put the passed <code>p</code> to the <code>hbase:meta</code> table.
958    * @param connection connection we're using
959    * @param p Put to add to hbase:meta
960    * @throws IOException
961    */
962   static void putToMetaTable(final Connection connection, final Put p)
963     throws IOException {
964     put(getMetaHTable(connection), p);
965   }
966 
967   /**
968    * @param t Table to use (will be closed when done).
969    * @param p put to make
970    * @throws IOException
971    */
972   private static void put(final Table t, final Put p) throws IOException {
973     try {
974       t.put(p);
975     } finally {
976       t.close();
977     }
978   }
979 
980   /**
981    * Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
982    * @param connection connection we're using
983    * @param ps Put to add to hbase:meta
984    * @throws IOException
985    */
986   public static void putsToMetaTable(final Connection connection, final List<Put> ps)
987     throws IOException {
988     Table t = getMetaHTable(connection);
989     try {
990       t.put(ps);
991     } finally {
992       t.close();
993     }
994   }
995 
996   /**
997    * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
998    * @param connection connection we're using
999    * @param d Delete to add to hbase:meta
1000    * @throws IOException
1001    */
1002   static void deleteFromMetaTable(final Connection connection, final Delete d)
1003     throws IOException {
1004     List<Delete> dels = new ArrayList<Delete>(1);
1005     dels.add(d);
1006     deleteFromMetaTable(connection, dels);
1007   }
1008 
1009   /**
1010    * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
1011    * @param connection connection we're using
1012    * @param deletes Deletes to add to hbase:meta  This list should support #remove.
1013    * @throws IOException
1014    */
1015   public static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
1016     throws IOException {
1017     Table t = getMetaHTable(connection);
1018     try {
1019       t.delete(deletes);
1020     } finally {
1021       t.close();
1022     }
1023   }
1024 
1025   /**
1026    * Deletes some replica columns corresponding to replicas for the passed rows
1027    * @param metaRows rows in hbase:meta
1028    * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
1029    * @param numReplicasToRemove how many replicas to remove
1030    * @param connection connection we're using to access meta table
1031    * @throws IOException
1032    */
1033   public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
1034     int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection)
1035       throws IOException {
1036     int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
1037     for (byte[] row : metaRows) {
1038       Delete deleteReplicaLocations = new Delete(row);
1039       for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
1040         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1041           getServerColumn(i));
1042         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1043           getSeqNumColumn(i));
1044         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1045           getStartCodeColumn(i));
1046       }
1047       deleteFromMetaTable(connection, deleteReplicaLocations);
1048     }
1049   }
1050 
1051   /**
1052    * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
1053    * @param connection connection we're using
1054    * @param mutations Puts and Deletes to execute on hbase:meta
1055    * @throws IOException
1056    */
1057   public static void mutateMetaTable(final Connection connection,
1058                                      final List<Mutation> mutations)
1059     throws IOException {
1060     Table t = getMetaHTable(connection);
1061     try {
1062       t.batch(mutations);
1063     } catch (InterruptedException e) {
1064       InterruptedIOException ie = new InterruptedIOException(e.getMessage());
1065       ie.initCause(e);
1066       throw ie;
1067     } finally {
1068       t.close();
1069     }
1070   }
1071 
1072   /**
1073    * Adds a hbase:meta row for the specified new region.
1074    * @param connection connection we're using
1075    * @param regionInfo region information
1076    * @throws IOException if problem connecting or updating meta
1077    */
1078   public static void addRegionToMeta(Connection connection,
1079                                      HRegionInfo regionInfo)
1080     throws IOException {
1081     putToMetaTable(connection, makePutFromRegionInfo(regionInfo));
1082     LOG.info("Added " + regionInfo.getRegionNameAsString());
1083   }
1084 
1085   /**
1086    * Adds a hbase:meta row for the specified new region to the given catalog table. The
1087    * Table is not flushed or closed.
1088    * @param meta the Table for META
1089    * @param regionInfo region information
1090    * @throws IOException if problem connecting or updating meta
1091    */
1092   public static void addRegionToMeta(Table meta, HRegionInfo regionInfo) throws IOException {
1093     addRegionToMeta(meta, regionInfo, null, null);
1094   }
1095 
1096   /**
1097    * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this
1098    * does not add its daughter's as different rows, but adds information about the daughters
1099    * in the same row as the parent. Use
1100    * {@link #splitRegion(org.apache.hadoop.hbase.client.Connection,
1101    *   HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
1102    * if you want to do that.
1103    * @param meta the Table for META
1104    * @param regionInfo region information
1105    * @param splitA first split daughter of the parent regionInfo
1106    * @param splitB second split daughter of the parent regionInfo
1107    * @throws IOException if problem connecting or updating meta
1108    */
1109   public static void addRegionToMeta(Table meta, HRegionInfo regionInfo,
1110                                      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
1111     Put put = makePutFromRegionInfo(regionInfo);
1112     addDaughtersToPut(put, splitA, splitB);
1113     meta.put(put);
1114     if (LOG.isDebugEnabled()) {
1115       LOG.debug("Added " + regionInfo.getRegionNameAsString());
1116     }
1117   }
1118 
1119   /**
1120    * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this
1121    * does not add its daughter's as different rows, but adds information about the daughters
1122    * in the same row as the parent. Use
1123    * {@link #splitRegion(Connection, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
1124    * if you want to do that.
1125    * @param connection connection we're using
1126    * @param regionInfo region information
1127    * @param splitA first split daughter of the parent regionInfo
1128    * @param splitB second split daughter of the parent regionInfo
1129    * @throws IOException if problem connecting or updating meta
1130    */
1131   public static void addRegionToMeta(Connection connection, HRegionInfo regionInfo,
1132                                      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
1133     Table meta = getMetaHTable(connection);
1134     try {
1135       addRegionToMeta(meta, regionInfo, splitA, splitB);
1136     } finally {
1137       meta.close();
1138     }
1139   }
1140 
1141   /**
1142    * Adds a hbase:meta row for each of the specified new regions.
1143    * @param connection connection we're using
1144    * @param regionInfos region information list
1145    * @throws IOException if problem connecting or updating meta
1146    */
1147   public static void addRegionsToMeta(Connection connection,
1148                                       List<HRegionInfo> regionInfos)
1149     throws IOException {
1150     List<Put> puts = new ArrayList<Put>();
1151     for (HRegionInfo regionInfo : regionInfos) {
1152       if (RegionReplicaUtil.isDefaultReplica(regionInfo)) {
1153         puts.add(makePutFromRegionInfo(regionInfo));
1154       }
1155     }
1156     putsToMetaTable(connection, puts);
1157     LOG.info("Added " + puts.size());
1158   }
1159 
1160   /**
1161    * Adds a daughter region entry to meta.
1162    * @param regionInfo the region to put
1163    * @param sn the location of the region
1164    * @param openSeqNum the latest sequence number obtained when the region was open
1165    */
1166   public static void addDaughter(final Connection connection,
1167       final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum)
1168       throws NotAllMetaRegionsOnlineException, IOException {
1169     Put put = new Put(regionInfo.getRegionName());
1170     addRegionInfo(put, regionInfo);
1171     if (sn != null) {
1172       addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
1173     }
1174     putToMetaTable(connection, put);
1175     LOG.info("Added daughter " + regionInfo.getEncodedName() +
1176       (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
1177   }
1178 
1179   /**
1180    * Merge the two regions into one in an atomic operation. Deletes the two
1181    * merging regions in hbase:meta and adds the merged region with the information of
1182    * two merging regions.
1183    * @param connection connection we're using
1184    * @param mergedRegion the merged region
1185    * @param regionA
1186    * @param regionB
1187    * @param sn the location of the region
1188    * @throws IOException
1189    */
1190   public static void mergeRegions(final Connection connection, HRegionInfo mergedRegion,
1191       HRegionInfo regionA, HRegionInfo regionB, ServerName sn) throws IOException {
1192     Table meta = getMetaHTable(connection);
1193     try {
1194       HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
1195 
1196       // Put for parent
1197       Put putOfMerged = makePutFromRegionInfo(copyOfMerged);
1198       putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
1199         regionA.toByteArray());
1200       putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
1201         regionB.toByteArray());
1202 
1203       // Deletes for merging regions
1204       Delete deleteA = makeDeleteFromRegionInfo(regionA);
1205       Delete deleteB = makeDeleteFromRegionInfo(regionB);
1206 
1207       // The merged is a new region, openSeqNum = 1 is fine.
1208       addLocation(putOfMerged, sn, 1, mergedRegion.getReplicaId());
1209 
1210       byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
1211         + HConstants.DELIMITER);
1212       multiMutate(meta, tableRow, putOfMerged, deleteA, deleteB);
1213     } finally {
1214       meta.close();
1215     }
1216   }
1217 
1218   /**
1219    * Splits the region into two in an atomic operation. Offlines the parent
1220    * region with the information that it is split into two, and also adds
1221    * the daughter regions. Does not add the location information to the daughter
1222    * regions since they are not open yet.
1223    * @param connection connection we're using
1224    * @param parent the parent region which is split
1225    * @param splitA Split daughter region A
1226    * @param splitB Split daughter region A
1227    * @param sn the location of the region
1228    */
1229   public static void splitRegion(final Connection connection,
1230                                  HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
1231                                  ServerName sn) throws IOException {
1232     Table meta = getMetaHTable(connection);
1233     try {
1234       HRegionInfo copyOfParent = new HRegionInfo(parent);
1235       copyOfParent.setOffline(true);
1236       copyOfParent.setSplit(true);
1237 
1238       //Put for parent
1239       Put putParent = makePutFromRegionInfo(copyOfParent);
1240       addDaughtersToPut(putParent, splitA, splitB);
1241 
1242       //Puts for daughters
1243       Put putA = makePutFromRegionInfo(splitA);
1244       Put putB = makePutFromRegionInfo(splitB);
1245 
1246       addLocation(putA, sn, 1, splitA.getReplicaId()); //new regions, openSeqNum = 1 is fine.
1247       addLocation(putB, sn, 1, splitB.getReplicaId());
1248 
1249       byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
1250       multiMutate(meta, tableRow, putParent, putA, putB);
1251     } finally {
1252       meta.close();
1253     }
1254   }
1255 
1256   /**
1257    * Performs an atomic multi-Mutate operation against the given table.
1258    */
1259   private static void multiMutate(Table table, byte[] row, Mutation... mutations)
1260       throws IOException {
1261     CoprocessorRpcChannel channel = table.coprocessorService(row);
1262     MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder
1263       = MultiRowMutationProtos.MutateRowsRequest.newBuilder();
1264     for (Mutation mutation : mutations) {
1265       if (mutation instanceof Put) {
1266         mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
1267           ClientProtos.MutationProto.MutationType.PUT, mutation));
1268       } else if (mutation instanceof Delete) {
1269         mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
1270           ClientProtos.MutationProto.MutationType.DELETE, mutation));
1271       } else {
1272         throw new DoNotRetryIOException("multi in MetaEditor doesn't support "
1273           + mutation.getClass().getName());
1274       }
1275     }
1276 
1277     MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
1278       MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
1279     try {
1280       service.mutateRows(null, mmrBuilder.build());
1281     } catch (ServiceException ex) {
1282       ProtobufUtil.toIOException(ex);
1283     }
1284   }
1285 
1286   /**
1287    * Updates the location of the specified region in hbase:meta to be the specified
1288    * server hostname and startcode.
1289    * <p>
1290    * Uses passed catalog tracker to get a connection to the server hosting
1291    * hbase:meta and makes edits to that region.
1292    *
1293    * @param connection connection we're using
1294    * @param regionInfo region to update location of
1295    * @param sn Server name
1296    * @throws IOException
1297    */
1298   public static void updateRegionLocation(Connection connection,
1299                                           HRegionInfo regionInfo, ServerName sn, long updateSeqNum)
1300     throws IOException {
1301     updateLocation(connection, regionInfo, sn, updateSeqNum);
1302   }
1303 
1304   /**
1305    * Updates the location of the specified region to be the specified server.
1306    * <p>
1307    * Connects to the specified server which should be hosting the specified
1308    * catalog region name to perform the edit.
1309    *
1310    * @param connection connection we're using
1311    * @param regionInfo region to update location of
1312    * @param sn Server name
1313    * @param openSeqNum the latest sequence number obtained when the region was open
1314    * @throws IOException In particular could throw {@link java.net.ConnectException}
1315    * if the server is down on other end.
1316    */
1317   private static void updateLocation(final Connection connection,
1318                                      HRegionInfo regionInfo, ServerName sn, long openSeqNum)
1319     throws IOException {
1320     // region replicas are kept in the primary region's row
1321     Put put = new Put(getMetaKeyForRegion(regionInfo));
1322     addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
1323     putToMetaTable(connection, put);
1324     LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
1325       " with server=" + sn);
1326   }
1327 
1328   /**
1329    * Deletes the specified region from META.
1330    * @param connection connection we're using
1331    * @param regionInfo region to be deleted from META
1332    * @throws IOException
1333    */
1334   public static void deleteRegion(Connection connection,
1335                                   HRegionInfo regionInfo)
1336     throws IOException {
1337     Delete delete = new Delete(regionInfo.getRegionName());
1338     deleteFromMetaTable(connection, delete);
1339     LOG.info("Deleted " + regionInfo.getRegionNameAsString());
1340   }
1341 
1342   /**
1343    * Deletes the specified regions from META.
1344    * @param connection connection we're using
1345    * @param regionsInfo list of regions to be deleted from META
1346    * @throws IOException
1347    */
1348   public static void deleteRegions(Connection connection,
1349                                    List<HRegionInfo> regionsInfo) throws IOException {
1350     List<Delete> deletes = new ArrayList<Delete>(regionsInfo.size());
1351     for (HRegionInfo hri: regionsInfo) {
1352       deletes.add(new Delete(hri.getRegionName()));
1353     }
1354     deleteFromMetaTable(connection, deletes);
1355     LOG.info("Deleted " + regionsInfo);
1356   }
1357 
1358   /**
1359    * Adds and Removes the specified regions from hbase:meta
1360    * @param connection connection we're using
1361    * @param regionsToRemove list of regions to be deleted from META
1362    * @param regionsToAdd list of regions to be added to META
1363    * @throws IOException
1364    */
1365   public static void mutateRegions(Connection connection,
1366                                    final List<HRegionInfo> regionsToRemove,
1367                                    final List<HRegionInfo> regionsToAdd)
1368     throws IOException {
1369     List<Mutation> mutation = new ArrayList<Mutation>();
1370     if (regionsToRemove != null) {
1371       for (HRegionInfo hri: regionsToRemove) {
1372         mutation.add(new Delete(hri.getRegionName()));
1373       }
1374     }
1375     if (regionsToAdd != null) {
1376       for (HRegionInfo hri: regionsToAdd) {
1377         mutation.add(makePutFromRegionInfo(hri));
1378       }
1379     }
1380     mutateMetaTable(connection, mutation);
1381     if (regionsToRemove != null && regionsToRemove.size() > 0) {
1382       LOG.debug("Deleted " + regionsToRemove);
1383     }
1384     if (regionsToAdd != null && regionsToAdd.size() > 0) {
1385       LOG.debug("Added " + regionsToAdd);
1386     }
1387   }
1388 
1389   /**
1390    * Overwrites the specified regions from hbase:meta
1391    * @param connection connection we're using
1392    * @param regionInfos list of regions to be added to META
1393    * @throws IOException
1394    */
1395   public static void overwriteRegions(Connection connection,
1396                                       List<HRegionInfo> regionInfos) throws IOException {
1397     deleteRegions(connection, regionInfos);
1398     // Why sleep? This is the easiest way to ensure that the previous deletes does not
1399     // eclipse the following puts, that might happen in the same ts from the server.
1400     // See HBASE-9906, and HBASE-9879. Once either HBASE-9879, HBASE-8770 is fixed,
1401     // or HBASE-9905 is fixed and meta uses seqIds, we do not need the sleep.
1402     Threads.sleep(20);
1403     addRegionsToMeta(connection, regionInfos);
1404     LOG.info("Overwritten " + regionInfos);
1405   }
1406 
1407   /**
1408    * Deletes merge qualifiers for the specified merged region.
1409    * @param connection connection we're using
1410    * @param mergedRegion
1411    * @throws IOException
1412    */
1413   public static void deleteMergeQualifiers(Connection connection,
1414                                            final HRegionInfo mergedRegion) throws IOException {
1415     Delete delete = new Delete(mergedRegion.getRegionName());
1416     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
1417     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
1418     deleteFromMetaTable(connection, delete);
1419     LOG.info("Deleted references in merged region "
1420       + mergedRegion.getRegionNameAsString() + ", qualifier="
1421       + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
1422       + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
1423   }
1424 
  /**
   * Adds the serialized HRegionInfo to the Put's info:regioninfo column.
   * @param p Put to mutate
   * @param hri region info to serialize into the Put
   * @return the same Put, for call chaining
   * @throws IOException if the region info cannot be serialized
   */
  private static Put addRegionInfo(final Put p, final HRegionInfo hri)
    throws IOException {
    p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
      hri.toByteArray());
    return p;
  }
1431 
1432   public static Put addLocation(final Put p, final ServerName sn, long openSeqNum, int replicaId){
1433     // using regionserver's local time as the timestamp of Put.
1434     // See: HBASE-11536
1435     long now = EnvironmentEdgeManager.currentTime();
1436     p.addImmutable(HConstants.CATALOG_FAMILY, getServerColumn(replicaId), now,
1437       Bytes.toBytes(sn.getHostAndPort()));
1438     p.addImmutable(HConstants.CATALOG_FAMILY, getStartCodeColumn(replicaId), now,
1439       Bytes.toBytes(sn.getStartcode()));
1440     p.addImmutable(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId), now,
1441       Bytes.toBytes(openSeqNum));
1442     return p;
1443   }
1444 }