/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.TreeMap;
import java.util.TreeSet;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;

/**
 * Makes decisions about the placement and movement of Regions across
 * RegionServers.
 *
 * <p>Cluster-wide load balancing occurs only when there are no regions in
 * transition, and is run on a fixed period of time using {@link #balanceCluster(Map)}.
 *
 * <p>Inline region placement with {@link #immediateAssignment} can be used when
 * the Master needs to handle closed regions that it currently does not have
 * a destination set for.  This can happen during master failover.
 *
 * <p>On cluster startup, bulk assignment can be used to determine
 * locations for all Regions in a cluster.
 *
 * <p>This class produces plans for the {@link AssignmentManager} to execute.
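 *
 * <p>A minimal usage sketch (illustrative only; the <code>conf</code> and
 * <code>serverToRegions</code> names are assumed, not taken from this file):
 * <pre>
 *   LoadBalancer balancer = new LoadBalancer(conf);
 *   List&lt;RegionPlan&gt; plans = balancer.balanceCluster(serverToRegions);
 *   // a null result means the cluster is already balanced;
 *   // otherwise hand each RegionPlan to the AssignmentManager
 * </pre>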
 */
public class LoadBalancer {
  private static final Log LOG = LogFactory.getLog(LoadBalancer.class);
  private static final Random RANDOM = new Random(System.currentTimeMillis());
  // Slop for regions: allowed fractional deviation from the per-server average
  // before a server is considered out of balance (clamped to [0, 1])
  private float slop;

  LoadBalancer(Configuration conf) {
    this.slop = conf.getFloat("hbase.regions.slop", (float) 0.0);
    if (slop < 0) slop = 0;
    else if (slop > 1) slop = 1;
  }

  static class RegionPlanComparator implements Comparator<RegionPlan> {
    @Override
    public int compare(RegionPlan l, RegionPlan r) {
      long diff = r.getRegionInfo().getRegionId() - l.getRegionInfo().getRegionId();
      if (diff < 0) return -1;
      if (diff > 0) return 1;
      return 0;
    }
  }
  static RegionPlanComparator rpComparator = new RegionPlanComparator();

  /**
   * Generate a global load balancing plan according to the specified map of
   * server information to the most loaded regions of each server.
   *
   * The load balancing invariant is that all servers are within 1 region of the
   * average number of regions per server.  If the average is an integer number,
   * all servers will be balanced to the average.  Otherwise, all servers will
   * have either floor(average) or ceiling(average) regions.
   *
   * The algorithm is currently implemented as follows (a worked example follows
   * the list):
   *
   * <ol>
   * <li>Determine the two valid numbers of regions each server should have,
   *     <b>MIN</b>=floor(average) and <b>MAX</b>=ceiling(average).
   *
   * <li>Iterate down the most loaded servers, shedding regions from each so
   *     each server hosts exactly <b>MAX</b> regions.  Stop once you reach a
   *     server that already has &lt;= <b>MAX</b> regions.
   *     <p>
   *     Order the regions to move from most recent to least.
   *
   * <li>Iterate down the least loaded servers, assigning regions so each server
   *     has exactly <b>MIN</b> regions.  Stop once you reach a server that
   *     already has &gt;= <b>MIN</b> regions.
   *
   *     Regions being assigned to underloaded servers are those that were shed
   *     in the previous step.  It is possible that there were not enough
   *     regions shed to fill each underloaded server to <b>MIN</b>.  If so, we
   *     track the number of additional regions required as <b>neededRegions</b>.
   *
   *     It is also possible that we were able to fill each underloaded server
   *     but ended up with regions that were shed from overloaded servers and
   *     still do not have a destination.
   *
   *     If neither of these conditions holds (no regions needed to fill the
   *     underloaded servers, no regions leftover from overloaded servers),
   *     we are done and return.  Otherwise we handle these cases below.
   *
   * <li>If <b>neededRegions</b> is non-zero (we still have underloaded servers),
   *     we iterate the most loaded servers again, shedding a single region from
   *     each (this brings them from having <b>MAX</b> regions to having
   *     <b>MIN</b> regions).
   *
   * <li>We now definitely have more regions that need assignment, either from
   *     the previous step or from the original shedding from overloaded servers.
   *
   *     Iterate the least loaded servers filling each to <b>MIN</b>.
   *
   * <li>If we still have more regions that need assignment, again iterate the
   *     least loaded servers, this time giving each one region (filling them up
   *     to <b>MAX</b>) until we run out.
   *
   * <li>All servers will now either host <b>MIN</b> or <b>MAX</b> regions.
   *
   *     In addition, any server hosting &gt;= <b>MAX</b> regions is guaranteed
   *     to end up with <b>MAX</b> regions at the end of the balancing.  This
   *     ensures the minimal number of regions possible are moved.
   * </ol>
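   *
   * <p>Worked example (numbers are illustrative only): four servers hosting
   * 3, 5, 9 and 3 regions give average=5.0, so <b>MIN</b>=<b>MAX</b>=5.  The
   * server with 9 regions sheds 4, the two servers with 3 regions receive 2
   * each, and the returned plan contains 4 region moves.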
   *
   * TODO: We can reassign away from a particular server at most as many regions
   *       as it reports as its most loaded.
   *       Should we just keep all assignments in memory?  Any objections?
   *       Does this mean we need HeapSize on HMaster?  Or just careful monitoring?
   *       (current thinking is we will hold all assignments in memory)
   *
   * @param clusterState Map of regionservers and their load/region information
   *                     to a list of their most loaded regions
   * @return a list of regions to be moved, including source and destination,
   *         or null if cluster is already balanced
   */
  public List<RegionPlan> balanceCluster(
      Map<HServerInfo,List<HRegionInfo>> clusterState) {
    long startTime = System.currentTimeMillis();

    // Make a map sorted by load and count regions
    TreeMap<HServerInfo,List<HRegionInfo>> serversByLoad =
      new TreeMap<HServerInfo,List<HRegionInfo>>(
          new HServerInfo.LoadComparator());
    int numServers = clusterState.size();
    if (numServers == 0) {
      LOG.debug("numServers=0 so skipping load balancing");
      return null;
    }
    int numRegions = 0;
    // Iterate so we can count regions as we build the map
    for(Map.Entry<HServerInfo, List<HRegionInfo>> server:
        clusterState.entrySet()) {
      server.getKey().getLoad().setNumberOfRegions(server.getValue().size());
      numRegions += server.getKey().getLoad().getNumberOfRegions();
      serversByLoad.put(server.getKey(), server.getValue());
    }

    // Check if we even need to do any load balancing
    float average = (float)numRegions / numServers;
    // HBASE-3681 check sloppiness first
    int floor = (int) Math.floor(average * (1 - slop));
    int ceiling = (int) Math.ceil(average * (1 + slop));
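    // For illustration (hypothetical numbers): average=5.0 and slop=0.2 give
    // floor=4 and ceiling=6, so only a server holding fewer than 4 or more
    // than 6 regions triggers a rebalance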
    if(serversByLoad.lastKey().getLoad().getNumberOfRegions() <= ceiling &&
       serversByLoad.firstKey().getLoad().getNumberOfRegions() >= floor) {
      // Skipped because no server is outside the (floor,ceiling) range
      LOG.info("Skipping load balancing.  servers=" + numServers + " " +
          "regions=" + numRegions + " average=" + average + " " +
          "mostloaded=" + serversByLoad.lastKey().getLoad().getNumberOfRegions() +
          " leastloaded=" + serversByLoad.firstKey().getLoad().getNumberOfRegions());
      return null;
    }
    int min = numRegions / numServers;
    int max = numRegions % numServers == 0 ? min : min + 1;
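    // e.g. (illustrative) 21 regions over 4 servers gives min=5, max=6;
    // 20 regions over 4 servers gives min=max=5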

    // Balance the cluster
    // TODO: Look at data block locality or a more complex load to do this
    List<RegionPlan> regionsToMove = new ArrayList<RegionPlan>();
    int regionidx = 0; // track the index in above list for setting destination

    // Walk down most loaded, pruning each to the max
    int serversOverloaded = 0;
    Map<HServerInfo,BalanceInfo> serverBalanceInfo =
      new TreeMap<HServerInfo,BalanceInfo>();
    for(Map.Entry<HServerInfo, List<HRegionInfo>> server :
      serversByLoad.descendingMap().entrySet()) {
      HServerInfo serverInfo = server.getKey();
      int regionCount = serverInfo.getLoad().getNumberOfRegions();
      if(regionCount <= max) {
        serverBalanceInfo.put(serverInfo, new BalanceInfo(0, 0));
        break;
      }
      serversOverloaded++;
      List<HRegionInfo> regions = randomize(server.getValue());
      int numToOffload = Math.min(regionCount - max, regions.size());
      int numTaken = 0;
      for (int i = regions.size() - 1; i >= 0; i--) {
        HRegionInfo hri = regions.get(i);
        // Don't rebalance meta regions.
        if (hri.isMetaRegion()) continue;
        regionsToMove.add(new RegionPlan(hri, serverInfo, null));
        numTaken++;
        if (numTaken >= numToOffload) break;
      }
      serverBalanceInfo.put(serverInfo,
          new BalanceInfo(numToOffload, (-1)*numTaken));
    }

    // Walk down least loaded, filling each to the min
    int serversUnderloaded = 0; // number of servers that get new regions
    int neededRegions = 0; // number of regions needed to bring all up to min
    for(Map.Entry<HServerInfo, List<HRegionInfo>> server :
      serversByLoad.entrySet()) {
      int regionCount = server.getKey().getLoad().getNumberOfRegions();
      if(regionCount >= min) {
        break;
      }
      serversUnderloaded++;
      int numToTake = min - regionCount;
      int numTaken = 0;
      while(numTaken < numToTake && regionidx < regionsToMove.size()) {
        regionsToMove.get(regionidx).setDestination(server.getKey());
        numTaken++;
        regionidx++;
      }
      serverBalanceInfo.put(server.getKey(), new BalanceInfo(0, numTaken));
      // If we still want to take some, increment needed
      if(numTaken < numToTake) {
        neededRegions += (numToTake - numTaken);
      }
    }

    // If none needed to fill all to min and none left to drain all to max,
    // we are done
    if(neededRegions == 0 && regionidx == regionsToMove.size()) {
      long endTime = System.currentTimeMillis();
      LOG.info("Calculated a load balance in " + (endTime-startTime) + "ms. " +
          "Moving " + regionsToMove.size() + " regions off of " +
          serversOverloaded + " overloaded servers onto " +
          serversUnderloaded + " less loaded servers");
      return regionsToMove;
    }

    // Need to do a second pass.
    // Either more regions to assign out or servers that are still underloaded

    // If we need more to fill min, grab one from each most loaded until enough
    if (neededRegions != 0) {
      // Walk down most loaded, grabbing one from each until we get enough
      for(Map.Entry<HServerInfo, List<HRegionInfo>> server :
        serversByLoad.descendingMap().entrySet()) {
        BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey());
        int idx =
          balanceInfo == null ? 0 : balanceInfo.getNextRegionForUnload();
        if (idx >= server.getValue().size()) break;
        HRegionInfo region = server.getValue().get(idx);
        if (region.isMetaRegion()) continue; // Don't move meta regions.
        regionsToMove.add(new RegionPlan(region, server.getKey(), null));
        if(--neededRegions == 0) {
          // No more regions needed, done shedding
          break;
        }
      }
    }

    // Now we have a set of regions that must be all assigned out
    // Assign each underloaded up to the min, then if leftovers, assign to max

    // Walk down least loaded, assigning to each to fill up to min
    for(Map.Entry<HServerInfo, List<HRegionInfo>> server :
      serversByLoad.entrySet()) {
      int regionCount = server.getKey().getLoad().getNumberOfRegions();
      if (regionCount >= min) break;
      BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey());
      if(balanceInfo != null) {
        regionCount += balanceInfo.getNumRegionsAdded();
      }
      if(regionCount >= min) {
        continue;
      }
      int numToTake = min - regionCount;
      int numTaken = 0;
      while(numTaken < numToTake && regionidx < regionsToMove.size()) {
        regionsToMove.get(regionidx).setDestination(server.getKey());
        numTaken++;
        regionidx++;
      }
    }

    // If we still have regions to dish out, assign underloaded to max
    if(regionidx != regionsToMove.size()) {
      for(Map.Entry<HServerInfo, List<HRegionInfo>> server :
        serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad().getNumberOfRegions();
        if(regionCount >= max) {
          break;
        }
        regionsToMove.get(regionidx).setDestination(server.getKey());
        regionidx++;
        if(regionidx == regionsToMove.size()) {
          break;
        }
      }
    }

    long endTime = System.currentTimeMillis();

    if (regionidx != regionsToMove.size() || neededRegions != 0) {
      // Emit data so we can diagnose how the balancer went astray.
      LOG.warn("regionidx=" + regionidx + ", regionsToMove=" + regionsToMove.size() +
          ", numServers=" + numServers + ", serversOverloaded=" + serversOverloaded +
          ", serversUnderloaded=" + serversUnderloaded);
      StringBuilder sb = new StringBuilder();
      for (Map.Entry<HServerInfo, List<HRegionInfo>> e: clusterState.entrySet()) {
        if (sb.length() > 0) sb.append(", ");
        sb.append(e.getKey().getServerName());
        sb.append(" ");
        sb.append(e.getValue().size());
      }
      LOG.warn("Input " + sb.toString());
    }

    // All done!
    LOG.info("Calculated a load balance in " + (endTime-startTime) + "ms. " +
        "Moving " + regionsToMove.size() + " regions off of " +
        serversOverloaded + " overloaded servers onto " +
        serversUnderloaded + " less loaded servers");

    return regionsToMove;
  }

  /**
   * Shuffles the passed list of regions in place using the balancer's RNG.
   * @param regions list of regions to shuffle
   * @return the passed <code>regions</code> list, shuffled
   */
  static List<HRegionInfo> randomize(final List<HRegionInfo> regions) {
    Collections.shuffle(regions, RANDOM);
    return regions;
  }

  /**
   * Stores additional per-server information about the regions added/removed
   * during the run of the balancing algorithm.
   *
   * For servers that receive additional regions, the region count in
   * HServerInfo is not updated when we decide to reassign regions to them,
   * but we need that count later in the algorithm.  It is stored in
   * <b>numRegionsAdded</b>.
   *
   * For servers that shed regions, we need to track which regions we have
   * already shed.  <b>nextRegionForUnload</b> contains the index, in the
   * server's list of regions, of the next region to be shed.
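   *
   * <p>For example (illustrative values), a server that sheds three regions in
   * the first pass is typically recorded as <code>BalanceInfo(3, -3)</code>,
   * while a server that receives two regions is recorded as
   * <code>BalanceInfo(0, 2)</code>.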
   */
  private static class BalanceInfo {

    private final int nextRegionForUnload;
    private final int numRegionsAdded;

    public BalanceInfo(int nextRegionForUnload, int numRegionsAdded) {
      this.nextRegionForUnload = nextRegionForUnload;
      this.numRegionsAdded = numRegionsAdded;
    }

    public int getNextRegionForUnload() {
      return nextRegionForUnload;
    }

    public int getNumRegionsAdded() {
      return numRegionsAdded;
    }
  }

  /**
   * Generates a bulk assignment plan to be used on cluster startup.
   * <p>
   * Takes a list of all the regions and all the servers in the cluster and
   * returns a map of each server to the regions that it should be assigned.
   * <p>
   * Currently implemented as a round-robin assignment.  The same invariant as
   * load balancing holds: all servers end up with either floor(avg) or
   * ceiling(avg) regions.
   *
   * TODO: Use block locations from HDFS to place regions with their blocks
   *
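   * <p>For example (illustrative names, and assuming the random starting offset
   * picks server A first): regions R0..R6 spread over servers A, B, C come out
   * as A={R0,R3,R6}, B={R1,R4}, C={R2,R5}.
   *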
   * @param regions all regions
   * @param servers all servers
   * @return map of server to the regions it should take, or null if no
   *         assignment is possible (i.e. no regions or no servers)
   */
  public static Map<HServerInfo, List<HRegionInfo>> roundRobinAssignment(
      List<HRegionInfo> regions, List<HServerInfo> servers) {
    if(regions.size() == 0 || servers.size() == 0) {
      return null;
    }
    Map<HServerInfo,List<HRegionInfo>> assignments =
      new TreeMap<HServerInfo,List<HRegionInfo>>();
    int numRegions = regions.size();
    int numServers = servers.size();
    int max = (int)Math.ceil((float)numRegions/numServers);
    int serverIdx = 0;
    if (numServers > 1) {
      serverIdx = RANDOM.nextInt(numServers);
    }
    int regionIdx = 0;
    for (int j = 0; j < numServers; j++) {
      HServerInfo server = servers.get((j+serverIdx) % numServers);
      List<HRegionInfo> serverRegions = new ArrayList<HRegionInfo>(max);
      for (int i=regionIdx; i<numRegions; i += numServers) {
        serverRegions.add(regions.get(i % numRegions));
      }
      assignments.put(server, serverRegions);
      regionIdx++;
    }
    return assignments;
  }

  /**
   * Generates a bulk assignment startup plan, attempting to reuse the existing
   * assignment information from META, but adjusting for the specified list of
   * available/online servers.
   * <p>
   * Takes a map of all regions to their existing assignment from META.  Also
   * takes a list of online servers for regions to be assigned to.  Attempts to
   * retain all existing assignments, so in some instances the initial
   * assignment will not be completely balanced.
   * <p>
   * Any leftover regions without an existing server to be assigned to will be
   * assigned randomly to available servers.
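   * <p>For example, a region whose META entry names a server that is not in the
   * passed <code>servers</code> list (e.g. the server died while the cluster
   * was down) is treated as unassigned and given to a random live server.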
   * @param regions regions and existing assignment from meta
   * @param servers available servers
   * @return map of servers and regions to be assigned to them
   */
  public static Map<HServerInfo, List<HRegionInfo>> retainAssignment(
      Map<HRegionInfo, HServerAddress> regions, List<HServerInfo> servers) {
    Map<HServerInfo, List<HRegionInfo>> assignments =
      new TreeMap<HServerInfo, List<HRegionInfo>>();
    // Build a map of server addresses to server info so we can match things up
    Map<HServerAddress, HServerInfo> serverMap =
      new TreeMap<HServerAddress, HServerInfo>();
    for (HServerInfo server : servers) {
      serverMap.put(server.getServerAddress(), server);
      assignments.put(server, new ArrayList<HRegionInfo>());
    }
    for (Map.Entry<HRegionInfo, HServerAddress> region : regions.entrySet()) {
      HServerAddress hsa = region.getValue();
      HServerInfo server = hsa == null? null: serverMap.get(hsa);
      if (server != null) {
        assignments.get(server).add(region.getKey());
      } else {
        assignments.get(servers.get(RANDOM.nextInt(assignments.size()))).add(
            region.getKey());
      }
    }
    return assignments;
  }

  /**
   * Find the block locations for all of the files for the specified region.
   *
   * Returns an ordered list of hosts that are hosting the blocks for this
   * region.  The weight of each host is the sum of the block lengths of all
   * files on that host, so the first host in the list is the server which
   * holds the most bytes of the given region's HFiles.
   *
   * TODO: Make this work.  Need to figure out how to match hadoop's hostnames
   *       given for block locations with our HServerAddress.
   * TODO: Use the right directory for the region
   * TODO: Use getFileBlockLocations on the files not the directory
   *
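   * <p>For example (illustrative sizes): if hostA holds blocks totalling 192MB
   * of the region's files and hostB holds 64MB, the returned list is
   * [hostA, hostB].
   *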
   * @param fs the filesystem
   * @param region region
   * @return ordered list of hosts holding blocks of the specified region
   * @throws IOException on any filesystem error
   */
  @SuppressWarnings("unused")
  private List<String> getTopBlockLocations(FileSystem fs, HRegionInfo region)
  throws IOException {
    String encodedName = region.getEncodedName();
    Path path = new Path("/hbase/table/" + encodedName);
    FileStatus status = fs.getFileStatus(path);
    BlockLocation [] blockLocations =
      fs.getFileBlockLocations(status, 0, status.getLen());
    Map<HostAndWeight,HostAndWeight> hostWeights =
      new TreeMap<HostAndWeight,HostAndWeight>(new HostAndWeight.HostComparator());
    for(BlockLocation bl : blockLocations) {
      String [] hosts = bl.getHosts();
      long len = bl.getLength();
      for(String host : hosts) {
        // Look up by host name; HostComparator compares on host only, so a
        // zero-weight probe finds any existing entry for this host
        HostAndWeight haw = hostWeights.get(new HostAndWeight(host, 0));
        if(haw == null) {
          haw = new HostAndWeight(host, len);
          hostWeights.put(haw, haw);
        } else {
          haw.addWeight(len);
        }
      }
    }
    NavigableSet<HostAndWeight> orderedHosts = new TreeSet<HostAndWeight>(
        new HostAndWeight.WeightComparator());
    orderedHosts.addAll(hostWeights.values());
    List<String> topHosts = new ArrayList<String>(orderedHosts.size());
    for(HostAndWeight haw : orderedHosts.descendingSet()) {
      topHosts.add(haw.getHost());
    }
    return topHosts;
  }


  /**
   * Stores the hostname and weight for that hostname.
   *
   * This is used when determining the physical locations of the blocks making
   * up a region.
   *
   * To make a prioritized list of the hosts holding the most data of a region,
   * this class is used to count the total weight for each host.  The weight is
   * currently the total length of the region's blocks located on that host.
   */
  private static class HostAndWeight {

    private final String host;
    private long weight;

    public HostAndWeight(String host, long weight) {
      this.host = host;
      this.weight = weight;
    }

    public void addWeight(long weight) {
      this.weight += weight;
    }

    public String getHost() {
      return host;
    }

    public long getWeight() {
      return weight;
    }

    private static class HostComparator implements Comparator<HostAndWeight> {
      @Override
      public int compare(HostAndWeight l, HostAndWeight r) {
        return l.getHost().compareTo(r.getHost());
      }
    }

    private static class WeightComparator implements Comparator<HostAndWeight> {
      @Override
      public int compare(HostAndWeight l, HostAndWeight r) {
        if(l.getWeight() == r.getWeight()) {
          return l.getHost().compareTo(r.getHost());
        }
        return l.getWeight() < r.getWeight() ? -1 : 1;
      }
    }
  }

  /**
   * Generates an immediate assignment plan to be used by a new master for
   * regions in transition that do not have an already known destination.
   *
   * Takes a list of regions that need immediate assignment and a list of
   * all available servers.  Returns a map of regions to the server they
   * should be assigned to.
   *
   * This method will return quickly and does not do any intelligent
   * balancing.  The goal is to make a fast decision, not the best decision
   * possible.
   *
   * Currently this is random.
   *
   * @param regions regions requiring immediate assignment
   * @param servers available servers
   * @return map of each region to the server it should be assigned to
   */
  public static Map<HRegionInfo,HServerInfo> immediateAssignment(
      List<HRegionInfo> regions, List<HServerInfo> servers) {
    Map<HRegionInfo,HServerInfo> assignments =
      new TreeMap<HRegionInfo,HServerInfo>();
    for(HRegionInfo region : regions) {
      assignments.put(region, servers.get(RANDOM.nextInt(servers.size())));
    }
    return assignments;
  }

  /**
   * Picks a random server from the passed list of available servers.
   * @param servers available servers
   * @return a randomly chosen server, or null if none are available
   */
  public static HServerInfo randomAssignment(List<HServerInfo> servers) {
    if (servers == null || servers.isEmpty()) {
      LOG.warn("Wanted to do random assignment but no servers to assign to");
      return null;
    }
    return servers.get(RANDOM.nextInt(servers.size()));
  }

  /**
   * Stores the plan for the move of an individual region.
   *
   * Contains info for the region being moved, info for the server the region
   * should be moved from, and info for the server the region should be moved
   * to.
   *
   * The comparable implementation of this class compares only the region
   * information and not the source/dest server info.
   */
  public static class RegionPlan implements Comparable<RegionPlan> {
    private final HRegionInfo hri;
    private final HServerInfo source;
    private HServerInfo dest;

    /**
     * Instantiate a plan for a region move, moving the specified region from
     * the specified source server to the specified destination server.
     *
     * The destination server may be passed as null and set later
     * with {@link #setDestination(HServerInfo)}.
     *
     * @param hri region to be moved
     * @param source regionserver region should be moved from
     * @param dest regionserver region should be moved to
     */
    public RegionPlan(final HRegionInfo hri, HServerInfo source, HServerInfo dest) {
      this.hri = hri;
      this.source = source;
      this.dest = dest;
    }

    /**
     * Set the destination server for the plan for this region.
     */
    public void setDestination(HServerInfo dest) {
      this.dest = dest;
    }

    /**
     * Get the source server for the plan for this region.
     * @return server info for source
     */
    public HServerInfo getSource() {
      return source;
    }

    /**
     * Get the destination server for the plan for this region.
     * @return server info for destination
     */
    public HServerInfo getDestination() {
      return dest;
    }

    /**
     * Get the encoded region name for the region this plan is for.
     * @return Encoded region name
     */
    public String getRegionName() {
      return this.hri.getEncodedName();
    }

    public HRegionInfo getRegionInfo() {
      return this.hri;
    }

    /**
     * Compare the region info.
     * @param o region plan you are comparing against
     */
    @Override
    public int compareTo(RegionPlan o) {
      return getRegionName().compareTo(o.getRegionName());
    }

    @Override
    public String toString() {
      return "hri=" + this.hri.getRegionNameAsString() + ", src=" +
        (this.source == null? "": this.source.getServerName()) +
        ", dest=" + (this.dest == null? "": this.dest.getServerName());
    }
  }
}