View Javadoc

1   /**
2    * Copyright 2010 The Apache Software Foundation
3    *
4    * Licensed to the Apache Software Foundation (ASF) under one
5    * or more contributor license agreements.  See the NOTICE file
6    * distributed with this work for additional information
7    * regarding copyright ownership.  The ASF licenses this file
8    * to you under the Apache License, Version 2.0 (the
9    * "License"); you may not use this file except in compliance
10   * with the License.  You may obtain a copy of the License at
11   *
12   *     http://www.apache.org/licenses/LICENSE-2.0
13   *
14   * Unless required by applicable law or agreed to in writing, software
15   * distributed under the License is distributed on an "AS IS" BASIS,
16   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17   * See the License for the specific language governing permissions and
18   * limitations under the License.
19   */
20  package org.apache.hadoop.hbase.master;
21  
22  import java.io.IOException;
23  import java.util.ArrayList;
24  import java.util.Collections;
25  import java.util.Comparator;
26  import java.util.List;
27  import java.util.Map;
28  import java.util.NavigableSet;
29  import java.util.Random;
30  import java.util.TreeMap;
31  import java.util.TreeSet;
32  
33  import org.apache.commons.logging.Log;
34  import org.apache.commons.logging.LogFactory;
35  import org.apache.hadoop.conf.Configuration;
36  import org.apache.hadoop.fs.BlockLocation;
37  import org.apache.hadoop.fs.FileStatus;
38  import org.apache.hadoop.fs.FileSystem;
39  import org.apache.hadoop.fs.Path;
40  import org.apache.hadoop.hbase.HRegionInfo;
41  import org.apache.hadoop.hbase.HServerAddress;
42  import org.apache.hadoop.hbase.HServerInfo;
43  
/**
 * Makes decisions about the placement and movement of Regions across
 * RegionServers.
 *
 * <p>Cluster-wide load balancing will occur only when there are no regions in
 * transition and according to a fixed period of a time using {@link #balanceCluster(Map)}.
 *
 * <p>Inline region placement with {@link #immediateAssignment} can be used when
 * the Master needs to handle closed regions that it currently does not have
 * a destination set for.  This can happen during master failover.
 *
 * <p>On cluster startup, bulk assignment can be used to determine
 * locations for all Regions in a cluster.
 *
 * <p>This class produces plans for the {@link AssignmentManager} to execute.
 */
public class LoadBalancer {
  private static final Log LOG = LogFactory.getLog(LoadBalancer.class);
  // Shared RNG for all randomized placement decisions; time-seeded once at
  // class load.
  private static final Random RANDOM = new Random(System.currentTimeMillis());
  // slop for regions: allowed imbalance around the per-server average,
  // expressed as a fraction.  Clamped to [0, 1] in the constructor.
  private float slop;

  /**
   * @param conf configuration; reads "hbase.regions.slop" (defaults to 0,
   *   i.e. balance strictly to the average)
   */
  LoadBalancer(Configuration conf) {
    this.slop = conf.getFloat("hbase.regions.slop", (float) 0.0);
    if (slop < 0) slop = 0;
    else if (slop > 1) slop = 1;
  }
71    
72    static class RegionPlanComparator implements Comparator<RegionPlan> {
73      @Override
74      public int compare(RegionPlan l, RegionPlan r) {
75        long diff = r.getRegionInfo().getRegionId() - l.getRegionInfo().getRegionId();
76        if (diff < 0) return -1;
77        if (diff > 0) return 1;
78        return 0;
79      }
80    }
81    static RegionPlanComparator rpComparator = new RegionPlanComparator();
82  
  /**
   * Generate a global load balancing plan according to the specified map of
   * server information to the most loaded regions of each server.
   *
   * The load balancing invariant is that all servers are within 1 region of the
   * average number of regions per server.  If the average is an integer number,
   * all servers will be balanced to the average.  Otherwise, all servers will
   * have either floor(average) or ceiling(average) regions.
   *
   * The algorithm is currently implemented as such:
   *
   * <ol>
   * <li>Determine the two valid numbers of regions each server should have,
   *     <b>MIN</b>=floor(average) and <b>MAX</b>=ceiling(average).
   *
   * <li>Iterate down the most loaded servers, shedding regions from each so
   *     each server hosts exactly <b>MAX</b> regions.  Stop once you reach a
   *     server that already has &lt;= <b>MAX</b> regions.
   *     <p>
   *     Order the regions to move from most recent to least.
   *
   * <li>Iterate down the least loaded servers, assigning regions so each server
   *     has exactly <b>MIN</b> regions.  Stop once you reach a server that
   *     already has &gt;= <b>MIN</b> regions.
   *
   *     Regions being assigned to underloaded servers are those that were shed
   *     in the previous step.  It is possible that there were not enough
   *     regions shed to fill each underloaded server to <b>MIN</b>.  If so we
   *     end up with a number of regions required to do so, <b>neededRegions</b>.
   *
   *     It is also possible that we were able to fill each underloaded but ended
   *     up with regions that were unassigned from overloaded servers but that
   *     still do not have assignment.
   *
   *     If neither of these conditions hold (no regions needed to fill the
   *     underloaded servers, no regions leftover from overloaded servers),
   *     we are done and return.  Otherwise we handle these cases below.
   *
   * <li>If <b>neededRegions</b> is non-zero (still have underloaded servers),
   *     we iterate the most loaded servers again, shedding a single server from
   *     each (this brings them from having <b>MAX</b> regions to having
   *     <b>MIN</b> regions).
   *
   * <li>We now definitely have more regions that need assignment, either from
   *     the previous step or from the original shedding from overloaded servers.
   *
   *     Iterate the least loaded servers filling each to <b>MIN</b>.
   *
   * <li>If we still have more regions that need assignment, again iterate the
   *     least loaded servers, this time giving each one (filling them to
   *     <b>MAX</b>) until we run out.
   *
   * <li>All servers will now either host <b>MIN</b> or <b>MAX</b> regions.
   *
   *     In addition, any server hosting &gt;= <b>MAX</b> regions is guaranteed
   *     to end up with <b>MAX</b> regions at the end of the balancing.  This
   *     ensures the minimal number of regions possible are moved.
   * </ol>
   *
   * TODO: We can at-most reassign the number of regions away from a particular
   *       server to be how many they report as most loaded.
   *       Should we just keep all assignment in memory?  Any objections?
   *       Does this mean we need HeapSize on HMaster?  Or just careful monitor?
   *       (current thinking is we will hold all assignments in memory)
   *
   * @param clusterState Map of regionservers and their load/region information to
   *                   a list of their most loaded regions
   * @return a list of regions to be moved, including source and destination,
   *         or null if cluster is already balanced
   */
  public List<RegionPlan> balanceCluster(
      Map<HServerInfo,List<HRegionInfo>> clusterState) {
    long startTime = System.currentTimeMillis();

    // Make a map sorted by load and count regions
    TreeMap<HServerInfo,List<HRegionInfo>> serversByLoad =
      new TreeMap<HServerInfo,List<HRegionInfo>>(
          new HServerInfo.LoadComparator());
    int numServers = clusterState.size();
    if (numServers == 0) {
      LOG.debug("numServers=0 so skipping load balancing");
      return null;
    }
    int numRegions = 0;
    StringBuilder strBalanceParam = new StringBuilder("Server information: ");

    // Iterate so we can count regions as we build the map
    for(Map.Entry<HServerInfo, List<HRegionInfo>> server:
        clusterState.entrySet()) {
      server.getKey().getLoad().setNumberOfRegions(server.getValue().size());
      numRegions += server.getKey().getLoad().getNumberOfRegions();
      serversByLoad.put(server.getKey(), server.getValue());

      strBalanceParam.append(server.getKey().getServerName()).append("=")
          .append(server.getValue().size()).append(", ");
    }
    // Drop the trailing ", " appended by the loop above.
    strBalanceParam.delete(strBalanceParam.length() - 2,
        strBalanceParam.length());
    LOG.debug(strBalanceParam.toString());

    // Check if we even need to do any load balancing
    float average = (float)numRegions / numServers; // for logging
    // HBASE-3681 check sloppiness first: every server within
    // [average*(1-slop), average*(1+slop)] means no balancing needed.
    int floor = (int) Math.floor(average * (1 - slop));
    int ceiling = (int) Math.ceil(average * (1 + slop));
    if(serversByLoad.lastKey().getLoad().getNumberOfRegions() <= ceiling &&
       serversByLoad.firstKey().getLoad().getNumberOfRegions() >= floor) {
      // Skipped because no server outside (min,max) range
      LOG.info("Skipping load balancing.  servers=" + numServers + " " +
          "regions=" + numRegions + " average=" + average + " " +
          "mostloaded=" + serversByLoad.lastKey().getLoad().getNumberOfRegions() +
          " leastloaded=" + serversByLoad.firstKey().getLoad().getNumberOfRegions());
      return null;
    }
    // MIN = floor(average), MAX = ceiling(average) from the javadoc.
    int min = numRegions / numServers;
    int max = numRegions % numServers == 0 ? min : min + 1;

    // Using to check balance result.
    strBalanceParam.delete(0, strBalanceParam.length());
    strBalanceParam.append("Balance parameter: numRegions=").append(numRegions)
        .append(", numServers=").append(numServers).append(", max=").append(max)
        .append(", min=").append(min);
    LOG.debug(strBalanceParam.toString());

    // Balance the cluster
    // TODO: Look at data block locality or a more complex load to do this
    List<RegionPlan> regionsToMove = new ArrayList<RegionPlan>();
    int regionidx = 0; // track the index in above list for setting destination

    // Step 2: walk down most loaded, pruning each to the max.
    int serversOverloaded = 0;
    Map<HServerInfo,BalanceInfo> serverBalanceInfo =
      new TreeMap<HServerInfo,BalanceInfo>();
    for(Map.Entry<HServerInfo, List<HRegionInfo>> server :
      serversByLoad.descendingMap().entrySet()) {
      HServerInfo serverInfo = server.getKey();
      int regionCount = serverInfo.getLoad().getNumberOfRegions();
      if(regionCount <= max) {
        // Sorted descending, so every remaining server is also <= max.
        serverBalanceInfo.put(serverInfo, new BalanceInfo(0, 0));
        break;
      }
      serversOverloaded++;
      // NOTE: randomize shuffles server.getValue() in place.
      List<HRegionInfo> regions = randomize(server.getValue());
      int numToOffload = Math.min(regionCount - max, regions.size());
      int numTaken = 0;

      for (HRegionInfo hri: regions) {
        // Don't rebalance meta regions.
        if (hri.isMetaRegion()) continue;
        // Destination is filled in later passes via setDestination.
        regionsToMove.add(new RegionPlan(hri, serverInfo, null));
        numTaken++;
        if (numTaken >= numToOffload) break;
      }
      // Negative numRegionsAdded records how many this server shed.
      serverBalanceInfo.put(serverInfo,
          new BalanceInfo(numToOffload, (-1)*numTaken));
    }

    // Step 3: walk down least loaded, filling each to the min.
    int serversUnderloaded = 0; // number of servers that get new regions
    int neededRegions = 0; // number of regions needed to bring all up to min
    for(Map.Entry<HServerInfo, List<HRegionInfo>> server :
      serversByLoad.entrySet()) {
      int regionCount = server.getKey().getLoad().getNumberOfRegions();
      if(regionCount >= min) {
        // Sorted ascending, so every remaining server is also >= min.
        break;
      }
      serversUnderloaded++;
      int numToTake = min - regionCount;
      int numTaken = 0;
      while(numTaken < numToTake && regionidx < regionsToMove.size()) {
        regionsToMove.get(regionidx).setDestination(server.getKey());
        numTaken++;
        regionidx++;
      }
      serverBalanceInfo.put(server.getKey(), new BalanceInfo(0, numTaken));
      // If we still want to take some, increment needed
      if(numTaken < numToTake) {
        neededRegions += (numToTake - numTaken);
      }
    }

    // If none needed to fill all to min and none left to drain all to max,
    // we are done
    if(neededRegions == 0 && regionidx == regionsToMove.size()) {
      long endTime = System.currentTimeMillis();
      LOG.info("Calculated a load balance in " + (endTime-startTime) + "ms. " +
          "Moving " + regionsToMove.size() + " regions off of " +
          serversOverloaded + " overloaded servers onto " +
          serversUnderloaded + " less loaded servers");
      return regionsToMove;
    }

    // Need to do a second pass.
    // Either more regions to assign out or servers that are still underloaded

    // Step 4: if we need more to fill min, grab one from each most loaded
    // until enough (brings those servers from MAX down to MIN).
    if (neededRegions != 0) {
      // Walk down most loaded, grabbing one from each until we get enough
      for(Map.Entry<HServerInfo, List<HRegionInfo>> server :
        serversByLoad.descendingMap().entrySet()) {
        BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey());
        // nextRegionForUnload is the index of the next un-shed region on
        // this server's (already shuffled) list.
        int idx =
          balanceInfo == null ? 0 : balanceInfo.getNextRegionForUnload();
        if (idx >= server.getValue().size()) break;
        HRegionInfo region = server.getValue().get(idx);
        if (region.isMetaRegion()) continue; // Don't move meta regions.
        regionsToMove.add(new RegionPlan(region, server.getKey(), null));
        if(--neededRegions == 0) {
          // No more regions needed, done shedding
          break;
        }
      }
    }

    // Now we have a set of regions that must be all assigned out
    // Assign each underloaded up to the min, then if leftovers, assign to max

    // Step 5: walk down least loaded, assigning to each to fill up to min.
    for(Map.Entry<HServerInfo, List<HRegionInfo>> server :
      serversByLoad.entrySet()) {
      int regionCount = server.getKey().getLoad().getNumberOfRegions();
      if (regionCount >= min) break;
      BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey());
      if(balanceInfo != null) {
        // Account for regions already assigned here in step 3.
        regionCount += balanceInfo.getNumRegionsAdded();
      }
      if(regionCount >= min) {
        continue;
      }
      int numToTake = min - regionCount;
      int numTaken = 0;
      while(numTaken < numToTake && regionidx < regionsToMove.size()) {
        regionsToMove.get(regionidx).setDestination(server.getKey());
        numTaken++;
        regionidx++;
      }
    }

    // Step 6: if we still have regions to dish out, assign underloaded to max.
    if(regionidx != regionsToMove.size()) {
      for(Map.Entry<HServerInfo, List<HRegionInfo>> server :
        serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad().getNumberOfRegions();
        if(regionCount >= max) {
          break;
        }
        regionsToMove.get(regionidx).setDestination(server.getKey());
        regionidx++;
        if(regionidx == regionsToMove.size()) {
          break;
        }
      }
    }

    long endTime = System.currentTimeMillis();

    // Every shed region should have a destination and no server should
    // still need regions; if not, log enough to diagnose.
    if (regionidx != regionsToMove.size() || neededRegions != 0) {
      // Emit data so can diagnose how balancer went astray.
      LOG.warn("regionidx=" + regionidx + ", regionsToMove=" + regionsToMove.size() +
      ", numServers=" + numServers + ", serversOverloaded=" + serversOverloaded +
      ", serversUnderloaded=" + serversUnderloaded);
      StringBuilder sb = new StringBuilder();
      for (Map.Entry<HServerInfo, List<HRegionInfo>> e: clusterState.entrySet()) {
        if (sb.length() > 0) sb.append(", ");
        sb.append(e.getKey().getServerName());
        sb.append(" ");
        sb.append(e.getValue().size());
      }
      LOG.warn("Input " + sb.toString());
    }

    // All done!
    LOG.info("Calculated a load balance in " + (endTime-startTime) + "ms. " +
        "Moving " + regionsToMove.size() + " regions off of " +
        serversOverloaded + " overloaded servers onto " +
        serversUnderloaded + " less loaded servers");

    return regionsToMove;
  }
362 
  /**
   * Shuffles the passed list IN PLACE using the shared {@link #RANDOM}
   * instance and returns the same list instance.  Callers pass live lists
   * (e.g. the region lists inside the cluster-state map), so the caller's
   * list order is mutated by this call.
   * @param regions list of regions to shuffle; modified by this method
   * @return the same <code>regions</code> list, shuffled
   */
  static List<HRegionInfo> randomize(final List<HRegionInfo> regions) {
    Collections.shuffle(regions, RANDOM);
    return regions;
  }
371 
372   /**
373    * Stores additional per-server information about the regions added/removed
374    * during the run of the balancing algorithm.
375    *
376    * For servers that receive additional regions, we are not updating the number
377    * of regions in HServerInfo once we decide to reassign regions to a server,
378    * but we need this information later in the algorithm.  This is stored in
379    * <b>numRegionsAdded</b>.
380    *
381    * For servers that shed regions, we need to track which regions we have
382    * already shed.  <b>nextRegionForUnload</b> contains the index in the list
383    * of regions on the server that is the next to be shed.
384    */
385   private static class BalanceInfo {
386 
387     private final int nextRegionForUnload;
388     private final int numRegionsAdded;
389 
390     public BalanceInfo(int nextRegionForUnload, int numRegionsAdded) {
391       this.nextRegionForUnload = nextRegionForUnload;
392       this.numRegionsAdded = numRegionsAdded;
393     }
394 
395     public int getNextRegionForUnload() {
396       return nextRegionForUnload;
397     }
398 
399     public int getNumRegionsAdded() {
400       return numRegionsAdded;
401     }
402   }
403 
404   /**
405    * Generates a bulk assignment plan to be used on cluster startup using a
406    * simple round-robin assignment.
407    * <p>
408    * Takes a list of all the regions and all the servers in the cluster and
409    * returns a map of each server to the regions that it should be assigned.
410    * <p>
411    * Currently implemented as a round-robin assignment.  Same invariant as
412    * load balancing, all servers holding floor(avg) or ceiling(avg).
413    *
414    * TODO: Use block locations from HDFS to place regions with their blocks
415    *
416    * @param regions all regions
417    * @param servers all servers
418    * @return map of server to the regions it should take, or null if no
419    *         assignment is possible (ie. no regions or no servers)
420    */
421   public static Map<HServerInfo, List<HRegionInfo>> roundRobinAssignment(
422       List<HRegionInfo> regions, List<HServerInfo> servers) {
423     if(regions.size() == 0 || servers.size() == 0) {
424       return null;
425     }
426     Map<HServerInfo,List<HRegionInfo>> assignments =
427       new TreeMap<HServerInfo,List<HRegionInfo>>();
428     int numRegions = regions.size();
429     int numServers = servers.size();
430     int max = (int)Math.ceil((float)numRegions/numServers);
431     int serverIdx = 0;
432     if (numServers > 1) {
433       serverIdx = RANDOM.nextInt(numServers);
434     }
435     int regionIdx = 0;
436     for (int j = 0; j < numServers; j++) {
437       HServerInfo server = servers.get((j+serverIdx) % numServers);
438       List<HRegionInfo> serverRegions = new ArrayList<HRegionInfo>(max);
439       for (int i=regionIdx; i<numRegions; i += numServers) {
440         serverRegions.add(regions.get(i % numRegions));
441       }
442       assignments.put(server, serverRegions);
443       regionIdx++;
444     }
445     return assignments;
446   }
447 
448   /**
449    * Generates a bulk assignment startup plan, attempting to reuse the existing
450    * assignment information from META, but adjusting for the specified list of
451    * available/online servers available for assignment.
452    * <p>
453    * Takes a map of all regions to their existing assignment from META.  Also
454    * takes a list of online servers for regions to be assigned to.  Attempts to
455    * retain all assignment, so in some instances initial assignment will not be
456    * completely balanced.
457    * <p>
458    * Any leftover regions without an existing server to be assigned to will be
459    * assigned randomly to available servers.
460    * @param regions regions and existing assignment from meta
461    * @param servers available servers
462    * @return map of servers and regions to be assigned to them
463    */
464   public static Map<HServerInfo, List<HRegionInfo>> retainAssignment(
465       Map<HRegionInfo, HServerAddress> regions, List<HServerInfo> servers) {
466     Map<HServerInfo, List<HRegionInfo>> assignments =
467       new TreeMap<HServerInfo, List<HRegionInfo>>();
468     // Build a map of server addresses to server info so we can match things up
469     Map<HServerAddress, HServerInfo> serverMap =
470       new TreeMap<HServerAddress, HServerInfo>();
471     for (HServerInfo server : servers) {
472       serverMap.put(server.getServerAddress(), server);
473       assignments.put(server, new ArrayList<HRegionInfo>());
474     }
475     for (Map.Entry<HRegionInfo, HServerAddress> region : regions.entrySet()) {
476       HServerAddress hsa = region.getValue();
477       HServerInfo server = hsa == null? null: serverMap.get(hsa);
478       if (server != null) {
479         assignments.get(server).add(region.getKey());
480       } else {
481         assignments.get(servers.get(RANDOM.nextInt(assignments.size()))).add(
482             region.getKey());
483       }
484     }
485     return assignments;
486   }
487 
488   /**
489    * Find the block locations for all of the files for the specified region.
490    *
491    * Returns an ordered list of hosts that are hosting the blocks for this
492    * region.  The weight of each host is the sum of the block lengths of all
493    * files on that host, so the first host in the list is the server which
494    * holds the most bytes of the given region's HFiles.
495    *
496    * TODO: Make this work.  Need to figure out how to match hadoop's hostnames
497    *       given for block locations with our HServerAddress.
498    * TODO: Use the right directory for the region
499    * TODO: Use getFileBlockLocations on the files not the directory
500    *
501    * @param fs the filesystem
502    * @param region region
503    * @return ordered list of hosts holding blocks of the specified region
504    * @throws IOException if any filesystem errors
505    */
506   @SuppressWarnings("unused")
507   private List<String> getTopBlockLocations(FileSystem fs, HRegionInfo region)
508   throws IOException {
509     String encodedName = region.getEncodedName();
510     Path path = new Path("/hbase/table/" + encodedName);
511     FileStatus status = fs.getFileStatus(path);
512     BlockLocation [] blockLocations =
513       fs.getFileBlockLocations(status, 0, status.getLen());
514     Map<HostAndWeight,HostAndWeight> hostWeights =
515       new TreeMap<HostAndWeight,HostAndWeight>(new HostAndWeight.HostComparator());
516     for(BlockLocation bl : blockLocations) {
517       String [] hosts = bl.getHosts();
518       long len = bl.getLength();
519       for(String host : hosts) {
520         HostAndWeight haw = hostWeights.get(host);
521         if(haw == null) {
522           haw = new HostAndWeight(host, len);
523           hostWeights.put(haw, haw);
524         } else {
525           haw.addWeight(len);
526         }
527       }
528     }
529     NavigableSet<HostAndWeight> orderedHosts = new TreeSet<HostAndWeight>(
530         new HostAndWeight.WeightComparator());
531     orderedHosts.addAll(hostWeights.values());
532     List<String> topHosts = new ArrayList<String>(orderedHosts.size());
533     for(HostAndWeight haw : orderedHosts.descendingSet()) {
534       topHosts.add(haw.getHost());
535     }
536     return topHosts;
537   }
538 
539   /**
540    * Stores the hostname and weight for that hostname.
541    *
542    * This is used when determining the physical locations of the blocks making
543    * up a region.
544    *
545    * To make a prioritized list of the hosts holding the most data of a region,
546    * this class is used to count the total weight for each host.  The weight is
547    * currently just the size of the file.
548    */
549   private static class HostAndWeight {
550 
551     private final String host;
552     private long weight;
553 
554     public HostAndWeight(String host, long weight) {
555       this.host = host;
556       this.weight = weight;
557     }
558 
559     public void addWeight(long weight) {
560       this.weight += weight;
561     }
562 
563     public String getHost() {
564       return host;
565     }
566 
567     public long getWeight() {
568       return weight;
569     }
570 
571     private static class HostComparator implements Comparator<HostAndWeight> {
572       @Override
573       public int compare(HostAndWeight l, HostAndWeight r) {
574         return l.getHost().compareTo(r.getHost());
575       }
576     }
577 
578     private static class WeightComparator implements Comparator<HostAndWeight> {
579       @Override
580       public int compare(HostAndWeight l, HostAndWeight r) {
581         if(l.getWeight() == r.getWeight()) {
582           return l.getHost().compareTo(r.getHost());
583         }
584         return l.getWeight() < r.getWeight() ? -1 : 1;
585       }
586     }
587   }
588 
589   /**
590    * Generates an immediate assignment plan to be used by a new master for
591    * regions in transition that do not have an already known destination.
592    *
593    * Takes a list of regions that need immediate assignment and a list of
594    * all available servers.  Returns a map of regions to the server they
595    * should be assigned to.
596    *
597    * This method will return quickly and does not do any intelligent
598    * balancing.  The goal is to make a fast decision not the best decision
599    * possible.
600    *
601    * Currently this is random.
602    *
603    * @param regions
604    * @param servers
605    * @return map of regions to the server it should be assigned to
606    */
607   public static Map<HRegionInfo,HServerInfo> immediateAssignment(
608       List<HRegionInfo> regions, List<HServerInfo> servers) {
609     Map<HRegionInfo,HServerInfo> assignments =
610       new TreeMap<HRegionInfo,HServerInfo>();
611     for(HRegionInfo region : regions) {
612       assignments.put(region, servers.get(RANDOM.nextInt(servers.size())));
613     }
614     return assignments;
615   }
616 
617   public static HServerInfo randomAssignment(List<HServerInfo> servers) {
618     if (servers == null || servers.isEmpty()) {
619       LOG.warn("Wanted to do random assignment but no servers to assign to");
620       return null;
621     }
622     return servers.get(RANDOM.nextInt(servers.size()));
623   }
624 
625   /**
626    * Stores the plan for the move of an individual region.
627    *
628    * Contains info for the region being moved, info for the server the region
629    * should be moved from, and info for the server the region should be moved
630    * to.
631    *
632    * The comparable implementation of this class compares only the region
633    * information and not the source/dest server info.
634    */
635   public static class RegionPlan implements Comparable<RegionPlan> {
636     private final HRegionInfo hri;
637     private final HServerInfo source;
638     private HServerInfo dest;
639 
640     /**
641      * Instantiate a plan for a region move, moving the specified region from
642      * the specified source server to the specified destination server.
643      *
644      * Destination server can be instantiated as null and later set
645      * with {@link #setDestination(HServerInfo)}.
646      *
647      * @param hri region to be moved
648      * @param source regionserver region should be moved from
649      * @param dest regionserver region should be moved to
650      */
651     public RegionPlan(final HRegionInfo hri, HServerInfo source, HServerInfo dest) {
652       this.hri = hri;
653       this.source = source;
654       this.dest = dest;
655     }
656 
657     /**
658      * Set the destination server for the plan for this region.
659      */
660     public void setDestination(HServerInfo dest) {
661       this.dest = dest;
662     }
663 
664     /**
665      * Get the source server for the plan for this region.
666      * @return server info for source
667      */
668     public HServerInfo getSource() {
669       return source;
670     }
671 
672     /**
673      * Get the destination server for the plan for this region.
674      * @return server info for destination
675      */
676     public HServerInfo getDestination() {
677       return dest;
678     }
679 
680     /**
681      * Get the encoded region name for the region this plan is for.
682      * @return Encoded region name
683      */
684     public String getRegionName() {
685       return this.hri.getEncodedName();
686     }
687 
688     public HRegionInfo getRegionInfo() {
689       return this.hri;
690     }
691 
692     /**
693      * Compare the region info.
694      * @param o region plan you are comparing against
695      */
696     @Override
697     public int compareTo(RegionPlan o) {
698       return getRegionName().compareTo(o.getRegionName());
699     }
700 
701     @Override
702     public String toString() {
703       return "hri=" + this.hri.getRegionNameAsString() + ", src=" +
704         (this.source == null? "": this.source.getServerName()) +
705         ", dest=" + (this.dest == null? "": this.dest.getServerName());
706     }
707   }
708 }