1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.master;
20
21 import java.io.IOException;
22 import java.net.InetAddress;
23 import java.util.ArrayList;
24 import java.util.Collections;
25 import java.util.HashMap;
26 import java.util.HashSet;
27 import java.util.Iterator;
28 import java.util.List;
29 import java.util.Map;
30 import java.util.Map.Entry;
31 import java.util.Set;
32 import java.util.SortedMap;
33 import java.util.concurrent.ConcurrentHashMap;
34 import java.util.concurrent.ConcurrentSkipListMap;
35
36 import org.apache.commons.logging.Log;
37 import org.apache.commons.logging.LogFactory;
38 import org.apache.hadoop.classification.InterfaceAudience;
39 import org.apache.hadoop.conf.Configuration;
40 import org.apache.hadoop.hbase.ClockOutOfSyncException;
41 import org.apache.hadoop.hbase.HRegionInfo;
42 import org.apache.hadoop.hbase.RegionLoad;
43 import org.apache.hadoop.hbase.Server;
44 import org.apache.hadoop.hbase.ServerLoad;
45 import org.apache.hadoop.hbase.ServerName;
46 import org.apache.hadoop.hbase.YouAreDeadException;
47 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
48 import org.apache.hadoop.hbase.client.HConnection;
49 import org.apache.hadoop.hbase.client.HConnectionManager;
50 import org.apache.hadoop.hbase.client.RetriesExhaustedException;
51 import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler;
52 import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
53 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
54 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
55 import org.apache.hadoop.hbase.protobuf.RequestConverter;
56 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
57 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
58 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
59 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
60 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
61 import org.apache.hadoop.hbase.util.Bytes;
62 import org.apache.hadoop.hbase.util.Triple;
63
64 import com.google.protobuf.ServiceException;
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88 @InterfaceAudience.Private
89 public class ServerManager {
90 public static final String WAIT_ON_REGIONSERVERS_MAXTOSTART =
91 "hbase.master.wait.on.regionservers.maxtostart";
92
93 public static final String WAIT_ON_REGIONSERVERS_MINTOSTART =
94 "hbase.master.wait.on.regionservers.mintostart";
95
96 public static final String WAIT_ON_REGIONSERVERS_TIMEOUT =
97 "hbase.master.wait.on.regionservers.timeout";
98
99 public static final String WAIT_ON_REGIONSERVERS_INTERVAL =
100 "hbase.master.wait.on.regionservers.interval";
101
102 private static final Log LOG = LogFactory.getLog(ServerManager.class);
103
104
105 private volatile boolean clusterShutdown = false;
106
107 private final SortedMap<byte[], Long> flushedSequenceIdByRegion =
108 new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
109
110
111 private final Map<ServerName, ServerLoad> onlineServers =
112 new ConcurrentHashMap<ServerName, ServerLoad>();
113
114
115
116
117
118 private final Map<ServerName, AdminService.BlockingInterface> rsAdmins =
119 new HashMap<ServerName, AdminService.BlockingInterface>();
120
121
122
123
124
125 private final ArrayList<ServerName> drainingServers =
126 new ArrayList<ServerName>();
127
128 private final Server master;
129 private final MasterServices services;
130 private final HConnection connection;
131
132 private final DeadServer deadservers = new DeadServer();
133
134 private final long maxSkew;
135 private final long warningSkew;
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153 private Set<ServerName> queuedDeadServers = new HashSet<ServerName>();
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170 private Map<ServerName, Boolean> requeuedDeadServers = new HashMap<ServerName, Boolean>();
171
172
173
174
175
176
177
  /**
   * Constructor that opens a connection to the cluster.
   * @param master the master this manager serves
   * @param services master services (assignment manager, executor, ...)
   * @throws IOException if the cluster connection cannot be obtained
   */
  public ServerManager(final Server master, final MasterServices services)
      throws IOException {
    this(master, services, true);
  }
182
  /**
   * Constructor.
   * @param master the master this manager serves
   * @param services master services
   * @param connect if false, no cluster connection is created (used by tests)
   * @throws IOException if connect is true and the connection cannot be obtained
   */
  ServerManager(final Server master, final MasterServices services,
      final boolean connect) throws IOException {
    this.master = master;
    this.services = services;
    Configuration c = master.getConfiguration();
    // Max tolerated clock difference before a region server is rejected.
    maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
    // Skew beyond this (but under maxSkew) only produces a warning.
    warningSkew = c.getLong("hbase.master.warningclockskew", 10000);
    this.connection = connect ? HConnectionManager.getConnection(c) : null;
  }
192
193
194
195
196
197
198
199
200
201
  /**
   * Let the server manager know a new region server has come online.
   * Rejects the server if its clock is too far out of sync with the master's,
   * or if it is currently being processed as a dead server.
   * @param ia the connecting server's resolved address
   * @param port the server's port
   * @param serverStartcode the server's start code
   * @param serverCurrentTime the server's wall-clock time, for skew checking
   * @return the ServerName registered for this server
   * @throws IOException (ClockOutOfSyncException / YouAreDeadException) on rejection
   */
  ServerName regionServerStartup(final InetAddress ia, final int port,
    final long serverStartcode, long serverCurrentTime)
  throws IOException {
    // Build the ServerName from the address the server connected with, so the
    // master and the server agree on the server's identity.
    ServerName sn = new ServerName(ia.getHostName(), port, serverStartcode);
    checkClockSkew(sn, serverCurrentTime);
    checkIsDead(sn, "STARTUP");
    if (!checkAlreadySameHostPortAndRecordNewServer(
        sn, ServerLoad.EMPTY_SERVERLOAD)) {
      // Registration should only fail if a newer instance on the same
      // host:port already registered; flag loudly because it is unexpected.
      LOG.warn("THIS SHOULD NOT HAPPEN, RegionServerStartup"
        + " could not record the server: " + sn);
    }
    return sn;
  }
222
223
224
225
226
227
228 private void updateLastFlushedSequenceIds(ServerName sn, ServerLoad hsl) {
229 Map<byte[], RegionLoad> regionsLoad = hsl.getRegionsLoad();
230 for (Entry<byte[], RegionLoad> entry : regionsLoad.entrySet()) {
231 Long existingValue = flushedSequenceIdByRegion.get(entry.getKey());
232 long l = entry.getValue().getCompleteSequenceId();
233 if (existingValue != null) {
234 if (l != -1 && l < existingValue) {
235 if (LOG.isDebugEnabled()) {
236 LOG.debug("RegionServer " + sn +
237 " indicates a last flushed sequence id (" + entry.getValue() +
238 ") that is less than the previous last flushed sequence id (" +
239 existingValue + ") for region " +
240 Bytes.toString(entry.getKey()) + " Ignoring.");
241 }
242 continue;
243
244 }
245 }
246 flushedSequenceIdByRegion.put(entry.getKey(), l);
247 }
248 }
249
250 void regionServerReport(ServerName sn,
251 ServerLoad sl) throws YouAreDeadException {
252 checkIsDead(sn, "REPORT");
253 if (!this.onlineServers.containsKey(sn)) {
254
255
256
257
258
259
260 if (!checkAlreadySameHostPortAndRecordNewServer(sn, sl)) {
261 LOG.info("RegionServerReport ignored, could not record the sever: " + sn);
262 return;
263 }
264 } else {
265 this.onlineServers.put(sn, sl);
266 }
267 updateLastFlushedSequenceIds(sn, sl);
268 }
269
270
271
272
273
274
275
276
277
278 boolean checkAlreadySameHostPortAndRecordNewServer(
279 final ServerName serverName, final ServerLoad sl) {
280 ServerName existingServer = findServerWithSameHostnamePort(serverName);
281 if (existingServer != null) {
282 if (existingServer.getStartcode() > serverName.getStartcode()) {
283 LOG.info("Server serverName=" + serverName +
284 " rejected; we already have " + existingServer.toString() +
285 " registered with same hostname and port");
286 return false;
287 }
288 LOG.info("Triggering server recovery; existingServer " +
289 existingServer + " looks stale, new server:" + serverName);
290 expireServer(existingServer);
291 }
292 recordNewServer(serverName, sl);
293 return true;
294 }
295
296
297
298
299
300
301
302
303
304 private void checkClockSkew(final ServerName serverName, final long serverCurrentTime)
305 throws ClockOutOfSyncException {
306 long skew = System.currentTimeMillis() - serverCurrentTime;
307 if (skew > maxSkew) {
308 String message = "Server " + serverName + " has been " +
309 "rejected; Reported time is too far out of sync with master. " +
310 "Time difference of " + skew + "ms > max allowed of " + maxSkew + "ms";
311 LOG.warn(message);
312 throw new ClockOutOfSyncException(message);
313 } else if (skew > warningSkew){
314 String message = "Reported time for server " + serverName + " is out of sync with master " +
315 "by " + skew + "ms. (Warning threshold is " + warningSkew + "ms; " +
316 "error threshold is " + maxSkew + "ms)";
317 LOG.warn(message);
318 }
319 }
320
321
322
323
324
325
326
327
328
  /**
   * Called when a region server first announces itself (startup) or reports.
   * Throws if the exact server instance is currently being processed as dead;
   * otherwise clears any stale dead-list entry for an older instance on the
   * same host:port that has now come back up.
   * @param serverName the server to check
   * @param what context tag for log messages ("STARTUP" or "REPORT")
   * @throws YouAreDeadException if this server is on the dead list
   */
  private void checkIsDead(final ServerName serverName, final String what)
      throws YouAreDeadException {
    if (this.deadservers.isDeadServer(serverName)) {
      // Exact instance (same start code) is still being recovered; the
      // server must not be allowed back until that finishes.
      String message = "Server " + what + " rejected; currently processing " +
          serverName + " as dead server";
      LOG.debug(message);
      throw new YouAreDeadException(message);
    }
    // Only clean the previous instance off the dead list once the master is
    // initialized (services == null only in tests).
    // NOTE(review): the cast assumes services is always an HMaster here —
    // confirm against callers.
    if ((this.services == null || ((HMaster) this.services).isInitialized())
        && this.deadservers.cleanPreviousInstance(serverName)) {
      // A new instance of a previously-dead host:port came back up.
      LOG.debug(what + ":" + " Server " + serverName + " came back up," +
          " removed it from the dead servers list");
    }
  }
349
350
351
352
353 private ServerName findServerWithSameHostnamePort(
354 final ServerName serverName) {
355 for (ServerName sn: getOnlineServersList()) {
356 if (ServerName.isSameHostnameAndPort(serverName, sn)) return sn;
357 }
358 return null;
359 }
360
361
362
363
364
365
  /**
   * Adds the server to the online-servers map with the given load, and drops
   * any cached admin RPC stub so a fresh connection is made on next use.
   * @param serverName the server to register
   * @param sl the server's initial load
   */
  void recordNewServer(final ServerName serverName, final ServerLoad sl) {
    LOG.info("Registering server=" + serverName);
    this.onlineServers.put(serverName, sl);
    // Invalidate any stale admin connection to an earlier instance.
    this.rsAdmins.remove(serverName);
  }
371
372 public long getLastFlushedSequenceId(byte[] regionName) {
373 long seqId = -1;
374 if (flushedSequenceIdByRegion.containsKey(regionName)) {
375 seqId = flushedSequenceIdByRegion.get(regionName);
376 }
377 return seqId;
378 }
379
380
381
382
383
  /**
   * @param serverName the server to look up
   * @return the server's last reported load, or null if it is not online.
   */
  public ServerLoad getLoad(final ServerName serverName) {
    return this.onlineServers.get(serverName);
  }
387
388
389
390
391
392
393
394 public double getAverageLoad() {
395 int totalLoad = 0;
396 int numServers = 0;
397 double averageLoad;
398 for (ServerLoad sl: this.onlineServers.values()) {
399 numServers++;
400 totalLoad += sl.getNumberOfRegions();
401 }
402 averageLoad = (double)totalLoad / (double)numServers;
403 return averageLoad;
404 }
405
406
  /** @return the number of currently online region servers. */
  int countOfRegionServers() {
    // Size of the concurrent map is a point-in-time snapshot.
    return this.onlineServers.size();
  }
411
412
413
414
  /**
   * @return an unmodifiable view of the online servers map (live view: it
   *         reflects subsequent additions/removals).
   */
  public Map<ServerName, ServerLoad> getOnlineServers() {
    // Synchronizes on the map to pair with the wait/notify protocol used in
    // letRegionServersShutdown() and expireServer().
    synchronized (this.onlineServers) {
      return Collections.unmodifiableMap(this.onlineServers);
    }
  }
421
422
  /** @return the (mutable, shared) dead-servers tracker. */
  public DeadServer getDeadServers() {
    return this.deadservers;
  }
426
427
428
429
430
  /**
   * @return true if dead-server processing (log splitting, region
   *         reassignment) is still in progress for some server.
   */
  public boolean areDeadServersInProgress() {
    return this.deadservers.areDeadServersInProgress();
  }
434
  /**
   * Blocks until all region servers have checked out (the online-servers map
   * is empty), logging the remaining servers at most once per second.
   * Woken up early via notifyAll() in expireServer() as servers disappear.
   */
  void letRegionServersShutdown() {
    long previousLogTime = 0;
    while (!onlineServers.isEmpty()) {
      // Throttle logging to roughly once per second.
      if (System.currentTimeMillis() > (previousLogTime + 1000)) {
        StringBuilder sb = new StringBuilder();
        for (ServerName key : this.onlineServers.keySet()) {
          if (sb.length() > 0) {
            sb.append(", ");
          }
          sb.append(key);
        }
        LOG.info("Waiting on regionserver(s) to go down " + sb.toString());
        previousLogTime = System.currentTimeMillis();
      }

      synchronized (onlineServers) {
        try {
          // Short bounded wait; expireServer() notifies when a server leaves.
          onlineServers.wait(100);
        } catch (InterruptedException ignored) {
          // Deliberately ignored: shutdown must keep waiting for the servers.
          // NOTE(review): the interrupt status is not restored here — confirm
          // callers do not rely on it.
        }
      }
    }
  }
460
461
462
463
464
  /**
   * Expires the given server: records it as dead, removes it from the online
   * set, and submits a (Meta)ServerShutdownHandler to recover its regions.
   * If the shutdown handler is not yet enabled (master still initializing),
   * the expiration is queued and replayed later by processQueuedDeadServers().
   * During cluster shutdown no handler is submitted; the master is stopped
   * once the last server expires.
   * @param serverName the server to expire
   */
  public synchronized void expireServer(final ServerName serverName) {
    if (!services.isServerShutdownHandlerEnabled()) {
      LOG.info("Master doesn't enable ServerShutdownHandler during initialization, "
        + "delay expiring server " + serverName);
      // Defer: replayed by processQueuedDeadServers() once handling is enabled.
      this.queuedDeadServers.add(serverName);
      return;
    }
    if (!this.onlineServers.containsKey(serverName)) {
      // Not fatal: expiration of an unknown server is still processed below.
      LOG.warn("Received expiration of " + serverName +
        " but server is not currently online");
    }
    if (this.deadservers.isDeadServer(serverName)) {
      // Already being handled; avoid submitting a second shutdown handler.
      LOG.warn("Received expiration of " + serverName +
          " but server shutdown is already in progress");
      return;
    }
    // Order matters: add to the dead list BEFORE removing from online, so the
    // server is never simultaneously absent from both views.
    this.deadservers.add(serverName);
    this.onlineServers.remove(serverName);
    synchronized (onlineServers) {
      // Wake letRegionServersShutdown(), which waits on this monitor.
      onlineServers.notifyAll();
    }
    this.rsAdmins.remove(serverName);
    // During cluster shutdown, regions are not reassigned; just stop the
    // master when the last server is gone.
    if (this.clusterShutdown) {
      LOG.info("Cluster shutdown set; " + serverName +
        " expired; onlineServers=" + this.onlineServers.size());
      if (this.onlineServers.isEmpty()) {
        master.stop("Cluster shutdown set; onlineServer=0");
      }
      return;
    }

    // A server carrying hbase:meta needs the specialized handler so meta is
    // recovered before user regions.
    boolean carryingMeta = services.getAssignmentManager().isCarryingMeta(serverName);
    if (carryingMeta) {
      this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
        this.services, this.deadservers, serverName));
    } else {
      this.services.getExecutorService().submit(new ServerShutdownHandler(this.master,
        this.services, this.deadservers, serverName, true));
    }
    LOG.debug("Added=" + serverName +
      " to dead servers, submitted shutdown handler to be executed meta=" + carryingMeta);
  }
513
  /**
   * Processes a dead server without splitting its HLog.
   * @param serverName the dead server
   */
  public synchronized void processDeadServer(final ServerName serverName) {
    this.processDeadServer(serverName, false);
  }
517
  /**
   * Processes a dead server: records it on the dead list and submits a
   * ServerShutdownHandler. If the assignment manager has not yet finished
   * failover cleanup, the request is parked in requeuedDeadServers and
   * replayed later by processQueuedDeadServers().
   * @param serverName the dead server
   * @param shouldSplitHlog whether the handler should split the server's HLog
   */
  public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitHlog) {
    // Defer until failover cleanup completes; otherwise the shutdown handler
    // could race with region-state reconstruction.
    if (!services.getAssignmentManager().isFailoverCleanupDone()) {
      requeuedDeadServers.put(serverName, shouldSplitHlog);
      return;
    }

    this.deadservers.add(serverName);
    this.services.getExecutorService().submit(
      new ServerShutdownHandler(this.master, this.services, this.deadservers, serverName,
          shouldSplitHlog));
  }
537
538
539
540
541
542 synchronized void processQueuedDeadServers() {
543 if (!services.isServerShutdownHandlerEnabled()) {
544 LOG.info("Master hasn't enabled ServerShutdownHandler");
545 }
546 Iterator<ServerName> serverIterator = queuedDeadServers.iterator();
547 while (serverIterator.hasNext()) {
548 ServerName tmpServerName = serverIterator.next();
549 expireServer(tmpServerName);
550 serverIterator.remove();
551 requeuedDeadServers.remove(tmpServerName);
552 }
553
554 if (!services.getAssignmentManager().isFailoverCleanupDone()) {
555 LOG.info("AssignmentManager hasn't finished failover cleanup");
556 }
557
558 for(ServerName tmpServerName : requeuedDeadServers.keySet()){
559 processDeadServer(tmpServerName, requeuedDeadServers.get(tmpServerName));
560 }
561 requeuedDeadServers.clear();
562 }
563
564
565
566
567 public boolean removeServerFromDrainList(final ServerName sn) {
568
569
570
571 if (!this.isServerOnline(sn)) {
572 LOG.warn("Server " + sn + " is not currently online. " +
573 "Removing from draining list anyway, as requested.");
574 }
575
576 return this.drainingServers.remove(sn);
577 }
578
579
580
581
582 public boolean addServerToDrainList(final ServerName sn) {
583
584
585
586 if (!this.isServerOnline(sn)) {
587 LOG.warn("Server " + sn + " is not currently online. " +
588 "Ignoring request to add it to draining list.");
589 return false;
590 }
591
592
593 if (this.drainingServers.contains(sn)) {
594 LOG.warn("Server " + sn + " is already in the draining server list." +
595 "Ignoring request to add it again.");
596 return false;
597 }
598 return this.drainingServers.add(sn);
599 }
600
601
602
603
604
605
606
607
608
609
610
611
612
613
  /**
   * Sends an OPEN RPC for a single region to the given server.
   * @param server the destination region server
   * @param region the region to open
   * @param versionOfOfflineNode ZK znode version of the region's offline node
   * @param favoredNodes favored-nodes hint for the region, may be null
   * @return the server's reported opening state, or FAILED_OPENING if no RPC
   *         connection to the server could be obtained
   * @throws IOException if the RPC fails
   */
  public RegionOpeningState sendRegionOpen(final ServerName server,
      HRegionInfo region, int versionOfOfflineNode, List<ServerName> favoredNodes)
  throws IOException {
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
      // Treat a missing connection as a failed open so the caller can retry
      // or reassign elsewhere.
      LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
        " failed because no RPC connection found to this server");
      return RegionOpeningState.FAILED_OPENING;
    }
    OpenRegionRequest request =
      RequestConverter.buildOpenRegionRequest(region, versionOfOfflineNode, favoredNodes);
    try {
      OpenRegionResponse response = admin.openRegion(null, request);
      return ResponseConverter.getRegionOpeningState(response);
    } catch (ServiceException se) {
      // Unwrap the protobuf layer and rethrow the underlying remote exception.
      throw ProtobufUtil.getRemoteException(se);
    }
  }
632
633
634
635
636
637
638
639
640
641
  /**
   * Sends a single OPEN RPC for a batch of regions to the given server.
   * @param server the destination region server
   * @param regionOpenInfos per-region (region, znode version, favored nodes)
   * @return the per-region opening states, or null if no RPC connection to
   *         the server could be obtained
   * @throws IOException if the RPC fails
   */
  public List<RegionOpeningState> sendRegionOpen(ServerName server,
      List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos)
  throws IOException {
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
      // No connection: signal failure to the caller with null.
      LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
        " failed because no RPC connection found to this server");
      return null;
    }

    OpenRegionRequest request =
      RequestConverter.buildOpenRegionRequest(regionOpenInfos);
    try {
      OpenRegionResponse response = admin.openRegion(null, request);
      return ResponseConverter.getRegionOpeningStateList(response);
    } catch (ServiceException se) {
      // Unwrap the protobuf layer and rethrow the underlying remote exception.
      throw ProtobufUtil.getRemoteException(se);
    }
  }
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
  /**
   * Sends a CLOSE RPC for a region to the given server.
   * @param server the server currently hosting the region; must not be null
   * @param region the region to close
   * @param versionOfClosingNode ZK znode version for the closing node
   * @param dest the intended destination server, may be null
   * @param transitionInZK whether the close is tracked through ZooKeeper
   * @return whatever ProtobufUtil.closeRegion reports
   * @throws NullPointerException if server is null
   * @throws IOException if no RPC connection to the server exists or the RPC fails
   */
  public boolean sendRegionClose(ServerName server, HRegionInfo region,
      int versionOfClosingNode, ServerName dest, boolean transitionInZK) throws IOException {
    if (server == null) throw new NullPointerException("Passed server is null");
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
      // Unlike sendRegionOpen, a missing connection is an error here.
      throw new IOException("Attempting to send CLOSE RPC to server " +
        server.toString() + " for region " +
        region.getRegionNameAsString() +
        " failed because no RPC connection found to this server");
    }
    return ProtobufUtil.closeRegion(admin, region.getRegionName(),
      versionOfClosingNode, dest, transitionInZK);
  }
689
690 public boolean sendRegionClose(ServerName server,
691 HRegionInfo region, int versionOfClosingNode) throws IOException {
692 return sendRegionClose(server, region, versionOfClosingNode, null, true);
693 }
694
695
696
697
698
699
700
701
702
703
704
705
706
707
  /**
   * Sends a MERGE REGIONS RPC for two regions to the given server.
   * @param server the server hosting both regions; must not be null
   * @param region_a first region to merge; must not be null
   * @param region_b second region to merge; must not be null
   * @param forcible whether to force a merge of non-adjacent regions
   * @throws NullPointerException if the server or either region is null
   * @throws IOException if no RPC connection to the server exists or the RPC fails
   */
  public void sendRegionsMerge(ServerName server, HRegionInfo region_a,
      HRegionInfo region_b, boolean forcible) throws IOException {
    if (server == null)
      throw new NullPointerException("Passed server is null");
    if (region_a == null || region_b == null)
      throw new NullPointerException("Passed region is null");
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
      throw new IOException("Attempting to send MERGE REGIONS RPC to server "
          + server.toString() + " for region "
          + region_a.getRegionNameAsString() + ","
          + region_b.getRegionNameAsString()
          + " failed because no RPC connection found to this server");
    }
    ProtobufUtil.mergeRegions(admin, region_a, region_b, forcible);
  }
724
725
726
727
728
729
730
  /**
   * Returns a (cached) admin RPC stub for the given server, creating and
   * caching one on first use.
   * NOTE(review): rsAdmins is a plain HashMap and this method is not
   * synchronized — confirm callers always hold the ServerManager monitor or
   * are otherwise single-threaded.
   * @param sn the server to talk to
   * @return the admin interface, possibly null if the connection returns none
   * @throws IOException if creating the connection fails
   */
  private AdminService.BlockingInterface getRsAdmin(final ServerName sn)
  throws IOException {
    AdminService.BlockingInterface admin = this.rsAdmins.get(sn);
    if (admin == null) {
      LOG.debug("New admin connection to " + sn.toString());
      admin = this.connection.getAdmin(sn);
      this.rsAdmins.put(sn, admin);
    }
    return admin;
  }
741
742
743
744
745
746
747
748
749
750
751
752
753
754
  /**
   * Waits on master startup for the region server count to "settle".
   * The wait ends when any of these holds:
   *   - the master is stopped;
   *   - maxToStart servers have checked in;
   *   - at least minToStart servers are in, the timeout has elapsed, AND the
   *     count has been stable for at least one interval.
   * Bounds come from the hbase.master.wait.on.regionservers.* configuration;
   * minToStart is clamped to >= 1 and maxToStart to >= minToStart.
   * @param status monitored task updated with progress messages
   * @throws InterruptedException if the waiting thread is interrupted
   */
  public void waitForRegionServers(MonitoredTask status)
  throws InterruptedException {
    final long interval = this.master.getConfiguration().
      getLong(WAIT_ON_REGIONSERVERS_INTERVAL, 1500);
    final long timeout = this.master.getConfiguration().
      getLong(WAIT_ON_REGIONSERVERS_TIMEOUT, 4500);
    int minToStart = this.master.getConfiguration().
      getInt(WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
    if (minToStart < 1) {
      // A master cannot proceed with zero region servers.
      LOG.warn(String.format(
        "The value of '%s' (%d) can not be less than 1, ignoring.",
        WAIT_ON_REGIONSERVERS_MINTOSTART, minToStart));
      minToStart = 1;
    }
    int maxToStart = this.master.getConfiguration().
      getInt(WAIT_ON_REGIONSERVERS_MAXTOSTART, Integer.MAX_VALUE);
    if (maxToStart < minToStart) {
      // Inconsistent configuration: fall back to "no upper bound".
      LOG.warn(String.format(
        "The value of '%s' (%d) is set less than '%s' (%d), ignoring.",
        WAIT_ON_REGIONSERVERS_MAXTOSTART, maxToStart,
        WAIT_ON_REGIONSERVERS_MINTOSTART, minToStart));
      maxToStart = Integer.MAX_VALUE;
    }

    long now = System.currentTimeMillis();
    final long startTime = now;
    long slept = 0;
    long lastLogTime = 0;
    // Tracks when the server count last changed, for the stability check.
    long lastCountChange = startTime;
    int count = countOfRegionServers();
    int oldCount = 0;
    while (
      !this.master.isStopped() &&
        count < maxToStart &&
        (lastCountChange+interval > now || timeout > slept || count < minToStart)
      ){
      // Log when the count changes, or at most once per interval otherwise.
      if (oldCount != count || lastLogTime+interval < now){
        lastLogTime = now;
        String msg =
          "Waiting for region servers count to settle; currently"+
            " checked in " + count + ", slept for " + slept + " ms," +
            " expecting minimum of " + minToStart + ", maximum of "+ maxToStart+
            ", timeout of "+timeout+" ms, interval of "+interval+" ms.";
        LOG.info(msg);
        status.setStatus(msg);
      }

      // Short sleep so the loop reacts quickly to new check-ins.
      final long sleepTime = 50;
      Thread.sleep(sleepTime);
      now = System.currentTimeMillis();
      slept = now - startTime;

      oldCount = count;
      count = countOfRegionServers();
      if (count != oldCount) {
        lastCountChange = now;
      }
    }

    LOG.info("Finished waiting for region servers count to settle;" +
      " checked in " + count + ", slept for " + slept + " ms," +
      " expecting minimum of " + minToStart + ", maximum of "+ maxToStart+","+
      " master is "+ (this.master.isStopped() ? "stopped.": "running.")
    );
  }
823
824
825
826
827 public List<ServerName> getOnlineServersList() {
828
829
830 return new ArrayList<ServerName>(this.onlineServers.keySet());
831 }
832
833
834
835
  /** @return a snapshot copy of the draining-servers list. */
  public List<ServerName> getDrainingServersList() {
    return new ArrayList<ServerName>(this.drainingServers);
  }
839
840
841
842
  /**
   * @return a snapshot copy of the servers whose expiration was queued while
   *         the ServerShutdownHandler was disabled.
   */
  Set<ServerName> getDeadNotExpiredServers() {
    return new HashSet<ServerName>(this.queuedDeadServers);
  }
846
847
848
849
850
  /**
   * @return an unmodifiable view of servers requeued while failover cleanup
   *         was pending, mapped to their should-split-HLog flag.
   */
  Map<ServerName, Boolean> getRequeuedDeadServers() {
    return Collections.unmodifiableMap(this.requeuedDeadServers);
  }
854
855 public boolean isServerOnline(ServerName serverName) {
856 return serverName != null && onlineServers.containsKey(serverName);
857 }
858
859
860
861
862
863
864
  /**
   * Checks whether a server is dead in any sense: on the dead list, queued
   * for delayed expiration, or requeued pending failover cleanup. A null
   * server is treated as dead.
   * @param serverName the server to check, may be null
   * @return true if the server should be treated as dead
   */
  public synchronized boolean isServerDead(ServerName serverName) {
    return serverName == null || deadservers.isDeadServer(serverName)
      || queuedDeadServers.contains(serverName)
      || requeuedDeadServers.containsKey(serverName);
  }
870
  /**
   * Flags the cluster as shutting down and stops this master. The flag makes
   * expireServer() skip region recovery for servers that go down afterwards.
   */
  public void shutdownCluster() {
    this.clusterShutdown = true;
    this.master.stop("Cluster shutdown requested");
  }
875
  /** @return true once a cluster shutdown has been requested. */
  public boolean isClusterShutdown() {
    return this.clusterShutdown;
  }
879
880
881
882
  /**
   * Stops this manager, closing the cluster connection if one was opened
   * (the no-connect test constructor leaves it null).
   */
  public void stop() {
    if (connection != null) {
      try {
        connection.close();
      } catch (IOException e) {
        // Best-effort close; log and continue shutdown.
        LOG.error("Attempt to close connection to master failed", e);
      }
    }
  }
892
893
894
895
896
897
898 public List<ServerName> createDestinationServersList(final ServerName serverToExclude){
899 final List<ServerName> destServers = getOnlineServersList();
900
901 if (serverToExclude != null){
902 destServers.remove(serverToExclude);
903 }
904
905
906 final List<ServerName> drainingServersCopy = getDrainingServersList();
907 if (!drainingServersCopy.isEmpty()) {
908 for (final ServerName server: drainingServersCopy) {
909 destServers.remove(server);
910 }
911 }
912
913
914 removeDeadNotExpiredServers(destServers);
915
916 return destServers;
917 }
918
919
920
921
  /**
   * Builds the list of assignment-eligible servers with no exclusion.
   * @see #createDestinationServersList(ServerName)
   */
  public List<ServerName> createDestinationServersList(){
    return createDestinationServersList(null);
  }
925
926
927
928
929
930
931
932 void removeDeadNotExpiredServers(List<ServerName> servers) {
933 Set<ServerName> deadNotExpiredServersCopy = this.getDeadNotExpiredServers();
934 if (!deadNotExpiredServersCopy.isEmpty()) {
935 for (ServerName server : deadNotExpiredServersCopy) {
936 LOG.debug("Removing dead but not expired server: " + server
937 + " from eligible server pool.");
938 servers.remove(server);
939 }
940 }
941 }
942
943
944
945
946 void clearDeadServersWithSameHostNameAndPortOfOnlineServer() {
947 for (ServerName serverName : getOnlineServersList()) {
948 deadservers.cleanAllPreviousInstances(serverName);
949 }
950 }
951 }