/*
 * Fast Weighted Round Robin load balancing algorithm.
 *
 * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/compat.h>
#include <common/config.h>
#include <common/debug.h>
#include <eb32tree.h>

#include <types/global.h>
#include <types/server.h>

#include <proto/backend.h>
#include <proto/queue.h>

static inline void fwrr_remove_from_tree(struct server *s);
static inline void fwrr_queue_by_weight(struct eb_root *root, struct server *s);
static inline void fwrr_dequeue_srv(struct server *s);
static void fwrr_get_srv(struct server *s);
static void fwrr_queue_srv(struct server *s);


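/* Editor's note (derived from the code below, not part of the original
 * sources): each proxy keeps two fwrr_group structures, one for active
 * servers and one for backups. A group holds three ebtrees: "init" receives
 * servers sorted by inverted weight when they (re)enter the farm, "curr"
 * holds the servers currently being walked, sorted by their next position
 * <npos>, and "next" collects servers whose next position falls beyond the
 * current window. Positions advance so that a server of eweight <w> is
 * visited roughly <w> times per window of <next_weight> positions. As an
 * illustration, with two active servers of weights 2 and 1, a window of 3
 * picks should contain the first server twice and the second once,
 * interleaved rather than grouped.
 */
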
/* This function updates the server trees according to server <srv>'s new
 * state. It should be called when server <srv>'s status changes to down.
 * It is not important whether the server was already down or not. It is not
 * important either that the new state is completely down (the caller may not
 * know all the variables of a server's state).
 */
static void fwrr_set_server_status_down(struct server *srv)
{
	struct proxy *p = srv->proxy;
	struct fwrr_group *grp;

	if (!srv_lb_status_changed(srv))
		return;

	if (srv_willbe_usable(srv))
		goto out_update_state;

	if (!srv_currently_usable(srv))
		/* server was already down */
		goto out_update_backend;

	grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
	grp->next_weight -= srv->cur_eweight;

	if (srv->flags & SRV_F_BACKUP) {
		p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;
		p->srv_bck--;

		if (srv == p->lbprm.fbck) {
			/* we lost the first backup server in a single-backup
			 * configuration, we must search another one.
			 */
			struct server *srv2 = p->lbprm.fbck;
			do {
				srv2 = srv2->next;
			} while (srv2 &&
				 !((srv2->flags & SRV_F_BACKUP) &&
				   srv_willbe_usable(srv2)));
			p->lbprm.fbck = srv2;
		}
	} else {
		p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
		p->srv_act--;
	}

	fwrr_dequeue_srv(srv);
	fwrr_remove_from_tree(srv);

out_update_backend:
	/* check/update tot_used, tot_weight */
	update_backend_weight(p);
 out_update_state:
	srv_lb_commit_status(srv);
}

/* This function updates the server trees according to server <srv>'s new
 * state. It should be called when server <srv>'s status changes to up.
 * It is not important whether the server was already down or not. It is not
 * important either that the new state is completely UP (the caller may not
 * know all the variables of a server's state). This function will not change
 * the weight of a server which was already up.
 */
static void fwrr_set_server_status_up(struct server *srv)
{
	struct proxy *p = srv->proxy;
	struct fwrr_group *grp;

	if (!srv_lb_status_changed(srv))
		return;

	if (!srv_willbe_usable(srv))
		goto out_update_state;

	if (srv_currently_usable(srv))
		/* server was already up */
		goto out_update_backend;

	grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
	grp->next_weight += srv->next_eweight;

	if (srv->flags & SRV_F_BACKUP) {
		p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;
		p->srv_bck++;

		if (!(p->options & PR_O_USE_ALL_BK)) {
			if (!p->lbprm.fbck) {
				/* there was no backup server anymore */
				p->lbprm.fbck = srv;
			} else {
				/* we may have restored a backup server prior to fbck,
				 * in which case it should replace it.
				 */
				struct server *srv2 = srv;
				do {
					srv2 = srv2->next;
				} while (srv2 && (srv2 != p->lbprm.fbck));
				if (srv2)
					p->lbprm.fbck = srv;
			}
		}
	} else {
		p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
		p->srv_act++;
	}

	/* note that eweight cannot be 0 here */
	fwrr_get_srv(srv);
	srv->npos = grp->curr_pos + (grp->next_weight + grp->curr_weight - grp->curr_pos) / srv->next_eweight;
	fwrr_queue_srv(srv);

out_update_backend:
	/* check/update tot_used, tot_weight */
	update_backend_weight(p);
 out_update_state:
	srv_lb_commit_status(srv);
}

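/* Editor's note on the insertion position used above: a server which just
 * became usable is not scheduled at <curr_pos> but one "fair interval"
 * ahead of it, the interval being computed over what remains of the current
 * window plus the next one. Purely illustrative figures: with curr_pos=4,
 * curr_weight=next_weight=6 and next_eweight=2, npos = 4 + (6 + 6 - 4) / 2 = 8,
 * so the returning server's first pick is deferred by a few positions
 * instead of happening immediately.
 */
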
/* This function must be called after an update to server <srv>'s effective
 * weight. It may be called after a state change too.
 */
static void fwrr_update_server_weight(struct server *srv)
{
	int old_state, new_state;
	struct proxy *p = srv->proxy;
	struct fwrr_group *grp;

	if (!srv_lb_status_changed(srv))
		return;

	/* If changing the server's weight changes its state, we simply apply
	 * the procedures we already have for status change. If the state
	 * remains down, the server is not in any tree, so it's as easy as
	 * updating its values. If the state remains up with different weights,
	 * there are some computations to perform to find a new place and
	 * possibly a new tree for this server.
	 */

	old_state = srv_currently_usable(srv);
	new_state = srv_willbe_usable(srv);

	if (!old_state && !new_state) {
		srv_lb_commit_status(srv);
		return;
	}
	else if (!old_state && new_state) {
		fwrr_set_server_status_up(srv);
		return;
	}
	else if (old_state && !new_state) {
		fwrr_set_server_status_down(srv);
		return;
	}

	grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
	grp->next_weight = grp->next_weight - srv->cur_eweight + srv->next_eweight;

	p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
	p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;

	if (srv->lb_tree == grp->init) {
		fwrr_dequeue_srv(srv);
		fwrr_queue_by_weight(grp->init, srv);
	}
	else if (!srv->lb_tree) {
		/* FIXME: server was down. This is not possible right now but
		 * may be needed soon for slowstart or graceful shutdown.
		 */
		fwrr_dequeue_srv(srv);
		fwrr_get_srv(srv);
		srv->npos = grp->curr_pos + (grp->next_weight + grp->curr_weight - grp->curr_pos) / srv->next_eweight;
		fwrr_queue_srv(srv);
	} else {
		/* The server is either active or in the next queue. If it's
		 * still in the active queue and it has not consumed all of its
		 * places, let's adjust its next position.
		 */
		fwrr_get_srv(srv);

		if (srv->next_eweight > 0) {
			int prev_next = srv->npos;
			int step = grp->next_weight / srv->next_eweight;

			srv->npos = srv->lpos + step;
			srv->rweight = 0;

			if (srv->npos > prev_next)
				srv->npos = prev_next;
			if (srv->npos < grp->curr_pos + 2)
				srv->npos = grp->curr_pos + step;
		} else {
			/* push it into the next tree */
			srv->npos = grp->curr_pos + grp->curr_weight;
		}

		fwrr_dequeue_srv(srv);
		fwrr_queue_srv(srv);
	}

	update_backend_weight(p);
	srv_lb_commit_status(srv);
}

/* Remove a server from a tree. It must have previously been dequeued. This
 * function is meant to be called when a server is going down or has its
 * weight disabled.
 */
static inline void fwrr_remove_from_tree(struct server *s)
{
	s->lb_tree = NULL;
}

/* Queue a server in the weight tree <root>, assuming the weight is >0.
 * We want to sort them by inverted weights, because we need to place
 * heavy servers first in order to get a smooth distribution.
 */
static inline void fwrr_queue_by_weight(struct eb_root *root, struct server *s)
{
	s->lb_node.key = SRV_EWGHT_MAX - s->next_eweight;
	eb32_insert(root, &s->lb_node);
	s->lb_tree = root;
}

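/* Editor's note: because the key is inverted (SRV_EWGHT_MAX - eweight), a
 * heavier server gets a smaller key and is returned first by eb32_first().
 * For instance a server with eweight 300 sorts before one with eweight 100,
 * which is what lets each new window start with the heaviest servers and
 * produce a smoother distribution.
 */
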
/* This function is responsible for building the weight trees in case of fast
 * weighted round-robin. It also sets p->lbprm.wdiv to the eweight to uweight
 * ratio. Both active and backup groups are initialized.
 */
void fwrr_init_server_groups(struct proxy *p)
{
	struct server *srv;
	struct eb_root init_head = EB_ROOT;

	p->lbprm.set_server_status_up = fwrr_set_server_status_up;
	p->lbprm.set_server_status_down = fwrr_set_server_status_down;
	p->lbprm.update_server_eweight = fwrr_update_server_weight;

	p->lbprm.wdiv = BE_WEIGHT_SCALE;
	for (srv = p->srv; srv; srv = srv->next) {
		srv->next_eweight = (srv->uweight * p->lbprm.wdiv + p->lbprm.wmult - 1) / p->lbprm.wmult;
		srv_lb_commit_status(srv);
	}

	recount_servers(p);
	update_backend_weight(p);

	/* prepare the active servers group */
	p->lbprm.fwrr.act.curr_pos = p->lbprm.fwrr.act.curr_weight =
		p->lbprm.fwrr.act.next_weight = p->lbprm.tot_wact;
	p->lbprm.fwrr.act.curr = p->lbprm.fwrr.act.t0 =
		p->lbprm.fwrr.act.t1 = init_head;
	p->lbprm.fwrr.act.init = &p->lbprm.fwrr.act.t0;
	p->lbprm.fwrr.act.next = &p->lbprm.fwrr.act.t1;

	/* prepare the backup servers group */
	p->lbprm.fwrr.bck.curr_pos = p->lbprm.fwrr.bck.curr_weight =
		p->lbprm.fwrr.bck.next_weight = p->lbprm.tot_wbck;
	p->lbprm.fwrr.bck.curr = p->lbprm.fwrr.bck.t0 =
		p->lbprm.fwrr.bck.t1 = init_head;
	p->lbprm.fwrr.bck.init = &p->lbprm.fwrr.bck.t0;
	p->lbprm.fwrr.bck.next = &p->lbprm.fwrr.bck.t1;

	/* queue active and backup servers in two distinct groups */
	for (srv = p->srv; srv; srv = srv->next) {
		if (!srv_currently_usable(srv))
			continue;
		fwrr_queue_by_weight((srv->flags & SRV_F_BACKUP) ?
				p->lbprm.fwrr.bck.init :
				p->lbprm.fwrr.act.init,
				srv);
	}
}

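/* Editor's note on the eweight computation above: user weights are
 * multiplied by p->lbprm.wdiv (BE_WEIGHT_SCALE) and divided by
 * p->lbprm.wmult with upward rounding, giving the scheduler a finer
 * granularity than the configured weights. Illustrative figures only,
 * assuming wmult is 1 and BE_WEIGHT_SCALE is 16: a configured weight of 10
 * yields an eweight of 160.
 */
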
/* simply removes a server from a weight tree */
static inline void fwrr_dequeue_srv(struct server *s)
{
	eb32_delete(&s->lb_node);
}

/* queues a server into the appropriate group and tree depending on its
 * backup status, and ->npos. If the server is disabled, simply assign
 * it to the NULL tree.
 */
static void fwrr_queue_srv(struct server *s)
{
	struct proxy *p = s->proxy;
	struct fwrr_group *grp;

	grp = (s->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;

	/* Delay everything which does not fit into the window and everything
	 * which does not fit into the theoretical new window.
	 */
	if (!srv_willbe_usable(s)) {
		fwrr_remove_from_tree(s);
	}
	else if (s->next_eweight <= 0 ||
		 s->npos >= 2 * grp->curr_weight ||
		 s->npos >= grp->curr_weight + grp->next_weight) {
		/* put into next tree, and readjust npos in case we could
		 * finally take this back to current. */
		HA_ATOMIC_SUB(&s->npos, grp->curr_weight);
		fwrr_queue_by_weight(grp->next, s);
	}
	else {
		/* The sorting key is stored in units of s->npos * user_weight
		 * in order to avoid overflows. As stated in backend.h, the
		 * lower the scale, the rougher the weights modulation, and the
		 * higher the scale, the lower the number of servers without
		 * overflow. With this formula, the result is always positive,
		 * so we can use eb32_insert().
		 */
		s->lb_node.key = SRV_UWGHT_RANGE * s->npos +
			(unsigned)(SRV_EWGHT_MAX + s->rweight - s->next_eweight) / BE_WEIGHT_SCALE;

		eb32_insert(&grp->curr, &s->lb_node);
		s->lb_tree = &grp->curr;
	}
}

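/* Editor's note on the key used for the "curr" tree above: the high-order
 * part sorts servers by their target position <npos>, and the low-order
 * part breaks ties between servers scheduled at the same position: a larger
 * eweight (or a smaller accumulated remainder <rweight>) yields a smaller
 * key, so heavier servers are served first within a given position.
 */
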
/* prepares a server when extracting it from the "init" tree */
static inline void fwrr_get_srv_init(struct server *s)
{
	s->npos = s->rweight = 0;
}

/* prepares a server when extracting it from the "next" tree */
static inline void fwrr_get_srv_next(struct server *s)
{
	struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ?
		&s->proxy->lbprm.fwrr.bck :
		&s->proxy->lbprm.fwrr.act;

	HA_ATOMIC_ADD(&s->npos, grp->curr_weight);
}

/* prepares a server when it was marked down */
static inline void fwrr_get_srv_down(struct server *s)
{
	struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ?
		&s->proxy->lbprm.fwrr.bck :
		&s->proxy->lbprm.fwrr.act;

	s->npos = grp->curr_pos;
}

/* prepares a server when extracting it from its tree */
static void fwrr_get_srv(struct server *s)
{
	struct proxy *p = s->proxy;
	struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ?
		&p->lbprm.fwrr.bck :
		&p->lbprm.fwrr.act;

	if (s->lb_tree == grp->init) {
		fwrr_get_srv_init(s);
	}
	else if (s->lb_tree == grp->next) {
		fwrr_get_srv_next(s);
	}
	else if (s->lb_tree == NULL) {
		fwrr_get_srv_down(s);
	}
}

/* switches trees "init" and "next" for FWRR group <grp>. "init" should be empty
 * when this happens, and "next" filled with servers sorted by weights.
 */
static inline void fwrr_switch_trees(struct fwrr_group *grp)
{
	struct eb_root *swap;
	swap = grp->init;
	grp->init = grp->next;
	grp->next = swap;
	grp->curr_weight = grp->next_weight;
	grp->curr_pos = grp->curr_weight;
}

/* return next server from the current tree in FWRR group <grp>, or a server
 * from the "init" tree if appropriate. If both trees are empty, return NULL.
 */
static struct server *fwrr_get_server_from_group(struct fwrr_group *grp)
{
	struct eb32_node *node;
	struct server *s;

	node = eb32_first(&grp->curr);
	s = eb32_entry(node, struct server, lb_node);

	if (!node || s->npos > grp->curr_pos) {
		/* either we have no server left, or we have a hole */
		struct eb32_node *node2;
		node2 = eb32_first(grp->init);
		if (node2) {
			node = node2;
			s = eb32_entry(node, struct server, lb_node);
			fwrr_get_srv_init(s);
			if (s->cur_eweight == 0) /* FIXME: is it possible at all ? */
				node = NULL;
		}
	}
	if (node)
		return s;
	else
		return NULL;
}

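/* Editor's note: a "hole" above means that the first server of the "curr"
 * tree is scheduled at a position beyond <curr_pos>, i.e. no server claims
 * the current position. In that case a server freshly taken from the "init"
 * tree (one which just became usable or whose weight just changed) is used
 * to fill the gap rather than skipping the position.
 */
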
/* Computes next position of server <s> in the group. It is mandatory for <s>
 * to have a non-zero, positive eweight.
*/
static inline void fwrr_update_position(struct fwrr_group *grp, struct server *s)
{
	if (!s->npos) {
		/* first time ever for this server */
		s->lpos = grp->curr_pos;
		s->npos = grp->curr_pos + grp->next_weight / s->cur_eweight;
		HA_ATOMIC_ADD(&s->rweight, (grp->next_weight % s->cur_eweight));

		if (s->rweight >= s->cur_eweight) {
			HA_ATOMIC_SUB(&s->rweight, s->cur_eweight);
			HA_ATOMIC_ADD(&s->npos, 1);
		}
	} else {
		s->lpos = s->npos;
		HA_ATOMIC_ADD(&s->npos, (grp->next_weight / s->cur_eweight));
		HA_ATOMIC_ADD(&s->rweight, (grp->next_weight % s->cur_eweight));

		if (s->rweight >= s->cur_eweight) {
			HA_ATOMIC_SUB(&s->rweight, s->cur_eweight);
			HA_ATOMIC_ADD(&s->npos, 1);
		}
	}
}

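/* Editor's note: the remainder accumulated in <rweight> makes positions
 * advance Bresenham-style, so picks stay evenly spread even when
 * next_weight is not a multiple of the server's eweight. Illustrative
 * figures only: with next_weight=10 and cur_eweight=3, successive calls
 * advance npos by 3, 3 then 4 (the extra step being taken when the
 * remainder reaches the eweight), i.e. three picks spread over a window of
 * 10 positions.
 */
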
/* Return next server from the current tree in backend <p>, or a server from
 * the init tree if appropriate. If both trees are empty, return NULL.
 * Saturated servers are skipped and requeued.
 */
struct server *fwrr_get_next_server(struct proxy *p, struct server *srvtoavoid)
{
	struct server *srv, *full, *avoided;
	struct fwrr_group *grp;
	int switched;

	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
	if (p->srv_act)
		grp = &p->lbprm.fwrr.act;
	else if (p->lbprm.fbck) {
		srv = p->lbprm.fbck;
		goto out;
	}
	else if (p->srv_bck)
		grp = &p->lbprm.fwrr.bck;
	else {
		srv = NULL;
		goto out;
	}

	switched = 0;
	avoided = NULL;
	full = NULL; /* NULL-terminated list of saturated servers */
	while (1) {
		/* if we see an empty group, let's first try to collect weights
		 * which might have recently changed.
		 */
		if (!grp->curr_weight)
			grp->curr_pos = grp->curr_weight = grp->next_weight;

		/* get first server from the "current" tree. When the end of
		 * the tree is reached, we may have to switch, but only once.
		 */
		while (1) {
			srv = fwrr_get_server_from_group(grp);
			if (srv)
				break;
			if (switched) {
				if (avoided) {
					srv = avoided;
					break;
				}
				goto requeue_servers;
			}
			switched = 1;
			fwrr_switch_trees(grp);
		}

		/* OK, we have a server. However, it may be saturated, in which
		 * case we don't want to reconsider it for now. We'll update
		 * its position and dequeue it anyway, so that we can move it
		 * to a better place afterwards.
		 */
		fwrr_update_position(grp, srv);
		fwrr_dequeue_srv(srv);
		grp->curr_pos++;
		if (!srv->maxconn || (!srv->nbpend && srv->served < srv_dynamic_maxconn(srv))) {
			/* make sure it is not the server we are trying to exclude... */
			if (srv != srvtoavoid || avoided)
				break;

			avoided = srv; /* ...but remember that it was selected yet avoided */
		}

		/* the server is saturated or avoided, let's chain it for later reinsertion */
		srv->next_full = full;
		full = srv;
	}

	/* OK, we got the best server, let's update it */
	fwrr_queue_srv(srv);

 requeue_servers:
	/* Requeue all extracted servers. If full==srv then it was
	 * avoided (unsuccessfully) and chained, omit it now.
	 */
	if (unlikely(full != NULL)) {
		if (switched) {
			/* the tree has switched, requeue all extracted servers
			 * into "init", because their place was lost, and only
			 * their weight matters.
			 */
			do {
				if (likely(full != srv))
					fwrr_queue_by_weight(grp->init, full);
				full = full->next_full;
			} while (full);
		} else {
			/* requeue all extracted servers just as if they were consumed
			 * so that they regain their expected place.
			 */
			do {
				if (likely(full != srv))
					fwrr_queue_srv(full);
				full = full->next_full;
			} while (full);
		}
	}
 out:
	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
	return srv;
}

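/* Editor's note on fwrr_get_next_server() above: <srvtoavoid> (typically
 * the server a request is being redispatched away from) is remembered in
 * <avoided> and only returned once both trees have been exhausted without
 * finding another usable server. Saturated servers are chained into <full>
 * and requeued before returning so that they keep their place in the
 * rotation.
 */
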
/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */