Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 1 | /* |
| 2 | * Fast Weighted Round Robin load balancing algorithm. |
| 3 | * |
| 4 | * Copyright 2000-2009 Willy Tarreau <w@1wt.eu> |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License |
| 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the License, or (at your option) any later version. |
| 10 | * |
| 11 | */ |
| 12 | |
Willy Tarreau | 4c7e4b7 | 2020-05-27 12:58:42 +0200 | [diff] [blame] | 13 | #include <haproxy/api.h> |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 14 | #include <common/debug.h> |
Willy Tarreau | 8d2b777 | 2020-05-27 10:58:19 +0200 | [diff] [blame] | 15 | #include <import/eb32tree.h> |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 16 | |
| 17 | #include <types/global.h> |
| 18 | #include <types/server.h> |
| 19 | |
| 20 | #include <proto/backend.h> |
| 21 | #include <proto/queue.h> |
| 22 | |
| 23 | static inline void fwrr_remove_from_tree(struct server *s); |
| 24 | static inline void fwrr_queue_by_weight(struct eb_root *root, struct server *s); |
| 25 | static inline void fwrr_dequeue_srv(struct server *s); |
| 26 | static void fwrr_get_srv(struct server *s); |
| 27 | static void fwrr_queue_srv(struct server *s); |
| 28 | |
| 29 | |
/* This function updates the server trees according to server <srv>'s new
 * state. It should be called when server <srv>'s status changes to down.
 * It is not important whether the server was already down or not. It is not
 * important either that the new state is completely down (the caller may not
 * know all the variables of a server's state).
 *
 * The server's lock must be held. The lbprm's lock will be used.
 */
static void fwrr_set_server_status_down(struct server *srv)
{
	struct proxy *p = srv->proxy;
	struct fwrr_group *grp;

	/* nothing to do if the LB-visible state did not change */
	if (!srv_lb_status_changed(srv))
		return;

	/* server stays usable: only the state commit is needed */
	if (srv_willbe_usable(srv))
		goto out_update_state;

	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);

	if (!srv_currently_usable(srv))
		/* server was already down */
		goto out_update_backend;

	/* select the group (backup or active) and remove this server's
	 * weight from the group's future total weight.
	 */
	grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
	grp->next_weight -= srv->cur_eweight;

	if (srv->flags & SRV_F_BACKUP) {
		p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;
		p->srv_bck--;

		if (srv == p->lbprm.fbck) {
			/* we lost the first backup server in a single-backup
			 * configuration, we must search another one.
			 */
			struct server *srv2 = p->lbprm.fbck;
			do {
				srv2 = srv2->next;
			} while (srv2 &&
				 !((srv2->flags & SRV_F_BACKUP) &&
				   srv_willbe_usable(srv2)));
			p->lbprm.fbck = srv2;
		}
	} else {
		p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
		p->srv_act--;
	}

	/* take the server out of whichever tree currently holds it */
	fwrr_dequeue_srv(srv);
	fwrr_remove_from_tree(srv);

out_update_backend:
	/* check/update tot_used, tot_weight */
	update_backend_weight(p);
	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);

out_update_state:
	srv_lb_commit_status(srv);
}
| 90 | |
/* This function updates the server trees according to server <srv>'s new
 * state. It should be called when server <srv>'s status changes to up.
 * It is not important whether the server was already down or not. It is not
 * important either that the new state is completely UP (the caller may not
 * know all the variables of a server's state). This function will not change
 * the weight of a server which was already up.
 *
 * The server's lock must be held. The lbprm's lock will be used.
 */
static void fwrr_set_server_status_up(struct server *srv)
{
	struct proxy *p = srv->proxy;
	struct fwrr_group *grp;

	/* nothing to do if the LB-visible state did not change */
	if (!srv_lb_status_changed(srv))
		return;

	/* server will still be unusable: only the state commit is needed */
	if (!srv_willbe_usable(srv))
		goto out_update_state;

	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);

	if (srv_currently_usable(srv))
		/* server was already up */
		goto out_update_backend;

	/* select the group (backup or active) and add this server's weight
	 * to the group's future total weight.
	 */
	grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
	grp->next_weight += srv->next_eweight;

	if (srv->flags & SRV_F_BACKUP) {
		p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;
		p->srv_bck++;

		if (!(p->options & PR_O_USE_ALL_BK)) {
			if (!p->lbprm.fbck) {
				/* there was no backup server anymore */
				p->lbprm.fbck = srv;
			} else {
				/* we may have restored a backup server prior to fbck,
				 * in which case it should replace it.
				 */
				struct server *srv2 = srv;
				do {
					srv2 = srv2->next;
				} while (srv2 && (srv2 != p->lbprm.fbck));
				if (srv2)
					p->lbprm.fbck = srv;
			}
		}
	} else {
		p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
		p->srv_act++;
	}

	/* note that eweight cannot be 0 here */
	fwrr_get_srv(srv);
	/* spread the server's first position over the window proportionally
	 * to its weight.
	 */
	srv->npos = grp->curr_pos + (grp->next_weight + grp->curr_weight - grp->curr_pos) / srv->next_eweight;
	fwrr_queue_srv(srv);

out_update_backend:
	/* check/update tot_used, tot_weight */
	update_backend_weight(p);
	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);

out_update_state:
	srv_lb_commit_status(srv);
}
| 158 | |
/* This function must be called after an update to server <srv>'s effective
 * weight. It may be called after a state change too.
 *
 * The server's lock must be held. The lbprm's lock will be used.
 */
static void fwrr_update_server_weight(struct server *srv)
{
	int old_state, new_state;
	struct proxy *p = srv->proxy;
	struct fwrr_group *grp;

	if (!srv_lb_status_changed(srv))
		return;

	/* If changing the server's weight changes its state, we simply apply
	 * the procedures we already have for status change. If the state
	 * remains down, the server is not in any tree, so it's as easy as
	 * updating its values. If the state remains up with different weights,
	 * there are some computations to perform to find a new place and
	 * possibly a new tree for this server.
	 */

	old_state = srv_currently_usable(srv);
	new_state = srv_willbe_usable(srv);

	if (!old_state && !new_state) {
		/* stays down: nothing to requeue, just commit the state */
		srv_lb_commit_status(srv);
		return;
	}
	else if (!old_state && new_state) {
		fwrr_set_server_status_up(srv);
		return;
	}
	else if (old_state && !new_state) {
		fwrr_set_server_status_down(srv);
		return;
	}

	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);

	/* server stays up with a new weight: adjust the group's future
	 * total weight by the weight delta.
	 */
	grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
	grp->next_weight = grp->next_weight - srv->cur_eweight + srv->next_eweight;

	p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
	p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;

	if (srv->lb_tree == grp->init) {
		/* still waiting in "init": re-sort it by its new weight */
		fwrr_dequeue_srv(srv);
		fwrr_queue_by_weight(grp->init, srv);
	}
	else if (!srv->lb_tree) {
		/* FIXME: server was down. This is not possible right now but
		 * may be needed soon for slowstart or graceful shutdown.
		 */
		fwrr_dequeue_srv(srv);
		fwrr_get_srv(srv);
		srv->npos = grp->curr_pos + (grp->next_weight + grp->curr_weight - grp->curr_pos) / srv->next_eweight;
		fwrr_queue_srv(srv);
	} else {
		/* The server is either active or in the next queue. If it's
		 * still in the active queue and it has not consumed all of its
		 * places, let's adjust its next position.
		 */
		fwrr_get_srv(srv);

		if (srv->next_eweight > 0) {
			int prev_next = srv->npos;
			int step = grp->next_weight / srv->next_eweight;

			srv->npos = srv->lpos + step;
			srv->rweight = 0;

			/* never schedule it later than before the change,
			 * nor too close to the current position.
			 */
			if (srv->npos > prev_next)
				srv->npos = prev_next;
			if (srv->npos < grp->curr_pos + 2)
				srv->npos = grp->curr_pos + step;
		} else {
			/* push it into the next tree */
			srv->npos = grp->curr_pos + grp->curr_weight;
		}

		fwrr_dequeue_srv(srv);
		fwrr_queue_srv(srv);
	}

	update_backend_weight(p);
	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);

	srv_lb_commit_status(srv);
}
| 249 | |
/* Remove a server from a tree. It must have previously been dequeued. This
 * function is meant to be called when a server is going down or has its
 * weight disabled.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_remove_from_tree(struct server *s)
{
	/* only mark the server as belonging to no tree; the eb32 node
	 * itself must already have been deleted (fwrr_dequeue_srv).
	 */
	s->lb_tree = NULL;
}
| 260 | |
| 261 | /* Queue a server in the weight tree <root>, assuming the weight is >0. |
| 262 | * We want to sort them by inverted weights, because we need to place |
| 263 | * heavy servers first in order to get a smooth distribution. |
Willy Tarreau | 1b87748 | 2018-08-21 19:44:53 +0200 | [diff] [blame] | 264 | * |
Willy Tarreau | 274ba67 | 2019-04-24 10:48:00 +0200 | [diff] [blame] | 265 | * The lbprm's lock must be held. The server's lock is not used. |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 266 | */ |
| 267 | static inline void fwrr_queue_by_weight(struct eb_root *root, struct server *s) |
| 268 | { |
Emeric Brun | 52a91d3 | 2017-08-31 14:41:55 +0200 | [diff] [blame] | 269 | s->lb_node.key = SRV_EWGHT_MAX - s->next_eweight; |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 270 | eb32_insert(root, &s->lb_node); |
| 271 | s->lb_tree = root; |
| 272 | } |
| 273 | |
/* This function is responsible for building the weight trees in case of fast
 * weighted round-robin. It also sets p->lbprm.wdiv to the eweight to uweight
 * ratio. Both active and backup groups are initialized.
 */
void fwrr_init_server_groups(struct proxy *p)
{
	struct server *srv;
	struct eb_root init_head = EB_ROOT;

	/* install the fwrr callbacks used by the generic LB layer */
	p->lbprm.set_server_status_up = fwrr_set_server_status_up;
	p->lbprm.set_server_status_down = fwrr_set_server_status_down;
	p->lbprm.update_server_eweight = fwrr_update_server_weight;

	/* scale each user weight into an effective weight (round up) */
	p->lbprm.wdiv = BE_WEIGHT_SCALE;
	for (srv = p->srv; srv; srv = srv->next) {
		srv->next_eweight = (srv->uweight * p->lbprm.wdiv + p->lbprm.wmult - 1) / p->lbprm.wmult;
		srv_lb_commit_status(srv);
	}

	recount_servers(p);
	update_backend_weight(p);

	/* prepare the active servers group */
	p->lbprm.fwrr.act.curr_pos = p->lbprm.fwrr.act.curr_weight =
		p->lbprm.fwrr.act.next_weight = p->lbprm.tot_wact;
	p->lbprm.fwrr.act.curr = p->lbprm.fwrr.act.t0 =
		p->lbprm.fwrr.act.t1 = init_head;
	p->lbprm.fwrr.act.init = &p->lbprm.fwrr.act.t0;
	p->lbprm.fwrr.act.next = &p->lbprm.fwrr.act.t1;

	/* prepare the backup servers group */
	p->lbprm.fwrr.bck.curr_pos = p->lbprm.fwrr.bck.curr_weight =
		p->lbprm.fwrr.bck.next_weight = p->lbprm.tot_wbck;
	p->lbprm.fwrr.bck.curr = p->lbprm.fwrr.bck.t0 =
		p->lbprm.fwrr.bck.t1 = init_head;
	p->lbprm.fwrr.bck.init = &p->lbprm.fwrr.bck.t0;
	p->lbprm.fwrr.bck.next = &p->lbprm.fwrr.bck.t1;

	/* queue active and backup servers in two distinct groups */
	for (srv = p->srv; srv; srv = srv->next) {
		if (!srv_currently_usable(srv))
			continue;
		fwrr_queue_by_weight((srv->flags & SRV_F_BACKUP) ?
				p->lbprm.fwrr.bck.init :
				p->lbprm.fwrr.act.init,
				srv);
	}
}
| 322 | |
Willy Tarreau | 1b87748 | 2018-08-21 19:44:53 +0200 | [diff] [blame] | 323 | /* simply removes a server from a weight tree. |
| 324 | * |
Willy Tarreau | 274ba67 | 2019-04-24 10:48:00 +0200 | [diff] [blame] | 325 | * The lbprm's lock must be held. The server's lock is not used. |
Willy Tarreau | 1b87748 | 2018-08-21 19:44:53 +0200 | [diff] [blame] | 326 | */ |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 327 | static inline void fwrr_dequeue_srv(struct server *s) |
| 328 | { |
| 329 | eb32_delete(&s->lb_node); |
| 330 | } |
| 331 | |
/* queues a server into the appropriate group and tree depending on its
 * backup status, and ->npos. If the server is disabled, simply assign
 * it to the NULL tree.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static void fwrr_queue_srv(struct server *s)
{
	struct proxy *p = s->proxy;
	struct fwrr_group *grp;

	grp = (s->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;

	/* Delay everything which does not fit into the window and everything
	 * which does not fit into the theorical new window.
	 */
	if (!srv_willbe_usable(s)) {
		/* disabled server: detach it from any tree */
		fwrr_remove_from_tree(s);
	}
	else if (s->next_eweight <= 0 ||
		 s->npos >= 2 * grp->curr_weight ||
		 s->npos >= grp->curr_weight + grp->next_weight) {
		/* put into next tree, and readjust npos in case we could
		 * finally take this back to current. */
		s->npos -= grp->curr_weight;
		fwrr_queue_by_weight(grp->next, s);
	}
	else {
		/* The sorting key is stored in units of s->npos * user_weight
		 * in order to avoid overflows. As stated in backend.h, the
		 * lower the scale, the rougher the weights modulation, and the
		 * higher the scale, the lower the number of servers without
		 * overflow. With this formula, the result is always positive,
		 * so we can use eb32_insert().
		 */
		s->lb_node.key = SRV_UWGHT_RANGE * s->npos +
			(unsigned)(SRV_EWGHT_MAX + s->rweight - s->next_eweight) / BE_WEIGHT_SCALE;

		eb32_insert(&grp->curr, &s->lb_node);
		s->lb_tree = &grp->curr;
	}
}
| 374 | |
Willy Tarreau | 1b87748 | 2018-08-21 19:44:53 +0200 | [diff] [blame] | 375 | /* prepares a server when extracting it from the "init" tree. |
| 376 | * |
Willy Tarreau | 274ba67 | 2019-04-24 10:48:00 +0200 | [diff] [blame] | 377 | * The lbprm's lock must be held. The server's lock is not used. |
Willy Tarreau | 1b87748 | 2018-08-21 19:44:53 +0200 | [diff] [blame] | 378 | */ |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 379 | static inline void fwrr_get_srv_init(struct server *s) |
| 380 | { |
| 381 | s->npos = s->rweight = 0; |
| 382 | } |
| 383 | |
Willy Tarreau | 1b87748 | 2018-08-21 19:44:53 +0200 | [diff] [blame] | 384 | /* prepares a server when extracting it from the "next" tree. |
| 385 | * |
Willy Tarreau | 274ba67 | 2019-04-24 10:48:00 +0200 | [diff] [blame] | 386 | * The lbprm's lock must be held. The server's lock is not used. |
Willy Tarreau | 1b87748 | 2018-08-21 19:44:53 +0200 | [diff] [blame] | 387 | */ |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 388 | static inline void fwrr_get_srv_next(struct server *s) |
| 389 | { |
Willy Tarreau | c93cd16 | 2014-05-13 15:54:22 +0200 | [diff] [blame] | 390 | struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ? |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 391 | &s->proxy->lbprm.fwrr.bck : |
| 392 | &s->proxy->lbprm.fwrr.act; |
| 393 | |
Willy Tarreau | 274ba67 | 2019-04-24 10:48:00 +0200 | [diff] [blame] | 394 | s->npos += grp->curr_weight; |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 395 | } |
| 396 | |
Willy Tarreau | 1b87748 | 2018-08-21 19:44:53 +0200 | [diff] [blame] | 397 | /* prepares a server when it was marked down. |
| 398 | * |
Willy Tarreau | 274ba67 | 2019-04-24 10:48:00 +0200 | [diff] [blame] | 399 | * The lbprm's lock must be held. The server's lock is not used. |
Willy Tarreau | 1b87748 | 2018-08-21 19:44:53 +0200 | [diff] [blame] | 400 | */ |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 401 | static inline void fwrr_get_srv_down(struct server *s) |
| 402 | { |
Willy Tarreau | c93cd16 | 2014-05-13 15:54:22 +0200 | [diff] [blame] | 403 | struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ? |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 404 | &s->proxy->lbprm.fwrr.bck : |
| 405 | &s->proxy->lbprm.fwrr.act; |
| 406 | |
| 407 | s->npos = grp->curr_pos; |
| 408 | } |
| 409 | |
Willy Tarreau | 1b87748 | 2018-08-21 19:44:53 +0200 | [diff] [blame] | 410 | /* prepares a server when extracting it from its tree. |
| 411 | * |
Willy Tarreau | 274ba67 | 2019-04-24 10:48:00 +0200 | [diff] [blame] | 412 | * The lbprm's lock must be held. The server's lock is not used. |
Willy Tarreau | 1b87748 | 2018-08-21 19:44:53 +0200 | [diff] [blame] | 413 | */ |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 414 | static void fwrr_get_srv(struct server *s) |
| 415 | { |
| 416 | struct proxy *p = s->proxy; |
Willy Tarreau | c93cd16 | 2014-05-13 15:54:22 +0200 | [diff] [blame] | 417 | struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ? |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 418 | &p->lbprm.fwrr.bck : |
| 419 | &p->lbprm.fwrr.act; |
| 420 | |
| 421 | if (s->lb_tree == grp->init) { |
| 422 | fwrr_get_srv_init(s); |
| 423 | } |
| 424 | else if (s->lb_tree == grp->next) { |
| 425 | fwrr_get_srv_next(s); |
| 426 | } |
| 427 | else if (s->lb_tree == NULL) { |
| 428 | fwrr_get_srv_down(s); |
| 429 | } |
| 430 | } |
| 431 | |
| 432 | /* switches trees "init" and "next" for FWRR group <grp>. "init" should be empty |
| 433 | * when this happens, and "next" filled with servers sorted by weights. |
Willy Tarreau | 1b87748 | 2018-08-21 19:44:53 +0200 | [diff] [blame] | 434 | * |
Willy Tarreau | 274ba67 | 2019-04-24 10:48:00 +0200 | [diff] [blame] | 435 | * The lbprm's lock must be held. The server's lock is not used. |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 436 | */ |
| 437 | static inline void fwrr_switch_trees(struct fwrr_group *grp) |
| 438 | { |
| 439 | struct eb_root *swap; |
| 440 | swap = grp->init; |
| 441 | grp->init = grp->next; |
| 442 | grp->next = swap; |
| 443 | grp->curr_weight = grp->next_weight; |
| 444 | grp->curr_pos = grp->curr_weight; |
| 445 | } |
| 446 | |
/* return next server from the current tree in FWRR group <grp>, or a server
 * from the "init" tree if appropriate. If both trees are empty, return NULL.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static struct server *fwrr_get_server_from_group(struct fwrr_group *grp)
{
	struct eb32_node *node1;
	struct eb32_node *node2;
	struct server *s1 = NULL;
	struct server *s2 = NULL;

	/* leftmost server of the current tree is the lowest-keyed, i.e. the
	 * one scheduled soonest; use it if it is due at the current position.
	 */
	node1 = eb32_first(&grp->curr);
	if (node1) {
		s1 = eb32_entry(node1, struct server, lb_node);
		if (s1->cur_eweight && s1->npos <= grp->curr_pos)
			return s1;
	}

	/* Either we have no server left, or we have a hole. We'll look in the
	 * init tree or a better proposal. At this point, if <s1> is non-null,
	 * it is guaranteed to remain available as the tree is locked.
	 */
	node2 = eb32_first(grp->init);
	if (node2) {
		s2 = eb32_entry(node2, struct server, lb_node);
		if (s2->cur_eweight) {
			/* newly introduced server: reset its position */
			fwrr_get_srv_init(s2);
			return s2;
		}
	}
	return s1;
}
| 480 | |
Willy Tarreau | 274ba67 | 2019-04-24 10:48:00 +0200 | [diff] [blame] | 481 | /* Computes next position of server <s> in the group. Nothing is done if <s> |
| 482 | * has a zero weight. |
Willy Tarreau | 1b87748 | 2018-08-21 19:44:53 +0200 | [diff] [blame] | 483 | * |
Willy Tarreau | 274ba67 | 2019-04-24 10:48:00 +0200 | [diff] [blame] | 484 | * The lbprm's lock must be held to protect lpos/npos/rweight. |
Willy Tarreau | 1b87748 | 2018-08-21 19:44:53 +0200 | [diff] [blame] | 485 | */ |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 486 | static inline void fwrr_update_position(struct fwrr_group *grp, struct server *s) |
| 487 | { |
Willy Tarreau | 274ba67 | 2019-04-24 10:48:00 +0200 | [diff] [blame] | 488 | unsigned int eweight = *(volatile unsigned int *)&s->cur_eweight; |
| 489 | |
| 490 | if (!eweight) |
| 491 | return; |
| 492 | |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 493 | if (!s->npos) { |
| 494 | /* first time ever for this server */ |
Willy Tarreau | 274ba67 | 2019-04-24 10:48:00 +0200 | [diff] [blame] | 495 | s->npos = grp->curr_pos; |
| 496 | } |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 497 | |
Willy Tarreau | 274ba67 | 2019-04-24 10:48:00 +0200 | [diff] [blame] | 498 | s->lpos = s->npos; |
| 499 | s->npos += grp->next_weight / eweight; |
| 500 | s->rweight += grp->next_weight % eweight; |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 501 | |
Willy Tarreau | 274ba67 | 2019-04-24 10:48:00 +0200 | [diff] [blame] | 502 | if (s->rweight >= eweight) { |
| 503 | s->rweight -= eweight; |
| 504 | s->npos++; |
Willy Tarreau | f89c187 | 2009-10-01 11:19:37 +0200 | [diff] [blame] | 505 | } |
| 506 | } |
| 507 | |
/* Return next server from the current tree in backend <p>, or a server from
 * the init tree if appropriate. If both trees are empty, return NULL.
 * Saturated servers are skipped and requeued.
 *
 * <srvtoavoid> may designate a server to skip if another choice exists;
 * it is still returned when it turns out to be the only candidate.
 *
 * The lbprm's lock will be used. The server's lock is not used.
 */
struct server *fwrr_get_next_server(struct proxy *p, struct server *srvtoavoid)
{
	struct server *srv, *full, *avoided;
	struct fwrr_group *grp;
	int switched;

	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
	if (p->srv_act)
		grp = &p->lbprm.fwrr.act;
	else if (p->lbprm.fbck) {
		/* single-backup mode: always use the first backup server */
		srv = p->lbprm.fbck;
		goto out;
	}
	else if (p->srv_bck)
		grp = &p->lbprm.fwrr.bck;
	else {
		/* no usable server at all */
		srv = NULL;
		goto out;
	}

	switched = 0;
	avoided = NULL;
	full = NULL; /* NULL-terminated list of saturated servers */
	while (1) {
		/* if we see an empty group, let's first try to collect weights
		 * which might have recently changed.
		 */
		if (!grp->curr_weight)
			grp->curr_pos = grp->curr_weight = grp->next_weight;

		/* get first server from the "current" tree. When the end of
		 * the tree is reached, we may have to switch, but only once.
		 */
		while (1) {
			srv = fwrr_get_server_from_group(grp);
			if (srv)
				break;
			if (switched) {
				if (avoided) {
					/* the avoided server is the only one left */
					srv = avoided;
					goto take_this_one;
				}
				goto requeue_servers;
			}
			switched = 1;
			fwrr_switch_trees(grp);
		}

		/* OK, we have a server. However, it may be saturated, in which
		 * case we don't want to reconsider it for now. We'll update
		 * its position and dequeue it anyway, so that we can move it
		 * to a better place afterwards.
		 */
		fwrr_update_position(grp, srv);
		fwrr_dequeue_srv(srv);
		grp->curr_pos++;
		if (!srv->maxconn || (!srv->nbpend && srv->served < srv_dynamic_maxconn(srv))) {
			/* make sure it is not the server we are trying to exclude... */
			if (srv != srvtoavoid || avoided)
				break;

			avoided = srv; /* ...but remember that is was selected yet avoided */
		}

		/* the server is saturated or avoided, let's chain it for later reinsertion.
		 */
		srv->next_full = full;
		full = srv;
	}

take_this_one:
	/* OK, we got the best server, let's update it */
	fwrr_queue_srv(srv);

requeue_servers:
	/* Requeue all extracted servers. If full==srv then it was
	 * avoided (unsuccessfully) and chained, omit it now. The
	 * only way to get there is by having <avoided>==NULL or
	 * <avoided>==<srv>.
	 */
	if (unlikely(full != NULL)) {
		if (switched) {
			/* the tree has switched, requeue all extracted servers
			 * into "init", because their place was lost, and only
			 * their weight matters.
			 */
			do {
				if (likely(full != srv))
					fwrr_queue_by_weight(grp->init, full);
				full = full->next_full;
			} while (full);
		} else {
			/* requeue all extracted servers just as if they were consumed
			 * so that they regain their expected place.
			 */
			do {
				if (likely(full != srv))
					fwrr_queue_srv(full);
				full = full->next_full;
			} while (full);
		}
	}
out:
	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
	return srv;
}
| 620 | |
| 621 | /* |
| 622 | * Local variables: |
| 623 | * c-indent-level: 8 |
| 624 | * c-basic-offset: 8 |
| 625 | * End: |
| 626 | */ |