/*
 * Fast Weighted Round Robin load balancing algorithm.
 *
 * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <haproxy/api.h>
#include <import/eb32tree.h>

#include <types/server.h>

#include <proto/backend.h>
#include <proto/queue.h>

static inline void fwrr_remove_from_tree(struct server *s);
static inline void fwrr_queue_by_weight(struct eb_root *root, struct server *s);
static inline void fwrr_dequeue_srv(struct server *s);
static void fwrr_get_srv(struct server *s);
static void fwrr_queue_srv(struct server *s);


/* This function updates the server trees according to server <srv>'s new
 * state. It should be called when server <srv>'s status changes to down.
 * It is not important whether the server was already down or not. It is not
 * important either that the new state is completely down (the caller may not
 * know all the variables of a server's state).
 *
 * The server's lock must be held. The lbprm's lock will be used.
 */
static void fwrr_set_server_status_down(struct server *srv)
{
	struct proxy *p = srv->proxy;
	struct fwrr_group *grp;

	if (!srv_lb_status_changed(srv))
		return;

	if (srv_willbe_usable(srv))
		goto out_update_state;

	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);

	if (!srv_currently_usable(srv))
		/* server was already down */
		goto out_update_backend;

	grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
	grp->next_weight -= srv->cur_eweight;

	if (srv->flags & SRV_F_BACKUP) {
		p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;
		p->srv_bck--;

		if (srv == p->lbprm.fbck) {
			/* we lost the first backup server in a single-backup
			 * configuration, so we must search for another one.
			 */
			struct server *srv2 = p->lbprm.fbck;
			do {
				srv2 = srv2->next;
			} while (srv2 &&
				 !((srv2->flags & SRV_F_BACKUP) &&
				   srv_willbe_usable(srv2)));
			p->lbprm.fbck = srv2;
		}
	} else {
		p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
		p->srv_act--;
	}

	fwrr_dequeue_srv(srv);
	fwrr_remove_from_tree(srv);

 out_update_backend:
	/* check/update tot_used, tot_weight */
	update_backend_weight(p);
	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);

 out_update_state:
	srv_lb_commit_status(srv);
}

/* This function updates the server trees according to server <srv>'s new
 * state. It should be called when server <srv>'s status changes to up.
 * It is not important whether the server was already down or not. It is not
 * important either that the new state is completely UP (the caller may not
 * know all the variables of a server's state). This function will not change
 * the weight of a server which was already up.
 *
 * The server's lock must be held. The lbprm's lock will be used.
 */
static void fwrr_set_server_status_up(struct server *srv)
{
	struct proxy *p = srv->proxy;
	struct fwrr_group *grp;

	if (!srv_lb_status_changed(srv))
		return;

	if (!srv_willbe_usable(srv))
		goto out_update_state;

	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);

	if (srv_currently_usable(srv))
		/* server was already up */
		goto out_update_backend;

	grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
	grp->next_weight += srv->next_eweight;

	if (srv->flags & SRV_F_BACKUP) {
		p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;
		p->srv_bck++;

		if (!(p->options & PR_O_USE_ALL_BK)) {
			if (!p->lbprm.fbck) {
				/* there was no backup server anymore */
				p->lbprm.fbck = srv;
			} else {
				/* we may have restored a backup server prior to fbck,
				 * in which case it should become the new fbck.
				 */
				struct server *srv2 = srv;
				do {
					srv2 = srv2->next;
				} while (srv2 && (srv2 != p->lbprm.fbck));
				if (srv2)
					p->lbprm.fbck = srv;
			}
		}
	} else {
		p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
		p->srv_act++;
	}

	/* note that eweight cannot be 0 here */
	fwrr_get_srv(srv);
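	/* Set the server's first position: advance from the current position
	 * by one fair step, i.e. the positions remaining in the current and
	 * next rounds divided by the server's effective weight.
	 */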
	srv->npos = grp->curr_pos + (grp->next_weight + grp->curr_weight - grp->curr_pos) / srv->next_eweight;
	fwrr_queue_srv(srv);

 out_update_backend:
	/* check/update tot_used, tot_weight */
	update_backend_weight(p);
	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);

 out_update_state:
	srv_lb_commit_status(srv);
}

/* This function must be called after an update to server <srv>'s effective
 * weight. It may be called after a state change too.
 *
 * The server's lock must be held. The lbprm's lock will be used.
 */
static void fwrr_update_server_weight(struct server *srv)
{
	int old_state, new_state;
	struct proxy *p = srv->proxy;
	struct fwrr_group *grp;

	if (!srv_lb_status_changed(srv))
		return;

	/* If changing the server's weight changes its state, we simply apply
	 * the procedures we already have for status change. If the state
	 * remains down, the server is not in any tree, so it's as easy as
	 * updating its values. If the state remains up with different weights,
	 * there are some computations to perform to find a new place and
	 * possibly a new tree for this server.
	 */

	old_state = srv_currently_usable(srv);
	new_state = srv_willbe_usable(srv);

	if (!old_state && !new_state) {
		srv_lb_commit_status(srv);
		return;
	}
	else if (!old_state && new_state) {
		fwrr_set_server_status_up(srv);
		return;
	}
	else if (old_state && !new_state) {
		fwrr_set_server_status_down(srv);
		return;
	}

	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);

	grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
	grp->next_weight = grp->next_weight - srv->cur_eweight + srv->next_eweight;

	p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
	p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;

	if (srv->lb_tree == grp->init) {
		fwrr_dequeue_srv(srv);
		fwrr_queue_by_weight(grp->init, srv);
	}
	else if (!srv->lb_tree) {
		/* FIXME: server was down. This is not possible right now but
		 * may be needed soon for slowstart or graceful shutdown.
		 */
		fwrr_dequeue_srv(srv);
		fwrr_get_srv(srv);
		srv->npos = grp->curr_pos + (grp->next_weight + grp->curr_weight - grp->curr_pos) / srv->next_eweight;
		fwrr_queue_srv(srv);
	} else {
		/* The server is either active or in the next queue. If it's
		 * still in the active queue and it has not consumed all of its
		 * places, let's adjust its next position.
		 */
		fwrr_get_srv(srv);

		if (srv->next_eweight > 0) {
			int prev_next = srv->npos;
			int step = grp->next_weight / srv->next_eweight;

			srv->npos = srv->lpos + step;
			srv->rweight = 0;

			if (srv->npos > prev_next)
				srv->npos = prev_next;
			if (srv->npos < grp->curr_pos + 2)
				srv->npos = grp->curr_pos + step;
		} else {
			/* push it into the next tree */
			srv->npos = grp->curr_pos + grp->curr_weight;
		}

		fwrr_dequeue_srv(srv);
		fwrr_queue_srv(srv);
	}

	update_backend_weight(p);
	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);

	srv_lb_commit_status(srv);
}

/* Remove a server from a tree. It must have previously been dequeued. This
 * function is meant to be called when a server is going down or has its
 * weight disabled.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_remove_from_tree(struct server *s)
{
	s->lb_tree = NULL;
}

/* Queue a server in the weight tree <root>, assuming the weight is >0.
 * We want to sort them by inverted weights, because we need to place
 * heavy servers first in order to get a smooth distribution.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_queue_by_weight(struct eb_root *root, struct server *s)
{
	s->lb_node.key = SRV_EWGHT_MAX - s->next_eweight;
	eb32_insert(root, &s->lb_node);
	s->lb_tree = root;
}

/* This function is responsible for building the weight trees in case of fast
 * weighted round-robin. It also sets p->lbprm.wdiv to the eweight to uweight
 * ratio. Both active and backup groups are initialized.
 */
void fwrr_init_server_groups(struct proxy *p)
{
	struct server *srv;
	struct eb_root init_head = EB_ROOT;

	p->lbprm.set_server_status_up = fwrr_set_server_status_up;
	p->lbprm.set_server_status_down = fwrr_set_server_status_down;
	p->lbprm.update_server_eweight = fwrr_update_server_weight;

	p->lbprm.wdiv = BE_WEIGHT_SCALE;
	for (srv = p->srv; srv; srv = srv->next) {
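		/* scale the user-visible weight to the effective weight,
		 * rounding up so that a non-zero uweight never yields a
		 * zero eweight.
		 */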
		srv->next_eweight = (srv->uweight * p->lbprm.wdiv + p->lbprm.wmult - 1) / p->lbprm.wmult;
		srv_lb_commit_status(srv);
	}

	recount_servers(p);
	update_backend_weight(p);

	/* prepare the active servers group */
	p->lbprm.fwrr.act.curr_pos = p->lbprm.fwrr.act.curr_weight =
		p->lbprm.fwrr.act.next_weight = p->lbprm.tot_wact;
	p->lbprm.fwrr.act.curr = p->lbprm.fwrr.act.t0 =
		p->lbprm.fwrr.act.t1 = init_head;
	p->lbprm.fwrr.act.init = &p->lbprm.fwrr.act.t0;
	p->lbprm.fwrr.act.next = &p->lbprm.fwrr.act.t1;

	/* prepare the backup servers group */
	p->lbprm.fwrr.bck.curr_pos = p->lbprm.fwrr.bck.curr_weight =
		p->lbprm.fwrr.bck.next_weight = p->lbprm.tot_wbck;
	p->lbprm.fwrr.bck.curr = p->lbprm.fwrr.bck.t0 =
		p->lbprm.fwrr.bck.t1 = init_head;
	p->lbprm.fwrr.bck.init = &p->lbprm.fwrr.bck.t0;
	p->lbprm.fwrr.bck.next = &p->lbprm.fwrr.bck.t1;

	/* queue active and backup servers in two distinct groups */
	for (srv = p->srv; srv; srv = srv->next) {
		if (!srv_currently_usable(srv))
			continue;
		fwrr_queue_by_weight((srv->flags & SRV_F_BACKUP) ?
				     p->lbprm.fwrr.bck.init :
				     p->lbprm.fwrr.act.init,
				     srv);
	}
}

/* simply removes a server from a weight tree.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_dequeue_srv(struct server *s)
{
	eb32_delete(&s->lb_node);
}

/* queues a server into the appropriate group and tree depending on its
 * backup status, and ->npos. If the server is disabled, simply assign
 * it to the NULL tree.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static void fwrr_queue_srv(struct server *s)
{
	struct proxy *p = s->proxy;
	struct fwrr_group *grp;

	grp = (s->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;

	/* Delay everything which does not fit into the window and everything
	 * which does not fit into the theoretical new window.
	 */
	if (!srv_willbe_usable(s)) {
		fwrr_remove_from_tree(s);
	}
	else if (s->next_eweight <= 0 ||
		 s->npos >= 2 * grp->curr_weight ||
		 s->npos >= grp->curr_weight + grp->next_weight) {
		/* put into next tree, and readjust npos in case we could
		 * finally take this back to current. */
		s->npos -= grp->curr_weight;
		fwrr_queue_by_weight(grp->next, s);
	}
	else {
		/* The sorting key is stored in units of s->npos * user_weight
		 * in order to avoid overflows. As stated in backend.h, the
		 * lower the scale, the rougher the weights modulation, and the
		 * higher the scale, the lower the number of servers without
		 * overflow. With this formula, the result is always positive,
		 * so we can use eb32_insert().
		 */
		s->lb_node.key = SRV_UWGHT_RANGE * s->npos +
			(unsigned)(SRV_EWGHT_MAX + s->rweight - s->next_eweight) / BE_WEIGHT_SCALE;

		eb32_insert(&grp->curr, &s->lb_node);
		s->lb_tree = &grp->curr;
	}
}

/* prepares a server when extracting it from the "init" tree.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_get_srv_init(struct server *s)
{
	s->npos = s->rweight = 0;
}

/* prepares a server when extracting it from the "next" tree.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_get_srv_next(struct server *s)
{
	struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ?
		&s->proxy->lbprm.fwrr.bck :
		&s->proxy->lbprm.fwrr.act;

	s->npos += grp->curr_weight;
}

/* prepares a server when it was marked down.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_get_srv_down(struct server *s)
{
	struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ?
		&s->proxy->lbprm.fwrr.bck :
		&s->proxy->lbprm.fwrr.act;

	s->npos = grp->curr_pos;
}

/* prepares a server when extracting it from its tree.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static void fwrr_get_srv(struct server *s)
{
	struct proxy *p = s->proxy;
	struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ?
		&p->lbprm.fwrr.bck :
		&p->lbprm.fwrr.act;

	if (s->lb_tree == grp->init) {
		fwrr_get_srv_init(s);
	}
	else if (s->lb_tree == grp->next) {
		fwrr_get_srv_next(s);
	}
	else if (s->lb_tree == NULL) {
		fwrr_get_srv_down(s);
	}
}

/* switches trees "init" and "next" for FWRR group <grp>. "init" should be empty
 * when this happens, and "next" filled with servers sorted by weights.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_switch_trees(struct fwrr_group *grp)
{
	struct eb_root *swap;
	swap = grp->init;
	grp->init = grp->next;
	grp->next = swap;
	grp->curr_weight = grp->next_weight;
	grp->curr_pos = grp->curr_weight;
}

/* return next server from the current tree in FWRR group <grp>, or a server
 * from the "init" tree if appropriate. If both trees are empty, return NULL.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static struct server *fwrr_get_server_from_group(struct fwrr_group *grp)
{
	struct eb32_node *node1;
	struct eb32_node *node2;
	struct server *s1 = NULL;
	struct server *s2 = NULL;

	node1 = eb32_first(&grp->curr);
	if (node1) {
		s1 = eb32_entry(node1, struct server, lb_node);
		if (s1->cur_eweight && s1->npos <= grp->curr_pos)
			return s1;
	}

	/* Either we have no server left, or we have a hole. We'll look in the
	 * init tree for a better proposal. At this point, if <s1> is non-null,
	 * it is guaranteed to remain available as the tree is locked.
	 */
	node2 = eb32_first(grp->init);
	if (node2) {
		s2 = eb32_entry(node2, struct server, lb_node);
		if (s2->cur_eweight) {
			fwrr_get_srv_init(s2);
			return s2;
		}
	}
	return s1;
}

/* Computes next position of server <s> in the group. Nothing is done if <s>
 * has a zero weight.
 *
 * The lbprm's lock must be held to protect lpos/npos/rweight.
 */
static inline void fwrr_update_position(struct fwrr_group *grp, struct server *s)
{
	unsigned int eweight = *(volatile unsigned int *)&s->cur_eweight;

	if (!eweight)
		return;

	if (!s->npos) {
		/* first time ever for this server */
		s->npos = grp->curr_pos;
	}

	s->lpos = s->npos;
	s->npos += grp->next_weight / eweight;
	s->rweight += grp->next_weight % eweight;

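	/* spread the division remainder over successive turns: once it
	 * accumulates to a full weight, advance by one extra position.
	 */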
	if (s->rweight >= eweight) {
		s->rweight -= eweight;
		s->npos++;
	}
}

/* Return next server from the current tree in backend <p>, or a server from
 * the init tree if appropriate. If both trees are empty, return NULL.
 * Saturated servers are skipped and requeued.
 *
 * The lbprm's lock will be used. The server's lock is not used.
 */
struct server *fwrr_get_next_server(struct proxy *p, struct server *srvtoavoid)
{
	struct server *srv, *full, *avoided;
	struct fwrr_group *grp;
	int switched;

	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
	if (p->srv_act)
		grp = &p->lbprm.fwrr.act;
	else if (p->lbprm.fbck) {
		srv = p->lbprm.fbck;
		goto out;
	}
	else if (p->srv_bck)
		grp = &p->lbprm.fwrr.bck;
	else {
		srv = NULL;
		goto out;
	}

	switched = 0;
	avoided = NULL;
	full = NULL; /* NULL-terminated list of saturated servers */
	while (1) {
		/* if we see an empty group, let's first try to collect weights
		 * which might have recently changed.
		 */
		if (!grp->curr_weight)
			grp->curr_pos = grp->curr_weight = grp->next_weight;

		/* get first server from the "current" tree. When the end of
		 * the tree is reached, we may have to switch, but only once.
		 */
		while (1) {
			srv = fwrr_get_server_from_group(grp);
			if (srv)
				break;
			if (switched) {
				if (avoided) {
					srv = avoided;
					goto take_this_one;
				}
				goto requeue_servers;
			}
			switched = 1;
			fwrr_switch_trees(grp);
		}

		/* OK, we have a server. However, it may be saturated, in which
		 * case we don't want to reconsider it for now. We'll update
		 * its position and dequeue it anyway, so that we can move it
		 * to a better place afterwards.
		 */
		fwrr_update_position(grp, srv);
		fwrr_dequeue_srv(srv);
		grp->curr_pos++;
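		/* only pick this server if it still has room: either no maxconn
		 * is set, or nothing is queued on it and it serves fewer
		 * connections than its dynamic maxconn.
		 */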
		if (!srv->maxconn || (!srv->nbpend && srv->served < srv_dynamic_maxconn(srv))) {
			/* make sure it is not the server we are trying to exclude... */
			if (srv != srvtoavoid || avoided)
				break;

			avoided = srv; /* ...but remember that it was selected yet avoided */
		}

		/* the server is saturated or avoided, let's chain it for later
		 * reinsertion.
		 */
		srv->next_full = full;
		full = srv;
	}

 take_this_one:
	/* OK, we got the best server, let's update it */
	fwrr_queue_srv(srv);

 requeue_servers:
	/* Requeue all extracted servers. If full==srv then it was
	 * avoided (unsuccessfully) and chained, omit it now. The
	 * only way to get there is by having <avoided>==NULL or
	 * <avoided>==<srv>.
	 */
	if (unlikely(full != NULL)) {
		if (switched) {
			/* the tree has switched, requeue all extracted servers
			 * into "init", because their place was lost, and only
			 * their weight matters.
			 */
			do {
				if (likely(full != srv))
					fwrr_queue_by_weight(grp->init, full);
				full = full->next_full;
			} while (full);
		} else {
			/* requeue all extracted servers just as if they were consumed
			 * so that they regain their expected place.
			 */
			do {
				if (likely(full != srv))
					fwrr_queue_srv(full);
				full = full->next_full;
			} while (full);
		}
	}
 out:
	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
	return srv;
}

/*
 * Local variables:
 * c-indent-level: 8
 * c-basic-offset: 8
 * End:
 */