/*
 * Event rate calculation functions.
 *
 * Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <haproxy/api.h>
#include <haproxy/freq_ctr.h>
#include <haproxy/tools.h>

/* Update a frequency counter by <inc> incremental units. It is automatically
 * rotated if the period is over. It correctly initializes a previously null
 * (zeroed) area. This one works on frequency counters which have a period
 * different from one second. It relies on the process-wide clock, which is
 * guaranteed to be monotonic. It's important to avoid forced rotates between
 * threads. A faster wrapper (update_freq_ctr_period) should be used instead;
 * it uses the thread's local time whenever possible and falls back to this
 * one when needed (less than 0.003% of the time).
 */
uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc)
{
	uint curr_tick;
	uint32_t now_ms_tmp;

	/* atomically update the counter if still within the period, even if
	 * a rotation is in progress (no big deal).
	 */
	for (;; __ha_cpu_relax()) {
		curr_tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
		now_ms_tmp = HA_ATOMIC_LOAD(&global_now_ms);

		if (now_ms_tmp - curr_tick < period)
			return HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);

		/* a rotation is needed. While extremely rare, contention may
		 * happen here, because rotation is triggered by the time
		 * changing and all threads see the time change simultaneously.
		 */
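		/* Note: bit 0 of curr_tick serves as a lock: the thread that
		 * wins the CAS below performs the rotation alone, and releases
		 * the lock by storing the new (even) tick at the end.
		 */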
		if (!(curr_tick & 1) &&
		    HA_ATOMIC_CAS(&ctr->curr_tick, &curr_tick, curr_tick | 0x1))
			break;
	}

	/* atomically switch the new period into the old one without losing any
	 * potential concurrent update. We're the only one performing the rotate
	 * (locked above), others are only adding positive values to curr_ctr.
	 */
	HA_ATOMIC_STORE(&ctr->prev_ctr, HA_ATOMIC_XCHG(&ctr->curr_ctr, inc));
	curr_tick += period;
	if (unlikely(now_ms_tmp - curr_tick >= period)) {
		/* we missed at least two periods */
		HA_ATOMIC_STORE(&ctr->prev_ctr, 0);
		curr_tick = now_ms_tmp;
	}

	/* release the lock and update the time in case of rotate. */
	HA_ATOMIC_STORE(&ctr->curr_tick, curr_tick & ~1);
	return inc;
}
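
/* A minimal usage sketch (illustrative only, not part of the original file):
 * counting one event per call over a 10-second window. The counter may be
 * zero-initialized; the function takes care of initializing a null area.
 * The function name below is hypothetical.
 */
static inline uint example_count_event(struct freq_ctr *ctr)
{
	/* one event over a 10000ms period; returns the value accounted
	 * for in the current period, including this increment.
	 */
	return update_freq_ctr_period_slow(ctr, 10000, 1);
}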

/* Returns the total number of events over the current + last period, including
 * a number of already pending events <pend>. The average frequency will be
 * obtained by dividing the output by <period>. This is mainly intended to
 * ease the implementation of higher-level read functions.
 *
 * As a special case, if pend < 0, it is assumed that there are no pending
 * events and a flapping correction must be applied at the end. This is used by
 * read_freq_ctr_period() to avoid reporting ups and downs on low-frequency
 * events when the past value is <= 1.
 */
ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend)
{
	ullong curr, past, old_curr, old_past;
	uint tick, old_tick;
	int remain;

	tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
	curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
	past = HA_ATOMIC_LOAD(&ctr->prev_ctr);

	while (1) {
		if (tick & 0x1) // change in progress
			goto redo0;

		old_tick = tick;
		old_curr = curr;
		old_past = past;

		/* now let's load the values a second time and make sure they
		 * did not change, which will indicate it was a stable reading.
		 */

		tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
		if (tick & 0x1) // change in progress
			goto redo0;

		if (tick != old_tick)
			goto redo1;

		curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
		if (curr != old_curr)
			goto redo2;

		past = HA_ATOMIC_LOAD(&ctr->prev_ctr);
		if (past != old_past)
			goto redo3;

		/* all values match between two loads, they're stable, let's
		 * quit now.
		 */
		break;
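		/* the staggered labels below reload only the value that was
		 * observed to change, plus those loaded after it, so a single
		 * stale value does not force a full reload.
		 */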
	redo0:
		tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
	redo1:
		curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
	redo2:
		past = HA_ATOMIC_LOAD(&ctr->prev_ctr);
	redo3:
		__ha_cpu_relax();
	}

	remain = tick + period - HA_ATOMIC_LOAD(&global_now_ms);
	if (unlikely(remain < 0)) {
		/* We're past the first period, check if we can still report a
		 * part of the last period or if we're too far away.
		 */
		remain += period;
		past = (remain >= 0) ? curr : 0;
		curr = 0;
	}

	if (pend < 0) {
		/* enable flapping correction at very low rates */
		pend = 0;
		if (!curr && past <= 1)
			return past * period;
	}

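	/* The measurement window slides: <remain> ms of the previous period
	 * are still covered, so <past> contributes proportionally. Worked
	 * example: with period=1000, past=100, curr=30, pend=0, remain=400,
	 * the result is 100*400 + 30*1000 = 70000, i.e. an average of 70
	 * events per period once divided by <period>.
	 */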
	/* compute the total number of confirmed events over the period */
	return past * remain + (curr + pend) * period;
}
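
/* A hedged sketch (not part of the original file) of a higher-level read
 * function built on freq_ctr_total(), in the spirit of read_freq_ctr_period()
 * mentioned above: it reports the average number of events per <period>,
 * with the flapping correction enabled via pend < 0. The name is hypothetical.
 */
static inline uint example_read_rate(const struct freq_ctr *ctr, uint period)
{
	return div64_32(freq_ctr_total(ctr, period, -1), period);
}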

/* Returns the excess of events (may be negative) over the current period for
 * target frequency <freq>. It returns 0 if the counter is in the future or if
 * the counter is empty. The result considers the position of the current time
 * within the current period.
 *
 * The caller may safely add new events if the result is negative or null.
 */
int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq)
{
	ullong curr, old_curr;
	uint tick, old_tick;
	int elapsed;

	tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
	curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);

	while (1) {
		if (tick & 0x1) // change in progress
			goto redo0;

		old_tick = tick;
		old_curr = curr;

		/* now let's load the values a second time and make sure they
		 * did not change, which will indicate it was a stable reading.
		 */

		tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
		if (tick & 0x1) // change in progress
			goto redo0;

		if (tick != old_tick)
			goto redo1;

		curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
		if (curr != old_curr)
			goto redo2;

		/* all values match between two loads, they're stable, let's
		 * quit now.
		 */
		break;
	redo0:
		tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
	redo1:
		curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
	redo2:
		__ha_cpu_relax();
	}

	if (!curr && !tick) {
		/* The counter is empty; there is no overshoot */
		return 0;
	}

	elapsed = HA_ATOMIC_LOAD(&global_now_ms) - tick;
	if (unlikely(elapsed < 0 || elapsed > period)) {
		/* The counter is in the future, or the elapsed time exceeds
		 * the period; there is no overshoot.
		 */
		return 0;
	}

	return curr - div64_32((uint64_t)elapsed * freq, period);
}
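
/* A hedged usage sketch (illustrative, not part of the original file):
 * enforcing a rate limit of <freq> events per <period> ms. As documented
 * above, the caller may safely add new events while the overshoot is
 * negative or null. The function name is hypothetical.
 */
static inline int example_may_emit(struct freq_ctr *ctr, uint period, uint freq)
{
	if (freq_ctr_overshoot_period(ctr, period, freq) > 0)
		return 0; /* already beyond the target rate, reject */
	update_freq_ctr_period_slow(ctr, period, 1);
	return 1; /* accounted for, proceed */
}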

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */