Willy Tarreau | 7f062c4 | 2009-03-05 18:43:00 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Event rate calculation functions. |
| 3 | * |
Willy Tarreau | 2970b0b | 2010-06-20 07:15:43 +0200 | [diff] [blame] | 4 | * Copyright 2000-2010 Willy Tarreau <w@1wt.eu> |
Willy Tarreau | 7f062c4 | 2009-03-05 18:43:00 +0100 | [diff] [blame] | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License |
| 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the License, or (at your option) any later version. |
| 10 | * |
| 11 | */ |
| 12 | |
Willy Tarreau | 4c7e4b7 | 2020-05-27 12:58:42 +0200 | [diff] [blame] | 13 | #include <haproxy/api.h> |
Willy Tarreau | 6634794 | 2020-06-01 12:18:08 +0200 | [diff] [blame] | 14 | #include <haproxy/freq_ctr.h> |
Willy Tarreau | b255105 | 2020-06-09 09:07:15 +0200 | [diff] [blame] | 15 | #include <haproxy/tools.h> |
Willy Tarreau | 7f062c4 | 2009-03-05 18:43:00 +0100 | [diff] [blame] | 16 | |
/* Returns the total number of events over the current + last period, including
 * a number of already pending events <pend>. The average frequency will be
 * obtained by dividing the output by <period>. This is essentially made to
 * ease implementation of higher-level read functions.
 *
 * As a special case, if pend < 0, it's assumed there are no pending
 * events and a flapping correction must be applied at the end. This is used by
 * read_freq_ctr_period() to avoid reporting ups and downs on low-frequency
 * events when the past value is <= 1.
 *
 * Reading is lockless: writers set the low bit of <curr_tick> while an update
 * is in progress (seqlock-style), so a reader must retry whenever that bit is
 * set or whenever two successive loads of the same field differ. <curr_tick>
 * is compared against global_now_ms below, so both share the same millisecond
 * time base, and <period> is expressed in the same unit.
 */
ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend)
{
	ullong curr, past, old_curr, old_past;
	uint tick, old_tick;
	int remain;

	/* first (possibly torn) snapshot of the three fields */
	tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
	curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
	past = HA_ATOMIC_LOAD(&ctr->prev_ctr);

	while (1) {
		if (tick & 0x1) // change in progress
			goto redo0;

		old_tick = tick;
		old_curr = curr;
		old_past = past;

		/* now let's load the values a second time and make sure they
		 * did not change, which will indicate it was a stable reading.
		 */

		tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
		if (tick & 0x1) // change in progress
			goto redo0;

		if (tick != old_tick)
			goto redo1;

		curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
		if (curr != old_curr)
			goto redo2;

		past = HA_ATOMIC_LOAD(&ctr->prev_ctr);
		if (past != old_past)
			goto redo3;

		/* all values match between two loads, they're stable, let's
		 * quit now.
		 */
		break;
		/* The retry ladder below reloads only the fields that are no
		 * longer trusted: an entry at redoN falls through and refreshes
		 * every field from that point on, then pauses the CPU before
		 * the next verification round.
		 */
	redo0:
		tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
	redo1:
		curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
	redo2:
		past = HA_ATOMIC_LOAD(&ctr->prev_ctr);
	redo3:
		__ha_cpu_relax();
	};

	/* number of ms left in the current period, counted from now */
	remain = tick + period - HA_ATOMIC_LOAD(&global_now_ms);
	if (unlikely(remain < 0)) {
		/* We're past the first period, check if we can still report a
		 * part of last period or if we're too far away.
		 */
		remain += period;
		/* <curr> in fact covers what is now the previous period; it
		 * counts fully only if some part of that period remains.
		 */
		past = (remain >= 0) ? curr : 0;
		curr = 0;
	}

	if (pend < 0) {
		/* enable flapping correction at very low rates: with no
		 * current events and at most one past event, report a flat
		 * <past> events per period instead of a pro-rated mix.
		 */
		pend = 0;
		if (!curr && past <= 1)
			return past * period;
	}

	/* compute the total number of confirmed events over the period:
	 * the previous period is weighted by the time remaining in the
	 * current one, while current + pending events count in full.
	 */
	return past * remain + (curr + pend) * period;
}
Willy Tarreau | 7f062c4 | 2009-03-05 18:43:00 +0100 | [diff] [blame] | 98 | |
/* Returns the excess of events (may be negative) over the current period for
 * target frequency <freq>. It returns 0 if the counter is in the future. The
 * result considers the position of the current time within the current period.
 *
 * The caller may safely add new events if result is negative or null.
 *
 * Like freq_ctr_total() above, this reads the counter locklessly: the low bit
 * of <curr_tick> flags an update in progress, so the reader retries until two
 * successive loads of both fields agree. Only <curr_tick> and <curr_ctr> are
 * needed here since the previous period does not matter for overshoot.
 */
int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq)
{
	uint curr, old_curr;
	uint tick, old_tick;
	int elapsed;

	/* first (possibly torn) snapshot of the two fields */
	tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
	curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);

	while (1) {
		if (tick & 0x1) // change in progress
			goto redo0;

		old_tick = tick;
		old_curr = curr;

		/* now let's load the values a second time and make sure they
		 * did not change, which will indicate it was a stable reading.
		 */

		tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
		if (tick & 0x1) // change in progress
			goto redo0;

		if (tick != old_tick)
			goto redo1;

		curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
		if (curr != old_curr)
			goto redo2;

		/* all values match between two loads, they're stable, let's
		 * quit now.
		 */
		break;
		/* retry ladder: entering at redoN refreshes every field from
		 * that point on, then pauses the CPU before re-checking.
		 */
	redo0:
		tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
	redo1:
		curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
	redo2:
		__ha_cpu_relax();
	};

	/* ms elapsed since the start of the current period */
	elapsed = HA_ATOMIC_LOAD(&global_now_ms) - tick;
	if (unlikely(elapsed < 0)) {
		/* The counter is in the future, there is no overshoot */
		return 0;
	}

	/* observed events minus the pro-rated budget (freq * elapsed/period)
	 * allowed so far in this period
	 */
	return curr - div64_32((uint64_t)elapsed * freq, period);
}
| 156 | |
Willy Tarreau | 7f062c4 | 2009-03-05 18:43:00 +0100 | [diff] [blame] | 157 | /* |
| 158 | * Local variables: |
| 159 | * c-indent-level: 8 |
| 160 | * c-basic-offset: 8 |
| 161 | * End: |
| 162 | */ |