/*
 * Event rate calculation functions.
 *
 * Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <haproxy/api.h>
#include <haproxy/freq_ctr.h>
#include <haproxy/tools.h>

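/* Editor's note, inferred from the code below: the freq_ctr structure is
 * declared in haproxy/freq_ctr.h and carries <curr_tick>, the start date of
 * the current period (in millisecond ticks), <curr_ctr>, the events
 * accumulated over the current period, and <prev_ctr>, the events counted
 * over the previous period. Bit 0 of <curr_tick> serves as a lock that is
 * held for the duration of a rotation, so both writers and readers check it
 * before trusting the other fields.
 */
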
/* Update a frequency counter by <inc> incremental units. The counter is
 * automatically rotated if its period is over, and a null (zero-initialized)
 * area is correctly set up. This function works on frequency counters whose
 * period may differ from one second. It relies on the process-wide clock,
 * which is guaranteed to be monotonic; this matters because it avoids forced
 * rotations between threads. The faster wrapper update_freq_ctr_period()
 * should normally be used instead: it uses the thread's local time whenever
 * possible and only falls back to this function when needed (less than
 * 0.003% of the time).
 */
uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc)
{
	uint curr_tick;
	uint32_t now_ms_tmp;

	/* atomically update the counter if still within the period, even if
	 * a rotation is in progress (no big deal).
	 */
	for (;; __ha_cpu_relax()) {
		curr_tick  = HA_ATOMIC_LOAD(&ctr->curr_tick);
		now_ms_tmp = HA_ATOMIC_LOAD(&global_now_ms);

		if (now_ms_tmp - curr_tick < period)
			return HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);

		/* a rotation is needed. While extremely rare, contention may
		 * happen here because rotations are triggered by the clock,
		 * so all threads see the time change simultaneously. Bit 0
		 * of <curr_tick> is used as a lock during the rotation.
		 */
		if (!(curr_tick & 1) &&
		    HA_ATOMIC_CAS(&ctr->curr_tick, &curr_tick, curr_tick | 0x1))
			break;
	}

	/* atomically switch the new period into the old one without losing any
	 * potential concurrent update. We're the only one performing the rotate
	 * (locked above), others are only adding positive values to curr_ctr.
	 */
	HA_ATOMIC_STORE(&ctr->prev_ctr, HA_ATOMIC_XCHG(&ctr->curr_ctr, inc));
	curr_tick += period;
	if (likely(now_ms_tmp - curr_tick >= period)) {
		/* we missed at least two periods */
		HA_ATOMIC_STORE(&ctr->prev_ctr, 0);
		curr_tick = now_ms_tmp;
	}

	/* release the lock and update the time in case of rotate. */
	HA_ATOMIC_STORE(&ctr->curr_tick, curr_tick & ~1);
	return inc;
}

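/* A minimal usage sketch (not part of the original file): accounting one
 * event on a counter measured over a 10-second window. The counter and
 * function names below are hypothetical; real callers normally go through
 * the faster update_freq_ctr_period() wrapper mentioned above.
 */
static struct freq_ctr example_evt_ctr;  /* zero-initialized, as required */

static uint __maybe_unused example_count_event(void)
{
	/* add one event to the counter over a 10000ms (10s) period and
	 * return the new count for the current period.
	 */
	return update_freq_ctr_period_slow(&example_evt_ctr, 10000, 1);
}
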
/* Returns the total number of events over the current + last period, including
 * a number of already pending events <pend>. The average frequency will be
 * obtained by dividing the output by <period>. This is essentially made to
 * ease implementation of higher-level read functions.
 *
 * As a special case, if pend < 0, it's assumed there are no pending
 * events and a flapping correction must be applied at the end. This is used by
 * read_freq_ctr_period() to avoid reporting ups and downs on low-frequency
 * events when the past value is <= 1.
 */
ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend)
{
	ullong curr, past, old_curr, old_past;
	uint tick, old_tick;
	int remain;

	tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
	curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
	past = HA_ATOMIC_LOAD(&ctr->prev_ctr);

	while (1) {
		if (tick & 0x1) // change in progress
			goto redo0;

		old_tick = tick;
		old_curr = curr;
		old_past = past;

		/* now let's load the values a second time and make sure they
		 * did not change, which will indicate it was a stable reading.
		 */

		tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
		if (tick & 0x1) // change in progress
			goto redo0;

		if (tick != old_tick)
			goto redo1;

		curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
		if (curr != old_curr)
			goto redo2;

		past = HA_ATOMIC_LOAD(&ctr->prev_ctr);
		if (past != old_past)
			goto redo3;

		/* all values match between two loads, they're stable, let's
		 * quit now.
		 */
		break;
	redo0:
		tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
	redo1:
		curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
	redo2:
		past = HA_ATOMIC_LOAD(&ctr->prev_ctr);
	redo3:
		__ha_cpu_relax();
	}

	remain = tick + period - HA_ATOMIC_LOAD(&global_now_ms);
	if (unlikely(remain < 0)) {
		/* We're past the first period, check if we can still report a
		 * part of last period or if we're too far away.
		 */
		remain += period;
		past = (remain >= 0) ? curr : 0;
		curr = 0;
	}

	if (pend < 0) {
		/* enable flapping correction at very low rates */
		pend = 0;
		if (!curr && past <= 1)
			return past * period;
	}

	/* compute the total number of confirmed events over the period: the
	 * past period is weighted by the time remaining in the current one
	 * (remain/period once divided by <period>), so the reported value
	 * slides smoothly from one period to the next.
	 */
	return past * remain + (curr + pend) * period;
}

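/* A minimal read-side sketch (not part of the original file): turning the
 * total returned by freq_ctr_total() into an events-per-period figure, as
 * the doc comment above suggests. The function name is hypothetical; passing
 * pend = -1 enables the low-rate flapping correction described above.
 */
static uint __maybe_unused example_read_rate(const struct freq_ctr *ctr, uint period)
{
	/* total events over current+last period, divided by the period */
	return div64_32(freq_ctr_total(ctr, period, -1), period);
}
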
/* Returns the excess of events (which may be negative) over the current
 * period for the target frequency <freq>. It returns 0 if the counter is in
 * the future or if the counter is empty. The result takes into account the
 * position of the current time within the current period.
 *
 * The caller may safely add new events if the result is negative or null.
 */
int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq)
{
	uint curr, old_curr;
	uint tick, old_tick;
	int elapsed;

	tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
	curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);

	while (1) {
		if (tick & 0x1) // change in progress
			goto redo0;

		old_tick = tick;
		old_curr = curr;

		/* now let's load the values a second time and make sure they
		 * did not change, which will indicate it was a stable reading.
		 */

		tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
		if (tick & 0x1) // change in progress
			goto redo0;

		if (tick != old_tick)
			goto redo1;

		curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
		if (curr != old_curr)
			goto redo2;

		/* all values match between two loads, they're stable, let's
		 * quit now.
		 */
		break;
	redo0:
		tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
	redo1:
		curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
	redo2:
		__ha_cpu_relax();
	}

	if (!curr && !tick) {
		/* The counter is empty, there is no overshoot */
		return 0;
	}

	elapsed = HA_ATOMIC_LOAD(&global_now_ms) - tick;
	if (unlikely(elapsed < 0)) {
		/* The counter is in the future, there is no overshoot */
		return 0;
	}

	/* subtract the number of events permitted so far (the target <freq>
	 * scaled by the elapsed fraction of the period) from those received.
	 */
	return curr - div64_32((uint64_t)elapsed * freq, period);
}

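/* A minimal rate-limiting sketch (not part of the original file): since the
 * caller may safely add events while the overshoot is negative or null, a
 * limiter only needs to test the sign. The function name and the 100
 * events/s budget are hypothetical.
 */
static int __maybe_unused example_may_accept_event(const struct freq_ctr *ctr)
{
	/* accept as long as we have not exceeded 100 events per 1000ms */
	return freq_ctr_overshoot_period(ctr, 1000, 100) <= 0;
}
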
/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */