/*
 * Event rate calculation functions.
 *
 * Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/config.h>
#include <common/standard.h>
#include <common/time.h>
#include <common/tools.h>
#include <proto/freq_ctr.h>
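/* For reference (editorial note, not part of the original file): the counters
 * handled below are declared in types/freq_ctr.h and roughly look like the
 * sketch below. Field names are the ones used by the functions in this file;
 * the comments are an editorial summary, so the header remains authoritative.
 *
 *	struct freq_ctr {
 *		unsigned int curr_sec;	// start of current period (now.tv_sec), bit 31 = updater lock
 *		unsigned int curr_ctr;	// cumulated value for the current period
 *		unsigned int prev_ctr;	// value over the previous period
 *	};
 *
 *	struct freq_ctr_period {
 *		unsigned int curr_tick;	// start of current period (now_ms ticks), bit 0 = updater lock
 *		unsigned int curr_ctr;	// cumulated value for the current period
 *		unsigned int prev_ctr;	// value over the previous period
 *	};
 */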
/* Read a frequency counter taking history into account for missing time in
 * current period. Current second is sub-divided in 1000 chunks of one ms,
 * and the missing ones are read proportionally from previous value. The
 * return value has the same precision as one input data sample, so low rates
 * will be inaccurate but still appropriate for max checking. One trick we use
 * for low values is to specially handle the case where the rate is between 0
 * and 1 in order to avoid flapping while waiting for the next event.
 *
 * For immediate limit checking, it's recommended to use freq_ctr_remain() and
 * next_event_delay() instead which do not have the flapping correction, so
 * that even frequencies as low as one event/period are properly handled.
 */
unsigned int read_freq_ctr(struct freq_ctr *ctr)
{
	unsigned int curr, past;
	unsigned int age, curr_sec;

	/* Loop until a consistent snapshot of the three fields is read and the
	 * updater's lock bit (bit 31 of curr_sec) is not held.
	 */
	do {
		curr = ctr->curr_ctr;
		past = ctr->prev_ctr;
		curr_sec = ctr->curr_sec;

	} while (curr != ctr->curr_ctr
		 || past != ctr->prev_ctr
		 || curr_sec != ctr->curr_sec
		 || (curr_sec & 0x80000000));

	age = now.tv_sec - curr_sec;
	if (unlikely(age > 1))
		return 0;

	/* the counter was last fed during the previous second, so what was
	 * read as "current" is in fact the previous period.
	 */
	if (unlikely(age)) {
		past = curr;
		curr = 0;
	}

	if (past <= 1 && !curr)
		return past; /* very low rate, avoid flapping */

	/* scale the previous period by the fraction of the current second
	 * which has not elapsed yet, so the window always covers one second.
	 */
	return curr + mul32hi(past, ms_left_scaled);
}
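/* Usage sketch (editorial illustration, not part of the original file): a
 * caller may compare the measured per-second rate against a configured limit.
 * The counter <sess_ctr> and the limit <sess_rate_limit> are hypothetical
 * names used only for this example.
 *
 *	if (read_freq_ctr(&sess_ctr) >= sess_rate_limit) {
 *		// over the sessions-per-second limit, stop accepting for now
 *		return 0;
 *	}
 */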

/* returns the number of remaining events that can occur on this freq counter
 * while respecting <freq> and taking into account that <pend> events are
 * already known to be pending. Returns 0 if limit was reached.
 */
unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
{
	unsigned int curr, past;
	unsigned int age, curr_sec;

	/* consistent lockless snapshot, see read_freq_ctr() above */
	do {
		curr = ctr->curr_ctr;
		past = ctr->prev_ctr;
		curr_sec = ctr->curr_sec;

	} while (curr != ctr->curr_ctr
		 || past != ctr->prev_ctr
		 || curr_sec != ctr->curr_sec
		 || (curr_sec & 0x80000000));

	age = now.tv_sec - curr_sec;
	if (unlikely(age > 1))
		curr = 0;
	else {
		if (unlikely(age == 1)) {
			past = curr;
			curr = 0;
		}
		curr += mul32hi(past, ms_left_scaled);
	}
	curr += pend;

	if (curr >= freq)
		return 0;
	return freq - curr;
}
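/* Usage sketch (editorial illustration): freq_ctr_remain() is convenient to
 * size a batch without exceeding the limit. <conn_ctr>, <max_conn_rate>,
 * <pending> and accept_up_to() are hypothetical names.
 *
 *	unsigned int budget = freq_ctr_remain(&conn_ctr, max_conn_rate, pending);
 *
 *	if (!budget)
 *		return;			// limit reached, try again later
 *	accept_up_to(budget);		// process at most <budget> events now
 */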

/* Return the expected wait time in ms before the next event may occur,
 * respecting frequency <freq>, and assuming there may already be some pending
 * events. It returns zero if we can proceed immediately, otherwise the wait
 * time, rounded down to the nearest millisecond for better accuracy, with a
 * minimum of one ms.
 */
unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
{
	unsigned int curr, past;
	unsigned int wait, age, curr_sec;

	/* consistent lockless snapshot, see read_freq_ctr() above */
	do {
		curr = ctr->curr_ctr;
		past = ctr->prev_ctr;
		curr_sec = ctr->curr_sec;

	} while (curr != ctr->curr_ctr
		 || past != ctr->prev_ctr
		 || curr_sec != ctr->curr_sec
		 || (curr_sec & 0x80000000));

	age = now.tv_sec - curr_sec;
	if (unlikely(age > 1))
		curr = 0;
	else {
		if (unlikely(age == 1)) {
			past = curr;
			curr = 0;
		}
		curr += mul32hi(past, ms_left_scaled);
	}
	curr += pend;

	if (curr < freq)
		return 0;

	/* over the limit: wait roughly one inter-event interval at the current
	 * rate, rounded down, but never less than one millisecond.
	 */
	wait = 999 / curr;
	return MAX(wait, 1);
}
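/* Usage sketch (editorial illustration): when no budget is left, the two
 * functions above combine naturally to decide how long to sleep before
 * retrying. <conn_ctr>, <max_conn_rate> and task_wake_in_ms() are
 * hypothetical names.
 *
 *	if (!freq_ctr_remain(&conn_ctr, max_conn_rate, 0)) {
 *		unsigned int delay = next_event_delay(&conn_ctr, max_conn_rate, 0);
 *		task_wake_in_ms(delay);	// re-check once the rate has decayed
 *	}
 */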

/* Reads a frequency counter taking history into account for missing time in
 * current period. The period has to be passed in number of ticks and must
 * match the one used to feed the counter. The counter value is reported for
 * current date (now_ms). The return value has the same precision as one input
 * data sample, so low rates over the period will be inaccurate but still
 * appropriate for max checking. One trick we use for low values is to specially
 * handle the case where the rate is between 0 and 1 in order to avoid flapping
 * while waiting for the next event.
 *
 * For immediate limit checking, it's recommended to use freq_ctr_remain_period()
 * instead which does not have the flapping correction, so that even frequencies
 * as low as one event/period are properly handled.
 *
 * For measures over a 1-second period, it's better to use the implicit functions
 * above.
 */
unsigned int read_freq_ctr_period(struct freq_ctr_period *ctr, unsigned int period)
{
	unsigned int curr, past;
	unsigned int remain, curr_tick;

	/* Loop until a consistent snapshot is read and the updater's lock bit
	 * (bit 0 of curr_tick) is not held.
	 */
	do {
		curr = ctr->curr_ctr;
		past = ctr->prev_ctr;
		curr_tick = ctr->curr_tick;

	} while (curr != ctr->curr_ctr
		 || past != ctr->prev_ctr
		 || curr_tick != ctr->curr_tick
		 || (curr_tick & 0x1));

	remain = curr_tick + period - now_ms;
	if (unlikely((int)remain < 0)) {
		/* We're past the first period, check if we can still report a
		 * part of last period or if we're too far away.
		 */
		remain += period;
		if ((int)remain < 0)
			return 0;
		past = curr;
		curr = 0;
	}
	if (past <= 1 && !curr)
		return past; /* very low rate, avoid flapping */

	/* count the part of the previous period which still fits in the
	 * sliding window of <period> ticks ending now.
	 */
	curr += div64_32((unsigned long long)past * remain, period);
	return curr;
}
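/* Usage sketch (editorial illustration): a counter fed over a 10000-tick
 * (10 s) period is read back with the same period. <http_err_ctr> is a
 * hypothetical name; the period must match the one used when updating.
 *
 *	unsigned int errs_per_10s = read_freq_ctr_period(&http_err_ctr, 10000);
 */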

/* Returns the number of remaining events that can occur on this freq counter
 * while respecting <freq> events per period, and taking into account that
 * <pend> events are already known to be pending. Returns 0 if limit was reached.
 */
unsigned int freq_ctr_remain_period(struct freq_ctr_period *ctr, unsigned int period,
				    unsigned int freq, unsigned int pend)
{
	unsigned int curr, past;
	unsigned int remain, curr_tick;

	/* consistent lockless snapshot, see read_freq_ctr_period() above */
	do {
		curr = ctr->curr_ctr;
		past = ctr->prev_ctr;
		curr_tick = ctr->curr_tick;

	} while (curr != ctr->curr_ctr
		 || past != ctr->prev_ctr
		 || curr_tick != ctr->curr_tick
		 || (curr_tick & 0x1));

	remain = curr_tick + period - now_ms;
	if (likely((int)remain < 0)) {
		/* We're past the first period, check if we can still report a
		 * part of last period or if we're too far away.
		 */
		past = curr;
		curr = 0;
		remain += period;
		if ((int)remain < 0)
			past = 0;
	}
	if (likely(past))
		curr += div64_32((unsigned long long)past * remain, period);

	curr += pend;
	freq -= curr;
	if ((int)freq < 0)
		freq = 0;
	return freq;
}
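/* Usage sketch (editorial illustration): checking whether more events fit in
 * the current period before a per-period limit is reached. <http_err_ctr>,
 * <max_errs> and reject_request() are hypothetical names.
 *
 *	// allow at most <max_errs> errors per 10-second window
 *	if (!freq_ctr_remain_period(&http_err_ctr, 10000, max_errs, 0))
 *		reject_request();
 */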

/*
 * Local variables:
 * c-indent-level: 8
 * c-basic-offset: 8
 * End:
 */