/*
 * Event rate calculation functions.
 *
 * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/config.h>
#include <common/standard.h>
#include <common/time.h>
#include <common/tools.h>
#include <proto/freq_ctr.h>

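/* A note on the helpers used below (their authoritative definitions live in
 * the headers included above, not here). As accessed by this file, the
 * counter essentially boils down to the following sketch:
 *
 *	struct freq_ctr {
 *		unsigned int curr_sec;  // start of the current one-second period
 *		unsigned int curr_ctr;  // events accumulated in the current period
 *		unsigned int prev_ctr;  // events accumulated over the previous period
 *	};
 *
 * rotate_freq_ctr() is expected to fold the finished period into prev_ctr and
 * to reset curr_ctr/curr_sec when a new second begins. mul32hi(a, b) returns
 * the upper 32 bits of the 32x32 product, i.e. roughly a * b / 2^32, and
 * ~curr_sec_ms_scaled stands for the fraction of the current second that has
 * not elapsed yet, scaled to the full 32-bit range.
 */
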
/* Read a frequency counter taking history into account for missing time in
 * the current period. The current second is sub-divided into 1000 chunks of
 * one ms, and the missing ones are read proportionally from the previous
 * value. The return value has the same precision as one input data sample,
 * so low rates will be inaccurate but still appropriate for max checking.
 * One trick we use for low values is to specially handle the case where the
 * rate is between 0 and 1 in order to avoid flapping while waiting for the
 * next event.
 *
 * For immediate limit checking, it's recommended to use freq_ctr_remain() and
 * next_event_delay() instead, which do not have the flapping correction, so
 * that even frequencies as low as one event/period are properly handled.
 */
unsigned int read_freq_ctr(struct freq_ctr *ctr)
{
	unsigned int cur;
	if (unlikely(ctr->curr_sec != now.tv_sec))
		rotate_freq_ctr(ctr);

	cur = ctr->curr_ctr;
	if (ctr->prev_ctr <= 1 && !ctr->curr_ctr)
		return ctr->prev_ctr; /* very low rate, avoid flapping */

	/* add the part of the previous period covering the milliseconds
	 * not yet elapsed in the current second.
	 */
	return cur + mul32hi(ctr->prev_ctr, ~curr_sec_ms_scaled);
}

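/* Usage sketch (illustrative only, not part of this file): a caller that
 * bumps the counter on every event (e.g. with the update helper from the
 * freq_ctr header) can read the current rate at any point in the second.
 * The names <sess_per_sec>, <maxrate> and refuse_or_delay() below are
 * hypothetical:
 *
 *	struct freq_ctr sess_per_sec;   // bumped once per new session
 *	...
 *	unsigned int rate = read_freq_ctr(&sess_per_sec);
 *	if (rate > maxrate)
 *		refuse_or_delay();
 *
 * For instance, 600 ms into the current second with prev_ctr = 100 and
 * curr_ctr = 40, the function reports about 40 + 100 * 0.4 = 80 events/s.
 */
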
/* Returns the number of remaining events that can occur on this freq counter
 * while respecting <freq> and taking into account that <pend> events are
 * already known to be pending. Returns 0 if the limit was reached.
 */
unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
{
	unsigned int cur;
	if (unlikely(ctr->curr_sec != now.tv_sec))
		rotate_freq_ctr(ctr);

	cur = mul32hi(ctr->prev_ctr, ~curr_sec_ms_scaled);
	cur += ctr->curr_ctr + pend;

	if (cur >= freq)
		return 0;
	return freq - cur;
}

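/* Usage sketch (illustrative only): freq_ctr_remain() is convenient to decide
 * how many queued events may be released right now without exceeding a
 * per-second limit. The names <conn_per_sec>, <limit> and <queued> below are
 * hypothetical:
 *
 *	unsigned int budget = freq_ctr_remain(&conn_per_sec, limit, 0);
 *	unsigned int to_accept = MIN(budget, queued);
 *	// accept <to_accept> connections now, keep the others queued
 *
 * A return value of 0 means the limit is already reached for this period.
 */
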
/* Return the expected wait time in ms before the next event may occur,
 * respecting frequency <freq>, and assuming there may already be some pending
 * events. It returns zero if we can proceed immediately, otherwise the wait
 * time, which will be rounded down by 1 ms for better accuracy, with a
 * minimum of one ms.
 */
unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
{
	unsigned int cur, wait;

	if (unlikely(ctr->curr_sec != now.tv_sec))
		rotate_freq_ctr(ctr);

	cur = mul32hi(ctr->prev_ctr, ~curr_sec_ms_scaled);
	cur += ctr->curr_ctr + pend;

	if (cur < freq)
		return 0;

	wait = 999 / cur;
	return MAX(wait, 1);
}
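
/* Usage sketch (illustrative only): the two helpers above are typically used
 * together: when freq_ctr_remain() reports no budget left, next_event_delay()
 * tells the caller how long to wait before trying again. The names
 * <conn_per_sec> and <limit> below are hypothetical, and reschedule_in_ms()
 * stands for whatever re-scheduling mechanism the caller uses:
 *
 *	if (!freq_ctr_remain(&conn_per_sec, limit, 0)) {
 *		unsigned int delay = next_event_delay(&conn_per_sec, limit, 0);
 *		reschedule_in_ms(delay);   // retry once the rate has dropped
 *		return;
 *	}
 *	// otherwise accept the event and account for it in the counter
 */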


/*
 * Local variables:
 * c-indent-level: 8
 * c-basic-offset: 8
 * End:
 */