blob: bf714b1eb2054533320c84231c6fa25cbe9a74c7 [file] [log] [blame]
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01001/*
2 * NewReno congestion control algorithm.
3 *
4 * This file contains definitions for QUIC congestion control.
5 *
Willy Tarreau3dfb7da2022-03-02 22:33:39 +01006 * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +01007 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation, version 2.1
11 * exclusively.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
Amaury Denoyelle5c25dc52022-09-30 17:44:15 +020023#include <haproxy/api-t.h>
24#include <haproxy/buf.h>
25#include <haproxy/chunk.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010026#include <haproxy/quic_cc.h>
Amaury Denoyelle92fa63f2022-09-30 18:11:13 +020027#include <haproxy/quic_conn-t.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010028#include <haproxy/trace.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010029
30#define TRACE_SOURCE &trace_quic
31
/* Newreno per-connection state, stored in the quic_cc private area
 * (see quic_cc_priv()).
 */
struct nr {
	uint32_t ssthresh;            /* slow start threshold (bytes); halved cwnd on loss,
	                               * QUIC_CC_INFINITE_SSTHESH until the first loss. */
	uint32_t recovery_start_time; /* now_ms tick when the current recovery period
	                               * started; 0/TICK_ETERNITY when not in recovery. */
	uint32_t remain_acked;        /* leftover acked bytes (acked % cwnd) carried over
	                               * between congestion-avoidance window increases. */
};
38
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010039static int quic_cc_nr_init(struct quic_cc *cc)
40{
Frédéric Lécaillec5914592022-05-31 09:40:44 +020041 struct nr *nr = quic_cc_priv(cc);
42
43 cc->algo->state = QUIC_CC_ST_SS;
44 nr->ssthresh = QUIC_CC_INFINITE_SSTHESH;
45 nr->recovery_start_time = 0;
46 nr->remain_acked = 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010047
48 return 1;
49}
50
Frédéric Lécaille83bfca62022-03-02 11:18:33 +010051/* Re-enter slow start state. */
52static void quic_cc_nr_slow_start(struct quic_cc *cc)
53{
54 struct quic_path *path;
Frédéric Lécaillec5914592022-05-31 09:40:44 +020055 struct nr *nr = quic_cc_priv(cc);
Frédéric Lécaille83bfca62022-03-02 11:18:33 +010056
57 path = container_of(cc, struct quic_path, cc);
Frédéric Lécaille9777ead2022-03-03 08:24:53 +010058 path->cwnd = path->min_cwnd;
Frédéric Lécaille83bfca62022-03-02 11:18:33 +010059 /* Re-entering slow start state. */
Frédéric Lécaillec5914592022-05-31 09:40:44 +020060 cc->algo->state = QUIC_CC_ST_SS;
Frédéric Lécaille83bfca62022-03-02 11:18:33 +010061 /* Recovery start time reset */
Frédéric Lécaillec5914592022-05-31 09:40:44 +020062 nr->recovery_start_time = 0;
Frédéric Lécaille83bfca62022-03-02 11:18:33 +010063}
64
Frédéric Lécaille8e6c6612023-03-22 09:13:14 +010065/* Enter a recovery period. */
66static void quic_cc_nr_enter_recovery(struct quic_cc *cc)
67{
68 struct quic_path *path;
69 struct nr *nr = quic_cc_priv(cc);
70
71 path = container_of(cc, struct quic_path, cc);
72 nr->recovery_start_time = now_ms;
73 nr->ssthresh = QUIC_MAX(path->cwnd >> 1, path->min_cwnd);
74 cc->algo->state = QUIC_CC_ST_RP;
75}
76
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010077/* Slow start callback. */
78static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
79{
80 struct quic_path *path;
Frédéric Lécaillec5914592022-05-31 09:40:44 +020081 struct nr *nr = quic_cc_priv(cc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010082
Frédéric Lécaille8f991942023-03-24 15:14:45 +010083 TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
84 TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010085 path = container_of(cc, struct quic_path, cc);
86 switch (ev->type) {
87 case QUIC_CC_EVT_ACK:
Frédéric Lécaille9777ead2022-03-03 08:24:53 +010088 path->cwnd += ev->ack.acked;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010089 /* Exit to congestion avoidance if slow start threshold is reached. */
Frédéric Lécaillec5914592022-05-31 09:40:44 +020090 if (path->cwnd > nr->ssthresh)
91 cc->algo->state = QUIC_CC_ST_CA;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010092 break;
93
94 case QUIC_CC_EVT_LOSS:
Frédéric Lécaille8e6c6612023-03-22 09:13:14 +010095 quic_cc_nr_enter_recovery(cc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010096 break;
97
98 case QUIC_CC_EVT_ECN_CE:
99 /* XXX TO DO XXX */
100 break;
101 }
Frédéric Lécaille8f991942023-03-24 15:14:45 +0100102 TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, NULL, cc);
103 TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100104}
105
106/* Congestion avoidance callback. */
107static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
108{
109 struct quic_path *path;
Frédéric Lécaillec5914592022-05-31 09:40:44 +0200110 struct nr *nr = quic_cc_priv(cc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100111
Frédéric Lécaille8f991942023-03-24 15:14:45 +0100112 TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
113 TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100114 path = container_of(cc, struct quic_path, cc);
115 switch (ev->type) {
116 case QUIC_CC_EVT_ACK:
Frédéric Lécaille0e7c9a72022-03-03 07:50:45 +0100117 {
118 uint64_t acked;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100119
Frédéric Lécaille0e7c9a72022-03-03 07:50:45 +0100120 /* Increasing the congestion window by (acked / cwnd)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100121 */
Frédéric Lécaillec5914592022-05-31 09:40:44 +0200122 acked = ev->ack.acked * path->mtu + nr->remain_acked;
123 nr->remain_acked = acked % path->cwnd;
Frédéric Lécaille9777ead2022-03-03 08:24:53 +0100124 path->cwnd += acked / path->cwnd;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100125 break;
Frédéric Lécaille0e7c9a72022-03-03 07:50:45 +0100126 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100127
128 case QUIC_CC_EVT_LOSS:
Frédéric Lécaille8e6c6612023-03-22 09:13:14 +0100129 quic_cc_nr_enter_recovery(cc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100130 break;
131
132 case QUIC_CC_EVT_ECN_CE:
133 /* XXX TO DO XXX */
134 break;
135 }
136
137 out:
Frédéric Lécaille8f991942023-03-24 15:14:45 +0100138 TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, NULL, cc);
139 TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100140}
141
Frédéric Lécaille8e6c6612023-03-22 09:13:14 +0100142/* Recovery period callback. */
143static void quic_cc_nr_rp_cb(struct quic_cc *cc, struct quic_cc_event *ev)
144{
145 struct quic_path *path;
146 struct nr *nr = quic_cc_priv(cc);
147
148 BUG_ON(!tick_isset(nr->recovery_start_time));
149
Frédéric Lécaille8f991942023-03-24 15:14:45 +0100150 TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
151 TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
Frédéric Lécaille8e6c6612023-03-22 09:13:14 +0100152 path = container_of(cc, struct quic_path, cc);
153 switch (ev->type) {
154 case QUIC_CC_EVT_ACK:
155 /* RFC 9022 7.3.2. Recovery
156 * A recovery period ends and the sender enters congestion avoidance when a
157 * packet sent during the recovery period is acknowledged.
158 */
Frédéric Lécaille8f991942023-03-24 15:14:45 +0100159 if (tick_is_le(ev->ack.time_sent, nr->recovery_start_time)) {
160 TRACE_PROTO("CC reno (still in recovery period)", QUIC_EV_CONN_CC, cc->qc, ev);
Frédéric Lécaille8e6c6612023-03-22 09:13:14 +0100161 goto leave;
Frédéric Lécaille8f991942023-03-24 15:14:45 +0100162 }
Frédéric Lécaille8e6c6612023-03-22 09:13:14 +0100163
164 cc->algo->state = QUIC_CC_ST_CA;
165 nr->recovery_start_time = TICK_ETERNITY;
166 path->cwnd = nr->ssthresh;
167 break;
168 case QUIC_CC_EVT_LOSS:
169 /* Do nothing */
170 break;
171 case QUIC_CC_EVT_ECN_CE:
172 /* XXX TO DO XXX */
173 break;
174 }
175
176 leave:
Frédéric Lécaille8f991942023-03-24 15:14:45 +0100177 TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
Frédéric Lécaille8e6c6612023-03-22 09:13:14 +0100178 TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc, ev);
179}
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100180static void quic_cc_nr_state_trace(struct buffer *buf, const struct quic_cc *cc)
181{
Frédéric Lécaille9777ead2022-03-03 08:24:53 +0100182 struct quic_path *path;
Frédéric Lécaillec5914592022-05-31 09:40:44 +0200183 struct nr *nr = quic_cc_priv(cc);
Frédéric Lécaille9777ead2022-03-03 08:24:53 +0100184
185 path = container_of(cc, struct quic_path, cc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100186 chunk_appendf(buf, " state=%s cwnd=%llu ssthresh=%ld recovery_start_time=%llu",
Frédéric Lécaillec5914592022-05-31 09:40:44 +0200187 quic_cc_state_str(cc->algo->state),
Frédéric Lécaille9777ead2022-03-03 08:24:53 +0100188 (unsigned long long)path->cwnd,
Frédéric Lécaillec5914592022-05-31 09:40:44 +0200189 (long)nr->ssthresh,
190 (unsigned long long)nr->recovery_start_time);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100191}
192
/* Per-state event handlers, indexed by the current congestion control
 * state (slow start, congestion avoidance, recovery period).
 */
static void (*quic_cc_nr_state_cbs[])(struct quic_cc *cc,
                                      struct quic_cc_event *ev) = {
	[QUIC_CC_ST_SS] = quic_cc_nr_ss_cb,
	[QUIC_CC_ST_CA] = quic_cc_nr_ca_cb,
	[QUIC_CC_ST_RP] = quic_cc_nr_rp_cb,
};
199
200static void quic_cc_nr_event(struct quic_cc *cc, struct quic_cc_event *ev)
201{
Frédéric Lécaillec5914592022-05-31 09:40:44 +0200202 return quic_cc_nr_state_cbs[cc->algo->state](cc, ev);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100203}
204
/* NewReno congestion control algorithm descriptor, registered as the
 * QUIC_CC_ALGO_TP_NEWRENO implementation.
 */
struct quic_cc_algo quic_cc_algo_nr = {
	.type        = QUIC_CC_ALGO_TP_NEWRENO,
	.init        = quic_cc_nr_init,
	.event       = quic_cc_nr_event,
	.slow_start  = quic_cc_nr_slow_start,
	.state_trace = quic_cc_nr_state_trace,
};
212