blob: 2eedbfbe7a17c82701bc7de4d063616028f6b72a [file] [log] [blame]
/*
 * NewReno congestion control algorithm.
 *
 * This file contains definitions for QUIC congestion control.
 *
 * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
22
Amaury Denoyelle5c25dc52022-09-30 17:44:15 +020023#include <haproxy/api-t.h>
24#include <haproxy/buf.h>
25#include <haproxy/chunk.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010026#include <haproxy/quic_cc.h>
Amaury Denoyelle92fa63f2022-09-30 18:11:13 +020027#include <haproxy/quic_conn-t.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010028#include <haproxy/trace.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010029
30#define TRACE_SOURCE &trace_quic
31
/* NewReno per-connection state, stored in the private area of the generic
 * <quic_cc> descriptor and retrieved with quic_cc_priv().
 */
struct nr {
	uint32_t ssthresh;            /* slow start threshold in bytes; QUIC_CC_INFINITE_SSTHESH until the first loss */
	uint32_t recovery_start_time; /* tick date the current recovery period started; 0/TICK_ETERNITY when unset */
	uint32_t remain_acked;        /* leftover of the acked*mtu/cwnd division, carried over between CA increases */
};
38
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010039static int quic_cc_nr_init(struct quic_cc *cc)
40{
Frédéric Lécaillec5914592022-05-31 09:40:44 +020041 struct nr *nr = quic_cc_priv(cc);
42
43 cc->algo->state = QUIC_CC_ST_SS;
44 nr->ssthresh = QUIC_CC_INFINITE_SSTHESH;
45 nr->recovery_start_time = 0;
46 nr->remain_acked = 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010047
48 return 1;
49}
50
Frédéric Lécaille83bfca62022-03-02 11:18:33 +010051/* Re-enter slow start state. */
52static void quic_cc_nr_slow_start(struct quic_cc *cc)
53{
54 struct quic_path *path;
Frédéric Lécaillec5914592022-05-31 09:40:44 +020055 struct nr *nr = quic_cc_priv(cc);
Frédéric Lécaille83bfca62022-03-02 11:18:33 +010056
57 path = container_of(cc, struct quic_path, cc);
Frédéric Lécaille9777ead2022-03-03 08:24:53 +010058 path->cwnd = path->min_cwnd;
Frédéric Lécaille83bfca62022-03-02 11:18:33 +010059 /* Re-entering slow start state. */
Frédéric Lécaillec5914592022-05-31 09:40:44 +020060 cc->algo->state = QUIC_CC_ST_SS;
Frédéric Lécaille83bfca62022-03-02 11:18:33 +010061 /* Recovery start time reset */
Frédéric Lécaillec5914592022-05-31 09:40:44 +020062 nr->recovery_start_time = 0;
Frédéric Lécaille83bfca62022-03-02 11:18:33 +010063}
64
Frédéric Lécaille8e6c6612023-03-22 09:13:14 +010065/* Enter a recovery period. */
66static void quic_cc_nr_enter_recovery(struct quic_cc *cc)
67{
68 struct quic_path *path;
69 struct nr *nr = quic_cc_priv(cc);
70
71 path = container_of(cc, struct quic_path, cc);
72 nr->recovery_start_time = now_ms;
73 nr->ssthresh = QUIC_MAX(path->cwnd >> 1, path->min_cwnd);
74 cc->algo->state = QUIC_CC_ST_RP;
75}
76
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010077/* Slow start callback. */
78static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
79{
80 struct quic_path *path;
Frédéric Lécaillec5914592022-05-31 09:40:44 +020081 struct nr *nr = quic_cc_priv(cc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010082
Frédéric Lécaillefde2a982021-12-27 15:12:09 +010083 TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc, ev);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010084 path = container_of(cc, struct quic_path, cc);
85 switch (ev->type) {
86 case QUIC_CC_EVT_ACK:
Frédéric Lécaille9777ead2022-03-03 08:24:53 +010087 path->cwnd += ev->ack.acked;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010088 /* Exit to congestion avoidance if slow start threshold is reached. */
Frédéric Lécaillec5914592022-05-31 09:40:44 +020089 if (path->cwnd > nr->ssthresh)
90 cc->algo->state = QUIC_CC_ST_CA;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010091 break;
92
93 case QUIC_CC_EVT_LOSS:
Frédéric Lécaille8e6c6612023-03-22 09:13:14 +010094 quic_cc_nr_enter_recovery(cc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010095 break;
96
97 case QUIC_CC_EVT_ECN_CE:
98 /* XXX TO DO XXX */
99 break;
100 }
Frédéric Lécaillefde2a982021-12-27 15:12:09 +0100101 TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc,, cc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100102}
103
104/* Congestion avoidance callback. */
105static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
106{
107 struct quic_path *path;
Frédéric Lécaillec5914592022-05-31 09:40:44 +0200108 struct nr *nr = quic_cc_priv(cc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100109
Frédéric Lécaillefde2a982021-12-27 15:12:09 +0100110 TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc, ev);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100111 path = container_of(cc, struct quic_path, cc);
112 switch (ev->type) {
113 case QUIC_CC_EVT_ACK:
Frédéric Lécaille0e7c9a72022-03-03 07:50:45 +0100114 {
115 uint64_t acked;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100116
Frédéric Lécaille0e7c9a72022-03-03 07:50:45 +0100117 /* Increasing the congestion window by (acked / cwnd)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100118 */
Frédéric Lécaillec5914592022-05-31 09:40:44 +0200119 acked = ev->ack.acked * path->mtu + nr->remain_acked;
120 nr->remain_acked = acked % path->cwnd;
Frédéric Lécaille9777ead2022-03-03 08:24:53 +0100121 path->cwnd += acked / path->cwnd;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100122 break;
Frédéric Lécaille0e7c9a72022-03-03 07:50:45 +0100123 }
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100124
125 case QUIC_CC_EVT_LOSS:
Frédéric Lécaille8e6c6612023-03-22 09:13:14 +0100126 quic_cc_nr_enter_recovery(cc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100127 break;
128
129 case QUIC_CC_EVT_ECN_CE:
130 /* XXX TO DO XXX */
131 break;
132 }
133
134 out:
Frédéric Lécaillefde2a982021-12-27 15:12:09 +0100135 TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc, NULL, cc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100136}
137
Frédéric Lécaille8e6c6612023-03-22 09:13:14 +0100138/* Recovery period callback. */
139static void quic_cc_nr_rp_cb(struct quic_cc *cc, struct quic_cc_event *ev)
140{
141 struct quic_path *path;
142 struct nr *nr = quic_cc_priv(cc);
143
144 BUG_ON(!tick_isset(nr->recovery_start_time));
145
146 TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc, ev);
147 path = container_of(cc, struct quic_path, cc);
148 switch (ev->type) {
149 case QUIC_CC_EVT_ACK:
150 /* RFC 9022 7.3.2. Recovery
151 * A recovery period ends and the sender enters congestion avoidance when a
152 * packet sent during the recovery period is acknowledged.
153 */
154 if (tick_is_le(ev->ack.time_sent, nr->recovery_start_time))
155 goto leave;
156
157 cc->algo->state = QUIC_CC_ST_CA;
158 nr->recovery_start_time = TICK_ETERNITY;
159 path->cwnd = nr->ssthresh;
160 break;
161 case QUIC_CC_EVT_LOSS:
162 /* Do nothing */
163 break;
164 case QUIC_CC_EVT_ECN_CE:
165 /* XXX TO DO XXX */
166 break;
167 }
168
169 leave:
170 TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc, ev);
171}
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100172static void quic_cc_nr_state_trace(struct buffer *buf, const struct quic_cc *cc)
173{
Frédéric Lécaille9777ead2022-03-03 08:24:53 +0100174 struct quic_path *path;
Frédéric Lécaillec5914592022-05-31 09:40:44 +0200175 struct nr *nr = quic_cc_priv(cc);
Frédéric Lécaille9777ead2022-03-03 08:24:53 +0100176
177 path = container_of(cc, struct quic_path, cc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100178 chunk_appendf(buf, " state=%s cwnd=%llu ssthresh=%ld recovery_start_time=%llu",
Frédéric Lécaillec5914592022-05-31 09:40:44 +0200179 quic_cc_state_str(cc->algo->state),
Frédéric Lécaille9777ead2022-03-03 08:24:53 +0100180 (unsigned long long)path->cwnd,
Frédéric Lécaillec5914592022-05-31 09:40:44 +0200181 (long)nr->ssthresh,
182 (unsigned long long)nr->recovery_start_time);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100183}
184
/* Per-state event handlers, indexed by the current congestion control
 * state (cc->algo->state).
 */
static void (*quic_cc_nr_state_cbs[])(struct quic_cc *cc,
                                      struct quic_cc_event *ev) = {
	[QUIC_CC_ST_SS] = quic_cc_nr_ss_cb, /* slow start */
	[QUIC_CC_ST_CA] = quic_cc_nr_ca_cb, /* congestion avoidance */
	[QUIC_CC_ST_RP] = quic_cc_nr_rp_cb, /* recovery period */
};
191
192static void quic_cc_nr_event(struct quic_cc *cc, struct quic_cc_event *ev)
193{
Frédéric Lécaillec5914592022-05-31 09:40:44 +0200194 return quic_cc_nr_state_cbs[cc->algo->state](cc, ev);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100195}
196
/* NewReno congestion control algorithm descriptor, registered under the
 * QUIC_CC_ALGO_TP_NEWRENO transport parameter identifier.
 */
struct quic_cc_algo quic_cc_algo_nr = {
	.type        = QUIC_CC_ALGO_TP_NEWRENO,
	.init        = quic_cc_nr_init,        /* per-connection state setup */
	.event       = quic_cc_nr_event,       /* ACK/loss/ECN event dispatch */
	.slow_start  = quic_cc_nr_slow_start,  /* forced return to slow start */
	.state_trace = quic_cc_nr_state_trace, /* trace dump helper */
};
204