/*
 * NewReno congestion control algorithm.
 *
 * This file contains definitions for QUIC congestion control.
 *
 * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <haproxy/quic_cc.h>
#include <haproxy/trace.h>
#include <haproxy/xprt_quic.h>

#define TRACE_SOURCE    &trace_quic

static int quic_cc_nr_init(struct quic_cc *cc)
{
	cc->algo_state.nr.state = QUIC_CC_ST_SS;
	cc->algo_state.nr.ssthresh = QUIC_CC_INFINITE_SSTHESH;
	cc->algo_state.nr.recovery_start_time = 0;
	cc->algo_state.nr.remain_acked = 0;

	return 1;
}

/* Re-enter slow start state. */
static void quic_cc_nr_slow_start(struct quic_cc *cc)
{
	struct quic_path *path;

	path = container_of(cc, struct quic_path, cc);
	path->cwnd = path->min_cwnd;
	/* Re-entering slow start state. */
	cc->algo_state.nr.state = QUIC_CC_ST_SS;
	/* Recovery start time reset */
	cc->algo_state.nr.recovery_start_time = 0;
}

/* Slow start callback. */
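/* In this state the congestion window grows by the number of newly
 * acknowledged bytes, so it roughly doubles every round trip, until it
 * exceeds ssthresh (the state then switches to congestion avoidance) or a
 * loss halves it.
 */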
static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
{
	struct quic_path *path;

	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc, ev);
	path = container_of(cc, struct quic_path, cc);
	switch (ev->type) {
	case QUIC_CC_EVT_ACK:
		/* Do not increase the congestion window in recovery period. */
		if (ev->ack.time_sent <= cc->algo_state.nr.recovery_start_time)
			return;

		path->cwnd += ev->ack.acked;
		/* Exit to congestion avoidance if slow start threshold is reached. */
		if (path->cwnd > cc->algo_state.nr.ssthresh)
			cc->algo_state.nr.state = QUIC_CC_ST_CA;
		break;

	case QUIC_CC_EVT_LOSS:
		path->cwnd = QUIC_MAX(path->cwnd >> 1, path->min_cwnd);
		cc->algo_state.nr.ssthresh = path->cwnd;
		/* Exit to congestion avoidance. */
		cc->algo_state.nr.state = QUIC_CC_ST_CA;
		break;

	case QUIC_CC_EVT_ECN_CE:
		/* XXX TO DO XXX */
		break;
	}
	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc,, cc);
}

/* Congestion avoidance callback. */
static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
{
	struct quic_path *path;

	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc, ev);
	path = container_of(cc, struct quic_path, cc);
	switch (ev->type) {
	case QUIC_CC_EVT_ACK:
	{
		uint64_t acked;
		/* Do not increase the congestion window in recovery period. */
		if (ev->ack.time_sent <= cc->algo_state.nr.recovery_start_time)
			goto out;

		/* Increase the congestion window by (acked / cwnd), with the
		 * acknowledged bytes scaled by the MTU so that acknowledging a
		 * full window of data grows the window by about one MTU per RTT.
		 * The division remainder is carried over in remain_acked.
		 */
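		/* For instance (illustrative values): with mtu = 1252 bytes,
		 * cwnd = 125200 bytes and remain_acked = 0, an ACK covering
		 * 12520 bytes gives acked = 12520 * 1252 = 15675040, so the
		 * window grows by 15675040 / 125200 = 125 bytes and 25040 is
		 * kept in remain_acked for the next ACK.
		 */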
		acked = ev->ack.acked * path->mtu + cc->algo_state.nr.remain_acked;
		cc->algo_state.nr.remain_acked = acked % path->cwnd;
		path->cwnd += acked / path->cwnd;
		break;
	}

	case QUIC_CC_EVT_LOSS:
		/* Do not decrease the congestion window when already in recovery period. */
		if (ev->loss.time_sent <= cc->algo_state.nr.recovery_start_time)
			goto out;

		cc->algo_state.nr.recovery_start_time = now_ms;
		cc->algo_state.nr.ssthresh = path->cwnd;
		path->cwnd = QUIC_MAX(path->cwnd >> 1, path->min_cwnd);
		break;

	case QUIC_CC_EVT_ECN_CE:
		/* XXX TO DO XXX */
		break;
	}

 out:
	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc, NULL, cc);
}

static void quic_cc_nr_state_trace(struct buffer *buf, const struct quic_cc *cc)
{
	struct quic_path *path;

	path = container_of(cc, struct quic_path, cc);
	chunk_appendf(buf, " state=%s cwnd=%llu ssthresh=%ld recovery_start_time=%llu",
	              quic_cc_state_str(cc->algo_state.nr.state),
	              (unsigned long long)path->cwnd,
	              (long)cc->algo_state.nr.ssthresh,
	              (unsigned long long)cc->algo_state.nr.recovery_start_time);
}

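/* Per-state event handlers, indexed by the current congestion control state. */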
static void (*quic_cc_nr_state_cbs[])(struct quic_cc *cc,
                                      struct quic_cc_event *ev) = {
	[QUIC_CC_ST_SS] = quic_cc_nr_ss_cb,
	[QUIC_CC_ST_CA] = quic_cc_nr_ca_cb,
};

static void quic_cc_nr_event(struct quic_cc *cc, struct quic_cc_event *ev)
{
	return quic_cc_nr_state_cbs[cc->algo_state.nr.state](cc, ev);
}

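/* NewReno congestion control algorithm descriptor, exposed through the
 * generic quic_cc_algo interface.
 */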
struct quic_cc_algo quic_cc_algo_nr = {
	.type        = QUIC_CC_ALGO_TP_NEWRENO,
	.init        = quic_cc_nr_init,
	.event       = quic_cc_nr_event,
	.slow_start  = quic_cc_nr_slow_start,
	.state_trace = quic_cc_nr_state_trace,
};