MINOR: quic: Congestion control architecture refactoring
Ease the integration of new congestion control algorithms to come.
Move the congestion controller state to a private array of uint32_t
to stop using a union. We no longer want to use long access paths such
as cc->algo_state.<algo>.<var> to modify the internal state variables
of each algorithm.
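As an illustration of the new pattern, an algorithm now declares its
state privately in its own compilation unit and reaches it through
quic_cc_priv(). A minimal sketch of what a future controller could
look like (the "mycc" names are invented for this example; the C11
_Static_assert guard is optional):

    /* Hypothetical private state; must fit into the 64 bytes
     * provided by the <priv> array of struct quic_cc.
     */
    struct mycc {
        uint32_t ssthresh;
        uint32_t recovery_start_time;
    };

    _Static_assert(sizeof(struct mycc) <= 16 * sizeof(uint32_t),
                   "mycc state does not fit into quic_cc <priv>");

    static int quic_cc_mycc_init(struct quic_cc *cc)
    {
        struct mycc *m = quic_cc_priv(cc);

        cc->algo->state = QUIC_CC_ST_SS;
        m->ssthresh = QUIC_CC_INFINITE_SSTHESH;
        m->recovery_start_time = 0;
        return 1;
    }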
Must be backported to 2.6.
diff --git a/include/haproxy/quic_cc-t.h b/include/haproxy/quic_cc-t.h
index 09940b4..34961a9 100644
--- a/include/haproxy/quic_cc-t.h
+++ b/include/haproxy/quic_cc-t.h
@@ -31,7 +31,7 @@
#include <haproxy/buf-t.h>
-#define QUIC_CC_INFINITE_SSTHESH ((uint64_t)-1)
+#define QUIC_CC_INFINITE_SSTHESH ((uint32_t)-1)
extern struct quic_cc_algo quic_cc_algo_nr;
extern struct quic_cc_algo *default_quic_cc_algo;
@@ -69,25 +69,16 @@
QUIC_CC_ALGO_TP_NEWRENO,
};
-union quic_cc_algo_state {
- /* NewReno */
- struct nr {
- enum quic_cc_algo_state_type state;
- uint64_t ssthresh;
- uint64_t recovery_start_time;
- uint64_t remain_acked;
- } nr;
-};
-
struct quic_cc {
/* <conn> is there only for debugging purpose. */
struct quic_conn *qc;
struct quic_cc_algo *algo;
- union quic_cc_algo_state algo_state;
+ uint32_t priv[16];
};
struct quic_cc_algo {
enum quic_cc_algo_type type;
+ enum quic_cc_algo_state_type state;
int (*init)(struct quic_cc *cc);
void (*event)(struct quic_cc *cc, struct quic_cc_event *ev);
void (*slow_start)(struct quic_cc *cc);
diff --git a/include/haproxy/quic_cc.h b/include/haproxy/quic_cc.h
index b654fff..be92336 100644
--- a/include/haproxy/quic_cc.h
+++ b/include/haproxy/quic_cc.h
@@ -65,5 +65,10 @@
}
}
+static inline void *quic_cc_priv(const struct quic_cc *cc)
+{
+ return (void *)cc->priv;
+}
+
#endif /* USE_QUIC */
#endif /* _PROTO_QUIC_CC_H */
diff --git a/src/quic_cc_newreno.c b/src/quic_cc_newreno.c
index 97a0c82..89a0524 100644
--- a/src/quic_cc_newreno.c
+++ b/src/quic_cc_newreno.c
@@ -26,12 +26,21 @@
#define TRACE_SOURCE &trace_quic
+/* NewReno state */
+struct nr {
+ uint32_t ssthresh;
+ uint32_t recovery_start_time;
+ uint32_t remain_acked;
+};
+
static int quic_cc_nr_init(struct quic_cc *cc)
{
- cc->algo_state.nr.state = QUIC_CC_ST_SS;
- cc->algo_state.nr.ssthresh = QUIC_CC_INFINITE_SSTHESH;
- cc->algo_state.nr.recovery_start_time = 0;
- cc->algo_state.nr.remain_acked = 0;
+ struct nr *nr = quic_cc_priv(cc);
+
+ cc->algo->state = QUIC_CC_ST_SS;
+ nr->ssthresh = QUIC_CC_INFINITE_SSTHESH;
+ nr->recovery_start_time = 0;
+ nr->remain_acked = 0;
return 1;
}
@@ -40,39 +49,41 @@
static void quic_cc_nr_slow_start(struct quic_cc *cc)
{
struct quic_path *path;
+ struct nr *nr = quic_cc_priv(cc);
path = container_of(cc, struct quic_path, cc);
path->cwnd = path->min_cwnd;
/* Re-entering slow start state. */
- cc->algo_state.nr.state = QUIC_CC_ST_SS;
+ cc->algo->state = QUIC_CC_ST_SS;
/* Recovery start time reset */
- cc->algo_state.nr.recovery_start_time = 0;
+ nr->recovery_start_time = 0;
}
/* Slow start callback. */
static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
{
struct quic_path *path;
+ struct nr *nr = quic_cc_priv(cc);
TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc, ev);
path = container_of(cc, struct quic_path, cc);
switch (ev->type) {
case QUIC_CC_EVT_ACK:
/* Do not increase the congestion window in recovery period. */
- if (ev->ack.time_sent <= cc->algo_state.nr.recovery_start_time)
+ if (ev->ack.time_sent <= nr->recovery_start_time)
return;
path->cwnd += ev->ack.acked;
/* Exit to congestion avoidance if slow start threshold is reached. */
- if (path->cwnd > cc->algo_state.nr.ssthresh)
- cc->algo_state.nr.state = QUIC_CC_ST_CA;
+ if (path->cwnd > nr->ssthresh)
+ cc->algo->state = QUIC_CC_ST_CA;
break;
case QUIC_CC_EVT_LOSS:
path->cwnd = QUIC_MAX(path->cwnd >> 1, path->min_cwnd);
- cc->algo_state.nr.ssthresh = path->cwnd;
+ nr->ssthresh = path->cwnd;
/* Exit to congestion avoidance. */
- cc->algo_state.nr.state = QUIC_CC_ST_CA;
+ cc->algo->state = QUIC_CC_ST_CA;
break;
case QUIC_CC_EVT_ECN_CE:
@@ -86,6 +97,7 @@
static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
{
struct quic_path *path;
+ struct nr *nr = quic_cc_priv(cc);
TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc, ev);
path = container_of(cc, struct quic_path, cc);
@@ -94,24 +106,24 @@
{
uint64_t acked;
/* Do not increase the congestion window in recovery period. */
- if (ev->ack.time_sent <= cc->algo_state.nr.recovery_start_time)
+ if (ev->ack.time_sent <= nr->recovery_start_time)
goto out;
/* Increasing the congestion window by (acked / cwnd)
*/
- acked = ev->ack.acked * path->mtu + cc->algo_state.nr.remain_acked;
- cc->algo_state.nr.remain_acked = acked % path->cwnd;
+ acked = ev->ack.acked * path->mtu + nr->remain_acked;
+ nr->remain_acked = acked % path->cwnd;
path->cwnd += acked / path->cwnd;
break;
}
case QUIC_CC_EVT_LOSS:
/* Do not decrease the congestion window when already in recovery period. */
- if (ev->loss.time_sent <= cc->algo_state.nr.recovery_start_time)
+ if (ev->loss.time_sent <= nr->recovery_start_time)
goto out;
- cc->algo_state.nr.recovery_start_time = now_ms;
- cc->algo_state.nr.ssthresh = path->cwnd;
+ nr->recovery_start_time = now_ms;
+ nr->ssthresh = path->cwnd;
path->cwnd = QUIC_MAX(path->cwnd >> 1, path->min_cwnd);
break;
@@ -127,13 +139,14 @@
static void quic_cc_nr_state_trace(struct buffer *buf, const struct quic_cc *cc)
{
struct quic_path *path;
+ struct nr *nr = quic_cc_priv(cc);
path = container_of(cc, struct quic_path, cc);
chunk_appendf(buf, " state=%s cwnd=%llu ssthresh=%ld recovery_start_time=%llu",
- quic_cc_state_str(cc->algo_state.nr.state),
+ quic_cc_state_str(cc->algo->state),
(unsigned long long)path->cwnd,
- (long)cc->algo_state.nr.ssthresh,
- (unsigned long long)cc->algo_state.nr.recovery_start_time);
+ (long)nr->ssthresh,
+ (unsigned long long)nr->recovery_start_time);
}
static void (*quic_cc_nr_state_cbs[])(struct quic_cc *cc,
@@ -144,7 +157,7 @@
static void quic_cc_nr_event(struct quic_cc *cc, struct quic_cc_event *ev)
{
- return quic_cc_nr_state_cbs[cc->algo_state.nr.state](cc, ev);
+ return quic_cc_nr_state_cbs[cc->algo->state](cc, ev);
}
struct quic_cc_algo quic_cc_algo_nr = {