BUG/MAJOR: quic: Congestion algorithm state shared between connections

This very old bug has been there since the first implementation of the newreno
congestion algorithm. Storing a state variable in the quic_cc_algo struct was a
bad idea: this struct only defines the congestion control algorithm used by a
QUIC listener (its type and its callbacks) and is consequently shared by all the
connections of this listener, as sketched below.
This bug could lead to crashes since BUG_ON() calls have been added to each
algorithm implementation. It was revealed by the interop tests, but not very
often, as these tests rarely run several connections at the same time.
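
As a minimal, purely illustrative sketch of the pre-patch situation (the
identifiers are the real ones, the scenario is simplified):

    /* Pre-patch: every connection of a listener points to the same
     * quic_cc_algo instance, so the 'state' field they update is shared.
     */
    struct quic_cc_algo *algo = &quic_cc_algo_nr; /* per listener, not per connection */

    /* connection A enters the recovery period on loss */
    algo->state = QUIC_CC_ST_RP;
    /* connection B, still opening its window, resets it to slow start */
    algo->state = QUIC_CC_ST_SS;
    /* connection A now dispatches its next event through the slow start
     * callback although it is actually in recovery; the BUG_ON() checks
     * added to the state callbacks catch this inconsistency.
     */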

This issue was also reported by Tristan in GH #2095.

Move the congestion algorithm state to the structures which are private to a
connection (see the cubic and nr structs).
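
After this change, a callback reaches its state through the per-connection
private area attached to the quic_cc struct, along the lines of this sketch:

    /* quic_cc_priv() returns the per-connection private area, so each
     * connection now keeps its own copy of the algorithm state.
     */
    struct nr *nr = quic_cc_priv(cc);
    nr->state = QUIC_CC_ST_SS; /* only this connection is affected */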

Must be backported to 2.7 and 2.6.
diff --git a/include/haproxy/quic_cc-t.h b/include/haproxy/quic_cc-t.h
index 35996d7..60efc0a 100644
--- a/include/haproxy/quic_cc-t.h
+++ b/include/haproxy/quic_cc-t.h
@@ -89,7 +89,6 @@
 
 struct quic_cc_algo {
 	enum quic_cc_algo_type type;
-	enum quic_cc_algo_state_type state;
 	int (*init)(struct quic_cc *cc);
 	void (*event)(struct quic_cc *cc, struct quic_cc_event *ev);
 	void (*slow_start)(struct quic_cc *cc);
diff --git a/src/quic_cc_cubic.c b/src/quic_cc_cubic.c
index e04cd3e..4d31cc7 100644
--- a/src/quic_cc_cubic.c
+++ b/src/quic_cc_cubic.c
@@ -23,6 +23,7 @@
 
 /* K cube factor: (1 - beta) / c */
 struct cubic {
+	uint32_t state;
 	uint32_t ssthresh;
 	uint32_t remaining_inc;
 	uint32_t remaining_tcp_inc;
@@ -39,8 +40,7 @@
 	struct cubic *c = quic_cc_priv(cc);
 
 	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
-	cc->algo->state = QUIC_CC_ST_SS;
-
+	c->state = QUIC_CC_ST_SS;
 	c->ssthresh = QUIC_CC_INFINITE_SSTHESH;
 	c->remaining_inc = 0;
 	c->remaining_tcp_inc = 0;
@@ -198,7 +198,7 @@
 	}
 	path->cwnd = (CUBIC_BETA * path->cwnd) >> CUBIC_BETA_SCALE_SHIFT;
 	c->ssthresh =  QUIC_MAX(path->cwnd, path->min_cwnd);
-	cc->algo->state = QUIC_CC_ST_RP;
+	c->state = QUIC_CC_ST_RP;
 	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc, NULL, cc);
 }
 
@@ -216,7 +216,7 @@
 			path->cwnd += ev->ack.acked;
 		/* Exit to congestion avoidance if slow start threshold is reached. */
 		if (path->cwnd >= c->ssthresh)
-			cc->algo->state = QUIC_CC_ST_CA;
+			c->state = QUIC_CC_ST_CA;
 		break;
 
 	case QUIC_CC_EVT_LOSS:
@@ -276,7 +276,7 @@
 			goto leave;
 		}
 
-		cc->algo->state = QUIC_CC_ST_CA;
+		c->state = QUIC_CC_ST_CA;
 		c->recovery_start_time = TICK_ETERNITY;
 		break;
 	case QUIC_CC_EVT_LOSS:
@@ -300,7 +300,9 @@
 
 static void quic_cc_cubic_event(struct quic_cc *cc, struct quic_cc_event *ev)
 {
-	return quic_cc_cubic_state_cbs[cc->algo->state](cc, ev);
+	struct cubic *c = quic_cc_priv(cc);
+
+	return quic_cc_cubic_state_cbs[c->state](cc, ev);
 }
 
 static void quic_cc_cubic_state_trace(struct buffer *buf, const struct quic_cc *cc)
@@ -310,7 +312,7 @@
 
 	path = container_of(cc, struct quic_path, cc);
 	chunk_appendf(buf, " state=%s cwnd=%llu ssthresh=%d rpst=%dms",
-	              quic_cc_state_str(cc->algo->state),
+	              quic_cc_state_str(c->state),
 	              (unsigned long long)path->cwnd,
 	              (int)c->ssthresh,
 	              !tick_isset(c->recovery_start_time) ? -1 :
diff --git a/src/quic_cc_newreno.c b/src/quic_cc_newreno.c
index bf714b1..f1566ba 100644
--- a/src/quic_cc_newreno.c
+++ b/src/quic_cc_newreno.c
@@ -31,6 +31,7 @@
 
 /* Newreno state */
 struct nr {
+	uint32_t state;
 	uint32_t ssthresh;
 	uint32_t recovery_start_time;
 	uint32_t remain_acked;
@@ -40,7 +41,7 @@
 {
 	struct nr *nr = quic_cc_priv(cc);
 
-	cc->algo->state = QUIC_CC_ST_SS;
+	nr->state = QUIC_CC_ST_SS;
 	nr->ssthresh = QUIC_CC_INFINITE_SSTHESH;
 	nr->recovery_start_time = 0;
 	nr->remain_acked = 0;
@@ -57,7 +58,7 @@
 	path = container_of(cc, struct quic_path, cc);
 	path->cwnd = path->min_cwnd;
 	/* Re-entering slow start state. */
-	cc->algo->state = QUIC_CC_ST_SS;
+	nr->state = QUIC_CC_ST_SS;
 	/* Recovery start time reset */
 	nr->recovery_start_time = 0;
 }
@@ -71,7 +72,7 @@
 	path = container_of(cc, struct quic_path, cc);
 	nr->recovery_start_time = now_ms;
 	nr->ssthresh = QUIC_MAX(path->cwnd >> 1, path->min_cwnd);
-	cc->algo->state = QUIC_CC_ST_RP;
+	nr->state = QUIC_CC_ST_RP;
 }
 
 /* Slow start callback. */
@@ -88,7 +89,7 @@
 		path->cwnd += ev->ack.acked;
 		/* Exit to congestion avoidance if slow start threshold is reached. */
 		if (path->cwnd > nr->ssthresh)
-			cc->algo->state = QUIC_CC_ST_CA;
+			nr->state = QUIC_CC_ST_CA;
 		break;
 
 	case QUIC_CC_EVT_LOSS:
@@ -161,7 +162,7 @@
 			goto leave;
 		}
 
-		cc->algo->state = QUIC_CC_ST_CA;
+		nr->state = QUIC_CC_ST_CA;
 		nr->recovery_start_time = TICK_ETERNITY;
 		path->cwnd = nr->ssthresh;
 		break;
@@ -184,7 +185,7 @@
 
 	path = container_of(cc, struct quic_path, cc);
 	chunk_appendf(buf, " state=%s cwnd=%llu ssthresh=%ld recovery_start_time=%llu",
-	              quic_cc_state_str(cc->algo->state),
+	              quic_cc_state_str(nr->state),
 	              (unsigned long long)path->cwnd,
 	              (long)nr->ssthresh,
 	              (unsigned long long)nr->recovery_start_time);
@@ -199,7 +200,9 @@
 
 static void quic_cc_nr_event(struct quic_cc *cc, struct quic_cc_event *ev)
 {
-	return quic_cc_nr_state_cbs[cc->algo->state](cc, ev);
+	struct nr *nr = quic_cc_priv(cc);
+
+	return quic_cc_nr_state_cbs[nr->state](cc, ev);
 }
 
 struct quic_cc_algo quic_cc_algo_nr = {
diff --git a/src/quic_cc_nocc.c b/src/quic_cc_nocc.c
index 27db350..0815c9c 100644
--- a/src/quic_cc_nocc.c
+++ b/src/quic_cc_nocc.c
@@ -66,7 +66,7 @@
 
 static void quic_cc_nocc_event(struct quic_cc *cc, struct quic_cc_event *ev)
 {
-	return quic_cc_nocc_state_cbs[cc->algo->state](cc, ev);
+	return quic_cc_nocc_state_cbs[QUIC_CC_ST_SS](cc, ev);
 }
 
 struct quic_cc_algo quic_cc_algo_nocc = {