/*
 * NewReno congestion control algorithm.
 *
 * This file contains definitions for QUIC congestion control.
 *
 * Copyright 2019 HAProxy Technologies, Frédéric Lécaille <flecaille@haproxy.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <haproxy/quic_cc.h>
#include <haproxy/trace.h>
#include <haproxy/xprt_quic.h>

#define TRACE_SOURCE &trace_quic

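/* Initialize <cc> for the NewReno algorithm: start in the slow start state
 * with the path current congestion window and an infinite slow start
 * threshold. Always returns 1.
 */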
static int quic_cc_nr_init(struct quic_cc *cc)
{
	struct quic_path *path;

	path = container_of(cc, struct quic_path, cc);
	cc->algo_state.nr.state = QUIC_CC_ST_SS;
	cc->algo_state.nr.cwnd = path->cwnd;
	cc->algo_state.nr.ssthresh = QUIC_CC_INFINITE_SSTHESH;
	cc->algo_state.nr.recovery_start_time = 0;

	return 1;
}

/* Slow start callback. */
static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
{
	struct quic_path *path;

	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc->conn, ev);
	path = container_of(cc, struct quic_path, cc);
	switch (ev->type) {
	case QUIC_CC_EVT_ACK:
		path->in_flight -= ev->ack.acked;
		/* Do not increase the congestion window in recovery period. */
		if (ev->ack.time_sent <= cc->algo_state.nr.recovery_start_time)
			goto out;

		/* In slow start, grow the window by the number of newly acked
		 * bytes, which roughly doubles it every RTT.
		 */
		cc->algo_state.nr.cwnd += ev->ack.acked;
		/* Exit to congestion avoidance if the slow start threshold is exceeded. */
		if (cc->algo_state.nr.cwnd > cc->algo_state.nr.ssthresh)
			cc->algo_state.nr.state = QUIC_CC_ST_CA;
		path->cwnd = cc->algo_state.nr.cwnd;
		break;

	case QUIC_CC_EVT_LOSS:
		path->in_flight -= ev->loss.lost_bytes;
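		/* Halve the window on loss without shrinking below the minimum
		 * window; the halved value also becomes the new slow start
		 * threshold. E.g. with the hypothetical values cwnd=40960 and
		 * min_cwnd=2920, a loss event leaves cwnd=ssthresh=20480.
		 */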
		cc->algo_state.nr.cwnd = QUIC_MAX(cc->algo_state.nr.cwnd >> 1, path->min_cwnd);
		path->cwnd = cc->algo_state.nr.ssthresh = cc->algo_state.nr.cwnd;
		/* Exit to congestion avoidance. */
		cc->algo_state.nr.state = QUIC_CC_ST_CA;
		break;

	case QUIC_CC_EVT_ECN_CE:
		/* XXX TO DO XXX */
		break;
	}

 out:
	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc->conn,, cc);
}

/* Congestion avoidance callback. */
static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
{
	struct quic_path *path;

	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc->conn);
	path = container_of(cc, struct quic_path, cc);
	switch (ev->type) {
	case QUIC_CC_EVT_ACK:
		path->in_flight -= ev->ack.acked;
		/* Do not increase the congestion window in recovery period. */
		if (ev->ack.time_sent <= cc->algo_state.nr.recovery_start_time)
			goto out;

		/* Increase the congestion window by one maximum packet size
		 * for each full congestion window of acknowledged bytes, but
		 * by at least one packet size per ACK so that the integer
		 * division below cannot stall the growth.
		 */
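		/* E.g. with the hypothetical values mtu=1252 and cwnd=31300,
		 * an ACK for 1252 bytes gives 1252 / 31300 == 0, so QUIC_MAX()
		 * still grants a one-packet (1252 byte) increase.
		 */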
		cc->algo_state.nr.cwnd +=
			path->mtu * QUIC_MAX(1ULL, (unsigned long long)ev->ack.acked / cc->algo_state.nr.cwnd);
		path->cwnd = cc->algo_state.nr.cwnd;
		break;

	case QUIC_CC_EVT_LOSS:
		path->in_flight -= ev->loss.lost_bytes;
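		/* Open a new recovery period only if the newest lost packet
		 * was sent after the current one started, then halve the
		 * window and lower the slow start threshold accordingly.
		 */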
		if (ev->loss.newest_time_sent > cc->algo_state.nr.recovery_start_time) {
			cc->algo_state.nr.recovery_start_time = ev->loss.now_ms;
			cc->algo_state.nr.cwnd = QUIC_MAX(cc->algo_state.nr.cwnd >> 1, path->min_cwnd);
			cc->algo_state.nr.ssthresh = cc->algo_state.nr.cwnd;
		}
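		/* If the losses span a long enough period, the congestion is
		 * deemed persistent: collapse the window to its minimum and
		 * restart from slow start.
		 */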
		if (quic_loss_persistent_congestion(&path->loss,
		                                    ev->loss.period,
		                                    ev->loss.now_ms,
		                                    ev->loss.max_ack_delay)) {
			cc->algo_state.nr.cwnd = path->min_cwnd;
			/* Re-entering slow start state. */
			cc->algo_state.nr.state = QUIC_CC_ST_SS;
		}
		path->cwnd = cc->algo_state.nr.cwnd;
		break;

	case QUIC_CC_EVT_ECN_CE:
		/* XXX TO DO XXX */
		break;
	}

 out:
	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc->conn);
}

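/* Dump the NewReno state of <cc> into <buf> for trace output. */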
static void quic_cc_nr_state_trace(struct buffer *buf, const struct quic_cc *cc)
{
	chunk_appendf(buf, " state=%s cwnd=%llu ssthresh=%ld recovery_start_time=%llu",
	              quic_cc_state_str(cc->algo_state.nr.state),
	              (unsigned long long)cc->algo_state.nr.cwnd,
	              (long)cc->algo_state.nr.ssthresh,
	              (unsigned long long)cc->algo_state.nr.recovery_start_time);
}

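/* Per-state event handlers, indexed by the congestion control state. */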
static void (*quic_cc_nr_state_cbs[])(struct quic_cc *cc,
                                      struct quic_cc_event *ev) = {
	[QUIC_CC_ST_SS] = quic_cc_nr_ss_cb,
	[QUIC_CC_ST_CA] = quic_cc_nr_ca_cb,
};

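/* Dispatch <ev> to the handler matching the current state of <cc>. */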
static void quic_cc_nr_event(struct quic_cc *cc, struct quic_cc_event *ev)
{
	quic_cc_nr_state_cbs[cc->algo_state.nr.state](cc, ev);
}

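/* NewReno congestion control algorithm descriptor. */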
struct quic_cc_algo quic_cc_algo_nr = {
	.type = QUIC_CC_ALGO_TP_NEWRENO,
	.init = quic_cc_nr_init,
	.event = quic_cc_nr_event,
	.state_trace = quic_cc_nr_state_trace,
};