#include <haproxy/mux_quic.h>

#include <import/eb64tree.h>

#include <haproxy/api.h>
#include <haproxy/connection.h>
#include <haproxy/conn_stream.h>
#include <haproxy/dynbuf.h>
#include <haproxy/htx.h>
#include <haproxy/pool.h>
#include <haproxy/sink.h>
#include <haproxy/ssl_sock-t.h>
#include <haproxy/trace.h>
#include <haproxy/xprt_quic.h>

DECLARE_POOL(pool_head_qcc, "qcc", sizeof(struct qcc));
DECLARE_POOL(pool_head_qcs, "qcs", sizeof(struct qcs));

/* trace source and events */
static void qmux_trace(enum trace_level level, uint64_t mask,
                       const struct trace_source *src,
                       const struct ist where, const struct ist func,
                       const void *a1, const void *a2, const void *a3, const void *a4);

static const struct trace_event qmux_trace_events[] = {
#define QMUX_EV_QCC_RECV (1ULL << 1)
	{ .mask = QMUX_EV_QCC_RECV, .name = "qcc_recv", .desc = "Rx on QUIC connection" },
#define QMUX_EV_QCC_SEND (1ULL << 2)
	{ .mask = QMUX_EV_QCC_SEND, .name = "qcc_send", .desc = "Tx on QUIC connection" },
#define QMUX_EV_QCC_WAKE (1ULL << 3)
	{ .mask = QMUX_EV_QCC_WAKE, .name = "qcc_wake", .desc = "QUIC connection woken up" },
#define QMUX_EV_QCC_END (1ULL << 4)
	{ .mask = QMUX_EV_QCC_END, .name = "qcc_end", .desc = "QUIC connection terminated" },
#define QMUX_EV_QCC_NQCS (1ULL << 5)
	{ .mask = QMUX_EV_QCC_NQCS, .name = "qcc_no_qcs", .desc = "QUIC stream not found" },
#define QMUX_EV_QCS_NEW (1ULL << 6)
	{ .mask = QMUX_EV_QCS_NEW, .name = "qcs_new", .desc = "new QUIC stream" },
#define QMUX_EV_QCS_RECV (1ULL << 7)
	{ .mask = QMUX_EV_QCS_RECV, .name = "qcs_recv", .desc = "Rx on QUIC stream" },
#define QMUX_EV_QCS_SEND (1ULL << 8)
	{ .mask = QMUX_EV_QCS_SEND, .name = "qcs_send", .desc = "Tx on QUIC stream" },
#define QMUX_EV_QCS_END (1ULL << 9)
	{ .mask = QMUX_EV_QCS_END, .name = "qcs_end", .desc = "QUIC stream terminated" },
#define QMUX_EV_STRM_RECV (1ULL << 10)
	{ .mask = QMUX_EV_STRM_RECV, .name = "strm_recv", .desc = "receiving data for stream" },
#define QMUX_EV_STRM_SEND (1ULL << 11)
	{ .mask = QMUX_EV_STRM_SEND, .name = "strm_send", .desc = "sending data for stream" },
#define QMUX_EV_STRM_END (1ULL << 12)
	{ .mask = QMUX_EV_STRM_END, .name = "strm_end", .desc = "detaching app-layer stream" },
#define QMUX_EV_SEND_FRM (1ULL << 13)
	{ .mask = QMUX_EV_SEND_FRM, .name = "send_frm", .desc = "sending QUIC frame" },
/* special event dedicated to qcs_push_frame */
#define QMUX_EV_QCS_PUSH_FRM (1ULL << 14)
	{ .mask = QMUX_EV_QCS_PUSH_FRM, .name = "qcs_push_frm", .desc = "qcs_push_frame" },
	{ }
};

/* custom arg for QMUX_EV_QCS_PUSH_FRM */
struct qcs_push_frm_trace_arg {
	size_t sent;
	int xfer;
	char fin;
	uint64_t offset;
};

static const struct name_desc qmux_trace_lockon_args[4] = {
	/* arg1 */ { /* already used by the connection */ },
	/* arg2 */ { .name="qcs", .desc="QUIC stream" },
	/* arg3 */ { },
	/* arg4 */ { }
};

static const struct name_desc qmux_trace_decoding[] = {
#define QMUX_VERB_CLEAN 1
	{ .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
#define QMUX_VERB_MINIMAL 2
	{ .name="minimal", .desc="report only qcc/qcs state and flags, no real decoding" },
	{ /* end */ }
};

struct trace_source trace_qmux = {
	.name = IST("qmux"),
	.desc = "QUIC multiplexer",
	.arg_def = TRC_ARG1_CONN,  /* TRACE()'s first argument is always a connection */
	.default_cb = qmux_trace,
	.known_events = qmux_trace_events,
	.lockon_args = qmux_trace_lockon_args,
	.decoding = qmux_trace_decoding,
	.report_events = ~0,  /* report everything by default */
};

#define TRACE_SOURCE &trace_qmux
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
/* Allocate a new QUIC stream with id <id> and type <type>. */
struct qcs *qcs_new(struct qcc *qcc, uint64_t id, enum qcs_type type)
{
	struct qcs *qcs;

	TRACE_ENTER(QMUX_EV_QCS_NEW, qcc->conn);

	qcs = pool_alloc(pool_head_qcs);
	if (!qcs)
		goto out;

	qcs->qcc = qcc;
	qcs->cs = NULL;
	qcs->flags = QC_SF_NONE;

	qcs->by_id.key = id;
	eb64_insert(&qcc->streams_by_id, &qcs->by_id);
	qcc->strms[type].nb_streams++;

	/* If stream is local, use peer remote-limit, or else the opposite. */
	/* TODO use uni limit for unidirectional streams */
	qcs->tx.msd = quic_stream_is_local(qcc, id) ? qcc->rfctl.msd_bidi_r :
	                                              qcc->rfctl.msd_bidi_l;

	qcs->rx.buf = BUF_NULL;
	qcs->rx.app_buf = BUF_NULL;
	qcs->rx.offset = 0;
	qcs->rx.frms = EB_ROOT_UNIQUE;

	qcs->tx.buf = BUF_NULL;
	qcs->tx.xprt_buf = BUF_NULL;
	qcs->tx.offset = 0;
	qcs->tx.sent_offset = 0;
	qcs->tx.ack_offset = 0;
	qcs->tx.acked_frms = EB_ROOT;

	qcs->wait_event.tasklet = NULL;
	qcs->wait_event.events = 0;
	qcs->subs = NULL;

 out:
	TRACE_LEAVE(QMUX_EV_QCS_NEW, qcc->conn, qcs);
	return qcs;
}

/* Free a qcs. This function must only be used for unidirectional streams.
 * Bidirectional streams are released by the upper layer through qc_detach().
 */
void uni_qcs_free(struct qcs *qcs)
{
	eb64_delete(&qcs->by_id);
	pool_free(pool_head_qcs, qcs);
}

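/* Allocate if needed the buffer pointed to by <bptr> for the <qcs> stream via
 * b_alloc(). Allocation failure is not handled gracefully here: it is trapped
 * by a BUG_ON(). Returns the buffer.
 */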
struct buffer *qc_get_buf(struct qcs *qcs, struct buffer *bptr)
{
	struct buffer *buf = b_alloc(bptr);
	BUG_ON(!buf);
	return buf;
}

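/* Called by the upper layer to subscribe <es> to events <event_type> (a
 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND) on the stream <qcs>.
 *
 * Always returns 0.
 */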
int qcs_subscribe(struct qcs *qcs, int event_type, struct wait_event *es)
{
	struct qcc *qcc = qcs->qcc;

	TRACE_ENTER(QMUX_EV_STRM_SEND|QMUX_EV_STRM_RECV, qcc->conn, qcs);

	BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
	BUG_ON(qcs->subs && qcs->subs != es);

	es->events |= event_type;
	qcs->subs = es;

	if (event_type & SUB_RETRY_RECV)
		TRACE_DEVEL("subscribe(recv)", QMUX_EV_STRM_RECV, qcc->conn, qcs);

	if (event_type & SUB_RETRY_SEND)
		TRACE_DEVEL("subscribe(send)", QMUX_EV_STRM_SEND, qcc->conn, qcs);

	TRACE_LEAVE(QMUX_EV_STRM_SEND|QMUX_EV_STRM_RECV, qcc->conn, qcs);

	return 0;
}

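/* Wake up the upper layer if it subscribed for receive events on <qcs>, then
 * clear the RECV subscription.
 */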
void qcs_notify_recv(struct qcs *qcs)
{
	if (qcs->subs && qcs->subs->events & SUB_RETRY_RECV) {
		tasklet_wakeup(qcs->subs->tasklet);
		qcs->subs->events &= ~SUB_RETRY_RECV;
		if (!qcs->subs->events)
			qcs->subs = NULL;
	}
}

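/* Wake up the upper layer if it subscribed for send events on <qcs>, then
 * clear the SEND subscription.
 */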
void qcs_notify_send(struct qcs *qcs)
{
	if (qcs->subs && qcs->subs->events & SUB_RETRY_SEND) {
		tasklet_wakeup(qcs->subs->tasklet);
		qcs->subs->events &= ~SUB_RETRY_SEND;
		if (!qcs->subs->events)
			qcs->subs = NULL;
	}
}

/* Retrieve the stream instance with <id> as ID, possibly allocating several
 * streams depending on the ones already opened.
 * Returns the stream instance on success, NULL on failure.
 */
struct qcs *qcc_get_qcs(struct qcc *qcc, uint64_t id)
{
	unsigned int strm_type;
	int64_t sub_id;
	struct eb64_node *strm_node;
	struct qcs *qcs = NULL;

	strm_type = id & QCS_ID_TYPE_MASK;
	sub_id = id >> QCS_ID_TYPE_SHIFT;
	strm_node = NULL;
	if (quic_stream_is_local(qcc, id)) {
		/* Local streams: this stream must be already opened. */
		strm_node = eb64_lookup(&qcc->streams_by_id, id);
		if (!strm_node) {
			/* unknown stream id */
			goto out;
		}
		qcs = eb64_entry(strm_node, struct qcs, by_id);
	}
	else {
		/* Remote streams. */
		struct eb_root *strms;
		uint64_t largest_id;
		enum qcs_type qcs_type;

		strms = &qcc->streams_by_id;
		qcs_type = qcs_id_type(id);

		/* TODO also checks max-streams for uni streams */
		if (quic_stream_is_bidi(id)) {
			if (sub_id + 1 > qcc->lfctl.ms_bidi) {
				/* streams limit reached */
				goto out;
			}
		}

		/* Note: ->largest_id was initialized with (uint64_t)-1 as value, 0 being a
		 * correct value.
		 */
		largest_id = qcc->strms[qcs_type].largest_id;
		if (sub_id > (int64_t)largest_id) {
			/* RFC: "A stream ID that is used out of order results in all streams
			 * of that type with lower-numbered stream IDs also being opened".
			 * So, let's "open" these streams.
			 */
			int64_t i;
			struct qcs *tmp_qcs;

			tmp_qcs = NULL;
			for (i = largest_id + 1; i <= sub_id; i++) {
				uint64_t id = (i << QCS_ID_TYPE_SHIFT) | strm_type;
				enum qcs_type type = id & QCS_ID_DIR_BIT ? QCS_CLT_UNI : QCS_CLT_BIDI;
				tmp_qcs = qcs_new(qcc, id, type);
				if (!tmp_qcs) {
					/* allocation failure */
					goto out;
				}

				qcc->strms[qcs_type].largest_id = i;
			}
			if (tmp_qcs)
				qcs = tmp_qcs;
		}
		else {
			strm_node = eb64_lookup(strms, id);
			if (strm_node)
				qcs = eb64_entry(strm_node, struct qcs, by_id);
		}
	}

	return qcs;

 out:
	return NULL;
}

/* Handle a new STREAM frame <strm_frm>. The frame content will be copied in
 * the buffer of the stream instance. The stream instance will be stored in
 * <out_qcs>. In case of success, the caller can immediately call
 * qcc_decode_qcs to process the frame content.
 *
 * Returns 0 on success. On error, two codes are possible:
 * - 1 is returned if the frame cannot be decoded and must be discarded.
 * - 2 is returned if the stream cannot decode the frame at the moment. The
 *   frame should be buffered to be handled later.
 */
int qcc_recv(struct qcc *qcc, uint64_t id, uint64_t len, uint64_t offset,
             char fin, char *data, struct qcs **out_qcs)
{
	struct qcs *qcs;
	size_t total, diff;

	TRACE_ENTER(QMUX_EV_QCC_RECV, qcc->conn);

	qcs = qcc_get_qcs(qcc, id);
	if (!qcs) {
		TRACE_DEVEL("leaving on stream not found", QMUX_EV_QCC_RECV|QMUX_EV_QCC_NQCS, qcc->conn, NULL, &id);
		return 1;
	}

	*out_qcs = qcs;

	if (offset > qcs->rx.offset)
		return 2;

	if (offset + len <= qcs->rx.offset) {
		TRACE_DEVEL("leaving on already received offset", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
		return 1;
	}

	/* Last frame already handled for this stream. */
	BUG_ON(qcs->flags & QC_SF_FIN_RECV);

	if (!qc_get_buf(qcs, &qcs->rx.buf)) {
		/* TODO should mark qcs as full */
		return 2;
	}

	TRACE_DEVEL("newly received offset", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
	diff = qcs->rx.offset - offset;

	/* TODO do not partially copy a frame if not enough size left. Maybe
	 * this can be optimized.
	 */
	if (len > b_room(&qcs->rx.buf)) {
		/* TODO handle STREAM frames larger than RX buffer. */
		BUG_ON(len > b_size(&qcs->rx.buf));
		return 2;
	}

	len -= diff;
	data += diff;

	total = b_putblk(&qcs->rx.buf, data, len);
	/* TODO handle partial copy of a STREAM frame. */
	BUG_ON(len != total);

	qcs->rx.offset += total;

	if (fin)
		qcs->flags |= QC_SF_FIN_RECV;

 out:
	TRACE_LEAVE(QMUX_EV_QCC_RECV, qcc->conn);
	return 0;
}

/* Handle a new MAX_DATA frame. <max> must contain the maximum data field of
 * the frame.
 *
 * Returns 0 on success else non-zero.
 */
int qcc_recv_max_data(struct qcc *qcc, uint64_t max)
{
	if (qcc->rfctl.md < max) {
		qcc->rfctl.md = max;

		if (qcc->flags & QC_CF_BLK_MFCTL) {
			qcc->flags &= ~QC_CF_BLK_MFCTL;
			tasklet_wakeup(qcc->wait_event.tasklet);
		}
	}
	return 0;
}

/* Handle a new MAX_STREAM_DATA frame. <max> must contain the maximum data
 * field of the frame and <id> is the identifier of the QUIC stream.
 *
 * Returns 0 on success else non-zero.
 */
int qcc_recv_max_stream_data(struct qcc *qcc, uint64_t id, uint64_t max)
{
	struct qcs *qcs;
	struct eb64_node *node;

	node = eb64_lookup(&qcc->streams_by_id, id);
	if (node) {
		qcs = eb64_entry(&node->node, struct qcs, by_id);
		if (max > qcs->tx.msd) {
			qcs->tx.msd = max;

			if (qcs->flags & QC_SF_BLK_SFCTL) {
				qcs->flags &= ~QC_SF_BLK_SFCTL;
				tasklet_wakeup(qcc->wait_event.tasklet);
			}
		}
	}

	return 0;
}

/* Decode the content of STREAM frames already received on the stream instance
 * <qcs>.
 *
 * Returns 0 on success else non-zero.
 */
int qcc_decode_qcs(struct qcc *qcc, struct qcs *qcs)
{
	TRACE_ENTER(QMUX_EV_QCS_RECV, qcc->conn, qcs);

	if (qcc->app_ops->decode_qcs(qcs, qcs->flags & QC_SF_FIN_RECV, qcc->ctx) < 0) {
		TRACE_DEVEL("leaving on decoding error", QMUX_EV_QCS_RECV, qcc->conn, qcs);
		return 1;
	}

	TRACE_LEAVE(QMUX_EV_QCS_RECV, qcc->conn, qcs);

	return 0;
}

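/* Return true if a MAX_STREAMS frame should be emitted, i.e. when the count of
 * closed remote bidirectional streams (lfctl.cl_bidi_r) exceeds half of the
 * initial bidirectional streams limit.
 */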
static int qc_is_max_streams_needed(struct qcc *qcc)
{
	return qcc->lfctl.cl_bidi_r > qcc->lfctl.ms_bidi_init / 2;
}

/* detaches the QUIC stream from its QCC and releases it to the QCS pool. */
static void qcs_destroy(struct qcs *qcs)
{
	struct connection *conn = qcs->qcc->conn;
	const uint64_t id = qcs->by_id.key;

	TRACE_ENTER(QMUX_EV_QCS_END, conn, qcs);

	if (quic_stream_is_remote(qcs->qcc, id)) {
		if (quic_stream_is_bidi(id)) {
			++qcs->qcc->lfctl.cl_bidi_r;
			if (qc_is_max_streams_needed(qcs->qcc))
				tasklet_wakeup(qcs->qcc->wait_event.tasklet);
		}
	}

	eb64_delete(&qcs->by_id);

	b_free(&qcs->rx.buf);
	b_free(&qcs->tx.buf);
	b_free(&qcs->tx.xprt_buf);

	--qcs->qcc->strms[qcs_id_type(qcs->by_id.key)].nb_streams;

	pool_free(pool_head_qcs, qcs);

	TRACE_LEAVE(QMUX_EV_QCS_END, conn);
}

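/* Return true if the mux can be released: no more bidirectional streams are
 * opened and the timeout task is no longer set.
 */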
static inline int qcc_is_dead(const struct qcc *qcc)
{
	if (!qcc->strms[QCS_CLT_BIDI].nb_streams && !qcc->task)
		return 1;

	return 0;
}

/* Return true if the mux timeout should be armed. */
static inline int qcc_may_expire(struct qcc *qcc)
{

	/* Consider that the timeout must be set if no bidirectional streams
	 * are opened.
	 */
	if (!qcc->strms[QCS_CLT_BIDI].nb_streams)
		return 1;

	return 0;
}

/* release function. This one should be called to free all resources allocated
 * to the mux.
 */
static void qc_release(struct qcc *qcc)
{
	struct connection *conn = NULL;

	TRACE_ENTER(QMUX_EV_QCC_END);

	if (qcc) {
		/* The connection must be attached to this mux to be released */
		if (qcc->conn && qcc->conn->ctx == qcc)
			conn = qcc->conn;

		TRACE_DEVEL("freeing qcc", QMUX_EV_QCC_END, conn);

		if (qcc->wait_event.tasklet)
			tasklet_free(qcc->wait_event.tasklet);

		pool_free(pool_head_qcc, qcc);
	}

	if (conn) {
		LIST_DEL_INIT(&conn->stopping_list);

		conn->qc->conn = NULL;
		conn->mux = NULL;
		conn->ctx = NULL;

		TRACE_DEVEL("freeing conn", QMUX_EV_QCC_END, conn);

		conn_stop_tracking(conn);
		conn_full_close(conn);
		if (conn->destroy_cb)
			conn->destroy_cb(conn);
		conn_free(conn);
	}

	TRACE_LEAVE(QMUX_EV_QCC_END);
}

/* Prepare a STREAM frame for <qcs> instance. First, transfer data from
 * <payload> to <out> buffer. The STREAM frame payload points to the <out>
 * buffer. The frame is then pushed to <frm_list>. If <fin> is set, and the
 * <payload> buf is emptied after transfer, FIN bit is set on the STREAM frame.
 * Transfer is automatically adjusted to not exceed the stream flow-control
 * limit. <max_data> must contain the current sum of offsets for the
 * connection. This is useful to not exceed the connection flow-control limit
 * when using this function repeatedly on multiple streams before passing the
 * data to the lower layer.
 *
 * Returns the total bytes of newly transferred data or a negative error code.
 */
static int qcs_push_frame(struct qcs *qcs, struct buffer *out,
                          struct buffer *payload, int fin,
                          struct list *frm_list, uint64_t max_data)
{
	struct qcc *qcc = qcs->qcc;
	struct quic_frame *frm;
	int head, left, to_xfer;
	int total = 0;

	TRACE_ENTER(QMUX_EV_QCS_SEND, qcc->conn, qcs);

	qc_get_buf(qcs, out);

	/*
	 * QCS out buffer diagram
	 *             head            left      to_xfer
	 *      ---------------> -----------> ----->
	 * --------------------------------------------------
	 *      |...............|xxxxxxxxxxx|<<<<<
	 * --------------------------------------------------
	 *      ^ ack-off       ^ sent-off  ^ off
	 *
	 * STREAM frame
	 *                      ^                 ^
	 *                      |xxxxxxxxxxxxxxxxx|
	 */

	BUG_ON_HOT(qcs->tx.sent_offset < qcs->tx.ack_offset);
	BUG_ON_HOT(qcs->tx.offset < qcs->tx.sent_offset);

	head = qcs->tx.sent_offset - qcs->tx.ack_offset;
	left = qcs->tx.offset - qcs->tx.sent_offset;
	to_xfer = QUIC_MIN(b_data(payload), b_room(out));

	BUG_ON_HOT(qcs->tx.offset > qcs->tx.msd);
	/* do not exceed flow control limit */
	if (qcs->tx.offset + to_xfer > qcs->tx.msd)
		to_xfer = qcs->tx.msd - qcs->tx.offset;

	BUG_ON_HOT(max_data > qcc->rfctl.md);
	/* do not exceed the connection flow control limit */
	if (max_data + to_xfer > qcc->rfctl.md)
		to_xfer = qcc->rfctl.md - max_data;

	if (!left && !to_xfer)
		goto out;

	frm = pool_zalloc(pool_head_quic_frame);
	if (!frm)
		goto err;

	total = b_force_xfer(out, payload, to_xfer);

	frm->type = QUIC_FT_STREAM_8;
	frm->stream.qcs = (struct qcs *)qcs;
	frm->stream.id = qcs->by_id.key;
	frm->stream.buf = out;
	frm->stream.data = (unsigned char *)b_peek(out, head);

	/* FIN is positioned only when the buffer has been totally emptied. */
	fin = fin && !b_data(payload);
	if (fin)
		frm->type |= QUIC_STREAM_FRAME_TYPE_FIN_BIT;

	if (qcs->tx.sent_offset) {
		frm->type |= QUIC_STREAM_FRAME_TYPE_OFF_BIT;
		frm->stream.offset.key = qcs->tx.sent_offset;
	}

	if (left + total) {
		frm->type |= QUIC_STREAM_FRAME_TYPE_LEN_BIT;
		frm->stream.len = left + total;
	}

	LIST_APPEND(frm_list, &frm->list);

 out:
	{
		struct qcs_push_frm_trace_arg arg = {
			.sent = b_data(out), .xfer = total, .fin = fin,
			.offset = qcs->tx.sent_offset
		};
		TRACE_LEAVE(QMUX_EV_QCS_SEND|QMUX_EV_QCS_PUSH_FRM,
		            qcc->conn, qcs, &arg);
	}

	return total;

 err:
	TRACE_DEVEL("leaving in error", QMUX_EV_QCS_SEND, qcc->conn, qcs);
	return -1;
}

/* This function must be called by the upper layer to notify about the sending
 * of a STREAM frame for the <qcs> instance. The frame is <data> bytes long and
 * starts at <offset>.
 */
void qcc_streams_sent_done(struct qcs *qcs, uint64_t data, uint64_t offset)
{
	struct qcc *qcc = qcs->qcc;
	uint64_t diff;

	BUG_ON(offset > qcs->tx.sent_offset);

	/* check if the STREAM frame has already been notified. It can happen
	 * for retransmission.
	 */
	if (offset + data <= qcs->tx.sent_offset)
		return;

	diff = offset + data - qcs->tx.sent_offset;

	/* increase offset sum on connection */
	qcc->tx.sent_offsets += diff;
	BUG_ON_HOT(qcc->tx.sent_offsets > qcc->rfctl.md);
	if (qcc->tx.sent_offsets == qcc->rfctl.md)
		qcc->flags |= QC_CF_BLK_MFCTL;

	/* increase offset on stream */
	qcs->tx.sent_offset += diff;
	BUG_ON_HOT(qcs->tx.sent_offset > qcs->tx.msd);
	if (qcs->tx.sent_offset == qcs->tx.msd)
		qcs->flags |= QC_SF_BLK_SFCTL;
}

/* Wrapper for send on transport layer. Send a list of frames <frms> for the
 * connection <qcc>.
 *
 * Returns 0 if all data were sent successfully, non-zero otherwise.
 */
static int qc_send_frames(struct qcc *qcc, struct list *frms)
{
	/* TODO implement an opportunistic retry mechanism. This is needed
	 * because qc_send_app_pkts is not completed. It will only prepare data
	 * up to its Tx buffer. The frames left are not sent even if the Tx
	 * buffer is emptied by the sendto call.
	 *
	 * To overcome this, we call repeatedly qc_send_app_pkts until we
	 * detect that the transport layer has sent nothing. This could happen
	 * on congestion or sendto syscall error.
	 *
	 * When qc_send_app_pkts is improved to handle retry by itself, we can
	 * remove the looping from the MUX.
	 */
	struct quic_frame *first_frm;
	uint64_t first_offset = 0;
	char first_stream_frame_type;

	TRACE_ENTER(QMUX_EV_QCC_SEND, qcc->conn);

	if (LIST_ISEMPTY(frms)) {
		TRACE_DEVEL("leaving with no frames to send", QMUX_EV_QCC_SEND, qcc->conn);
		return 0;
	}

 retry_send:
	first_frm = LIST_ELEM(frms->n, struct quic_frame *, list);
	if ((first_frm->type & QUIC_FT_STREAM_8) == QUIC_FT_STREAM_8) {
		first_offset = first_frm->stream.offset.key;
		first_stream_frame_type = 1;
	}
	else {
		first_stream_frame_type = 0;
	}

	if (!LIST_ISEMPTY(frms))
		qc_send_app_pkts(qcc->conn->qc, frms);

	/* If there are frames left, check whether the transport layer has sent
	 * some data or is blocked.
	 */
	if (!LIST_ISEMPTY(frms)) {
		if (first_frm != LIST_ELEM(frms->n, struct quic_frame *, list))
			goto retry_send;

		/* If the first frame is STREAM, check if its offset has
		 * changed.
		 */
		if (first_stream_frame_type &&
		    first_offset != LIST_ELEM(frms->n, struct quic_frame *, list)->stream.offset.key) {
			goto retry_send;
		}
	}

	/* If there are frames left at this stage, the transport layer is
	 * blocked. Subscribe on it to retry later.
	 */
	if (!LIST_ISEMPTY(frms)) {
		TRACE_DEVEL("leaving with remaining frames to send, subscribing", QMUX_EV_QCC_SEND, qcc->conn);
		qcc->conn->xprt->subscribe(qcc->conn, qcc->conn->xprt_ctx,
		                           SUB_RETRY_SEND, &qcc->wait_event);
		return 1;
	}

	TRACE_LEAVE(QMUX_EV_QCC_SEND);

	return 0;
}

/* Proceed to sending. Loop through all available streams for the <qcc>
 * instance and try to send as much as possible.
 *
 * Returns the total number of bytes sent to the transport layer.
 */
static int qc_send(struct qcc *qcc)
{
	struct list frms = LIST_HEAD_INIT(frms);
	struct eb64_node *node;
	int total = 0;

	TRACE_ENTER(QMUX_EV_QCC_SEND);

	if (qcc->flags & QC_CF_BLK_MFCTL)
		return 0;

	/* loop through all streams, construct STREAM frames if data available.
	 * TODO optimize the loop to favor streams which are not too heavy.
	 */
	node = eb64_first(&qcc->streams_by_id);
	while (node) {
		struct qcs *qcs = container_of(node, struct qcs, by_id);
		struct buffer *buf = &qcs->tx.buf;
		struct buffer *out = &qcs->tx.xprt_buf;

		/* TODO
		 * for the moment, unidirectional streams have their own
		 * mechanism for sending. This should be unified in the future,
		 * in this case the next check will be removed.
		 */
		if (quic_stream_is_uni(qcs->by_id.key)) {
			node = eb64_next(node);
			continue;
		}

		if (qcs->flags & QC_SF_BLK_SFCTL) {
			node = eb64_next(node);
			continue;
		}

		if (b_data(buf) || b_data(out)) {
			int ret;
			char fin = !!(qcs->flags & QC_SF_FIN_STREAM);

			ret = qcs_push_frame(qcs, out, buf, fin, &frms,
			                     qcc->tx.sent_offsets + total);
			BUG_ON(ret < 0); /* TODO handle this properly */

			if (ret > 0) {
				qcs_notify_send(qcs);
				if (qcs->flags & QC_SF_BLK_MROOM)
					qcs->flags &= ~QC_SF_BLK_MROOM;
			}

			qcs->tx.offset += ret;
			total += ret;

			/* Subscribe if not all data can be sent. */
			if (b_data(buf)) {
				qcc->conn->xprt->subscribe(qcc->conn, qcc->conn->xprt_ctx,
				                           SUB_RETRY_SEND, &qcc->wait_event);
			}
		}
		node = eb64_next(node);
	}

	qc_send_frames(qcc, &frms);

	TRACE_LEAVE(QMUX_EV_QCC_SEND);

	return total;
}

/* Release all streams that are already marked as detached. This is only done
 * if their TX buffers are empty or if a CONNECTION_CLOSE has been received.
 *
 * Return the number of released streams.
 */
static int qc_release_detached_streams(struct qcc *qcc)
{
	struct eb64_node *node;
	int release = 0;

	node = eb64_first(&qcc->streams_by_id);
	while (node) {
		struct qcs *qcs = container_of(node, struct qcs, by_id);
		node = eb64_next(node);

		if (qcs->flags & QC_SF_DETACH) {
			if ((!b_data(&qcs->tx.buf) && !b_data(&qcs->tx.xprt_buf))) {
				qcs_destroy(qcs);
				release = 1;
			}
			else {
				qcc->conn->xprt->subscribe(qcc->conn, qcc->conn->xprt_ctx,
				                           SUB_RETRY_SEND, &qcc->wait_event);
			}
		}
	}

	return release;
}

/* Send a MAX_STREAMS_BIDI frame to update the limit of bidirectional streams
 * allowed to be opened by the peer. The caller should have first checked if
 * this is required with qc_is_max_streams_needed.
 *
 * Returns 0 on success else non-zero.
 */
static int qc_send_max_streams(struct qcc *qcc)
{
	struct list frms = LIST_HEAD_INIT(frms);
	struct quic_frame *frm;

	frm = pool_zalloc(pool_head_quic_frame);
	BUG_ON(!frm); /* TODO handle this properly */

	frm->type = QUIC_FT_MAX_STREAMS_BIDI;
	frm->max_streams_bidi.max_streams = qcc->lfctl.ms_bidi +
	                                    qcc->lfctl.cl_bidi_r;
	TRACE_DEVEL("sending MAX_STREAMS frame", QMUX_EV_SEND_FRM, qcc->conn, NULL, frm);
	LIST_APPEND(&frms, &frm->list);

	if (qc_send_frames(qcc, &frms))
		return 1;

	/* save the new limit if the frame has been sent. */
	qcc->lfctl.ms_bidi += qcc->lfctl.cl_bidi_r;
	qcc->lfctl.cl_bidi_r = 0;

	return 0;
}

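/* I/O tasklet handler of the QUIC mux. Sends a MAX_STREAMS frame if needed,
 * proceeds to sending, then releases the streams already marked as detached
 * and rearms the mux timeout when no bidirectional stream is left.
 */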
static struct task *qc_io_cb(struct task *t, void *ctx, unsigned int status)
{
	struct qcc *qcc = ctx;

	TRACE_ENTER(QMUX_EV_QCC_WAKE);

	if (qc_is_max_streams_needed(qcc))
		qc_send_max_streams(qcc);

	qc_send(qcc);

	if (qc_release_detached_streams(qcc)) {
		/* Schedule the mux timeout if no bidirectional streams left. */
		if (qcc_may_expire(qcc)) {
			qcc->task->expire = tick_add(now_ms, qcc->timeout);
			task_queue(qcc->task);
		}
	}

	TRACE_LEAVE(QMUX_EV_QCC_WAKE);

	return NULL;
}

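/* Timeout task of the QUIC mux. When it expires and the mux is allowed to
 * expire, the task is destroyed and the qcc is released if it is considered
 * dead.
 */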
static struct task *qc_timeout_task(struct task *t, void *ctx, unsigned int state)
{
	struct qcc *qcc = ctx;
	int expired = tick_is_expired(t->expire, now_ms);

	TRACE_ENTER(QMUX_EV_QCC_WAKE, qcc ? qcc->conn : NULL);

	if (qcc) {
		if (!expired) {
			TRACE_DEVEL("leaving (not expired)", QMUX_EV_QCC_WAKE, qcc->conn);
			return t;
		}

		if (!qcc_may_expire(qcc)) {
			TRACE_DEVEL("leaving (cannot expire)", QMUX_EV_QCC_WAKE, qcc->conn);
			t->expire = TICK_ETERNITY;
			return t;
		}
	}

	task_destroy(t);

	if (!qcc) {
		TRACE_DEVEL("leaving (no more qcc)", QMUX_EV_QCC_WAKE);
		return NULL;
	}

	qcc->task = NULL;

	if (qcc_is_dead(qcc))
		qc_release(qcc);

	TRACE_LEAVE(QMUX_EV_QCC_WAKE);

	return NULL;
}

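/* Initialize the mux for a QUIC connection <conn> on proxy <prx>: allocate the
 * qcc context, initialize the flow-control limits from the local and remote
 * transport parameters, then start the I/O tasklet and the mux timeout task.
 */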
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 914 | static int qc_init(struct connection *conn, struct proxy *prx, |
| 915 | struct session *sess, struct buffer *input) |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 916 | { |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 917 | struct qcc *qcc; |
Amaury Denoyelle | 6ea7819 | 2022-03-07 15:47:02 +0100 | [diff] [blame] | 918 | struct quic_transport_params *lparams, *rparams; |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 919 | |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 920 | qcc = pool_alloc(pool_head_qcc); |
| 921 | if (!qcc) |
| 922 | goto fail_no_qcc; |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 923 | |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 924 | qcc->conn = conn; |
| 925 | conn->ctx = qcc; |
Amaury Denoyelle | ce1f30d | 2022-02-01 15:14:24 +0100 | [diff] [blame] | 926 | qcc->flags = 0; |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 927 | |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 928 | qcc->app_ops = NULL; |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 929 | |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 930 | qcc->streams_by_id = EB_ROOT_UNIQUE; |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 931 | |
Amaury Denoyelle | f3b0ba7 | 2021-12-08 15:12:01 +0100 | [diff] [blame] | 932 | /* Server parameters, params used for RX flow control. */ |
Amaury Denoyelle | 749cb64 | 2022-02-09 10:25:29 +0100 | [diff] [blame] | 933 | lparams = &conn->qc->rx.params; |
Amaury Denoyelle | f3b0ba7 | 2021-12-08 15:12:01 +0100 | [diff] [blame] | 934 | |
Amaury Denoyelle | 749cb64 | 2022-02-09 10:25:29 +0100 | [diff] [blame] | 935 | qcc->rx.max_data = lparams->initial_max_data; |
Amaury Denoyelle | 05ce55e | 2022-03-08 10:35:42 +0100 | [diff] [blame] | 936 | qcc->tx.sent_offsets = 0; |
Amaury Denoyelle | f3b0ba7 | 2021-12-08 15:12:01 +0100 | [diff] [blame] | 937 | |
| 938 | /* Client initiated streams must respect the server flow control. */ |
Amaury Denoyelle | 749cb64 | 2022-02-09 10:25:29 +0100 | [diff] [blame] | 939 | qcc->strms[QCS_CLT_BIDI].max_streams = lparams->initial_max_streams_bidi; |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 940 | qcc->strms[QCS_CLT_BIDI].nb_streams = 0; |
| 941 | qcc->strms[QCS_CLT_BIDI].largest_id = -1; |
Amaury Denoyelle | f3b0ba7 | 2021-12-08 15:12:01 +0100 | [diff] [blame] | 942 | qcc->strms[QCS_CLT_BIDI].rx.max_data = 0; |
Amaury Denoyelle | 749cb64 | 2022-02-09 10:25:29 +0100 | [diff] [blame] | 943 | qcc->strms[QCS_CLT_BIDI].tx.max_data = lparams->initial_max_stream_data_bidi_remote; |
Amaury Denoyelle | f3b0ba7 | 2021-12-08 15:12:01 +0100 | [diff] [blame] | 944 | |
Amaury Denoyelle | 749cb64 | 2022-02-09 10:25:29 +0100 | [diff] [blame] | 945 | qcc->strms[QCS_CLT_UNI].max_streams = lparams->initial_max_streams_uni; |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 946 | qcc->strms[QCS_CLT_UNI].nb_streams = 0; |
| 947 | qcc->strms[QCS_CLT_UNI].largest_id = -1; |
Amaury Denoyelle | f3b0ba7 | 2021-12-08 15:12:01 +0100 | [diff] [blame] | 948 | qcc->strms[QCS_CLT_UNI].rx.max_data = 0; |
Amaury Denoyelle | 749cb64 | 2022-02-09 10:25:29 +0100 | [diff] [blame] | 949 | qcc->strms[QCS_CLT_UNI].tx.max_data = lparams->initial_max_stream_data_uni; |
Amaury Denoyelle | f3b0ba7 | 2021-12-08 15:12:01 +0100 | [diff] [blame] | 950 | |
| 951 | /* Server initiated streams must respect the server flow control. */ |
| 952 | qcc->strms[QCS_SRV_BIDI].max_streams = 0; |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 953 | qcc->strms[QCS_SRV_BIDI].nb_streams = 0; |
| 954 | qcc->strms[QCS_SRV_BIDI].largest_id = -1; |
Amaury Denoyelle | 749cb64 | 2022-02-09 10:25:29 +0100 | [diff] [blame] | 955 | qcc->strms[QCS_SRV_BIDI].rx.max_data = lparams->initial_max_stream_data_bidi_local; |
Amaury Denoyelle | f3b0ba7 | 2021-12-08 15:12:01 +0100 | [diff] [blame] | 956 | qcc->strms[QCS_SRV_BIDI].tx.max_data = 0; |
| 957 | |
| 958 | qcc->strms[QCS_SRV_UNI].max_streams = 0; |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 959 | qcc->strms[QCS_SRV_UNI].nb_streams = 0; |
| 960 | qcc->strms[QCS_SRV_UNI].largest_id = -1; |
Amaury Denoyelle | 749cb64 | 2022-02-09 10:25:29 +0100 | [diff] [blame] | 961 | qcc->strms[QCS_SRV_UNI].rx.max_data = lparams->initial_max_stream_data_uni; |
Amaury Denoyelle | f3b0ba7 | 2021-12-08 15:12:01 +0100 | [diff] [blame] | 962 | qcc->strms[QCS_SRV_UNI].tx.max_data = 0; |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 963 | |
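| | /* Local flow control state advertised to the peer. cl_bidi_r is understood
| | * here as the number of closed bidi streams not yet credited back through a
| | * MAX_STREAMS frame (assumption based on field usage in the mux).
| | */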
Amaury Denoyelle | 78396e5 | 2022-03-21 17:13:32 +0100 | [diff] [blame] | 964 | qcc->lfctl.ms_bidi = qcc->lfctl.ms_bidi_init = lparams->initial_max_streams_bidi; |
| 965 | qcc->lfctl.cl_bidi_r = 0; |
Amaury Denoyelle | c055e30 | 2022-02-07 16:09:06 +0100 | [diff] [blame] | 966 | |
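| | /* Remote transport parameters: the limits below were advertised by the peer
| | * and must be respected when emitting data.
| | */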
Amaury Denoyelle | 6ea7819 | 2022-03-07 15:47:02 +0100 | [diff] [blame] | 967 | rparams = &conn->qc->tx.params; |
Amaury Denoyelle | 05ce55e | 2022-03-08 10:35:42 +0100 | [diff] [blame] | 968 | qcc->rfctl.md = rparams->initial_max_data; |
Amaury Denoyelle | 6ea7819 | 2022-03-07 15:47:02 +0100 | [diff] [blame] | 969 | qcc->rfctl.msd_bidi_l = rparams->initial_max_stream_data_bidi_local; |
| 970 | qcc->rfctl.msd_bidi_r = rparams->initial_max_stream_data_bidi_remote; |
| 971 | |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 972 | qcc->wait_event.tasklet = tasklet_new(); |
| 973 | if (!qcc->wait_event.tasklet) |
| 974 | goto fail_no_tasklet; |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 975 | |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 976 | qcc->subs = NULL; |
| 977 | qcc->wait_event.tasklet->process = qc_io_cb; |
| 978 | qcc->wait_event.tasklet->context = qcc; |
Frédéric Lécaille | f27b66f | 2022-03-18 22:49:22 +0100 | [diff] [blame] | 979 | qcc->wait_event.events = 0; |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 980 | |
Amaury Denoyelle | aebe26f | 2022-01-13 16:28:06 +0100 | [diff] [blame] | 981 | /* haproxy timeouts */ |
| 982 | qcc->timeout = prx->timeout.client; |
| 983 | qcc->task = task_new_here(); |
| 984 | if (!qcc->task) |
| 985 | goto fail_no_timeout_task; |
| 986 | qcc->task->process = qc_timeout_task; |
| 987 | qcc->task->context = qcc; |
| 988 | qcc->task->expire = tick_add(now_ms, qcc->timeout); |
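| | /* qc_timeout_task presumably releases the connection once the client
| | * timeout expires with no active stream; the expiry is rearmed on detach.
| | */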
| 989 | |
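| | /* Register frontend connections on the per-thread stopping list so that
| | * soft-stop processing can reach them (see qc_wake()).
| | */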
Amaury Denoyelle | 0e0969d | 2022-01-31 15:41:14 +0100 | [diff] [blame] | 990 | if (!conn_is_back(conn)) { |
| 991 | if (!LIST_INLIST(&conn->stopping_list)) { |
| 992 | LIST_APPEND(&mux_stopping_data[tid].list, |
| 993 | &conn->stopping_list); |
| 994 | } |
| 995 | } |
| 996 | |
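| | /* Publish the mux context on the quic_conn; the atomic store suggests the
| | * transport layer may read it concurrently.
| | */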
Frédéric Lécaille | b80b20c | 2022-01-12 17:46:56 +0100 | [diff] [blame] | 997 | HA_ATOMIC_STORE(&conn->qc->qcc, qcc); |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 998 | /* init read cycle */ |
| 999 | tasklet_wakeup(qcc->wait_event.tasklet); |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1000 | |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 1001 | return 0; |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1002 | |
Amaury Denoyelle | aebe26f | 2022-01-13 16:28:06 +0100 | [diff] [blame] | 1003 | fail_no_timeout_task: |
| 1004 | tasklet_free(qcc->wait_event.tasklet); |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 1005 | fail_no_tasklet: |
| 1006 | pool_free(pool_head_qcc, qcc); |
| 1007 | fail_no_qcc: |
| 1008 | return -1; |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1009 | } |
| 1010 | |
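| | /* Callback of the detach operation: the upper layer releases its
| | * conn_stream. The qcs is kept alive while data remain to be sent,
| | * otherwise it is destroyed and the mux timeout may be rearmed.
| | */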
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1011 | static void qc_detach(struct conn_stream *cs) |
| 1012 | { |
Amaury Denoyelle | 916f0ac | 2021-12-06 16:03:47 +0100 | [diff] [blame] | 1013 | struct qcs *qcs = cs->ctx; |
| 1014 | struct qcc *qcc = qcs->qcc; |
| 1015 | |
Amaury Denoyelle | 4f13757 | 2022-03-24 17:10:00 +0100 | [diff] [blame] | 1016 | TRACE_ENTER(QMUX_EV_STRM_END, qcc->conn, qcs); |
Amaury Denoyelle | 916f0ac | 2021-12-06 16:03:47 +0100 | [diff] [blame] | 1017 | |
Amaury Denoyelle | d975148 | 2022-02-01 15:15:11 +0100 | [diff] [blame] | 1018 | /* TODO on CONNECTION_CLOSE reception, it should be possible to free |
| 1019 | * qcs instances. This should be done once the buffering and ACK |
| 1020 | * management between xprt and mux is reorganized.
| 1021 | */ |
| 1022 | |
| 1023 | if (b_data(&qcs->tx.buf) || b_data(&qcs->tx.xprt_buf)) {
Amaury Denoyelle | 4f13757 | 2022-03-24 17:10:00 +0100 | [diff] [blame] | 1024 | TRACE_DEVEL("leaving with remaining data, detaching qcs", QMUX_EV_STRM_END, qcc->conn, qcs); |
Amaury Denoyelle | 2873a31 | 2021-12-08 14:42:55 +0100 | [diff] [blame] | 1025 | qcs->flags |= QC_SF_DETACH; |
| 1026 | return; |
| 1027 | } |
| 1028 | |
Amaury Denoyelle | 916f0ac | 2021-12-06 16:03:47 +0100 | [diff] [blame] | 1029 | qcs_destroy(qcs); |
Amaury Denoyelle | 1136e92 | 2022-02-01 10:33:09 +0100 | [diff] [blame] | 1030 | |
| 1031 | /* Schedule the mux timeout if no bidirectional streams are left. */
| 1032 | if (qcc_may_expire(qcc)) { |
| 1033 | qcc->task->expire = tick_add(now_ms, qcc->timeout); |
| 1034 | task_queue(qcc->task); |
Amaury Denoyelle | 916f0ac | 2021-12-06 16:03:47 +0100 | [diff] [blame] | 1035 | } |
Amaury Denoyelle | 4f13757 | 2022-03-24 17:10:00 +0100 | [diff] [blame] | 1036 | |
| 1037 | TRACE_LEAVE(QMUX_EV_STRM_END); |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1038 | } |
| 1039 | |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 1040 | /* Called from the upper layer, to receive data */ |
| 1041 | static size_t qc_rcv_buf(struct conn_stream *cs, struct buffer *buf, |
| 1042 | size_t count, int flags) |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1043 | { |
Amaury Denoyelle | 9a327a7 | 2022-02-14 17:11:09 +0100 | [diff] [blame] | 1044 | struct qcs *qcs = cs->ctx; |
| 1045 | struct htx *qcs_htx = NULL; |
| 1046 | struct htx *cs_htx = NULL; |
| 1047 | size_t ret = 0; |
Amaury Denoyelle | eb53e5b | 2022-02-14 17:11:32 +0100 | [diff] [blame] | 1048 | char fin = 0; |
Amaury Denoyelle | 9a327a7 | 2022-02-14 17:11:09 +0100 | [diff] [blame] | 1049 | |
Amaury Denoyelle | 4f13757 | 2022-03-24 17:10:00 +0100 | [diff] [blame] | 1050 | TRACE_ENTER(QMUX_EV_STRM_RECV, qcs->qcc->conn, qcs); |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1051 | |
Amaury Denoyelle | 9a327a7 | 2022-02-14 17:11:09 +0100 | [diff] [blame] | 1052 | qcs_htx = htx_from_buf(&qcs->rx.app_buf); |
| 1053 | if (htx_is_empty(qcs_htx)) { |
| 1054 | /* Set buffer data to 0 as HTX is empty. */ |
| 1055 | htx_to_buf(qcs_htx, &qcs->rx.app_buf); |
| 1056 | goto end; |
| 1057 | } |
| 1058 | |
| 1059 | ret = qcs_htx->data; |
| 1060 | |
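| | /* Fast path: when the destination HTX is empty and the whole source fits in
| | * <count>, transfer the raw buffer contents in one call instead of moving
| | * HTX blocks individually.
| | */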
| 1061 | cs_htx = htx_from_buf(buf); |
| 1062 | if (htx_is_empty(cs_htx) && htx_used_space(qcs_htx) <= count) { |
| 1063 | htx_to_buf(cs_htx, buf); |
| 1064 | htx_to_buf(qcs_htx, &qcs->rx.app_buf); |
| 1065 | b_xfer(buf, &qcs->rx.app_buf, b_data(&qcs->rx.app_buf)); |
| 1066 | goto end; |
| 1067 | } |
| 1068 | |
| 1069 | htx_xfer_blks(cs_htx, qcs_htx, count, HTX_BLK_UNUSED); |
| 1070 | BUG_ON(qcs_htx->flags & HTX_FL_PARSING_ERROR); |
| 1071 | |
| 1072 | /* Copy EOM from src to dst buffer if all data was copied. */
Amaury Denoyelle | eb53e5b | 2022-02-14 17:11:32 +0100 | [diff] [blame] | 1073 | if (htx_is_empty(qcs_htx) && (qcs_htx->flags & HTX_FL_EOM)) { |
| 1074 | cs_htx->flags |= HTX_FL_EOM; |
| 1075 | fin = 1; |
| 1076 | } |
Amaury Denoyelle | 9a327a7 | 2022-02-14 17:11:09 +0100 | [diff] [blame] | 1077 | |
| 1078 | cs_htx->extra = qcs_htx->extra ? (qcs_htx->data + qcs_htx->extra) : 0; |
| 1079 | htx_to_buf(cs_htx, buf); |
| 1080 | htx_to_buf(qcs_htx, &qcs->rx.app_buf); |
| 1081 | ret -= qcs_htx->data; |
| 1082 | |
| 1083 | end: |
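| | /* Data left in the app buffer: ask the upper layer to come back for more.
| | * Otherwise clear those flags, report EOI/EOS if the FIN was seen, and
| | * release the now empty buffer.
| | */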
| 1084 | if (b_data(&qcs->rx.app_buf)) { |
| 1085 | cs->flags |= (CS_FL_RCV_MORE | CS_FL_WANT_ROOM); |
| 1086 | } |
| 1087 | else { |
| 1088 | cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM); |
| 1089 | if (cs->flags & CS_FL_ERR_PENDING) |
| 1090 | cs->flags |= CS_FL_ERROR; |
| 1091 | |
Amaury Denoyelle | eb53e5b | 2022-02-14 17:11:32 +0100 | [diff] [blame] | 1092 | if (fin) |
| 1093 | cs->flags |= (CS_FL_EOI|CS_FL_EOS); |
Amaury Denoyelle | 9a327a7 | 2022-02-14 17:11:09 +0100 | [diff] [blame] | 1094 | |
| 1095 | if (b_size(&qcs->rx.app_buf)) { |
| 1096 | b_free(&qcs->rx.app_buf); |
| 1097 | offer_buffers(NULL, 1); |
| 1098 | } |
| 1099 | } |
| 1100 | |
| 1101 | if (ret) |
| 1102 | tasklet_wakeup(qcs->qcc->wait_event.tasklet); |
| 1103 | |
Amaury Denoyelle | 4f13757 | 2022-03-24 17:10:00 +0100 | [diff] [blame] | 1104 | TRACE_LEAVE(QMUX_EV_STRM_RECV, qcs->qcc->conn, qcs); |
| 1105 | |
Amaury Denoyelle | 9a327a7 | 2022-02-14 17:11:09 +0100 | [diff] [blame] | 1106 | return ret; |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1107 | } |
| 1108 | |
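| | /* Called from the upper layer, to send data. The HTX transfer itself is
| | * delegated to the application protocol layer (e.g. HTTP/3) via
| | * app_ops->snd_buf().
| | */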
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 1109 | static size_t qc_snd_buf(struct conn_stream *cs, struct buffer *buf, |
| 1110 | size_t count, int flags) |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1111 | { |
| 1112 | struct qcs *qcs = cs->ctx; |
Amaury Denoyelle | 4f13757 | 2022-03-24 17:10:00 +0100 | [diff] [blame] | 1113 | size_t ret; |
| 1114 | |
| 1115 | TRACE_ENTER(QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs); |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1116 | |
Amaury Denoyelle | 4f13757 | 2022-03-24 17:10:00 +0100 | [diff] [blame] | 1117 | ret = qcs->qcc->app_ops->snd_buf(cs, buf, count, flags); |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1118 | |
Amaury Denoyelle | 4f13757 | 2022-03-24 17:10:00 +0100 | [diff] [blame] | 1119 | TRACE_LEAVE(QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs); |
| 1120 | |
| 1121 | return ret; |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1122 | } |
| 1123 | |
| 1124 | /* Called from the upper layer, to subscribe <es> to events <event_type>. The |
| 1125 | * event subscriber <es> is not allowed to change from a previous call as long |
| 1126 | * as at least one event is still subscribed. The <event_type> must only be a |
| 1127 | * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0. |
| 1128 | */ |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 1129 | static int qc_subscribe(struct conn_stream *cs, int event_type, |
| 1130 | struct wait_event *es) |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1131 | { |
Amaury Denoyelle | a3f222d | 2021-12-06 11:24:00 +0100 | [diff] [blame] | 1132 | return qcs_subscribe(cs->ctx, event_type, es); |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1133 | } |
| 1134 | |
| 1135 | /* Called from the upper layer, to unsubscribe <es> from events <event_type>. |
| 1136 | * The <es> pointer is not allowed to differ from the one passed to the |
| 1137 | * subscribe() call. It always returns zero. |
| 1138 | */ |
| 1139 | static int qc_unsubscribe(struct conn_stream *cs, int event_type, struct wait_event *es) |
| 1140 | { |
| 1141 | struct qcs *qcs = cs->ctx; |
| 1142 | |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1143 | BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV)); |
| 1144 | BUG_ON(qcs->subs && qcs->subs != es); |
| 1145 | |
| 1146 | es->events &= ~event_type; |
| 1147 | if (!es->events) |
| 1148 | qcs->subs = NULL; |
| 1149 | |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1150 | return 0; |
| 1151 | } |
| 1152 | |
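| | /* Callback of the wake operation, notified on connection events. Only used
| | * here to release frontend connections when a soft-stop is in progress.
| | */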
Amaury Denoyelle | 0e0969d | 2022-01-31 15:41:14 +0100 | [diff] [blame] | 1153 | static int qc_wake(struct connection *conn) |
| 1154 | { |
| 1155 | struct qcc *qcc = conn->ctx; |
| 1156 | |
| 1157 | /* Check if a soft-stop is in progress. |
| 1158 | * Release the idle frontend connection in that case.
| 1159 | */ |
| 1160 | if (unlikely(conn->qc->li->bind_conf->frontend->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) { |
| 1161 | qc_release(qcc); |
| 1162 | } |
| 1163 | |
| 1164 | return 1; |
| 1165 | } |
| 1166 | |
Amaury Denoyelle | dd4fbfb | 2022-03-24 16:09:16 +0100 | [diff] [blame] | 1167 | |
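| | /* Append frame-type specific details to the trace buffer for the frames the
| | * mux is interested in; other types are ignored.
| | */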
Amaury Denoyelle | fa29f33 | 2022-03-25 09:09:40 +0100 | [diff] [blame] | 1168 | static void qmux_trace_frm(const struct quic_frame *frm) |
| 1169 | { |
| 1170 | switch (frm->type) { |
| 1171 | case QUIC_FT_MAX_STREAMS_BIDI: |
| 1172 | chunk_appendf(&trace_buf, " max_streams=%llu",
| 1173 | (unsigned long long)frm->max_streams_bidi.max_streams);
| 1174 | break; |
| 1175 | |
| 1176 | case QUIC_FT_MAX_STREAMS_UNI: |
| 1177 | chunk_appendf(&trace_buf, " max_streams=%llu",
| 1178 | (unsigned long long)frm->max_streams_uni.max_streams);
| 1179 | break; |
| 1180 | |
| 1181 | default: |
| 1182 | break; |
| 1183 | } |
| 1184 | } |
| 1185 | |
Amaury Denoyelle | dd4fbfb | 2022-03-24 16:09:16 +0100 | [diff] [blame] | 1186 | /* quic-mux trace handler */ |
| 1187 | static void qmux_trace(enum trace_level level, uint64_t mask, |
| 1188 | const struct trace_source *src, |
| 1189 | const struct ist where, const struct ist func, |
| 1190 | const void *a1, const void *a2, const void *a3, const void *a4) |
| 1191 | { |
| 1192 | const struct connection *conn = a1; |
| 1193 | const struct qcc *qcc = conn ? conn->ctx : NULL; |
| 1194 | const struct qcs *qcs = a2; |
| 1195 | |
| 1196 | if (!qcc) |
| 1197 | return; |
| 1198 | |
| 1199 | if (src->verbosity > QMUX_VERB_CLEAN) { |
| 1200 | chunk_appendf(&trace_buf, " : qcc=%p(F)", qcc); |
| 1201 | |
| 1202 | if (qcs) |
| 1203 | chunk_appendf(&trace_buf, " qcs=%p(%llu)", qcs, qcs->by_id.key); |
Amaury Denoyelle | 4f13757 | 2022-03-24 17:10:00 +0100 | [diff] [blame] | 1204 | |
| 1205 | if (mask & QMUX_EV_QCC_NQCS) { |
| 1206 | const uint64_t *id = a3; |
| 1207 | chunk_appendf(&trace_buf, " id=%llu", (unsigned long long)*id);
| 1208 | } |
Amaury Denoyelle | fa29f33 | 2022-03-25 09:09:40 +0100 | [diff] [blame] | 1209 | |
| 1210 | if (mask & QMUX_EV_SEND_FRM) |
| 1211 | qmux_trace_frm(a3); |
Amaury Denoyelle | fdcec36 | 2022-03-25 09:28:10 +0100 | [diff] [blame] | 1212 | |
| 1213 | if (mask & QMUX_EV_QCS_PUSH_FRM) { |
| 1214 | const struct qcs_push_frm_trace_arg *arg = a3; |
| 1215 | chunk_appendf(&trace_buf, " sent=%llu xfer=%d fin=%d offset=%llu",
| 1216 | (unsigned long long)arg->sent, arg->xfer, arg->fin, (unsigned long long)arg->offset);
| 1217 | } |
Amaury Denoyelle | dd4fbfb | 2022-03-24 16:09:16 +0100 | [diff] [blame] | 1218 | } |
| 1219 | } |
| 1220 | |
Amaury Denoyelle | 251eadf | 2022-03-24 17:14:52 +0100 | [diff] [blame] | 1221 | /* Function to automatically activate QUIC MUX traces on stdout. |
| 1222 | * Activated via the compilation flag -DENABLE_QUIC_STDOUT_TRACES. |
| 1223 | * Main use for now is in the docker image for QUIC interop testing. |
| 1224 | */ |
| 1225 | static void qmux_init_stdout_traces(void) |
| 1226 | { |
| 1227 | #ifdef ENABLE_QUIC_STDOUT_TRACES |
| 1228 | trace_qmux.sink = sink_find("stdout"); |
| 1229 | trace_qmux.level = TRACE_LEVEL_DEVELOPER; |
| 1230 | trace_qmux.state = TRACE_STATE_RUNNING; |
| 1231 | trace_qmux.verbosity = QMUX_VERB_MINIMAL; |
| 1232 | #endif |
| 1233 | } |
| 1234 | INITCALL0(STG_INIT, qmux_init_stdout_traces); |
| 1235 | |
Amaury Denoyelle | dd4fbfb | 2022-03-24 16:09:16 +0100 | [diff] [blame] | 1236 | |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1237 | static const struct mux_ops qc_ops = { |
| 1238 | .init = qc_init, |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 1239 | .detach = qc_detach, |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1240 | .rcv_buf = qc_rcv_buf, |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 1241 | .snd_buf = qc_snd_buf, |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1242 | .subscribe = qc_subscribe, |
| 1243 | .unsubscribe = qc_unsubscribe, |
Amaury Denoyelle | 0e0969d | 2022-01-31 15:41:14 +0100 | [diff] [blame] | 1244 | .wake = qc_wake, |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1245 | }; |
| 1246 | |
| 1247 | static struct mux_proto_list mux_proto_quic = |
Amaury Denoyelle | deed777 | 2021-12-03 11:36:46 +0100 | [diff] [blame] | 1248 | { .token = IST("quic"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_FE, .mux = &qc_ops }; |
Frédéric Lécaille | dfbae76 | 2021-02-18 09:59:01 +0100 | [diff] [blame] | 1249 | |
| 1250 | INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_quic); |