blob: a724dfb7f1885837949be493fca0a665040258d4 [file] [log] [blame]
/*
 * QUIC xprt layer. Act as an abstraction between quic_conn and MUX layers.
 *
 * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
12
Amaury Denoyelle92fa63f2022-09-30 18:11:13 +020013#include <haproxy/api.h>
14#include <haproxy/connection.h>
15#include <haproxy/quic_conn.h>
16#include <haproxy/ssl_sock.h>
17#include <haproxy/trace.h>
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010018
Amaury Denoyelle92fa63f2022-09-30 18:11:13 +020019#define TRACE_SOURCE &trace_quic
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010020
Amaury Denoyelle92fa63f2022-09-30 18:11:13 +020021static void quic_close(struct connection *conn, void *xprt_ctx)
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010022{
Amaury Denoyelle92fa63f2022-09-30 18:11:13 +020023 struct ssl_sock_ctx *conn_ctx = xprt_ctx;
24 struct quic_conn *qc = conn_ctx->qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010025
Amaury Denoyelle92fa63f2022-09-30 18:11:13 +020026 TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010027
Amaury Denoyelle92fa63f2022-09-30 18:11:13 +020028 /* Next application data can be dropped. */
29 qc->mux_state = QC_MUX_RELEASED;
Frédéric Lécaillea8b2f842022-08-10 17:56:45 +020030
Amaury Denoyelle92fa63f2022-09-30 18:11:13 +020031 /* If the quic-conn timer has already expired free the quic-conn. */
32 if (qc->flags & QUIC_FL_CONN_EXP_TIMER) {
33 quic_conn_release(qc);
Frédéric Lécailleeb3e5172023-04-12 13:41:54 +020034 qc = NULL;
Frédéric Lécaillea8b2f842022-08-10 17:56:45 +020035 goto leave;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010036 }
37
Amaury Denoyelle77ed6312023-02-01 09:28:55 +010038 /* Schedule a CONNECTION_CLOSE emission. If process stopping is in
39 * progress, quic-conn idle-timer will be scheduled immediately after
40 * its emission to ensure an immediate connection closing.
41 */
Amaury Denoyelle92fa63f2022-09-30 18:11:13 +020042 qc_check_close_on_released_mux(qc);
Frédéric Lécaillea8b2f842022-08-10 17:56:45 +020043 leave:
Amaury Denoyelle92fa63f2022-09-30 18:11:13 +020044 TRACE_LEAVE(QUIC_EV_CONN_CLOSE, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +010045}
46
Frédéric Lécaille422a39c2021-03-03 17:28:34 +010047/* Called from the upper layer, to subscribe <es> to events <event_type>. The
48 * event subscriber <es> is not allowed to change from a previous call as long
49 * as at least one event is still subscribed. The <event_type> must only be a
50 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
51 */
52static int quic_conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
53{
Frédéric Lécaillea8b2f842022-08-10 17:56:45 +020054 struct quic_conn *qc = conn->handle.qc;
Frédéric Lécaillea8b2f842022-08-10 17:56:45 +020055
56 TRACE_ENTER(QUIC_EV_CONN_SUB, qc);
Frédéric Lécaille513b4f22021-09-20 15:23:17 +020057
58 BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
Amaury Denoyellebbb1c682022-09-28 15:15:51 +020059 BUG_ON(qc->subs && qc->subs != es);
Frédéric Lécaille513b4f22021-09-20 15:23:17 +020060
61 es->events |= event_type;
Amaury Denoyellebbb1c682022-09-28 15:15:51 +020062 qc->subs = es;
63
64 /* TODO implement a check_events to detect if subscriber should be
65 * woken up immediately ?
66 */
Frédéric Lécaille513b4f22021-09-20 15:23:17 +020067
68 if (event_type & SUB_RETRY_RECV)
Amaury Denoyellebbb1c682022-09-28 15:15:51 +020069 TRACE_DEVEL("subscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc);
Frédéric Lécaille513b4f22021-09-20 15:23:17 +020070
71 if (event_type & SUB_RETRY_SEND)
Amaury Denoyellebbb1c682022-09-28 15:15:51 +020072 TRACE_DEVEL("subscribe(send)", QUIC_EV_CONN_XPRTSEND, qc);
Frédéric Lécaillea8b2f842022-08-10 17:56:45 +020073
74 TRACE_LEAVE(QUIC_EV_CONN_SUB, qc);
Frédéric Lécaille513b4f22021-09-20 15:23:17 +020075
76 return 0;
Frédéric Lécaille422a39c2021-03-03 17:28:34 +010077}
78
79/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
80 * The <es> pointer is not allowed to differ from the one passed to the
81 * subscribe() call. It always returns zero.
82 */
83static int quic_conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
84{
Frédéric Lécaillea8b2f842022-08-10 17:56:45 +020085 struct quic_conn *qc = conn->handle.qc;
Frédéric Lécaillea8b2f842022-08-10 17:56:45 +020086
87 TRACE_ENTER(QUIC_EV_CONN_SUB, qc);
88
89 if (event_type & SUB_RETRY_RECV)
Amaury Denoyellebbb1c682022-09-28 15:15:51 +020090 TRACE_DEVEL("unsubscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc);
Frédéric Lécaillea8b2f842022-08-10 17:56:45 +020091 if (event_type & SUB_RETRY_SEND)
Amaury Denoyellebbb1c682022-09-28 15:15:51 +020092 TRACE_DEVEL("unsubscribe(send)", QUIC_EV_CONN_XPRTSEND, qc);
Frédéric Lécaillea8b2f842022-08-10 17:56:45 +020093
Amaury Denoyellebbb1c682022-09-28 15:15:51 +020094 es->events &= ~event_type;
95 if (!es->events)
96 qc->subs = NULL;
97
98 /* TODO implement ignore_events similar to conn_unsubscribe() ? */
Frédéric Lécaillea8b2f842022-08-10 17:56:45 +020099
100 TRACE_LEAVE(QUIC_EV_CONN_SUB, qc);
101
Amaury Denoyellebbb1c682022-09-28 15:15:51 +0200102 return 0;
Frédéric Lécaille422a39c2021-03-03 17:28:34 +0100103}
104
Amaury Denoyelle33ac3462022-01-18 16:44:34 +0100105/* Store in <xprt_ctx> the context attached to <conn>.
106 * Returns always 0.
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100107 */
108static int qc_conn_init(struct connection *conn, void **xprt_ctx)
109{
Amaury Denoyelle8fc267b2023-11-20 14:56:49 +0100110 struct quic_conn *qc = conn->handle.qc;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100111
Frédéric Lécailleeb3e5172023-04-12 13:41:54 +0200112 TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100113
Amaury Denoyelle8fc267b2023-11-20 14:56:49 +0100114 /* Ensure thread connection migration is finalized ASAP. */
115 if (qc->flags & QUIC_FL_CONN_AFFINITY_CHANGED)
116 qc_finalize_affinity_rebind(qc);
117
Amaury Denoyelle33ac3462022-01-18 16:44:34 +0100118 /* do not store the context if already set */
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100119 if (*xprt_ctx)
120 goto out;
121
Amaury Denoyelle8fc267b2023-11-20 14:56:49 +0100122 *xprt_ctx = qc->xprt_ctx;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100123
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100124 out:
Frédéric Lécaillefde2a982021-12-27 15:12:09 +0100125 TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100126
127 return 0;
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100128}
129
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +0200130/* Start the QUIC transport layer */
131static int qc_xprt_start(struct connection *conn, void *ctx)
132{
Frédéric Lécaillea8b2f842022-08-10 17:56:45 +0200133 int ret = 0;
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +0200134 struct quic_conn *qc;
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +0200135
Willy Tarreau784b8682022-04-11 14:18:10 +0200136 qc = conn->handle.qc;
Frédéric Lécaillea8b2f842022-08-10 17:56:45 +0200137 TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
Amaury Denoyellecfa2d562022-01-19 16:01:05 +0100138
139 /* mux-quic can now be considered ready. */
140 qc->mux_state = QC_MUX_READY;
141
Amaury Denoyelleb75338e2024-03-04 18:41:39 +0100142 /* Schedule quic-conn to ensure post handshake frames are emitted. This
143 * is not done for 0-RTT as xprt->start happens before handshake
144 * completion.
145 */
146 if (qc->flags & QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS)
147 tasklet_wakeup(qc->wait_event.tasklet);
148
Frédéric Lécaillea8b2f842022-08-10 17:56:45 +0200149 ret = 1;
150 out:
151 TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
152 return ret;
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +0200153}
154
Willy Tarreau54a1dcb2022-04-11 11:57:35 +0200155static struct ssl_sock_ctx *qc_get_ssl_sock_ctx(struct connection *conn)
156{
Willy Tarreau784b8682022-04-11 14:18:10 +0200157 if (!conn || conn->xprt != xprt_get(XPRT_QUIC) || !conn->handle.qc || !conn->xprt_ctx)
Willy Tarreau54a1dcb2022-04-11 11:57:35 +0200158 return NULL;
159
Willy Tarreau784b8682022-04-11 14:18:10 +0200160 return conn->handle.qc->xprt_ctx;
Willy Tarreau54a1dcb2022-04-11 11:57:35 +0200161}
162
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100163/* transport-layer operations for QUIC connections. */
164static struct xprt_ops ssl_quic = {
Amaury Denoyelle414cac52021-09-22 11:14:37 +0200165 .close = quic_close,
Frédéric Lécaille422a39c2021-03-03 17:28:34 +0100166 .subscribe = quic_conn_subscribe,
167 .unsubscribe = quic_conn_unsubscribe,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100168 .init = qc_conn_init,
Frédéric Lécaille3d77fa72021-05-31 09:30:14 +0200169 .start = qc_xprt_start,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100170 .prepare_bind_conf = ssl_sock_prepare_bind_conf,
171 .destroy_bind_conf = ssl_sock_destroy_bind_conf,
Amaury Denoyelle71e588c2021-11-12 11:23:29 +0100172 .get_alpn = ssl_sock_get_alpn,
Willy Tarreau54a1dcb2022-04-11 11:57:35 +0200173 .get_ssl_sock_ctx = qc_get_ssl_sock_ctx,
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100174 .name = "QUIC",
175};
176
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100177static void __quic_conn_init(void)
178{
Frédéric Lécaillea7e7ce92020-11-23 14:14:04 +0100179 xprt_register(XPRT_QUIC, &ssl_quic);
180}
Willy Tarreau79367f92022-04-25 19:18:24 +0200181INITCALL0(STG_REGISTER, __quic_conn_init);