/*
 * QUIC xprt layer. Acts as an abstraction between the quic_conn and MUX layers.
 *
 * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <haproxy/api.h>
#include <haproxy/connection.h>
#include <haproxy/quic_conn.h>
#include <haproxy/ssl_sock.h>
#include <haproxy/trace.h>

#define TRACE_SOURCE &trace_quic

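/* xprt close callback: notify the quic_conn that the upper layers are gone,
 * then either release it immediately or let it schedule a CONNECTION_CLOSE.
 */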
static void quic_close(struct connection *conn, void *xprt_ctx)
{
	struct ssl_sock_ctx *conn_ctx = xprt_ctx;
	struct quic_conn *qc = conn_ctx->qc;

	TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);

	/* Next application data can be dropped. */
	qc->mux_state = QC_MUX_RELEASED;

	/* If the quic-conn timer has already expired, free the quic-conn. */
	if (qc->flags & QUIC_FL_CONN_EXP_TIMER) {
		quic_conn_release(qc);
		qc = NULL;
		goto leave;
	}

	/* Schedule a CONNECTION_CLOSE emission. If process stopping is in
	 * progress, the quic-conn idle timer will be scheduled immediately
	 * after its emission to ensure an immediate connection closing.
	 */
	qc_check_close_on_released_mux(qc);
 leave:
	TRACE_LEAVE(QUIC_EV_CONN_CLOSE, qc);
}
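
/* Note (illustrative): this close callback is not invoked directly by the MUX.
 * The connection layer dispatches to it through its generic helper, roughly:
 *
 *   if (conn->xprt && conn->xprt->close)
 *           conn->xprt->close(conn, conn->xprt_ctx);
 *
 * e.g. via a helper such as conn_xprt_close(), once the MUX has released the
 * connection.
 */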

/* Called from the upper layer, to subscribe <es> to events <event_type>. The
 * event subscriber <es> is not allowed to change from a previous call as long
 * as at least one event is still subscribed. The <event_type> must only be a
 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
 */
static int quic_conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
{
	struct quic_conn *qc = conn->handle.qc;

	TRACE_ENTER(QUIC_EV_CONN_SUB, qc);

	BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
	BUG_ON(qc->subs && qc->subs != es);

	es->events |= event_type;
	qc->subs = es;

	/* TODO implement a check_events to detect if the subscriber should be
	 * woken up immediately?
	 */

	if (event_type & SUB_RETRY_RECV)
		TRACE_DEVEL("subscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc);

	if (event_type & SUB_RETRY_SEND)
		TRACE_DEVEL("subscribe(send)", QUIC_EV_CONN_XPRTSEND, qc);

	TRACE_LEAVE(QUIC_EV_CONN_SUB, qc);

	return 0;
}
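
/* Usage sketch (illustrative only, not compiled here): an upper layer such as
 * the QUIC MUX typically arms and disarms these callbacks through the generic
 * xprt entry points, assuming it owns a struct wait_event <wait_event> whose
 * tasklet it wants woken up once the event may be retried:
 *
 *   conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &wait_event);
 *   ...
 *   conn->xprt->unsubscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &wait_event);
 */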

/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
 * The <es> pointer is not allowed to differ from the one passed to the
 * subscribe() call. It always returns zero.
 */
static int quic_conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
{
	struct quic_conn *qc = conn->handle.qc;

	TRACE_ENTER(QUIC_EV_CONN_SUB, qc);

	if (event_type & SUB_RETRY_RECV)
		TRACE_DEVEL("unsubscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc);
	if (event_type & SUB_RETRY_SEND)
		TRACE_DEVEL("unsubscribe(send)", QUIC_EV_CONN_XPRTSEND, qc);

	es->events &= ~event_type;
	if (!es->events)
		qc->subs = NULL;

	/* TODO implement ignore_events similar to conn_unsubscribe()? */

	TRACE_LEAVE(QUIC_EV_CONN_SUB, qc);

	return 0;
}

/* Store in <xprt_ctx> the context attached to <conn>.
 * Always returns 0.
 */
static int qc_conn_init(struct connection *conn, void **xprt_ctx)
{
	struct quic_conn *qc = NULL;

	TRACE_ENTER(QUIC_EV_CONN_NEW, qc);

	/* Do not store the context if it is already set. */
	if (*xprt_ctx)
		goto out;

	*xprt_ctx = conn->handle.qc->xprt_ctx;

 out:
	TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);

	return 0;
}

/* Start the QUIC transport layer. */
static int qc_xprt_start(struct connection *conn, void *ctx)
{
	int ret = 0;
	struct quic_conn *qc;

	qc = conn->handle.qc;
	TRACE_ENTER(QUIC_EV_CONN_NEW, qc);

	if (qc->flags & QUIC_FL_CONN_AFFINITY_CHANGED)
		qc_finalize_affinity_rebind(qc);

	/* mux-quic can now be considered ready. */
	qc->mux_state = QC_MUX_READY;

	ret = 1;
 out:
	TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
	return ret;
}

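/* Return the ssl_sock_ctx attached to the QUIC connection bound to <conn>,
 * or NULL if <conn> is not a fully initialized QUIC connection.
 */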
static struct ssl_sock_ctx *qc_get_ssl_sock_ctx(struct connection *conn)
{
	if (!conn || conn->xprt != xprt_get(XPRT_QUIC) || !conn->handle.qc || !conn->xprt_ctx)
		return NULL;

	return conn->handle.qc->xprt_ctx;
}

/* transport-layer operations for QUIC connections. */
static struct xprt_ops ssl_quic = {
	.close             = quic_close,
	.subscribe         = quic_conn_subscribe,
	.unsubscribe       = quic_conn_unsubscribe,
	.init              = qc_conn_init,
	.start             = qc_xprt_start,
	.prepare_bind_conf = ssl_sock_prepare_bind_conf,
	.destroy_bind_conf = ssl_sock_destroy_bind_conf,
	.get_alpn          = ssl_sock_get_alpn,
	.get_ssl_sock_ctx  = qc_get_ssl_sock_ctx,
	.name              = "QUIC",
};
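
/* Illustrative consumption of the table above (sketch, not part of this file):
 * once registered below via xprt_register(), these operations are looked up
 * and dispatched generically by the connection layer, along the lines of:
 *
 *   struct xprt_ops *ops = xprt_get(XPRT_QUIC);
 *
 *   ops->init(conn, &conn->xprt_ctx);
 *   ops->start(conn, conn->xprt_ctx);
 */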

/* Register the QUIC transport layer at startup. */
static void __quic_conn_init(void)
{
	xprt_register(XPRT_QUIC, &ssl_quic);
}
INITCALL0(STG_REGISTER, __quic_conn_init);