/*
 * Pass-through mux-demux for connections
 *
 * Copyright 2017 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <haproxy/api.h>
#include <haproxy/buf.h>
#include <haproxy/connection.h>
#include <haproxy/conn_stream.h>
#include <haproxy/pipe-t.h>
#include <haproxy/stream.h>
#include <haproxy/task.h>
#include <haproxy/trace.h>

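/* Context attached to each pass-through connection. It simply keeps a pointer
 * to the (single) conn_stream and to the connection, plus the wait_event used
 * to subscribe to transport-layer I/O events.
 */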
struct mux_pt_ctx {
        struct conn_stream *cs;
        struct connection *conn;
        struct wait_event wait_event;
};

DECLARE_STATIC_POOL(pool_head_pt_ctx, "mux_pt", sizeof(struct mux_pt_ctx));

/* trace source and events */
static void pt_trace(enum trace_level level, uint64_t mask,
                     const struct trace_source *src,
                     const struct ist where, const struct ist func,
                     const void *a1, const void *a2, const void *a3, const void *a4);

/* The event representation is split like this:
 *   pt_ctx - internal PT context
 *   strm   - application layer
 */
static const struct trace_event pt_trace_events[] = {
#define PT_EV_CONN_NEW     (1ULL << 0)
        { .mask = PT_EV_CONN_NEW,   .name = "pt_conn_new",  .desc = "new PT connection" },
#define PT_EV_CONN_WAKE    (1ULL << 1)
        { .mask = PT_EV_CONN_WAKE,  .name = "pt_conn_wake", .desc = "PT connection woken up" },
#define PT_EV_CONN_END     (1ULL << 2)
        { .mask = PT_EV_CONN_END,   .name = "pt_conn_end",  .desc = "PT connection terminated" },
#define PT_EV_CONN_ERR     (1ULL << 3)
        { .mask = PT_EV_CONN_ERR,   .name = "pt_conn_err",  .desc = "error on PT connection" },
#define PT_EV_STRM_NEW     (1ULL << 4)
        { .mask = PT_EV_STRM_NEW,   .name = "strm_new",     .desc = "app-layer stream creation" },
#define PT_EV_STRM_SHUT    (1ULL << 5)
        { .mask = PT_EV_STRM_SHUT,  .name = "strm_shut",    .desc = "stream shutdown" },
#define PT_EV_STRM_END     (1ULL << 6)
        { .mask = PT_EV_STRM_END,   .name = "strm_end",     .desc = "detaching app-layer stream" },
#define PT_EV_STRM_ERR     (1ULL << 7)
        { .mask = PT_EV_STRM_ERR,   .name = "strm_err",     .desc = "stream error" },
#define PT_EV_RX_DATA      (1ULL << 8)
        { .mask = PT_EV_RX_DATA,    .name = "pt_rx_data",   .desc = "Rx on PT connection" },
#define PT_EV_TX_DATA      (1ULL << 9)
        { .mask = PT_EV_TX_DATA,    .name = "pt_tx_data",   .desc = "Tx on PT connection" },

        {}
};


static const struct name_desc pt_trace_decoding[] = {
#define PT_VERB_CLEAN    1
        { .name="clean",    .desc="only user-friendly stuff, generally suitable for level \"user\"" },
#define PT_VERB_MINIMAL  2
        { .name="minimal",  .desc="report only the pt context state and flags, no real decoding" },
#define PT_VERB_SIMPLE   3
        { .name="simple",   .desc="add request/response status line or htx info when available" },
#define PT_VERB_ADVANCED 4
        { .name="advanced", .desc="add header fields or frame decoding when available" },
#define PT_VERB_COMPLETE 5
        { .name="complete", .desc="add full data dump when available" },
        { /* end */ }
};

static struct trace_source trace_pt __read_mostly = {
        .name = IST("pt"),
        .desc = "Passthrough multiplexer",
        .arg_def = TRC_ARG1_CONN,  // TRACE()'s first argument is always a connection
        .default_cb = pt_trace,
        .known_events = pt_trace_events,
        .lockon_args = NULL,
        .decoding = pt_trace_decoding,
        .report_events = ~0,  // report everything by default
};
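/* Note: this trace source can be driven at runtime through the CLI, e.g.
 * "trace pt level developer" followed by "trace pt verbosity advanced" and
 * "trace pt start now" (assuming the usual trace commands are available in
 * this build).
 */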

#define TRACE_SOURCE &trace_pt
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);

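/* Dumps at most <len> bytes of buffer <buf>, starting at relative offset
 * <ofs>, into the trace buffer, handling a possible wrapping as two
 * contiguous blocks dumped line by line.
 */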
static inline void pt_trace_buf(const struct buffer *buf, size_t ofs, size_t len)
{
        size_t block1, block2;
        int line, ptr, newptr;

        block1 = b_contig_data(buf, ofs);
        block2 = 0;
        if (block1 > len)
                block1 = len;
        block2 = len - block1;

        ofs = b_peek_ofs(buf, ofs);

        line = 0;
        ptr = ofs;
        while (ptr < ofs + block1) {
                newptr = dump_text_line(&trace_buf, b_orig(buf), b_size(buf), ofs + block1, &line, ptr);
                if (newptr == ptr)
                        break;
                ptr = newptr;
        }

        line = ptr = 0;
        while (ptr < block2) {
                newptr = dump_text_line(&trace_buf, b_orig(buf), b_size(buf), block2, &line, ptr);
                if (newptr == ptr)
                        break;
                ptr = newptr;
        }
}

/* the PT traces always expect that arg1, if non-null, is of type connection
 * (from which we can derive the pt context), that arg2, if non-null, is a
 * conn-stream, and that arg3, if non-null, is a buffer.
 */
static void pt_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
                     const struct ist where, const struct ist func,
                     const void *a1, const void *a2, const void *a3, const void *a4)
{
        const struct connection *conn = a1;
        const struct mux_pt_ctx *ctx = conn ? conn->ctx : NULL;
        const struct conn_stream *cs = a2;
        const struct buffer *buf = a3;
        const size_t *val = a4;

        if (!ctx || src->verbosity < PT_VERB_CLEAN)
                return;

        /* Display frontend/backend info by default */
        chunk_appendf(&trace_buf, " : [%c]", (conn_is_back(conn) ? 'B' : 'F'));

        if (src->verbosity == PT_VERB_CLEAN)
                return;

        /* Display the value of the 4th argument (level > STATE) */
        if (src->level > TRACE_LEVEL_STATE && val)
                chunk_appendf(&trace_buf, " - VAL=%lu", (long)*val);

        /* Display conn and cs info, if defined (pointer + flags) */
        chunk_appendf(&trace_buf, " - conn=%p(0x%08x)", conn, conn->flags);
        if (cs)
                chunk_appendf(&trace_buf, " cs=%p(0x%08x)", cs, cs->flags);

        if (src->verbosity == PT_VERB_MINIMAL)
                return;

        /* Display buffer info, if defined (level > USER & verbosity > SIMPLE) */
        if (src->level > TRACE_LEVEL_USER && buf) {
                int full = 0, max = 3000, chunk = 1024;

                /* Full info (level > STATE && verbosity > SIMPLE) */
                if (src->level > TRACE_LEVEL_STATE) {
                        if (src->verbosity == PT_VERB_COMPLETE)
                                full = 1;
                        else if (src->verbosity == PT_VERB_ADVANCED) {
                                full = 1;
                                max = 256;
                                chunk = 64;
                        }
                }

                chunk_appendf(&trace_buf, " buf=%u@%p+%u/%u",
                              (unsigned int)b_data(buf), b_orig(buf),
                              (unsigned int)b_head_ofs(buf), (unsigned int)b_size(buf));

                if (b_data(buf) && full) {
                        chunk_memcat(&trace_buf, "\n", 1);
                        if (b_data(buf) < max)
                                pt_trace_buf(buf, 0, b_data(buf));
                        else {
                                pt_trace_buf(buf, 0, chunk);
                                chunk_memcat(&trace_buf, "  ...\n", 6);
                                pt_trace_buf(buf, b_data(buf) - chunk, chunk);
                        }
                }
        }
}

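/* Releases the PT context <ctx>. If the connection is still attached to this
 * mux, it is detached, closed and freed as well.
 */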
static void mux_pt_destroy(struct mux_pt_ctx *ctx)
{
        struct connection *conn = NULL;

        TRACE_POINT(PT_EV_CONN_END);

        if (ctx) {
                /* The connection must be attached to this mux to be released */
                if (ctx->conn && ctx->conn->ctx == ctx)
                        conn = ctx->conn;

                TRACE_DEVEL("freeing pt context", PT_EV_CONN_END, conn);

                tasklet_free(ctx->wait_event.tasklet);

                if (conn && ctx->wait_event.events != 0)
                        conn->xprt->unsubscribe(conn, conn->xprt_ctx, ctx->wait_event.events,
                                                &ctx->wait_event);
                pool_free(pool_head_pt_ctx, ctx);
        }

        if (conn) {
                conn->mux = NULL;
                conn->ctx = NULL;
                TRACE_DEVEL("freeing conn", PT_EV_CONN_END, conn);

                conn_stop_tracking(conn);
                conn_full_close(conn);
                if (conn->destroy_cb)
                        conn->destroy_cb(conn);
                conn_free(conn);
        }
}

/* Callback, used when we get I/Os while in idle mode. This one is exported so
 * that "show fd" can resolve it.
 */
struct task *mux_pt_io_cb(struct task *t, void *tctx, unsigned int status)
{
        struct mux_pt_ctx *ctx = tctx;

        TRACE_ENTER(PT_EV_CONN_WAKE, ctx->conn, ctx->cs);
        if (ctx->cs) {
                /* There's a small race condition.
                 * mux_pt_io_cb() is only supposed to be called if we have no
                 * stream attached. However, maybe the tasklet got woken up,
                 * and this connection was then attached to a new stream.
                 * If this happened, just wake the tasklet up if anybody
                 * subscribed to receive events, and otherwise call the wake
                 * method, to make sure the event is noticed.
                 */
                if (ctx->conn->subs) {
                        ctx->conn->subs->events = 0;
                        tasklet_wakeup(ctx->conn->subs->tasklet);
                        ctx->conn->subs = NULL;
                } else if (ctx->cs->data_cb->wake)
                        ctx->cs->data_cb->wake(ctx->cs);
                TRACE_DEVEL("leaving waking up CS", PT_EV_CONN_WAKE, ctx->conn, ctx->cs);
                return t;
        }
        conn_ctrl_drain(ctx->conn);
        if (ctx->conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH)) {
                TRACE_DEVEL("leaving destroying pt context", PT_EV_CONN_WAKE, ctx->conn);
                mux_pt_destroy(ctx);
                t = NULL;
        }
        else {
                ctx->conn->xprt->subscribe(ctx->conn, ctx->conn->xprt_ctx, SUB_RETRY_RECV,
                                           &ctx->wait_event);
                TRACE_DEVEL("leaving subscribing for reads", PT_EV_CONN_WAKE, ctx->conn);
        }

        return t;
}

/* Initialize the mux once it's attached. It is expected that conn->ctx
 * points to the existing conn_stream (for outgoing connections) or NULL (for
 * incoming ones, in which case one will be allocated and a new stream will be
 * instantiated). Returns < 0 on error.
 */
static int mux_pt_init(struct connection *conn, struct proxy *prx, struct session *sess,
                       struct buffer *input)
{
        struct cs_endpoint *endp;
        struct conn_stream *cs = conn->ctx;
        struct mux_pt_ctx *ctx = pool_alloc(pool_head_pt_ctx);

        TRACE_ENTER(PT_EV_CONN_NEW);

        if (!ctx) {
                TRACE_ERROR("PT context allocation failure", PT_EV_CONN_NEW|PT_EV_CONN_END|PT_EV_CONN_ERR);
                goto fail;
        }

        ctx->wait_event.tasklet = tasklet_new();
        if (!ctx->wait_event.tasklet)
                goto fail_free_ctx;
        ctx->wait_event.tasklet->context = ctx;
        ctx->wait_event.tasklet->process = mux_pt_io_cb;
        ctx->wait_event.events = 0;
        ctx->conn = conn;

        if (!cs) {
                endp = cs_endpoint_new();
                if (!endp)
                        goto fail_free_ctx;
                endp->target = ctx;
                endp->ctx = conn;
                endp->flags |= CS_EP_T_MUX;

                cs = cs_new_from_mux(endp, sess, input);
                if (!cs) {
                        TRACE_ERROR("CS allocation failure", PT_EV_STRM_NEW|PT_EV_STRM_END|PT_EV_STRM_ERR, conn);
                        cs_endpoint_free(endp);
                        goto fail_free_ctx;
                }
                TRACE_POINT(PT_EV_STRM_NEW, conn, cs);
        }
        conn->ctx = ctx;
        ctx->cs = cs;
        cs->flags |= CS_FL_RCV_MORE;
        if (global.tune.options & GTUNE_USE_SPLICE)
                cs->endp->flags |= CS_EP_MAY_SPLICE;

        TRACE_LEAVE(PT_EV_CONN_NEW, conn, cs);
        return 0;

  fail_free_ctx:
        if (ctx->wait_event.tasklet)
                tasklet_free(ctx->wait_event.tasklet);
        pool_free(pool_head_pt_ctx, ctx);
  fail:
        TRACE_DEVEL("leaving in error", PT_EV_CONN_NEW|PT_EV_CONN_END|PT_EV_CONN_ERR);
        return -1;
}

/* callback to be used by default for the pass-through mux. It calls the data
 * layer wake() callback if it is set otherwise returns 0.
 */
static int mux_pt_wake(struct connection *conn)
{
        struct mux_pt_ctx *ctx = conn->ctx;
        struct conn_stream *cs = ctx->cs;
        int ret = 0;

        TRACE_ENTER(PT_EV_CONN_WAKE, ctx->conn, cs);
        if (cs) {
                ret = cs->data_cb->wake ? cs->data_cb->wake(cs) : 0;

                if (ret < 0) {
                        TRACE_DEVEL("leaving waking up CS", PT_EV_CONN_WAKE, ctx->conn, cs);
                        return ret;
                }
        } else {
                conn_ctrl_drain(conn);
                if (conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH)) {
                        TRACE_DEVEL("leaving destroying PT context", PT_EV_CONN_WAKE, ctx->conn);
                        mux_pt_destroy(ctx);
                        return -1;
                }
        }

        /* If we had early data, and we're done with the handshake
         * then we know the data are safe, and we can remove the flag.
         */
        if ((conn->flags & (CO_FL_EARLY_DATA | CO_FL_EARLY_SSL_HS | CO_FL_WAIT_XPRT)) ==
            CO_FL_EARLY_DATA)
                conn->flags &= ~CO_FL_EARLY_DATA;

        TRACE_LEAVE(PT_EV_CONN_WAKE, ctx->conn);
        return ret;
}

/*
 * Attach a new stream to a connection
 * (Used for outgoing connections)
 */
static int mux_pt_attach(struct connection *conn, struct conn_stream *cs, struct session *sess)
{
        struct mux_pt_ctx *ctx = conn->ctx;

        TRACE_ENTER(PT_EV_STRM_NEW, conn);
        if (ctx->wait_event.events)
                conn->xprt->unsubscribe(ctx->conn, conn->xprt_ctx, SUB_RETRY_RECV, &ctx->wait_event);
        cs_attach_mux(cs, ctx, conn);
        ctx->cs = cs;
        cs->flags |= CS_FL_RCV_MORE;

        TRACE_LEAVE(PT_EV_STRM_NEW, conn, cs);
        return 0;
}

/* Retrieves a valid conn_stream from this connection, or returns NULL. For
 * this mux, it's easy as we can only store a single conn_stream.
 */
static const struct conn_stream *mux_pt_get_first_cs(const struct connection *conn)
{
        struct mux_pt_ctx *ctx = conn->ctx;
        struct conn_stream *cs = ctx->cs;

        return cs;
}

/* Destroy the mux and the associated connection if still attached to this mux
 * and no longer used */
static void mux_pt_destroy_meth(void *ctx)
{
        struct mux_pt_ctx *pt = ctx;

        TRACE_POINT(PT_EV_CONN_END, pt->conn, pt->cs);
        if (!(pt->cs) || !(pt->conn) || pt->conn->ctx != pt)
                mux_pt_destroy(pt);
}

/*
 * Detach the stream from the connection and possibly release the connection.
 */
static void mux_pt_detach(struct conn_stream *cs)
{
        struct connection *conn = __cs_conn(cs);
        struct mux_pt_ctx *ctx;

        ALREADY_CHECKED(conn);
        ctx = conn->ctx;

        TRACE_ENTER(PT_EV_STRM_END, conn, cs);

        /* Subscribe, to know if we got disconnected */
        if (!conn_is_back(conn) && conn->owner != NULL &&
            !(conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH))) {
                ctx->cs = NULL;
                conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &ctx->wait_event);
        } else {
                /* There's no session attached to that connection, destroy it */
                TRACE_DEVEL("killing dead connection", PT_EV_STRM_END, conn, cs);
                mux_pt_destroy(ctx);
        }

        TRACE_LEAVE(PT_EV_STRM_END);
}

/* returns the number of streams in use on a connection */
static int mux_pt_used_streams(struct connection *conn)
{
        struct mux_pt_ctx *ctx = conn->ctx;

        return ctx->cs ? 1 : 0;
}

/* returns the number of streams still available on a connection */
static int mux_pt_avail_streams(struct connection *conn)
{
        return 1 - mux_pt_used_streams(conn);
}

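/* Performs a read shutdown on the transport layer; in CS_SHR_DRAIN mode the
 * pending input is drained first. The connection is fully closed if the write
 * side was already shut.
 */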
static void mux_pt_shutr(struct conn_stream *cs, enum cs_shr_mode mode)
{
        struct connection *conn = __cs_conn(cs);

        TRACE_ENTER(PT_EV_STRM_SHUT, conn, cs);

        if (cs->endp->flags & CS_EP_SHR)
                return;
        cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
        if (conn_xprt_ready(conn) && conn->xprt->shutr)
                conn->xprt->shutr(conn, conn->xprt_ctx,
                                  (mode == CS_SHR_DRAIN));
        else if (mode == CS_SHR_DRAIN)
                conn_ctrl_drain(conn);
        if (cs->endp->flags & CS_EP_SHW)
                conn_full_close(conn);

        TRACE_LEAVE(PT_EV_STRM_SHUT, conn, cs);
}

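/* Performs a write shutdown on the transport layer, clean for CS_SHW_NORMAL,
 * otherwise hard. The connection is fully closed if the read side was already
 * shut.
 */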
static void mux_pt_shutw(struct conn_stream *cs, enum cs_shw_mode mode)
{
        struct connection *conn = __cs_conn(cs);

        TRACE_ENTER(PT_EV_STRM_SHUT, conn, cs);

        if (cs->endp->flags & CS_EP_SHW)
                return;
        if (conn_xprt_ready(conn) && conn->xprt->shutw)
                conn->xprt->shutw(conn, conn->xprt_ctx,
                                  (mode == CS_SHW_NORMAL));
        if (!(cs->endp->flags & CS_EP_SHR))
                conn_sock_shutw(conn, (mode == CS_SHW_NORMAL));
        else
                conn_full_close(conn);

        TRACE_LEAVE(PT_EV_STRM_SHUT, conn, cs);
}

/*
 * Called from the upper layer, to get more data
 *
 * The caller is responsible for defragmenting <buf> if necessary. But <flags>
 * must be tested to know the calling context. If CO_RFL_BUF_FLUSH is set, it
 * means the caller wants to flush input data (from the mux buffer and the
 * channel buffer) to be able to use kernel splicing or any kind of mux-to-mux
 * xfer. If CO_RFL_KEEP_RECV is set, the mux must always subscribe for read
 * events before giving back. CO_RFL_BUF_WET is set if <buf> is congested with
 * data scheduled for leaving soon. CO_RFL_BUF_NOT_STUCK is set to instruct the
 * mux it may optimize the data copy to <buf> if necessary. Otherwise, it should
 * copy as much data as possible.
 */
static size_t mux_pt_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t count, int flags)
{
        struct connection *conn = __cs_conn(cs);
        size_t ret = 0;

        TRACE_ENTER(PT_EV_RX_DATA, conn, cs, buf, (size_t[]){count});

        if (!count) {
                cs->flags |= (CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
                goto end;
        }
        b_realign_if_empty(buf);
        ret = conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, count, flags);
        if (conn_xprt_read0_pending(conn)) {
                cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
                cs->flags |= CS_FL_EOS;
                TRACE_DEVEL("read0 on connection", PT_EV_RX_DATA, conn, cs);
        }
        if (conn->flags & CO_FL_ERROR) {
                cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
                cs->flags |= CS_FL_ERROR;
                TRACE_DEVEL("error on connection", PT_EV_RX_DATA|PT_EV_CONN_ERR, conn, cs);
        }
  end:
        TRACE_LEAVE(PT_EV_RX_DATA, conn, cs, buf, (size_t[]){ret});
        return ret;
}

/* Called from the upper layer, to send data */
static size_t mux_pt_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t count, int flags)
{
        struct connection *conn = __cs_conn(cs);
        size_t ret;

        TRACE_ENTER(PT_EV_TX_DATA, conn, cs, buf, (size_t[]){count});

        ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, buf, count, flags);

        if (ret > 0)
                b_del(buf, ret);

        if (conn->flags & CO_FL_ERROR) {
                cs->flags |= CS_FL_ERROR;
                TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, cs);
        }

        TRACE_LEAVE(PT_EV_TX_DATA, conn, cs, buf, (size_t[]){ret});
        return ret;
}

/* Called from the upper layer, to subscribe <es> to events <event_type>. The
 * event subscriber <es> is not allowed to change from a previous call as long
 * as at least one event is still subscribed. The <event_type> must only be a
 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
 */
static int mux_pt_subscribe(struct conn_stream *cs, int event_type, struct wait_event *es)
{
        struct connection *conn = __cs_conn(cs);

        TRACE_POINT(PT_EV_RX_DATA|PT_EV_TX_DATA, conn, cs, 0, (size_t[]){event_type});
        return conn->xprt->subscribe(conn, conn->xprt_ctx, event_type, es);
}

/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
 * The <es> pointer is not allowed to differ from the one passed to the
 * subscribe() call. It always returns zero.
 */
static int mux_pt_unsubscribe(struct conn_stream *cs, int event_type, struct wait_event *es)
{
        struct connection *conn = __cs_conn(cs);

        TRACE_POINT(PT_EV_RX_DATA|PT_EV_TX_DATA, conn, cs, 0, (size_t[]){event_type});
        return conn->xprt->unsubscribe(conn, conn->xprt_ctx, event_type, es);
}

#if defined(USE_LINUX_SPLICE)
/* Send and get, using splicing */
static int mux_pt_rcv_pipe(struct conn_stream *cs, struct pipe *pipe, unsigned int count)
{
        struct connection *conn = __cs_conn(cs);
        int ret;

        TRACE_ENTER(PT_EV_RX_DATA, conn, cs, 0, (size_t[]){count});

        ret = conn->xprt->rcv_pipe(conn, conn->xprt_ctx, pipe, count);
        if (conn_xprt_read0_pending(conn)) {
                cs->flags |= CS_FL_EOS;
                TRACE_DEVEL("read0 on connection", PT_EV_RX_DATA, conn, cs);
        }
        if (conn->flags & CO_FL_ERROR) {
                cs->flags |= CS_FL_ERROR;
                TRACE_DEVEL("error on connection", PT_EV_RX_DATA|PT_EV_CONN_ERR, conn, cs);
        }

        TRACE_LEAVE(PT_EV_RX_DATA, conn, cs, 0, (size_t[]){ret});
        return (ret);
}

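/* Sends the content of <pipe> over the connection using kernel splicing, and
 * reports an error on the conn-stream if the connection reports one.
 */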
static int mux_pt_snd_pipe(struct conn_stream *cs, struct pipe *pipe)
{
        struct connection *conn = __cs_conn(cs);
        int ret;

        TRACE_ENTER(PT_EV_TX_DATA, conn, cs, 0, (size_t[]){pipe->data});

        ret = conn->xprt->snd_pipe(conn, conn->xprt_ctx, pipe);

        if (conn->flags & CO_FL_ERROR) {
                cs->flags |= CS_FL_ERROR;
                TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, cs);
        }

        TRACE_LEAVE(PT_EV_TX_DATA, conn, cs, 0, (size_t[]){ret});
        return ret;
}
#endif

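/* Generic mux control: MUX_STATUS reports the mux as ready once the transport
 * handshake is finished, and MUX_EXIT_STATUS is meaningless for this mux.
 */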
static int mux_pt_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *output)
{
        int ret = 0;
        switch (mux_ctl) {
        case MUX_STATUS:
                if (!(conn->flags & CO_FL_WAIT_XPRT))
                        ret |= MUX_STATUS_READY;
                return ret;
        case MUX_EXIT_STATUS:
                return MUX_ES_UNKNOWN;
        default:
                return -1;
        }
}

/* The mux operations */
const struct mux_ops mux_tcp_ops = {
        .init = mux_pt_init,
        .wake = mux_pt_wake,
        .rcv_buf = mux_pt_rcv_buf,
        .snd_buf = mux_pt_snd_buf,
        .subscribe = mux_pt_subscribe,
        .unsubscribe = mux_pt_unsubscribe,
#if defined(USE_LINUX_SPLICE)
        .rcv_pipe = mux_pt_rcv_pipe,
        .snd_pipe = mux_pt_snd_pipe,
#endif
        .attach = mux_pt_attach,
        .get_first_cs = mux_pt_get_first_cs,
        .detach = mux_pt_detach,
        .avail_streams = mux_pt_avail_streams,
        .used_streams = mux_pt_used_streams,
        .destroy = mux_pt_destroy_meth,
        .ctl = mux_pt_ctl,
        .shutr = mux_pt_shutr,
        .shutw = mux_pt_shutw,
        .flags = MX_FL_NONE,
        .name = "PASS",
};


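/* Same operations as mux_tcp_ops above, but this instance is flagged
 * MX_FL_NO_UPG and registered for the explicit "none" mux_proto, so such
 * connections cannot be upgraded to another mux.
 */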
const struct mux_ops mux_pt_ops = {
        .init = mux_pt_init,
        .wake = mux_pt_wake,
        .rcv_buf = mux_pt_rcv_buf,
        .snd_buf = mux_pt_snd_buf,
        .subscribe = mux_pt_subscribe,
        .unsubscribe = mux_pt_unsubscribe,
#if defined(USE_LINUX_SPLICE)
        .rcv_pipe = mux_pt_rcv_pipe,
        .snd_pipe = mux_pt_snd_pipe,
#endif
        .attach = mux_pt_attach,
        .get_first_cs = mux_pt_get_first_cs,
        .detach = mux_pt_detach,
        .avail_streams = mux_pt_avail_streams,
        .used_streams = mux_pt_used_streams,
        .destroy = mux_pt_destroy_meth,
        .ctl = mux_pt_ctl,
        .shutr = mux_pt_shutr,
        .shutw = mux_pt_shutw,
        .flags = MX_FL_NONE|MX_FL_NO_UPG,
        .name = "PASS",
};

/* PROT selection: default mux has empty name */
static struct mux_proto_list mux_proto_none =
        { .token = IST("none"), .mode = PROTO_MODE_TCP, .side = PROTO_SIDE_BOTH, .mux = &mux_pt_ops };
static struct mux_proto_list mux_proto_tcp =
        { .token = IST(""), .mode = PROTO_MODE_TCP, .side = PROTO_SIDE_BOTH, .mux = &mux_tcp_ops };

INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_none);
INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_tcp);