/*
 * Pass-through mux-demux for connections
 *
 * Copyright 2017 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <haproxy/api.h>
#include <haproxy/buf.h>
#include <haproxy/connection.h>
#include <haproxy/conn_stream.h>
#include <haproxy/pipe-t.h>
#include <haproxy/stream.h>
#include <haproxy/task.h>
#include <haproxy/trace.h>

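/* Context attached to a pass-through connection: the single conn_stream and
 * endpoint bound to it, the underlying connection, and the wait_event used to
 * subscribe to transport-layer I/O.
 */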
struct mux_pt_ctx {
	struct conn_stream *cs;
	struct cs_endpoint *endp;
	struct connection *conn;
	struct wait_event wait_event;
};

DECLARE_STATIC_POOL(pool_head_pt_ctx, "mux_pt", sizeof(struct mux_pt_ctx));

/* trace source and events */
static void pt_trace(enum trace_level level, uint64_t mask,
                     const struct trace_source *src,
                     const struct ist where, const struct ist func,
                     const void *a1, const void *a2, const void *a3, const void *a4);

/* The event representation is split like this :
 *   pt_ctx - internal PT context
 *   strm   - application layer
 */
static const struct trace_event pt_trace_events[] = {
#define           PT_EV_CONN_NEW      (1ULL <<  0)
	{ .mask = PT_EV_CONN_NEW,     .name = "pt_conn_new",  .desc = "new PT connection" },
#define           PT_EV_CONN_WAKE     (1ULL <<  1)
	{ .mask = PT_EV_CONN_WAKE,    .name = "pt_conn_wake", .desc = "PT connection woken up" },
#define           PT_EV_CONN_END      (1ULL <<  2)
	{ .mask = PT_EV_CONN_END,     .name = "pt_conn_end",  .desc = "PT connection terminated" },
#define           PT_EV_CONN_ERR      (1ULL <<  3)
	{ .mask = PT_EV_CONN_ERR,     .name = "pt_conn_err",  .desc = "error on PT connection" },
#define           PT_EV_STRM_NEW      (1ULL <<  4)
	{ .mask = PT_EV_STRM_NEW,     .name = "strm_new",     .desc = "app-layer stream creation" },
#define           PT_EV_STRM_SHUT     (1ULL <<  5)
	{ .mask = PT_EV_STRM_SHUT,    .name = "strm_shut",    .desc = "stream shutdown" },
#define           PT_EV_STRM_END      (1ULL <<  6)
	{ .mask = PT_EV_STRM_END,     .name = "strm_end",     .desc = "detaching app-layer stream" },
#define           PT_EV_STRM_ERR      (1ULL <<  7)
	{ .mask = PT_EV_STRM_ERR,     .name = "strm_err",     .desc = "stream error" },
#define           PT_EV_RX_DATA       (1ULL <<  8)
	{ .mask = PT_EV_RX_DATA,      .name = "pt_rx_data",   .desc = "Rx on PT connection" },
#define           PT_EV_TX_DATA       (1ULL <<  9)
	{ .mask = PT_EV_TX_DATA,      .name = "pt_tx_data",   .desc = "Tx on PT connection" },

	{}
};


static const struct name_desc pt_trace_decoding[] = {
#define PT_VERB_CLEAN    1
	{ .name="clean",    .desc="only user-friendly stuff, generally suitable for level \"user\"" },
#define PT_VERB_MINIMAL  2
	{ .name="minimal",  .desc="report only h1c/h1s state and flags, no real decoding" },
#define PT_VERB_SIMPLE   3
	{ .name="simple",   .desc="add request/response status line or htx info when available" },
#define PT_VERB_ADVANCED 4
	{ .name="advanced", .desc="add header fields or frame decoding when available" },
#define PT_VERB_COMPLETE 5
	{ .name="complete", .desc="add full data dump when available" },
	{ /* end */ }
};

static struct trace_source trace_pt __read_mostly = {
	.name = IST("pt"),
	.desc = "Passthrough multiplexer",
	.arg_def = TRC_ARG1_CONN,  // TRACE()'s first argument is always a connection
	.default_cb = pt_trace,
	.known_events = pt_trace_events,
	.lockon_args = NULL,
	.decoding = pt_trace_decoding,
	.report_events = ~0,  // report everything by default
};

#define TRACE_SOURCE &trace_pt
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);

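/* Dumps at most <len> bytes of buffer <buf>, starting at offset <ofs>, as text
 * lines into the trace buffer. The area is dumped in two blocks when it wraps
 * at the end of the buffer's storage.
 */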
static inline void pt_trace_buf(const struct buffer *buf, size_t ofs, size_t len)
{
	size_t block1, block2;
	int line, ptr, newptr;

	block1 = b_contig_data(buf, ofs);
	block2 = 0;
	if (block1 > len)
		block1 = len;
	block2 = len - block1;

	ofs = b_peek_ofs(buf, ofs);

	line = 0;
	ptr = ofs;
	while (ptr < ofs + block1) {
		newptr = dump_text_line(&trace_buf, b_orig(buf), b_size(buf), ofs + block1, &line, ptr);
		if (newptr == ptr)
			break;
		ptr = newptr;
	}

	line = ptr = 0;
	while (ptr < block2) {
		newptr = dump_text_line(&trace_buf, b_orig(buf), b_size(buf), block2, &line, ptr);
		if (newptr == ptr)
			break;
		ptr = newptr;
	}
}

/* the PT traces always expect that arg1, if non-null, is of type connection
 * (from which we can derive the pt context), that arg2, if non-null, is a
 * conn-stream, and that arg3, if non-null, is a buffer.
 */
static void pt_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
                     const struct ist where, const struct ist func,
                     const void *a1, const void *a2, const void *a3, const void *a4)
{
	const struct connection *conn = a1;
	const struct mux_pt_ctx *ctx = conn ? conn->ctx : NULL;
	const struct conn_stream *cs = a2;
	const struct buffer *buf = a3;
	const size_t *val = a4;

	if (!ctx || src->verbosity < PT_VERB_CLEAN)
		return;

	/* Display frontend/backend info by default */
	chunk_appendf(&trace_buf, " : [%c]", (conn_is_back(conn) ? 'B' : 'F'));

	if (src->verbosity == PT_VERB_CLEAN)
		return;

	/* Display the value of the 4th argument (level > STATE) */
	if (src->level > TRACE_LEVEL_STATE && val)
		chunk_appendf(&trace_buf, " - VAL=%lu", (unsigned long)*val);

	/* Display conn and cs info, if defined (pointer + flags) */
	chunk_appendf(&trace_buf, " - conn=%p(0x%08x)", conn, conn->flags);
	if (cs)
		chunk_appendf(&trace_buf, " cs=%p(0x%08x)", cs, cs->flags);

	if (src->verbosity == PT_VERB_MINIMAL)
		return;

	/* Display buffer info, if defined (level > USER & verbosity > SIMPLE) */
	if (src->level > TRACE_LEVEL_USER && buf) {
		int full = 0, max = 3000, chunk = 1024;

		/* Full info (level > STATE && verbosity > SIMPLE) */
		if (src->level > TRACE_LEVEL_STATE) {
			if (src->verbosity == PT_VERB_COMPLETE)
				full = 1;
			else if (src->verbosity == PT_VERB_ADVANCED) {
				full = 1;
				max = 256;
				chunk = 64;
			}
		}

		chunk_appendf(&trace_buf, " buf=%u@%p+%u/%u",
			      (unsigned int)b_data(buf), b_orig(buf),
			      (unsigned int)b_head_ofs(buf), (unsigned int)b_size(buf));

		if (b_data(buf) && full) {
			chunk_memcat(&trace_buf, "\n", 1);
			if (b_data(buf) < max)
				pt_trace_buf(buf, 0, b_data(buf));
			else {
				pt_trace_buf(buf, 0, chunk);
				chunk_memcat(&trace_buf, " ...\n", 6);
				pt_trace_buf(buf, b_data(buf) - chunk, chunk);
			}
		}
	}
}

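/* Releases the PT context and its orphaned endpoint; when the connection is
 * still attached to this mux, it is closed and released as well.
 */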
static void mux_pt_destroy(struct mux_pt_ctx *ctx)
{
	struct connection *conn = NULL;

	TRACE_POINT(PT_EV_CONN_END);

	if (ctx) {
		/* The connection must be attached to this mux to be released */
		if (ctx->conn && ctx->conn->ctx == ctx)
			conn = ctx->conn;

		TRACE_DEVEL("freeing pt context", PT_EV_CONN_END, conn);

		tasklet_free(ctx->wait_event.tasklet);

		if (conn && ctx->wait_event.events != 0)
			conn->xprt->unsubscribe(conn, conn->xprt_ctx, ctx->wait_event.events,
						&ctx->wait_event);
		BUG_ON(ctx->endp && !(ctx->endp->flags & CS_EP_ORPHAN));
		cs_endpoint_free(ctx->endp);
		pool_free(pool_head_pt_ctx, ctx);
	}

	if (conn) {
		conn->mux = NULL;
		conn->ctx = NULL;
		TRACE_DEVEL("freeing conn", PT_EV_CONN_END, conn);

		conn_stop_tracking(conn);
		conn_full_close(conn);
		if (conn->destroy_cb)
			conn->destroy_cb(conn);
		conn_free(conn);
	}
}

/* Callback, used when we get I/Os while in idle mode. This one is exported so
 * that "show fd" can resolve it.
 */
struct task *mux_pt_io_cb(struct task *t, void *tctx, unsigned int status)
{
	struct mux_pt_ctx *ctx = tctx;

	TRACE_ENTER(PT_EV_CONN_WAKE, ctx->conn, ctx->cs);
	if (ctx->cs) {
		/* There's a small race condition.
		 * mux_pt_io_cb() is only supposed to be called if we have no
		 * stream attached. However, maybe the tasklet got woken up,
		 * and this connection was then attached to a new stream.
		 * If this happened, just wake the tasklet up if anybody
		 * subscribed to receive events, and otherwise call the wake
		 * method, to make sure the event is noticed.
		 */
		if (ctx->conn->subs) {
			ctx->conn->subs->events = 0;
			tasklet_wakeup(ctx->conn->subs->tasklet);
			ctx->conn->subs = NULL;
		} else if (ctx->cs->data_cb->wake)
			ctx->cs->data_cb->wake(ctx->cs);
		TRACE_DEVEL("leaving waking up CS", PT_EV_CONN_WAKE, ctx->conn, ctx->cs);
		return t;
	}
	conn_ctrl_drain(ctx->conn);
	if (ctx->conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH)) {
		TRACE_DEVEL("leaving destroying pt context", PT_EV_CONN_WAKE, ctx->conn);
		mux_pt_destroy(ctx);
		t = NULL;
	}
	else {
		ctx->conn->xprt->subscribe(ctx->conn, ctx->conn->xprt_ctx, SUB_RETRY_RECV,
					   &ctx->wait_event);
		TRACE_DEVEL("leaving subscribing for reads", PT_EV_CONN_WAKE, ctx->conn);
	}

	return t;
}

/* Initialize the mux once it's attached. It is expected that conn->ctx
 * points to the existing conn_stream (for outgoing connections) or NULL (for
 * incoming ones, in which case one will be allocated and a new stream will be
 * instantiated). Returns < 0 on error.
 */
static int mux_pt_init(struct connection *conn, struct proxy *prx, struct session *sess,
                       struct buffer *input)
{
	struct conn_stream *cs = conn->ctx;
	struct mux_pt_ctx *ctx = pool_alloc(pool_head_pt_ctx);

	TRACE_ENTER(PT_EV_CONN_NEW);

	if (!ctx) {
		TRACE_ERROR("PT context allocation failure", PT_EV_CONN_NEW|PT_EV_CONN_END|PT_EV_CONN_ERR);
		goto fail;
	}

	ctx->wait_event.tasklet = tasklet_new();
	if (!ctx->wait_event.tasklet)
		goto fail_free_ctx;
	ctx->wait_event.tasklet->context = ctx;
	ctx->wait_event.tasklet->process = mux_pt_io_cb;
	ctx->wait_event.events = 0;
	ctx->conn = conn;

	if (!cs) {
		ctx->endp = cs_endpoint_new();
		if (!ctx->endp) {
			TRACE_ERROR("CS allocation failure", PT_EV_STRM_NEW|PT_EV_STRM_END|PT_EV_STRM_ERR, conn);
			goto fail_free_ctx;
		}
		ctx->endp->target = ctx;
		ctx->endp->ctx = conn;
		ctx->endp->flags |= (CS_EP_T_MUX|CS_EP_ORPHAN);

		cs = cs_new_from_mux(ctx->endp, sess, input);
		if (!cs) {
			TRACE_ERROR("CS allocation failure", PT_EV_STRM_NEW|PT_EV_STRM_END|PT_EV_STRM_ERR, conn);
			goto fail_free_endp;
		}
		TRACE_POINT(PT_EV_STRM_NEW, conn, cs);
	}
	else {
		cs_attach_mux(cs, ctx, conn);
		ctx->cs = cs;
		ctx->endp = cs->endp;
	}
	conn->ctx = ctx;
	ctx->cs = cs;
	cs->flags |= CS_FL_RCV_MORE;
	if (global.tune.options & GTUNE_USE_SPLICE)
		cs->endp->flags |= CS_EP_MAY_SPLICE;

	TRACE_LEAVE(PT_EV_CONN_NEW, conn, cs);
	return 0;

 fail_free_endp:
	cs_endpoint_free(ctx->endp);
 fail_free_ctx:
	if (ctx->wait_event.tasklet)
		tasklet_free(ctx->wait_event.tasklet);
	pool_free(pool_head_pt_ctx, ctx);
 fail:
	TRACE_DEVEL("leaving in error", PT_EV_CONN_NEW|PT_EV_CONN_END|PT_EV_CONN_ERR);
	return -1;
}

/* callback to be used by default for the pass-through mux. It calls the data
 * layer wake() callback if it is set, otherwise returns 0.
 */
static int mux_pt_wake(struct connection *conn)
{
	struct mux_pt_ctx *ctx = conn->ctx;
	struct conn_stream *cs = ctx->cs;
	int ret = 0;

	TRACE_ENTER(PT_EV_CONN_WAKE, ctx->conn, cs);
	if (cs) {
		ret = cs->data_cb->wake ? cs->data_cb->wake(cs) : 0;

		if (ret < 0) {
			TRACE_DEVEL("leaving waking up CS", PT_EV_CONN_WAKE, ctx->conn, cs);
			return ret;
		}
	} else {
		conn_ctrl_drain(conn);
		if (conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH)) {
			TRACE_DEVEL("leaving destroying PT context", PT_EV_CONN_WAKE, ctx->conn);
			mux_pt_destroy(ctx);
			return -1;
		}
	}

	/* If we had early data, and we're done with the handshake
	 * then we know the data are safe, and we can remove the flag.
	 */
	if ((conn->flags & (CO_FL_EARLY_DATA | CO_FL_EARLY_SSL_HS | CO_FL_WAIT_XPRT)) ==
	    CO_FL_EARLY_DATA)
		conn->flags &= ~CO_FL_EARLY_DATA;

	TRACE_LEAVE(PT_EV_CONN_WAKE, ctx->conn);
	return ret;
}

/*
 * Attach a new stream to a connection
 * (Used for outgoing connections)
 */
static int mux_pt_attach(struct connection *conn, struct conn_stream *cs, struct session *sess)
{
	struct mux_pt_ctx *ctx = conn->ctx;

	TRACE_ENTER(PT_EV_STRM_NEW, conn);
	if (ctx->wait_event.events)
		conn->xprt->unsubscribe(ctx->conn, conn->xprt_ctx, SUB_RETRY_RECV, &ctx->wait_event);
	cs_attach_mux(cs, ctx, conn);
	ctx->cs = cs;
	cs->flags |= CS_FL_RCV_MORE;

	TRACE_LEAVE(PT_EV_STRM_NEW, conn, cs);
	return 0;
}

/* Retrieves a valid conn_stream from this connection, or returns NULL. For
 * this mux, it's easy as we can only store a single conn_stream.
 */
static const struct conn_stream *mux_pt_get_first_cs(const struct connection *conn)
{
	struct mux_pt_ctx *ctx = conn->ctx;
	struct conn_stream *cs = ctx->cs;

	return cs;
}

/* Destroy the mux and the associated connection if still attached to this mux
 * and no longer used */
static void mux_pt_destroy_meth(void *ctx)
{
	struct mux_pt_ctx *pt = ctx;

	TRACE_POINT(PT_EV_CONN_END, pt->conn, pt->cs);
	if (!(pt->cs) || !(pt->conn) || pt->conn->ctx != pt) {
		/* don't free the endpoint if the connection was taken over by
		 * another mux
		 */
		if (pt->conn && pt->conn->ctx != pt)
			pt->endp = NULL;
		mux_pt_destroy(pt);
	}
}

/*
 * Detach the stream from the connection and possibly release the connection.
 */
static void mux_pt_detach(struct conn_stream *cs)
{
	struct connection *conn = __cs_conn(cs);
	struct mux_pt_ctx *ctx;

	TRACE_ENTER(PT_EV_STRM_END, conn, cs);

	ctx = conn->ctx;
	ctx->cs = NULL;

	/* Subscribe, to know if we got disconnected */
	if (!conn_is_back(conn) && conn->owner != NULL &&
	    !(conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH))) {
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &ctx->wait_event);
	} else {
		/* There's no session attached to that connection, destroy it */
		TRACE_DEVEL("killing dead connection", PT_EV_STRM_END, conn, cs);
		mux_pt_destroy(ctx);
	}

	TRACE_LEAVE(PT_EV_STRM_END);
}

/* returns the number of streams in use on a connection */
static int mux_pt_used_streams(struct connection *conn)
{
	struct mux_pt_ctx *ctx = conn->ctx;

	return ctx->cs ? 1 : 0;
}

/* returns the number of streams still available on a connection */
static int mux_pt_avail_streams(struct connection *conn)
{
	return 1 - mux_pt_used_streams(conn);
}

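/* Performs a shutdown for reads on the conn_stream: the shutdown is forwarded
 * to the transport layer (or the connection is drained when <mode> is
 * CS_SHR_DRAIN), and the connection is fully closed if writes were already
 * shut.
 */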
static void mux_pt_shutr(struct conn_stream *cs, enum cs_shr_mode mode)
{
	struct connection *conn = __cs_conn(cs);

	TRACE_ENTER(PT_EV_STRM_SHUT, conn, cs);

	if (cs->endp->flags & CS_EP_SHR)
		return;
	cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
	if (conn_xprt_ready(conn) && conn->xprt->shutr)
		conn->xprt->shutr(conn, conn->xprt_ctx,
				  (mode == CS_SHR_DRAIN));
	else if (mode == CS_SHR_DRAIN)
		conn_ctrl_drain(conn);
	if (cs->endp->flags & CS_EP_SHW)
		conn_full_close(conn);

	TRACE_LEAVE(PT_EV_STRM_SHUT, conn, cs);
}

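/* Performs a shutdown for writes on the conn_stream: the shutdown is forwarded
 * to the transport layer, then the socket is shut for writes, or the
 * connection is fully closed if reads were already shut.
 */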
static void mux_pt_shutw(struct conn_stream *cs, enum cs_shw_mode mode)
{
	struct connection *conn = __cs_conn(cs);

	TRACE_ENTER(PT_EV_STRM_SHUT, conn, cs);

	if (cs->endp->flags & CS_EP_SHW)
		return;
	if (conn_xprt_ready(conn) && conn->xprt->shutw)
		conn->xprt->shutw(conn, conn->xprt_ctx,
				  (mode == CS_SHW_NORMAL));
	if (!(cs->endp->flags & CS_EP_SHR))
		conn_sock_shutw(conn, (mode == CS_SHW_NORMAL));
	else
		conn_full_close(conn);

	TRACE_LEAVE(PT_EV_STRM_SHUT, conn, cs);
}

/*
 * Called from the upper layer, to get more data
 *
 * The caller is responsible for defragmenting <buf> if necessary. But <flags>
 * must be tested to know the calling context. If CO_RFL_BUF_FLUSH is set, it
 * means the caller wants to flush input data (from the mux buffer and the
 * channel buffer) to be able to use kernel splicing or any kind of mux-to-mux
 * xfer. If CO_RFL_KEEP_RECV is set, the mux must always subscribe for read
 * events before giving back. CO_RFL_BUF_WET is set if <buf> is congested with
 * data scheduled for leaving soon. CO_RFL_BUF_NOT_STUCK is set to instruct the
 * mux it may optimize the data copy to <buf> if necessary. Otherwise, it should
 * copy as much data as possible.
 */
static size_t mux_pt_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t count, int flags)
{
	struct connection *conn = __cs_conn(cs);
	size_t ret = 0;

	TRACE_ENTER(PT_EV_RX_DATA, conn, cs, buf, (size_t[]){count});

	if (!count) {
		cs->flags |= (CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
		goto end;
	}
	b_realign_if_empty(buf);
	ret = conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, count, flags);
	if (conn_xprt_read0_pending(conn)) {
		cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
		cs->flags |= CS_FL_EOS;
		TRACE_DEVEL("read0 on connection", PT_EV_RX_DATA, conn, cs);
	}
	if (conn->flags & CO_FL_ERROR) {
		cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
		cs->flags |= CS_FL_ERROR;
		TRACE_DEVEL("error on connection", PT_EV_RX_DATA|PT_EV_CONN_ERR, conn, cs);
	}
  end:
	TRACE_LEAVE(PT_EV_RX_DATA, conn, cs, buf, (size_t[]){ret});
	return ret;
}

/* Called from the upper layer, to send data */
static size_t mux_pt_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t count, int flags)
{
	struct connection *conn = __cs_conn(cs);
	size_t ret;

	TRACE_ENTER(PT_EV_TX_DATA, conn, cs, buf, (size_t[]){count});

	ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, buf, count, flags);

	if (ret > 0)
		b_del(buf, ret);

	if (conn->flags & CO_FL_ERROR) {
		cs->flags |= CS_FL_ERROR;
		TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, cs);
	}

	TRACE_LEAVE(PT_EV_TX_DATA, conn, cs, buf, (size_t[]){ret});
	return ret;
}

/* Called from the upper layer, to subscribe <es> to events <event_type>. The
 * event subscriber <es> is not allowed to change from a previous call as long
 * as at least one event is still subscribed. The <event_type> must only be a
 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
 */
static int mux_pt_subscribe(struct conn_stream *cs, int event_type, struct wait_event *es)
{
	struct connection *conn = __cs_conn(cs);

	TRACE_POINT(PT_EV_RX_DATA|PT_EV_TX_DATA, conn, cs, 0, (size_t[]){event_type});
	return conn->xprt->subscribe(conn, conn->xprt_ctx, event_type, es);
}

/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
 * The <es> pointer is not allowed to differ from the one passed to the
 * subscribe() call. It always returns zero.
 */
static int mux_pt_unsubscribe(struct conn_stream *cs, int event_type, struct wait_event *es)
{
	struct connection *conn = __cs_conn(cs);

	TRACE_POINT(PT_EV_RX_DATA|PT_EV_TX_DATA, conn, cs, 0, (size_t[]){event_type});
	return conn->xprt->unsubscribe(conn, conn->xprt_ctx, event_type, es);
}

#if defined(USE_LINUX_SPLICE)
/* Send and get, using splicing */
static int mux_pt_rcv_pipe(struct conn_stream *cs, struct pipe *pipe, unsigned int count)
{
	struct connection *conn = __cs_conn(cs);
	int ret;

	TRACE_ENTER(PT_EV_RX_DATA, conn, cs, 0, (size_t[]){count});

	ret = conn->xprt->rcv_pipe(conn, conn->xprt_ctx, pipe, count);
	if (conn_xprt_read0_pending(conn)) {
		cs->flags |= CS_FL_EOS;
		TRACE_DEVEL("read0 on connection", PT_EV_RX_DATA, conn, cs);
	}
	if (conn->flags & CO_FL_ERROR) {
		cs->flags |= CS_FL_ERROR;
		TRACE_DEVEL("error on connection", PT_EV_RX_DATA|PT_EV_CONN_ERR, conn, cs);
	}

	TRACE_LEAVE(PT_EV_RX_DATA, conn, cs, 0, (size_t[]){ret});
	return (ret);
}

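/* Sends the contents of pipe <pipe> over the connection using kernel splicing
 * and reports an error on the conn_stream if the connection reports one.
 */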
static int mux_pt_snd_pipe(struct conn_stream *cs, struct pipe *pipe)
{
	struct connection *conn = __cs_conn(cs);
	int ret;

	TRACE_ENTER(PT_EV_TX_DATA, conn, cs, 0, (size_t[]){pipe->data});

	ret = conn->xprt->snd_pipe(conn, conn->xprt_ctx, pipe);

	if (conn->flags & CO_FL_ERROR) {
		cs->flags |= CS_FL_ERROR;
		TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, cs);
	}

	TRACE_LEAVE(PT_EV_TX_DATA, conn, cs, 0, (size_t[]){ret});
	return ret;
}
#endif

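/* Generic connection-level control callback: MUX_STATUS reports whether the
 * mux is ready (transport handshake done), MUX_EXIT_STATUS is always unknown
 * for this mux, and other requests are not supported.
 */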
static int mux_pt_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *output)
{
	int ret = 0;
	switch (mux_ctl) {
	case MUX_STATUS:
		if (!(conn->flags & CO_FL_WAIT_XPRT))
			ret |= MUX_STATUS_READY;
		return ret;
	case MUX_EXIT_STATUS:
		return MUX_ES_UNKNOWN;
	default:
		return -1;
	}
}

/* The mux operations */
const struct mux_ops mux_tcp_ops = {
	.init = mux_pt_init,
	.wake = mux_pt_wake,
	.rcv_buf = mux_pt_rcv_buf,
	.snd_buf = mux_pt_snd_buf,
	.subscribe = mux_pt_subscribe,
	.unsubscribe = mux_pt_unsubscribe,
#if defined(USE_LINUX_SPLICE)
	.rcv_pipe = mux_pt_rcv_pipe,
	.snd_pipe = mux_pt_snd_pipe,
#endif
	.attach = mux_pt_attach,
	.get_first_cs = mux_pt_get_first_cs,
	.detach = mux_pt_detach,
	.avail_streams = mux_pt_avail_streams,
	.used_streams = mux_pt_used_streams,
	.destroy = mux_pt_destroy_meth,
	.ctl = mux_pt_ctl,
	.shutr = mux_pt_shutr,
	.shutw = mux_pt_shutw,
	.flags = MX_FL_NONE,
	.name = "PASS",
};


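/* Same operations, explicitly selected with proto "none"; contrary to the
 * default TCP mux above, this one cannot be upgraded to another mux
 * (MX_FL_NO_UPG).
 */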
const struct mux_ops mux_pt_ops = {
	.init = mux_pt_init,
	.wake = mux_pt_wake,
	.rcv_buf = mux_pt_rcv_buf,
	.snd_buf = mux_pt_snd_buf,
	.subscribe = mux_pt_subscribe,
	.unsubscribe = mux_pt_unsubscribe,
#if defined(USE_LINUX_SPLICE)
	.rcv_pipe = mux_pt_rcv_pipe,
	.snd_pipe = mux_pt_snd_pipe,
#endif
	.attach = mux_pt_attach,
	.get_first_cs = mux_pt_get_first_cs,
	.detach = mux_pt_detach,
	.avail_streams = mux_pt_avail_streams,
	.used_streams = mux_pt_used_streams,
	.destroy = mux_pt_destroy_meth,
	.ctl = mux_pt_ctl,
	.shutr = mux_pt_shutr,
	.shutw = mux_pt_shutw,
	.flags = MX_FL_NONE|MX_FL_NO_UPG,
	.name = "PASS",
};

/* PROT selection : default mux has empty name */
static struct mux_proto_list mux_proto_none =
	{ .token = IST("none"), .mode = PROTO_MODE_TCP, .side = PROTO_SIDE_BOTH, .mux = &mux_pt_ops };
static struct mux_proto_list mux_proto_tcp =
	{ .token = IST(""), .mode = PROTO_MODE_TCP, .side = PROTO_SIDE_BOTH, .mux = &mux_tcp_ops };

INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_none);
INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_tcp);