/*
 * Stream management functions.
 *
 * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <stdlib.h>
#include <unistd.h>

#include <import/ebistree.h>

#include <haproxy/acl.h>
#include <haproxy/action.h>
#include <haproxy/activity.h>
#include <haproxy/api.h>
#include <haproxy/applet.h>
#include <haproxy/arg.h>
#include <haproxy/backend.h>
#include <haproxy/capture.h>
#include <haproxy/cfgparse.h>
#include <haproxy/channel.h>
#include <haproxy/check.h>
#include <haproxy/cli.h>
#include <haproxy/connection.h>
#include <haproxy/dict.h>
#include <haproxy/dynbuf.h>
#include <haproxy/fd.h>
#include <haproxy/filters.h>
#include <haproxy/freq_ctr.h>
#include <haproxy/frontend.h>
#include <haproxy/global.h>
#include <haproxy/hlua.h>
#include <haproxy/http_ana.h>
#include <haproxy/http_rules.h>
#include <haproxy/htx.h>
#include <haproxy/istbuf.h>
#include <haproxy/log.h>
#include <haproxy/pipe.h>
#include <haproxy/pool.h>
#include <haproxy/proxy.h>
#include <haproxy/queue.h>
#include <haproxy/sc_strm.h>
#include <haproxy/server.h>
#include <haproxy/resolvers.h>
#include <haproxy/sample.h>
#include <haproxy/session.h>
#include <haproxy/stats-t.h>
#include <haproxy/stconn.h>
#include <haproxy/stick_table.h>
#include <haproxy/stream.h>
#include <haproxy/task.h>
#include <haproxy/tcp_rules.h>
#include <haproxy/thread.h>
#include <haproxy/tools.h>
#include <haproxy/trace.h>
#include <haproxy/vars.h>

DECLARE_POOL(pool_head_stream, "stream", sizeof(struct stream));
DECLARE_POOL(pool_head_uniqueid, "uniqueid", UNIQUEID_LEN);

/* incremented by each "show sess" to fix a delimiter between streams */
unsigned stream_epoch = 0;

/* List of all use-service keywords. */
static struct list service_keywords = LIST_HEAD_INIT(service_keywords);


/* trace source and events */
static void strm_trace(enum trace_level level, uint64_t mask,
                       const struct trace_source *src,
                       const struct ist where, const struct ist func,
                       const void *a1, const void *a2, const void *a3, const void *a4);

/* The event representation is split like this :
 *   strm  - stream
 *   sc    - stream connector
 *   http  - http analysis
 *   tcp   - tcp analysis
 *
 * STRM_EV_* macros are defined in <proto/stream.h>
 */
static const struct trace_event strm_trace_events[] = {
	{ .mask = STRM_EV_STRM_NEW,     .name = "strm_new",     .desc = "new stream" },
	{ .mask = STRM_EV_STRM_FREE,    .name = "strm_free",    .desc = "release stream" },
	{ .mask = STRM_EV_STRM_ERR,     .name = "strm_err",     .desc = "error during stream processing" },
	{ .mask = STRM_EV_STRM_ANA,     .name = "strm_ana",     .desc = "stream analyzers" },
	{ .mask = STRM_EV_STRM_PROC,    .name = "strm_proc",    .desc = "stream processing" },

	{ .mask = STRM_EV_CS_ST,        .name = "sc_state",     .desc = "processing connector states" },

	{ .mask = STRM_EV_HTTP_ANA,     .name = "http_ana",     .desc = "HTTP analyzers" },
	{ .mask = STRM_EV_HTTP_ERR,     .name = "http_err",     .desc = "error during HTTP analysis" },

	{ .mask = STRM_EV_TCP_ANA,      .name = "tcp_ana",      .desc = "TCP analyzers" },
	{ .mask = STRM_EV_TCP_ERR,      .name = "tcp_err",      .desc = "error during TCP analysis" },

	{ .mask = STRM_EV_FLT_ANA,      .name = "flt_ana",      .desc = "Filter analyzers" },
	{ .mask = STRM_EV_FLT_ERR,      .name = "flt_err",      .desc = "error during filter analysis" },
	{}
};

static const struct name_desc strm_trace_lockon_args[4] = {
	/* arg1 */ { /* already used by the stream */ },
	/* arg2 */ { },
	/* arg3 */ { },
	/* arg4 */ { }
};

static const struct name_desc strm_trace_decoding[] = {
#define STRM_VERB_CLEAN    1
	{ .name="clean",    .desc="only user-friendly stuff, generally suitable for level \"user\"" },
#define STRM_VERB_MINIMAL  2
	{ .name="minimal",  .desc="report info on streams and connectors" },
#define STRM_VERB_SIMPLE   3
	{ .name="simple",   .desc="add info on request and response channels" },
#define STRM_VERB_ADVANCED 4
	{ .name="advanced", .desc="add info on channel's buffer for data and developer levels only" },
#define STRM_VERB_COMPLETE 5
	{ .name="complete", .desc="add info on channel's buffer" },
	{ /* end */ }
};

struct trace_source trace_strm = {
	.name = IST("stream"),
	.desc = "Applicative stream",
	.arg_def = TRC_ARG1_STRM,  // TRACE()'s first argument is always a stream
	.default_cb = strm_trace,
	.known_events = strm_trace_events,
	.lockon_args = strm_trace_lockon_args,
	.decoding = strm_trace_decoding,
	.report_events = ~0,  // report everything by default
};

#define TRACE_SOURCE &trace_strm
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);

/* the stream traces always expect that arg1, if non-null, is a stream (from
 * which we can derive everything), that arg2, if non-null, is an http
 * transaction, and that arg3, if non-null, is an http message.
 */
static void strm_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
                       const struct ist where, const struct ist func,
                       const void *a1, const void *a2, const void *a3, const void *a4)
{
	const struct stream *s = a1;
	const struct http_txn *txn = a2;
	const struct http_msg *msg = a3;
	struct task *task;
	const struct channel *req, *res;
	struct htx *htx;

	if (!s || src->verbosity < STRM_VERB_CLEAN)
		return;

	task = s->task;
	req  = &s->req;
	res  = &s->res;
	htx  = (msg ? htxbuf(&msg->chn->buf) : NULL);

	/* General info about the stream (htx/tcp, id...) */
	chunk_appendf(&trace_buf, " : [%u,%s]",
		      s->uniq_id, ((s->flags & SF_HTX) ? "HTX" : "TCP"));
	if (isttest(s->unique_id)) {
		chunk_appendf(&trace_buf, " id=");
		b_putist(&trace_buf, s->unique_id);
	}

	/* Front and back stream connector state */
	chunk_appendf(&trace_buf, " SC=(%s,%s)",
		      sc_state_str(s->scf->state), sc_state_str(s->scb->state));

	/* If txn is defined, HTTP req/rep states */
	if (txn)
		chunk_appendf(&trace_buf, " HTTP=(%s,%s)",
			      h1_msg_state_str(txn->req.msg_state), h1_msg_state_str(txn->rsp.msg_state));
	if (msg)
		chunk_appendf(&trace_buf, " %s", ((msg->chn->flags & CF_ISRESP) ? "RESPONSE" : "REQUEST"));

	if (src->verbosity == STRM_VERB_CLEAN)
		return;

	/* If msg defined, display status-line if possible (verbosity > MINIMAL) */
	if (src->verbosity > STRM_VERB_MINIMAL && htx && htx_nbblks(htx)) {
		const struct htx_blk *blk = __htx_get_head_blk(htx);
		const struct htx_sl *sl = htx_get_blk_ptr(htx, blk);
		enum htx_blk_type type = htx_get_blk_type(blk);

		if (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL)
			chunk_appendf(&trace_buf, " - \"%.*s %.*s %.*s\"",
				      HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
				      HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
				      HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
	}

	chunk_appendf(&trace_buf, " - t=%p t.exp=%d s=(%p,0x%08x,0x%x)",
		      task, tick_isset(task->expire) ? TICKS_TO_MS(task->expire - now_ms) : TICK_ETERNITY, s, s->flags, s->conn_err_type);

	/* If txn defined info about HTTP msgs, otherwise info about SI. */
	if (txn) {
		chunk_appendf(&trace_buf, " txn.flags=0x%08x, http.flags=(0x%08x,0x%08x) status=%d",
			      txn->flags, txn->req.flags, txn->rsp.flags, txn->status);
	}
	else {
		chunk_appendf(&trace_buf, " scf=(%p,%d,0x%08x,0x%x) scb=(%p,%d,0x%08x,0x%x) scf.exp(r,w)=(%d,%d) scb.exp(r,w)=(%d,%d) retries=%d",
			      s->scf, s->scf->state, s->scf->flags, s->scf->sedesc->flags,
			      s->scb, s->scb->state, s->scb->flags, s->scb->sedesc->flags,
			      tick_isset(sc_ep_rcv_ex(s->scf)) ? TICKS_TO_MS(sc_ep_rcv_ex(s->scf) - now_ms) : TICK_ETERNITY,
			      tick_isset(sc_ep_snd_ex(s->scf)) ? TICKS_TO_MS(sc_ep_snd_ex(s->scf) - now_ms) : TICK_ETERNITY,
			      tick_isset(sc_ep_rcv_ex(s->scb)) ? TICKS_TO_MS(sc_ep_rcv_ex(s->scb) - now_ms) : TICK_ETERNITY,
			      tick_isset(sc_ep_snd_ex(s->scb)) ? TICKS_TO_MS(sc_ep_snd_ex(s->scb) - now_ms) : TICK_ETERNITY,
			      s->conn_retries);
	}

	if (src->verbosity == STRM_VERB_MINIMAL)
		return;


	/* If txn defined, don't display all channel info */
	if (src->verbosity == STRM_VERB_SIMPLE || txn) {
		chunk_appendf(&trace_buf, " req=(%p .fl=0x%08x .exp=%d)",
			      req, req->flags, tick_isset(req->analyse_exp) ? TICKS_TO_MS(req->analyse_exp - now_ms) : TICK_ETERNITY);
		chunk_appendf(&trace_buf, " res=(%p .fl=0x%08x .exp=%d)",
			      res, res->flags, tick_isset(res->analyse_exp) ? TICKS_TO_MS(res->analyse_exp - now_ms) : TICK_ETERNITY);
	}
	else {
		chunk_appendf(&trace_buf, " req=(%p .fl=0x%08x .ana=0x%08x .exp=%u .o=%lu .tot=%llu .to_fwd=%u)",
			      req, req->flags, req->analysers, req->analyse_exp,
			      (long)req->output, req->total, req->to_forward);
		chunk_appendf(&trace_buf, " res=(%p .fl=0x%08x .ana=0x%08x .exp=%u .o=%lu .tot=%llu .to_fwd=%u)",
			      res, res->flags, res->analysers, res->analyse_exp,
			      (long)res->output, res->total, res->to_forward);
	}

	if (src->verbosity == STRM_VERB_SIMPLE ||
	    (src->verbosity == STRM_VERB_ADVANCED && src->level < TRACE_LEVEL_DATA))
		return;

	/* channels' buffer info */
	if (s->flags & SF_HTX) {
		struct htx *rqhtx = htxbuf(&req->buf);
		struct htx *rphtx = htxbuf(&res->buf);

		chunk_appendf(&trace_buf, " htx=(%u/%u#%u, %u/%u#%u)",
			      rqhtx->data, rqhtx->size, htx_nbblks(rqhtx),
			      rphtx->data, rphtx->size, htx_nbblks(rphtx));
	}
	else {
		chunk_appendf(&trace_buf, " buf=(%u@%p+%u/%u, %u@%p+%u/%u)",
			      (unsigned int)b_data(&req->buf), b_orig(&req->buf),
			      (unsigned int)b_head_ofs(&req->buf), (unsigned int)b_size(&req->buf),
			      (unsigned int)b_data(&res->buf), b_orig(&res->buf),
			      (unsigned int)b_head_ofs(&res->buf), (unsigned int)b_size(&res->buf));
	}

	/* If msg defined, display htx info if defined (level > USER) */
	if (src->level > TRACE_LEVEL_USER && htx && htx_nbblks(htx)) {
		int full = 0;

		/* Full htx info (level > STATE && verbosity > SIMPLE) */
		if (src->level > TRACE_LEVEL_STATE) {
			if (src->verbosity == STRM_VERB_COMPLETE)
				full = 1;
		}

		chunk_memcat(&trace_buf, "\n\t", 2);
		htx_dump(&trace_buf, htx, full);
	}
}

/* Upgrade an existing stream for stream connector <sc>. Return < 0 on error. This
 * is only valid right after a TCP to H1 upgrade. The stream should be
 * "reactivated" by removing the SF_IGNORE flag, and the right mode must be set. On
 * success, the <input> buffer is transferred to the stream and thus points to
 * BUF_NULL. On error, it is unchanged and it is the caller's responsibility to
 * release it (this never happens for now).
 */
int stream_upgrade_from_sc(struct stconn *sc, struct buffer *input)
{
	struct stream *s = __sc_strm(sc);
	const struct mux_ops *mux = sc_mux_ops(sc);

	if (mux) {
		if (mux->flags & MX_FL_HTX)
			s->flags |= SF_HTX;
	}

	if (!b_is_null(input)) {
		/* Xfer the input buffer to the request channel. <input> will
		 * then point to BUF_NULL. From this point, it is the stream's
		 * responsibility to release it.
		 */
		s->req.buf = *input;
		*input = BUF_NULL;
		s->req.total = (IS_HTX_STRM(s) ? htxbuf(&s->req.buf)->data : b_data(&s->req.buf));
		sc_ep_report_read_activity(s->scf);
	}

	s->req.flags |= CF_READ_EVENT; /* Always report a read event */
	s->flags &= ~SF_IGNORE;

	task_wakeup(s->task, TASK_WOKEN_INIT);
	return 0;
}

/* Callback used to wake up a stream when an input buffer is available. The
 * stream <s>'s stream connectors are checked for a failed buffer allocation
 * as indicated by the presence of the SC_FL_NEED_BUFF flag and the lack of a
 * buffer, and an input buffer is assigned there (at most one). The function
 * returns 1 and wakes the stream up if a buffer was taken, otherwise zero.
 * It's designed to be called from __offer_buffer().
 */
int stream_buf_available(void *arg)
{
	struct stream *s = arg;

	if (!s->req.buf.size && !s->req.pipe && s->scf->flags & SC_FL_NEED_BUFF &&
	    b_alloc(&s->req.buf))
		sc_have_buff(s->scf);
	else if (!s->res.buf.size && !s->res.pipe && s->scb->flags & SC_FL_NEED_BUFF &&
		 b_alloc(&s->res.buf))
		sc_have_buff(s->scb);
	else
		return 0;

	task_wakeup(s->task, TASK_WOKEN_RES);
	return 1;

}

/* This function is called from the session handler which detects the end of
 * handshake, in order to complete initialization of a valid stream. It must be
 * called with a completely initialized session. It returns the pointer to
 * the newly created stream, or NULL in case of fatal error. The client-facing
 * end point is assigned to <origin>, which must be valid. The stream's task
 * is configured with a nice value inherited from the listener's nice if any.
 * The task's context is set to the new stream, and its function is set to
 * process_stream(). Target and analysers are null. <input> is used as input
 * buffer for the request channel and may contain data. On success, it is
 * transferred to the stream and <input> is set to BUF_NULL. On error, the <input>
 * buffer is unchanged and it is the caller's responsibility to release it.
 */
struct stream *stream_new(struct session *sess, struct stconn *sc, struct buffer *input)
{
	struct stream *s;
	struct task *t;

	DBG_TRACE_ENTER(STRM_EV_STRM_NEW);
	if (unlikely((s = pool_alloc(pool_head_stream)) == NULL))
		goto out_fail_alloc;

	/* minimum stream initialization required for an embryonic stream is
	 * fairly low. We need very little to execute L4 ACLs, then we need a
	 * task to make the client-side connection live on its own.
	 *  - flags
	 *  - stick-entry tracking
	 */
	s->flags = 0;
	s->logs.logwait = sess->fe->to_log;
	s->logs.level = 0;
	s->logs.request_ts = 0;
	s->logs.t_queue = -1;
	s->logs.t_connect = -1;
	s->logs.t_data = -1;
	s->logs.t_close = 0;
	s->logs.bytes_in = s->logs.bytes_out = 0;
	s->logs.prx_queue_pos = 0;  /* we get the number of pending conns before us */
	s->logs.srv_queue_pos = 0;  /* we will get this number soon */
	s->obj_type = OBJ_TYPE_STREAM;

	s->logs.accept_date = sess->accept_date;
	s->logs.accept_ts = sess->accept_ts;
	s->logs.t_handshake = sess->t_handshake;
	s->logs.t_idle = sess->t_idle;

	/* default logging function */
	s->do_log = strm_log;

	/* default error reporting function, may be changed by analysers */
	s->srv_error = default_srv_error;

	/* Initialise the current rule list pointer to NULL. We are sure that
	 * any rulelist match the NULL pointer.
	 */
	s->current_rule_list = NULL;
	s->current_rule = NULL;
	s->rules_exp = TICK_ETERNITY;
	s->last_rule_file = NULL;
	s->last_rule_line = 0;

	s->stkctr = NULL;
	if (pool_head_stk_ctr) {
		s->stkctr = pool_alloc(pool_head_stk_ctr);
		if (!s->stkctr)
			goto out_fail_alloc;

		/* Copy SC counters for the stream. We don't touch refcounts because
		 * any reference we have is inherited from the session. Since the stream
		 * doesn't exist without the session, the session's existence guarantees
		 * we don't lose the entry. During the store operation, the stream won't
		 * touch these ones.
		 */
		memcpy(s->stkctr, sess->stkctr, sizeof(s->stkctr[0]) * global.tune.nb_stk_ctr);
	}

	s->sess = sess;

	s->stream_epoch = _HA_ATOMIC_LOAD(&stream_epoch);
	s->uniq_id = _HA_ATOMIC_FETCH_ADD(&global.req_count, 1);

	/* OK, we're keeping the stream, so let's properly initialize the stream */
	LIST_INIT(&s->back_refs);

	LIST_INIT(&s->buffer_wait.list);
	s->buffer_wait.target = s;
	s->buffer_wait.wakeup_cb = stream_buf_available;

	s->lat_time = s->cpu_time = 0;
	s->call_rate.curr_tick = s->call_rate.curr_ctr = s->call_rate.prev_ctr = 0;
	s->pcli_next_pid = 0;
	s->pcli_flags = 0;
	s->unique_id = IST_NULL;

	if ((t = task_new_here()) == NULL)
		goto out_fail_alloc;

	s->task = t;
	s->pending_events = 0;
	s->conn_retries = 0;
	s->conn_exp = TICK_ETERNITY;
	s->conn_err_type = STRM_ET_NONE;
	s->prev_conn_state = SC_ST_INI;
	t->process = process_stream;
	t->context = s;
	t->expire = TICK_ETERNITY;
	if (sess->listener)
		t->nice = sess->listener->bind_conf->nice;

	/* Note: initially, the stream's backend points to the frontend.
	 * This changes later when switching rules are executed or
	 * when the default backend is assigned.
	 */
	s->be  = sess->fe;
	s->req_cap = NULL;
	s->res_cap = NULL;

	/* Initialize all the variables contexts even if not used.
	 * This permits pruning these contexts without errors.
	 *
	 * We need to make sure that those lists are not re-initialized
	 * by stream-dependent underlying code because we could lose
	 * track of already defined variables, leading to data inconsistency
	 * and memory leaks...
	 *
	 * For reference: we had a very old bug caused by vars_txn and
	 * vars_reqres being accidentally re-initialized in http_create_txn()
	 * (https://github.com/haproxy/haproxy/issues/1935)
	 */
	vars_init_head(&s->vars_txn,    SCOPE_TXN);
	vars_init_head(&s->vars_reqres, SCOPE_REQ);

	/* Set SF_HTX flag for HTTP frontends. */
	if (sess->fe->mode == PR_MODE_HTTP)
		s->flags |= SF_HTX;

	s->scf = sc;
	if (sc_attach_strm(s->scf, s) < 0)
		goto out_fail_attach_scf;

	s->scb = sc_new_from_strm(s, SC_FL_ISBACK);
	if (!s->scb)
		goto out_fail_alloc_scb;

	sc_set_state(s->scf, SC_ST_EST);

	if (likely(sess->fe->options2 & PR_O2_INDEPSTR))
		s->scf->flags |= SC_FL_INDEP_STR;

	if (likely(sess->fe->options2 & PR_O2_INDEPSTR))
		s->scb->flags |= SC_FL_INDEP_STR;

	if (sc_ep_test(sc, SE_FL_WEBSOCKET))
		s->flags |= SF_WEBSOCKET;
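	/* when the front connector is backed by a connection, also inherit the
	 * HTX capability advertised by its mux.
	 */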
	if (sc_conn(sc)) {
		const struct mux_ops *mux = sc_mux_ops(sc);

		if (mux && mux->flags & MX_FL_HTX)
			s->flags |= SF_HTX;
	}

	stream_init_srv_conn(s);
	s->target = sess->fe->default_target;

	s->pend_pos = NULL;
	s->priority_class = 0;
	s->priority_offset = 0;

	/* init store persistence */
	s->store_count = 0;

	channel_init(&s->req);
	s->req.flags |= CF_READ_EVENT; /* the producer is already connected */
	s->req.analysers = sess->listener ? sess->listener->bind_conf->analysers : sess->fe->fe_req_ana;

	if (IS_HTX_STRM(s)) {
		/* Be sure to have HTTP analysers because in case of
		 * "destructive" stream upgrade, they may be missing (e.g
		 * TCP>H2)
		 */
		s->req.analysers |= AN_REQ_WAIT_HTTP|AN_REQ_HTTP_PROCESS_FE;
	}

	if (!sess->fe->fe_req_ana) {
		channel_auto_connect(&s->req);  /* don't wait to establish connection */
		channel_auto_close(&s->req);    /* let the producer forward close requests */
	}

	s->scf->ioto = sess->fe->timeout.client;
	s->req.analyse_exp = TICK_ETERNITY;

	channel_init(&s->res);
	s->res.flags |= CF_ISRESP;
	s->res.analysers = 0;

	if (sess->fe->options2 & PR_O2_NODELAY) {
		s->scf->flags |= SC_FL_SND_NEVERWAIT;
		s->scb->flags |= SC_FL_SND_NEVERWAIT;
	}

	s->scb->ioto = TICK_ETERNITY;
	s->res.analyse_exp = TICK_ETERNITY;

	s->txn = NULL;
	s->hlua[0] = s->hlua[1] = NULL;

	s->resolv_ctx.requester = NULL;
	s->resolv_ctx.hostname_dn = NULL;
	s->resolv_ctx.hostname_dn_len = 0;
	s->resolv_ctx.parent = NULL;

	s->tunnel_timeout = TICK_ETERNITY;

	LIST_APPEND(&th_ctx->streams, &s->list);

	if (flt_stream_init(s) < 0 || flt_stream_start(s) < 0)
		goto out_fail_accept;

	/* just in case the caller would have pre-disabled it */
	se_will_consume(s->scf->sedesc);

	if (sess->fe->accept && sess->fe->accept(s) < 0)
		goto out_fail_accept;

	if (!b_is_null(input)) {
		/* Xfer the input buffer to the request channel. <input> will
		 * then point to BUF_NULL. From this point, it is the stream's
		 * responsibility to release it.
		 */
		s->req.buf = *input;
		*input = BUF_NULL;
		s->req.total = (IS_HTX_STRM(s) ? htxbuf(&s->req.buf)->data : b_data(&s->req.buf));
		sc_ep_report_read_activity(s->scf);
	}

	/* it is important not to call the wakeup function directly but to
	 * pass through task_wakeup(), because this one knows how to apply
	 * priorities to tasks. With multiple threads we must be sure that
	 * the stream is fully initialized before calling task_wakeup(), so
	 * the caller must handle the task_wakeup.
	 */
	DBG_TRACE_LEAVE(STRM_EV_STRM_NEW, s);
	task_wakeup(s->task, TASK_WOKEN_INIT);
	return s;

	/* Error unrolling */
 out_fail_accept:
	flt_stream_release(s, 0);
	LIST_DELETE(&s->list);
	sc_free(s->scb);
 out_fail_alloc_scb:
 out_fail_attach_scf:
	task_destroy(t);
 out_fail_alloc:
	if (s)
		pool_free(pool_head_stk_ctr, s->stkctr);
	pool_free(pool_head_stream, s);
	DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_NEW|STRM_EV_STRM_ERR);
	return NULL;
}

/*
 * Frees the context associated with a stream. It must have been removed first.
 */
void stream_free(struct stream *s)
{
	struct session *sess = strm_sess(s);
	struct proxy *fe = sess->fe;
	struct bref *bref, *back;
	int i;

	DBG_TRACE_POINT(STRM_EV_STRM_FREE, s);

	/* detach the stream from its own task before even releasing it so
	 * that walking over a task list never exhibits a dying stream.
	 */
	s->task->context = NULL;
	__ha_barrier_store();

	pendconn_free(s);

	if (objt_server(s->target)) { /* there may be requests left pending in queue */
		if (s->flags & SF_CURR_SESS) {
			s->flags &= ~SF_CURR_SESS;
			_HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
		}
		if (may_dequeue_tasks(__objt_server(s->target), s->be))
			process_srv_queue(__objt_server(s->target));
	}

	if (unlikely(s->srv_conn)) {
		/* the stream still has a reserved slot on a server, but
		 * it should normally be only the same as the one above,
		 * so this should not happen in fact.
		 */
		sess_change_server(s, NULL);
	}

	if (s->req.pipe)
		put_pipe(s->req.pipe);

	if (s->res.pipe)
		put_pipe(s->res.pipe);

	/* We may still be present in the buffer wait queue */
	if (LIST_INLIST(&s->buffer_wait.list))
		LIST_DEL_INIT(&s->buffer_wait.list);

	if (s->req.buf.size || s->res.buf.size) {
		int count = !!s->req.buf.size + !!s->res.buf.size;

		b_free(&s->req.buf);
		b_free(&s->res.buf);
		offer_buffers(NULL, count);
	}

	pool_free(pool_head_uniqueid, s->unique_id.ptr);
	s->unique_id = IST_NULL;

	flt_stream_stop(s);
	flt_stream_release(s, 0);

	hlua_ctx_destroy(s->hlua[0]);
	hlua_ctx_destroy(s->hlua[1]);
	s->hlua[0] = s->hlua[1] = NULL;

	if (s->txn)
		http_destroy_txn(s);

	/* ensure the client-side transport layer is destroyed */
	/* Be sure it is useless !! */
	/* if (cli_cs) */
	/* 	cs_close(cli_cs); */

	for (i = 0; i < s->store_count; i++) {
		if (!s->store[i].ts)
			continue;
		stksess_free(s->store[i].table, s->store[i].ts);
		s->store[i].ts = NULL;
	}

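	/* release any DNS resolution still attached to this stream */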
	if (s->resolv_ctx.requester) {
		__decl_thread(struct resolvers *resolvers = s->resolv_ctx.parent->arg.resolv.resolvers);

		HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
		ha_free(&s->resolv_ctx.hostname_dn);
		s->resolv_ctx.hostname_dn_len = 0;
		resolv_unlink_resolution(s->resolv_ctx.requester);
		HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);

		pool_free(resolv_requester_pool, s->resolv_ctx.requester);
		s->resolv_ctx.requester = NULL;
	}

	if (fe) {
		if (s->req_cap) {
			struct cap_hdr *h;
			for (h = fe->req_cap; h; h = h->next)
				pool_free(h->pool, s->req_cap[h->index]);
			pool_free(fe->req_cap_pool, s->req_cap);
		}

		if (s->res_cap) {
			struct cap_hdr *h;
			for (h = fe->rsp_cap; h; h = h->next)
				pool_free(h->pool, s->res_cap[h->index]);
			pool_free(fe->rsp_cap_pool, s->res_cap);
		}
	}

	/* Cleanup all variable contexts. */
	if (!LIST_ISEMPTY(&s->vars_txn.head))
		vars_prune(&s->vars_txn, s->sess, s);
	if (!LIST_ISEMPTY(&s->vars_reqres.head))
		vars_prune(&s->vars_reqres, s->sess, s);

	stream_store_counters(s);
	pool_free(pool_head_stk_ctr, s->stkctr);

	list_for_each_entry_safe(bref, back, &s->back_refs, users) {
		/* we have to unlink all watchers. We must not relink them if
		 * this stream was the last one in the list. This is safe to do
		 * here because we're touching our thread's list so we know
		 * that other streams are not active, and the watchers will
		 * only touch their node under thread isolation.
		 */
		LIST_DEL_INIT(&bref->users);
		if (s->list.n != &th_ctx->streams)
			LIST_APPEND(&LIST_ELEM(s->list.n, struct stream *, list)->back_refs, &bref->users);
		bref->ref = s->list.n;
		__ha_barrier_store();
	}
	LIST_DELETE(&s->list);

	sc_destroy(s->scb);
	sc_destroy(s->scf);

	pool_free(pool_head_stream, s);

	/* We may want to free the maximum amount of pools if the proxy is stopping */
	if (fe && unlikely(fe->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
		pool_flush(pool_head_buffer);
		pool_flush(pool_head_http_txn);
		pool_flush(pool_head_requri);
		pool_flush(pool_head_capture);
		pool_flush(pool_head_stream);
		pool_flush(pool_head_session);
		pool_flush(pool_head_connection);
		pool_flush(pool_head_pendconn);
		pool_flush(fe->req_cap_pool);
		pool_flush(fe->rsp_cap_pool);
	}
}


/* Allocates a work buffer for stream <s>. It is meant to be called inside
 * process_stream(). It will only allocate the side needed for the function
 * to work fine, which is the response buffer so that an error message may be
 * built and returned. Response buffers may be allocated from the reserve, this
 * is critical to ensure that a response may always flow and will never block a
 * server from releasing a connection. Returns 0 in case of failure, non-zero
 * otherwise.
 */
static int stream_alloc_work_buffer(struct stream *s)
{
	if (b_alloc(&s->res.buf))
		return 1;
	return 0;
}
765
766/* releases unused buffers after processing. Typically used at the end of the
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100767 * update() functions. It will try to wake up as many tasks/applets as the
768 * number of buffers that it releases. In practice, most often streams are
769 * blocked on a single buffer, so it makes sense to try to wake two up when two
770 * buffers are released at once.
Willy Tarreau656859d2014-11-25 19:46:36 +0100771 */
Willy Tarreau87b09662015-04-03 00:22:06 +0200772void stream_release_buffers(struct stream *s)
Willy Tarreau656859d2014-11-25 19:46:36 +0100773{
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100774 int offer = 0;
Willy Tarreau656859d2014-11-25 19:46:36 +0100775
Willy Tarreau0c7ed5d2018-07-10 09:53:31 +0200776 if (c_size(&s->req) && c_empty(&s->req)) {
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100777 offer++;
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100778 b_free(&s->req.buf);
779 }
Willy Tarreau0c7ed5d2018-07-10 09:53:31 +0200780 if (c_size(&s->res) && c_empty(&s->res)) {
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100781 offer++;
Willy Tarreau22ec1ea2014-11-27 20:45:39 +0100782 b_free(&s->res.buf);
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100783 }
Willy Tarreau656859d2014-11-25 19:46:36 +0100784
Willy Tarreaubf883e02014-11-25 21:10:35 +0100785 /* if we're certain to have at least 1 buffer available, and there is
786 * someone waiting, we can wake up a waiter and offer them.
787 */
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100788 if (offer)
Willy Tarreau4d77bbf2021-02-20 12:02:46 +0100789 offer_buffers(s, offer);
Willy Tarreau656859d2014-11-25 19:46:36 +0100790}
Willy Tarreauc6ca1a02007-05-13 19:43:47 +0200791
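/* Updates the byte accounting of stream <s>: the amounts transferred on each
 * channel since the last call are added to the frontend, backend, server and
 * listener counters, as well as to any tracked stick counters.
 */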
void stream_process_counters(struct stream *s)
{
	struct session *sess = s->sess;
	unsigned long long bytes;
	int i;

	bytes = s->req.total - s->logs.bytes_in;
	s->logs.bytes_in = s->req.total;
	if (bytes) {
		_HA_ATOMIC_ADD(&sess->fe->fe_counters.bytes_in, bytes);
		_HA_ATOMIC_ADD(&s->be->be_counters.bytes_in, bytes);

		if (objt_server(s->target))
			_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.bytes_in, bytes);

		if (sess->listener && sess->listener->counters)
			_HA_ATOMIC_ADD(&sess->listener->counters->bytes_in, bytes);

		for (i = 0; i < global.tune.nb_stk_ctr; i++) {
			if (!stkctr_inc_bytes_in_ctr(&s->stkctr[i], bytes))
				stkctr_inc_bytes_in_ctr(&sess->stkctr[i], bytes);
		}
	}

	bytes = s->res.total - s->logs.bytes_out;
	s->logs.bytes_out = s->res.total;
	if (bytes) {
		_HA_ATOMIC_ADD(&sess->fe->fe_counters.bytes_out, bytes);
		_HA_ATOMIC_ADD(&s->be->be_counters.bytes_out, bytes);

		if (objt_server(s->target))
			_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.bytes_out, bytes);

		if (sess->listener && sess->listener->counters)
			_HA_ATOMIC_ADD(&sess->listener->counters->bytes_out, bytes);

		for (i = 0; i < global.tune.nb_stk_ctr; i++) {
			if (!stkctr_inc_bytes_out_ctr(&s->stkctr[i], bytes))
				stkctr_inc_bytes_out_ctr(&sess->stkctr[i], bytes);
		}
	}
}

/* Abort processing on both channels at the same time */
void stream_abort(struct stream *s)
{
	channel_abort(&s->req);
	channel_abort(&s->res);
}

/*
 * Returns a message to the client; the connection is shut down for read,
 * and the request is cleared so that no server connection can be initiated.
 * The buffer is marked for read shutdown on the other side to protect the
 * message, and the buffer write is enabled. The message is contained in a
 * "chunk". If it is null, then an empty message is used. The reply buffer does
 * not need to be empty before this, and its contents will not be overwritten.
 * The primary goal of this function is to return error messages to a client.
 */
void stream_retnclose(struct stream *s, const struct buffer *msg)
{
	struct channel *ic = &s->req;
	struct channel *oc = &s->res;

	channel_auto_read(ic);
	channel_abort(ic);
	channel_erase(ic);
	channel_truncate(oc);

	if (likely(msg && msg->data))
		co_inject(oc, msg->area, msg->data);

	channel_auto_read(oc);
	channel_auto_close(oc);
	sc_schedule_abort(s->scb);
}

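/* Applies the timeout designated by <name> to stream <s>: the server-side I/O
 * timeout or the tunnel timeout. Returns 1 if the timeout was set, 0 if <name>
 * is not handled.
 */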
int stream_set_timeout(struct stream *s, enum act_timeout_name name, int timeout)
{
	switch (name) {
	case ACT_TIMEOUT_SERVER:
		s->scb->ioto = timeout;
		return 1;

	case ACT_TIMEOUT_TUNNEL:
		s->tunnel_timeout = timeout;
		return 1;

	default:
		return 0;
	}
}

/*
 * This function handles the transition between the SC_ST_CON state and the
 * SC_ST_EST state. It must only be called after switching from SC_ST_CON (or
 * SC_ST_INI or SC_ST_RDY) to SC_ST_EST, but only when a ->proto is defined.
 * Note that it will switch the interface to SC_ST_DIS if we already have
 * the SC_FL_ABRT_DONE flag, which means we were able to forward the request
 * and receive the response before process_stream() had the opportunity to
 * make the switch from SC_ST_CON to SC_ST_EST. When that happens, we want
 * to go through back_establish() anyway, to make sure the analysers run.
 * Timeouts are cleared. Errors are reported on the channel so that analysers
 * can handle them.
 */
Willy Tarreau3a9312a2020-01-09 18:43:15 +0100897static void back_establish(struct stream *s)
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100898{
Willy Tarreaufd9417b2022-05-18 16:23:22 +0200899 struct connection *conn = sc_conn(s->scb);
Willy Tarreau7b8c4f92014-11-28 15:15:44 +0100900 struct channel *req = &s->req;
901 struct channel *rep = &s->res;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100902
Christopher Faulet62e75742022-03-31 09:16:34 +0200903 DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
Willy Tarreaud66ed882019-06-05 18:02:04 +0200904 /* First, centralize the timers information, and clear any irrelevant
905 * timeout.
906 */
Willy Tarreau69530f52023-04-28 09:16:15 +0200907 s->logs.t_connect = ns_to_ms(now_ns - s->logs.accept_ts);
Christopher Fauletae024ce2022-03-29 19:02:31 +0200908 s->conn_exp = TICK_ETERNITY;
909 s->flags &= ~SF_CONN_EXP;
Willy Tarreaud66ed882019-06-05 18:02:04 +0200910
911 /* errors faced after sending data need to be reported */
Christopher Faulete182a8e2023-04-14 12:07:26 +0200912 if ((s->scb->flags & SC_FL_ERROR) && req->flags & CF_WROTE_DATA) {
Christopher Faulet2e56a732023-01-26 16:18:09 +0100913 s->req.flags |= CF_WRITE_EVENT;
914 s->res.flags |= CF_READ_EVENT;
Christopher Faulet50264b42022-03-30 19:39:30 +0200915 s->conn_err_type = STRM_ET_DATA_ERR;
Christopher Faulet62e75742022-03-31 09:16:34 +0200916 DBG_TRACE_STATE("read/write error", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
Willy Tarreaud66ed882019-06-05 18:02:04 +0200917 }
918
Willy Tarreau3fdb3662012-11-12 00:42:33 +0100919 if (objt_server(s->target))
Willy Tarreau88bc8002021-12-06 07:01:02 +0000920 health_adjust(__objt_server(s->target), HANA_STATUS_L4_OK);
Krzysztof Piotr Oledzki97f07b82009-12-15 22:31:24 +0100921
Christopher Faulet1bb6afa2021-03-08 17:57:53 +0100922 if (!IS_HTX_STRM(s)) { /* let's allow immediate data connection in this case */
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100923 /* if the user wants to log as soon as possible, without counting
924 * bytes from the server, then this is the right moment. */
Willy Tarreaud0d8da92015-04-04 02:10:38 +0200925 if (!LIST_ISEMPTY(&strm_fe(s)->logformat) && !(s->logs.logwait & LW_BYTES)) {
Willy Tarreau66425e32018-07-25 06:55:12 +0200926 /* note: no pend_pos here, session is established */
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100927 s->logs.t_close = s->logs.t_connect; /* to get a valid end date */
Willy Tarreaua5555ec2008-11-30 19:02:32 +0100928 s->do_log(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100929 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100930 }
931 else {
Christopher Faulet9a790f62023-03-16 14:40:03 +0100932 s->scb->flags |= SC_FL_RCV_ONCE; /* a single read is enough to get response headers */
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100933 }
934
Willy Tarreau0007d0a2018-12-11 18:01:38 +0100935 rep->analysers |= strm_fe(s)->fe_rsp_ana | s->be->be_rsp_ana;
Christopher Faulet309c6412015-12-02 09:57:32 +0100936
Willy Tarreau4164eb92022-05-25 15:42:03 +0200937 se_have_more_data(s->scb->sedesc);
Christopher Faulet23577182022-12-20 18:47:39 +0100938 rep->flags |= CF_READ_EVENT; /* producer is now attached */
Christopher Faulet4c135682023-02-16 11:09:31 +0100939 sc_ep_report_read_activity(s->scb);
Christopher Faulet0256da12021-12-15 09:50:17 +0100940 if (conn) {
Amaury Denoyelle90d3d882020-12-10 13:43:52 +0100941 /* real connections have timeouts
942 * if already defined, it means that a set-timeout rule has
943 * been executed so do not overwrite them
944 */
Christopher Faulet5aaacfb2023-02-15 08:13:33 +0100945 if (!tick_isset(s->scb->ioto))
946 s->scb->ioto = s->be->timeout.server;
Amaury Denoyellefb504432020-12-10 13:43:53 +0100947 if (!tick_isset(s->tunnel_timeout))
948 s->tunnel_timeout = s->be->timeout.tunnel;
Amaury Denoyelle90d3d882020-12-10 13:43:52 +0100949
Olivier Houchard47e9a1a2018-11-07 17:55:19 +0100950 /* The connection is now established, try to read data from the
951 * underlying layer, and subscribe to recv events. We use a
952 * delayed recv here to give a chance to the data to flow back
953 * by the time we process other tasks.
954 */
Willy Tarreauf61dd192022-05-27 09:00:19 +0200955 sc_chk_rcv(s->scb);
Willy Tarreaud04e8582010-05-31 12:31:35 +0200956 }
Olivier Houchard78595262019-07-26 14:54:34 +0200957 /* If we managed to get the whole response, and we don't have anything
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200958 * left to send, or can't, switch to SC_ST_DIS now. */
Christopher Fauletca5309a2023-04-17 16:17:32 +0200959 if ((s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) || (s->scf->flags & SC_FL_SHUT_DONE)) {
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200960 s->scb->state = SC_ST_DIS;
Christopher Faulet62e75742022-03-31 09:16:34 +0200961 DBG_TRACE_STATE("response channel shutdown for read/write", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
Christopher Fauleteea8fc72019-11-05 16:18:10 +0100962 }
963
Christopher Faulet62e75742022-03-31 09:16:34 +0200964 DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100965}
966
Willy Tarreau87b09662015-04-03 00:22:06 +0200967/* Set correct stream termination flags in case no analyser has done it. It
Simon Hormandec5be42011-06-08 09:19:07 +0900968 * also counts a failed request if the server state has not reached the request
969 * stage.
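 * The mapping below is roughly: SC_ST_INI maps to SF_FINST_R (request),
 * SC_ST_QUE to SF_FINST_Q (queue), the connection setup states
 * (REQ/TAR/ASS/CON/CER/RDY) to SF_FINST_C, SC_ST_EST (now or previously)
 * to SF_FINST_D (data phase), and anything else to SF_FINST_L.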
970 */
Christopher Fauletdbad8ec2023-04-13 14:46:01 +0200971void sess_set_term_flags(struct stream *s)
Simon Hormandec5be42011-06-08 09:19:07 +0900972{
Willy Tarreaue7dff022015-04-03 01:14:29 +0200973 if (!(s->flags & SF_FINST_MASK)) {
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200974 if (s->scb->state == SC_ST_INI) {
Willy Tarreau7ab22adb2019-06-05 14:53:22 +0200975 /* anything before REQ in fact */
Willy Tarreau4781b152021-04-06 13:53:36 +0200976 _HA_ATOMIC_INC(&strm_fe(s)->fe_counters.failed_req);
Willy Tarreau2c1068c2015-09-23 12:21:21 +0200977 if (strm_li(s) && strm_li(s)->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +0200978 _HA_ATOMIC_INC(&strm_li(s)->counters->failed_req);
Simon Hormandec5be42011-06-08 09:19:07 +0900979
Willy Tarreaue7dff022015-04-03 01:14:29 +0200980 s->flags |= SF_FINST_R;
Simon Hormandec5be42011-06-08 09:19:07 +0900981 }
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200982 else if (s->scb->state == SC_ST_QUE)
Willy Tarreaue7dff022015-04-03 01:14:29 +0200983 s->flags |= SF_FINST_Q;
Willy Tarreau74568cf2022-05-27 09:03:30 +0200984 else if (sc_state_in(s->scb->state, SC_SB_REQ|SC_SB_TAR|SC_SB_ASS|SC_SB_CON|SC_SB_CER|SC_SB_RDY))
Willy Tarreaue7dff022015-04-03 01:14:29 +0200985 s->flags |= SF_FINST_C;
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200986 else if (s->scb->state == SC_ST_EST || s->prev_conn_state == SC_ST_EST)
Willy Tarreaue7dff022015-04-03 01:14:29 +0200987 s->flags |= SF_FINST_D;
Simon Hormandec5be42011-06-08 09:19:07 +0900988 else
Willy Tarreaue7dff022015-04-03 01:14:29 +0200989 s->flags |= SF_FINST_L;
Simon Hormandec5be42011-06-08 09:19:07 +0900990 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100991}
992
Thierry FOURNIER5a363e72015-09-27 19:29:33 +0200993/* This function processes the use-service action ruleset. It executes
994 * the associated ACL and sets an applet as the stream or txn final node.
995 * It returns ACT_RET_ERR if an error occurs, leaving the proxy in a
Ilya Shipitsinc02a23f2020-05-06 00:53:22 +0500996 * consistent state. It returns ACT_RET_STOP on success because
Thierry FOURNIER5a363e72015-09-27 19:29:33 +0200997 * use-service must be a terminal action. Returns ACT_RET_YIELD
998 * if the initialisation function requires more data.
999 */
1000enum act_return process_use_service(struct act_rule *rule, struct proxy *px,
1001 struct session *sess, struct stream *s, int flags)
1002
1003{
1004 struct appctx *appctx;
1005
1006 /* Initialises the applet if it is required. */
Christopher Faulet105ba6c2019-12-18 14:41:51 +01001007 if (flags & ACT_OPT_FIRST) {
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001008 /* Register the applet. This function schedules the applet. */
1009 s->target = &rule->applet.obj_type;
Willy Tarreaua0b58b52022-05-27 08:33:53 +02001010 appctx = sc_applet_create(s->scb, objt_applet(s->target));
Christopher Faulet2da02ae2022-02-24 13:45:27 +01001011 if (unlikely(!appctx))
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001012 return ACT_RET_ERR;
1013
Christopher Faulet93882042022-01-19 14:56:50 +01001014 /* Finish initialisation of the context. */
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001015 appctx->rule = rule;
Christopher Faulet16c0d9c2022-05-12 14:59:28 +02001016 if (appctx_init(appctx) == -1)
Christopher Faulet4aa1d282022-01-13 16:01:35 +01001017 return ACT_RET_ERR;
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001018 }
1019 else
Willy Tarreau8e7c6e62022-05-18 17:58:02 +02001020 appctx = __sc_appctx(s->scb);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001021
Christopher Faulet2571bc62019-03-01 11:44:26 +01001022 if (rule->from != ACT_F_HTTP_REQ) {
1023 if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
Willy Tarreau4781b152021-04-06 13:53:36 +02001024 _HA_ATOMIC_INC(&sess->fe->fe_counters.intercepted_req);
Christopher Faulet2571bc62019-03-01 11:44:26 +01001025
1026 /* The SF_ASSIGNED flag prevents server assignment. */
1027 s->flags |= SF_ASSIGNED;
1028 }
1029
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001030 /* Now we can schedule the applet. */
Willy Tarreau90e8b452022-05-25 18:21:43 +02001031 applet_need_more_data(appctx);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001032 appctx_wakeup(appctx);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001033 return ACT_RET_STOP;
1034}
1035
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001036/* This stream analyser checks the switching rules and changes the backend
Willy Tarreau4de91492010-01-22 19:10:05 +01001037 * if appropriate. The default_backend rule is also considered, then the
1038 * target backend's forced persistence rules, if any, are evaluated last.
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001039 * It returns 1 if the processing can continue on next analysers, or zero if it
1040 * either needs more data or wants to immediately abort the request.
1041 */
Willy Tarreau87b09662015-04-03 00:22:06 +02001042static int process_switching_rules(struct stream *s, struct channel *req, int an_bit)
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001043{
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001044 struct persist_rule *prst_rule;
Willy Tarreau192252e2015-04-04 01:47:55 +02001045 struct session *sess = s->sess;
1046 struct proxy *fe = sess->fe;
Willy Tarreau4de91492010-01-22 19:10:05 +01001047
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001048 req->analysers &= ~an_bit;
1049 req->analyse_exp = TICK_ETERNITY;
1050
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001051 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001052
1053 /* now check whether we have some switching rules for this request */
Willy Tarreaue7dff022015-04-03 01:14:29 +02001054 if (!(s->flags & SF_BE_ASSIGNED)) {
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001055 struct switching_rule *rule;
1056
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02001057 list_for_each_entry(rule, &fe->switching_rules, list) {
Willy Tarreauf51658d2014-04-23 01:21:56 +02001058 int ret = 1;
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001059
Willy Tarreauf51658d2014-04-23 01:21:56 +02001060 if (rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001061 ret = acl_exec_cond(rule->cond, fe, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Willy Tarreauf51658d2014-04-23 01:21:56 +02001062 ret = acl_pass(ret);
1063 if (rule->cond->pol == ACL_COND_UNLESS)
1064 ret = !ret;
1065 }
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001066
1067 if (ret) {
Bertrand Jacquin702d44f2013-11-19 11:43:06 +01001068 /* If the backend name is dynamic, try to resolve the name.
1069 * If we can't resolve the name, or if any error occurs, break
1070 * the loop and fall back to the default backend.
1071 */
Dragan Dosen2ae327e2017-10-26 11:25:10 +02001072 struct proxy *backend = NULL;
Bertrand Jacquin702d44f2013-11-19 11:43:06 +01001073
1074 if (rule->dynamic) {
Willy Tarreau83061a82018-07-13 11:56:34 +02001075 struct buffer *tmp;
Dragan Dosen2ae327e2017-10-26 11:25:10 +02001076
1077 tmp = alloc_trash_chunk();
1078 if (!tmp)
1079 goto sw_failed;
1080
Willy Tarreau843b7cb2018-07-13 10:54:26 +02001081 if (build_logline(s, tmp->area, tmp->size, &rule->be.expr))
1082 backend = proxy_be_by_name(tmp->area);
Dragan Dosen2ae327e2017-10-26 11:25:10 +02001083
1084 free_trash_chunk(tmp);
1085 tmp = NULL;
1086
Bertrand Jacquin702d44f2013-11-19 11:43:06 +01001087 if (!backend)
1088 break;
1089 }
1090 else
1091 backend = rule->be.backend;
1092
Willy Tarreau87b09662015-04-03 00:22:06 +02001093 if (!stream_set_backend(s, backend))
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001094 goto sw_failed;
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001095 break;
1096 }
1097 }
1098
1099 /* To ensure correct connection accounting on the backend, we
1100 * have to assign one if it was not set (eg: a listen). This
1101 * measure also takes care of correctly setting the default
Christopher Fauletf0d7eb22021-03-22 15:07:51 +01001102 * backend if any. Don't do anything if an upgrade is already in
1103 * progress.
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001104 */
Christopher Fauletf0d7eb22021-03-22 15:07:51 +01001105 if (!(s->flags & (SF_BE_ASSIGNED|SF_IGNORE)))
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02001106 if (!stream_set_backend(s, fe->defbe.be ? fe->defbe.be : s->be))
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001107 goto sw_failed;
Christopher Fauletf0d7eb22021-03-22 15:07:51 +01001108
1109 /* No backend assigned but no error reported. It happens when a
1110 * TCP stream is upgraded to HTTP/2.
1111 */
1112 if ((s->flags & (SF_BE_ASSIGNED|SF_IGNORE)) == SF_IGNORE) {
1113 DBG_TRACE_DEVEL("leaving with no backend because of a destructive upgrade", STRM_EV_STRM_ANA, s);
1114 return 0;
1115 }
1116
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001117 }
1118
Willy Tarreaufb356202010-08-03 14:02:05 +02001119 /* we don't want to run the TCP or HTTP filters again if the backend has not changed */
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02001120 if (fe == s->be) {
Willy Tarreau22ec1ea2014-11-27 20:45:39 +01001121 s->req.analysers &= ~AN_REQ_INSPECT_BE;
1122 s->req.analysers &= ~AN_REQ_HTTP_PROCESS_BE;
Christopher Faulet0184ea72017-01-05 14:06:34 +01001123 s->req.analysers &= ~AN_REQ_FLT_START_BE;
Willy Tarreaufb356202010-08-03 14:02:05 +02001124 }
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001125
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001126 /* as soon as we know the backend, we must check if we have a matching forced or ignored
Willy Tarreau87b09662015-04-03 00:22:06 +02001127 * persistence rule, and report that in the stream.
Willy Tarreau4de91492010-01-22 19:10:05 +01001128 */
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001129 list_for_each_entry(prst_rule, &s->be->persist_rules, list) {
Willy Tarreau4de91492010-01-22 19:10:05 +01001130 int ret = 1;
1131
1132 if (prst_rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001133 ret = acl_exec_cond(prst_rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Willy Tarreau4de91492010-01-22 19:10:05 +01001134 ret = acl_pass(ret);
1135 if (prst_rule->cond->pol == ACL_COND_UNLESS)
1136 ret = !ret;
1137 }
1138
1139 if (ret) {
1140 /* no rule, or the rule matches */
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001141 if (prst_rule->type == PERSIST_TYPE_FORCE) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02001142 s->flags |= SF_FORCE_PRST;
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001143 } else {
Willy Tarreaue7dff022015-04-03 01:14:29 +02001144 s->flags |= SF_IGNORE_PRST;
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001145 }
Willy Tarreau4de91492010-01-22 19:10:05 +01001146 break;
1147 }
1148 }
1149
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001150 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001151 return 1;
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001152
1153 sw_failed:
1154 /* immediately abort this request in case of allocation failure */
Christopher Faulet7eb837d2023-04-13 15:22:29 +02001155 stream_abort(s);
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001156
Willy Tarreaue7dff022015-04-03 01:14:29 +02001157 if (!(s->flags & SF_ERR_MASK))
1158 s->flags |= SF_ERR_RESOURCE;
1159 if (!(s->flags & SF_FINST_MASK))
1160 s->flags |= SF_FINST_R;
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001161
Willy Tarreaueee5b512015-04-03 23:46:31 +02001162 if (s->txn)
1163 s->txn->status = 500;
Christopher Faulet0184ea72017-01-05 14:06:34 +01001164 s->req.analysers &= AN_REQ_FLT_END;
Willy Tarreau22ec1ea2014-11-27 20:45:39 +01001165 s->req.analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001166 DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_ANA|STRM_EV_STRM_ERR, s);
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001167 return 0;
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001168}
1169
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001170/* This stream analyser works on a request. It applies all use-server rules on
1171 * it then returns 1. The data must already be present in the buffer otherwise
1172 * they won't match. It always returns 1.
1173 */
Willy Tarreau87b09662015-04-03 00:22:06 +02001174static int process_server_rules(struct stream *s, struct channel *req, int an_bit)
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001175{
1176 struct proxy *px = s->be;
Willy Tarreau192252e2015-04-04 01:47:55 +02001177 struct session *sess = s->sess;
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001178 struct server_rule *rule;
1179
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001180 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001181
Willy Tarreaue7dff022015-04-03 01:14:29 +02001182 if (!(s->flags & SF_ASSIGNED)) {
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001183 list_for_each_entry(rule, &px->server_rules, list) {
1184 int ret;
1185
Willy Tarreau192252e2015-04-04 01:47:55 +02001186 ret = acl_exec_cond(rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001187 ret = acl_pass(ret);
1188 if (rule->cond->pol == ACL_COND_UNLESS)
1189 ret = !ret;
1190
1191 if (ret) {
Jerome Magnin824186b2020-03-29 09:37:12 +02001192 struct server *srv;
1193
1194 if (rule->dynamic) {
1195 struct buffer *tmp = get_trash_chunk();
1196
1197 if (!build_logline(s, tmp->area, tmp->size, &rule->expr))
1198 break;
1199
1200 srv = findserver(s->be, tmp->area);
1201 if (!srv)
1202 break;
1203 }
1204 else
1205 srv = rule->srv.ptr;
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001206
Emeric Brun52a91d32017-08-31 14:41:55 +02001207 if ((srv->cur_state != SRV_ST_STOPPED) ||
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001208 (px->options & PR_O_PERSIST) ||
Willy Tarreaue7dff022015-04-03 01:14:29 +02001209 (s->flags & SF_FORCE_PRST)) {
1210 s->flags |= SF_DIRECT | SF_ASSIGNED;
Willy Tarreau3fdb3662012-11-12 00:42:33 +01001211 s->target = &srv->obj_type;
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001212 break;
1213 }
1214 /* if the server is not UP, let's go on with next rules
1215 * just in case another one is suited.
1216 */
1217 }
1218 }
1219 }
1220
1221 req->analysers &= ~an_bit;
1222 req->analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001223 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001224 return 1;
1225}
1226
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001227static inline void sticking_rule_find_target(struct stream *s,
1228 struct stktable *t, struct stksess *ts)
1229{
1230 struct proxy *px = s->be;
1231 struct eb32_node *node;
1232 struct dict_entry *de;
1233 void *ptr;
1234 struct server *srv;
1235
1236 /* Look for the server name previously stored in <t> stick-table */
1237 HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
Thayne McCombs92149f92020-11-20 01:28:26 -07001238 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_KEY);
Emeric Brun0e3457b2021-06-30 17:18:28 +02001239 de = stktable_data_cast(ptr, std_t_dict);
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001240 HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
1241
1242 if (de) {
Thayne McCombs92149f92020-11-20 01:28:26 -07001243 struct ebpt_node *node;
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001244
Thayne McCombs92149f92020-11-20 01:28:26 -07001245 if (t->server_key_type == STKTABLE_SRV_NAME) {
1246 node = ebis_lookup(&px->conf.used_server_name, de->value.key);
1247 if (node) {
1248 srv = container_of(node, struct server, conf.name);
1249 goto found;
1250 }
1251 } else if (t->server_key_type == STKTABLE_SRV_ADDR) {
1252 HA_RWLOCK_RDLOCK(PROXY_LOCK, &px->lock);
1253 node = ebis_lookup(&px->used_server_addr, de->value.key);
1254 HA_RWLOCK_RDUNLOCK(PROXY_LOCK, &px->lock);
1255 if (node) {
1256 srv = container_of(node, struct server, addr_node);
1257 goto found;
1258 }
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001259 }
1260 }
1261
1262 /* Look for the server ID */
1263 HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
1264 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_ID);
Emeric Brun0e3457b2021-06-30 17:18:28 +02001265 node = eb32_lookup(&px->conf.used_server_id, stktable_data_cast(ptr, std_t_sint));
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001266 HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
1267
1268 if (!node)
1269 return;
1270
1271 srv = container_of(node, struct server, conf.id);
1272 found:
1273 if ((srv->cur_state != SRV_ST_STOPPED) ||
1274 (px->options & PR_O_PERSIST) || (s->flags & SF_FORCE_PRST)) {
1275 s->flags |= SF_DIRECT | SF_ASSIGNED;
1276 s->target = &srv->obj_type;
1277 }
1278}
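/* Note on the lookup above: the server key stored in the stick-table (a name
 * or an address, depending on the table's server_key_type) is tried first,
 * and the numeric server ID is only used as a fallback when no key matches.
 */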
1279
Emeric Brun1d33b292010-01-04 15:47:17 +01001280/* This stream analyser works on a request. It applies all sticking rules on
1281 * it then returns 1. The data must already be present in the buffer otherwise
1282 * they won't match. It always returns 1.
1283 */
Willy Tarreau87b09662015-04-03 00:22:06 +02001284static int process_sticking_rules(struct stream *s, struct channel *req, int an_bit)
Emeric Brun1d33b292010-01-04 15:47:17 +01001285{
1286 struct proxy *px = s->be;
Willy Tarreau192252e2015-04-04 01:47:55 +02001287 struct session *sess = s->sess;
Emeric Brun1d33b292010-01-04 15:47:17 +01001288 struct sticking_rule *rule;
1289
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001290 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001291
1292 list_for_each_entry(rule, &px->sticking_rules, list) {
1293 int ret = 1 ;
1294 int i;
1295
Willy Tarreau9667a802013-12-09 12:52:13 +01001296 /* Only the first stick store-request of each table is applied
1297 * and other ones are ignored. The purpose is to allow complex
1298 * configurations which look for multiple entries by decreasing
1299 * order of precision and to stop at the first which matches.
1300 * An example could be a store of the IP address from an HTTP
1301 * header first, then from the source if not found.
1302 */
Jerome Magninbee00ad2020-01-16 17:37:21 +01001303 if (rule->flags & STK_IS_STORE) {
1304 for (i = 0; i < s->store_count; i++) {
1305 if (rule->table.t == s->store[i].table)
1306 break;
1307 }
Emeric Brun1d33b292010-01-04 15:47:17 +01001308
Jerome Magninbee00ad2020-01-16 17:37:21 +01001309 if (i != s->store_count)
1310 continue;
1311 }
Emeric Brun1d33b292010-01-04 15:47:17 +01001312
1313 if (rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001314 ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001315 ret = acl_pass(ret);
1316 if (rule->cond->pol == ACL_COND_UNLESS)
1317 ret = !ret;
1318 }
1319
1320 if (ret) {
1321 struct stktable_key *key;
1322
Willy Tarreau192252e2015-04-04 01:47:55 +02001323 key = stktable_fetch_key(rule->table.t, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->expr, NULL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001324 if (!key)
1325 continue;
1326
1327 if (rule->flags & STK_IS_MATCH) {
1328 struct stksess *ts;
1329
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001330 if ((ts = stktable_lookup_key(rule->table.t, key)) != NULL) {
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001331 if (!(s->flags & SF_ASSIGNED))
1332 sticking_rule_find_target(s, rule->table.t, ts);
Emeric Brun819fc6f2017-06-13 19:37:32 +02001333 stktable_touch_local(rule->table.t, ts, 1);
Emeric Brun1d33b292010-01-04 15:47:17 +01001334 }
1335 }
1336 if (rule->flags & STK_IS_STORE) {
1337 if (s->store_count < (sizeof(s->store) / sizeof(s->store[0]))) {
1338 struct stksess *ts;
1339
1340 ts = stksess_new(rule->table.t, key);
1341 if (ts) {
1342 s->store[s->store_count].table = rule->table.t;
1343 s->store[s->store_count++].ts = ts;
1344 }
1345 }
1346 }
1347 }
1348 }
1349
1350 req->analysers &= ~an_bit;
1351 req->analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001352 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001353 return 1;
1354}
1355
1356/* This stream analyser works on a response. It applies all store rules on it
1357 * then returns 1. The data must already be present in the buffer otherwise
1358 * they won't match. It always returns 1.
1359 */
Willy Tarreau87b09662015-04-03 00:22:06 +02001360static int process_store_rules(struct stream *s, struct channel *rep, int an_bit)
Emeric Brun1d33b292010-01-04 15:47:17 +01001361{
1362 struct proxy *px = s->be;
Willy Tarreau192252e2015-04-04 01:47:55 +02001363 struct session *sess = s->sess;
Emeric Brun1d33b292010-01-04 15:47:17 +01001364 struct sticking_rule *rule;
1365 int i;
Willy Tarreau9667a802013-12-09 12:52:13 +01001366 int nbreq = s->store_count;
Emeric Brun1d33b292010-01-04 15:47:17 +01001367
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001368 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001369
1370 list_for_each_entry(rule, &px->storersp_rules, list) {
1371 int ret = 1 ;
Emeric Brun1d33b292010-01-04 15:47:17 +01001372
Willy Tarreau9667a802013-12-09 12:52:13 +01001373 /* Only the first stick store-response of each table is applied
1374 * and other ones are ignored. The purpose is to allow complex
1375 * configurations which look for multiple entries by decreasing
1376 * order of precision and to stop at the first which matches.
1377 * An example could be a store of a set-cookie value, with a
1378 * fallback to a parameter found in a 302 redirect.
1379 *
1380 * The store-response rules are not allowed to override the
1381 * store-request rules for the same table, but they may coexist.
1382 * Thus we can have up to one store-request entry and one store-
1383 * response entry for the same table at any time.
1384 */
1385 for (i = nbreq; i < s->store_count; i++) {
1386 if (rule->table.t == s->store[i].table)
1387 break;
1388 }
1389
1390 /* skip existing entries for this table */
1391 if (i < s->store_count)
1392 continue;
1393
Emeric Brun1d33b292010-01-04 15:47:17 +01001394 if (rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001395 ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001396 ret = acl_pass(ret);
1397 if (rule->cond->pol == ACL_COND_UNLESS)
1398 ret = !ret;
1399 }
1400
1401 if (ret) {
1402 struct stktable_key *key;
1403
Willy Tarreau192252e2015-04-04 01:47:55 +02001404 key = stktable_fetch_key(rule->table.t, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL, rule->expr, NULL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001405 if (!key)
1406 continue;
1407
Willy Tarreau37e340c2013-12-06 23:05:21 +01001408 if (s->store_count < (sizeof(s->store) / sizeof(s->store[0]))) {
Emeric Brun1d33b292010-01-04 15:47:17 +01001409 struct stksess *ts;
1410
1411 ts = stksess_new(rule->table.t, key);
1412 if (ts) {
1413 s->store[s->store_count].table = rule->table.t;
Emeric Brun1d33b292010-01-04 15:47:17 +01001414 s->store[s->store_count++].ts = ts;
1415 }
1416 }
1417 }
1418 }
1419
1420 /* process store request and store response */
1421 for (i = 0; i < s->store_count; i++) {
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001422 struct stksess *ts;
Willy Tarreau13c29de2010-06-06 16:40:39 +02001423 void *ptr;
Thayne McCombs92149f92020-11-20 01:28:26 -07001424 char *key;
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001425 struct dict_entry *de;
Thayne McCombs92149f92020-11-20 01:28:26 -07001426 struct stktable *t = s->store[i].table;
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001427
Christopher Fauletb9766402022-10-25 16:45:38 +02001428 if (!objt_server(s->target) || (__objt_server(s->target)->flags & SRV_F_NON_STICK)) {
Simon Hormanfa461682011-06-25 09:39:49 +09001429 stksess_free(s->store[i].table, s->store[i].ts);
1430 s->store[i].ts = NULL;
1431 continue;
1432 }
1433
Thayne McCombs92149f92020-11-20 01:28:26 -07001434 ts = stktable_set_entry(t, s->store[i].ts);
Emeric Brun819fc6f2017-06-13 19:37:32 +02001435 if (ts != s->store[i].ts) {
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001436 /* the entry already existed, we can free ours */
Thayne McCombs92149f92020-11-20 01:28:26 -07001437 stksess_free(t, s->store[i].ts);
Emeric Brun1d33b292010-01-04 15:47:17 +01001438 }
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001439 s->store[i].ts = NULL;
Emeric Brun819fc6f2017-06-13 19:37:32 +02001440
Thayne McCombs92149f92020-11-20 01:28:26 -07001441 if (t->server_key_type == STKTABLE_SRV_NAME)
1442 key = __objt_server(s->target)->id;
1443 else if (t->server_key_type == STKTABLE_SRV_ADDR)
1444 key = __objt_server(s->target)->addr_node.key;
1445 else
Willy Tarreaubc7c2072022-10-12 10:35:41 +02001446 key = NULL;
Thayne McCombs92149f92020-11-20 01:28:26 -07001447
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001448 HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
Willy Tarreaubc7c2072022-10-12 10:35:41 +02001449 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_ID);
1450 stktable_data_cast(ptr, std_t_sint) = __objt_server(s->target)->puid;
1451
1452 if (key) {
1453 de = dict_insert(&server_key_dict, key);
1454 if (de) {
1455 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_KEY);
1456 stktable_data_cast(ptr, std_t_dict) = de;
1457 }
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001458 }
Willy Tarreaubc7c2072022-10-12 10:35:41 +02001459
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001460 HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
1461
Thayne McCombs92149f92020-11-20 01:28:26 -07001462 stktable_touch_local(t, ts, 1);
Emeric Brun1d33b292010-01-04 15:47:17 +01001463 }
Willy Tarreau2a164ee2010-06-18 09:57:45 +02001464 s->store_count = 0; /* everything is stored */
Emeric Brun1d33b292010-01-04 15:47:17 +01001465
1466 rep->analysers &= ~an_bit;
1467 rep->analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001468
1469 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001470 return 1;
1471}
1472
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001473/* Set the stream to HTTP mode, if necessary. The minimal request HTTP analysers
1474 * are set and the client mux is upgraded. It returns 1 if the stream processing
1475 * may continue or 0 if it should be stopped. It happens on error or if the
Christopher Fauletae863c62021-03-15 12:03:44 +01001476 * upgrade required a new stream. The mux protocol may be specified.
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001477 */
Christopher Fauletae863c62021-03-15 12:03:44 +01001478int stream_set_http_mode(struct stream *s, const struct mux_proto_list *mux_proto)
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001479{
Willy Tarreaub49672d2022-05-27 10:13:37 +02001480 struct stconn *sc = s->scf;
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001481 struct connection *conn;
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001482
1483 /* Already an HTTP stream */
1484 if (IS_HTX_STRM(s))
1485 return 1;
1486
1487 s->req.analysers |= AN_REQ_WAIT_HTTP|AN_REQ_HTTP_PROCESS_FE;
1488
1489 if (unlikely(!s->txn && !http_create_txn(s)))
1490 return 0;
1491
Willy Tarreaub49672d2022-05-27 10:13:37 +02001492 conn = sc_conn(sc);
Christopher Faulet2b48b042024-08-28 15:42:22 +02001493
1494 if (!sc_conn_ready(sc))
1495 return 0;
1496
Christopher Faulet13a35e52021-12-20 15:34:16 +01001497 if (conn) {
Willy Tarreau4164eb92022-05-25 15:42:03 +02001498 se_have_more_data(s->scf->sedesc);
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001499 /* Make sure we're unsubscribed, as the new
1500 * mux will probably want to subscribe to
1501 * the underlying XPRT
1502 */
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02001503 if (s->scf->wait_event.events)
Willy Tarreaub49672d2022-05-27 10:13:37 +02001504 conn->mux->unsubscribe(sc, s->scf->wait_event.events, &(s->scf->wait_event));
Christopher Fauletae863c62021-03-15 12:03:44 +01001505
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001506 if (conn->mux->flags & MX_FL_NO_UPG)
1507 return 0;
Christopher Fauletb68f77d2022-06-16 16:24:16 +02001508
1509 sc_conn_prepare_endp_upgrade(sc);
Willy Tarreaub49672d2022-05-27 10:13:37 +02001510 if (conn_upgrade_mux_fe(conn, sc, &s->req.buf,
Christopher Fauletae863c62021-03-15 12:03:44 +01001511 (mux_proto ? mux_proto->token : ist("")),
Christopher Fauletb68f77d2022-06-16 16:24:16 +02001512 PROTO_MODE_HTTP) == -1) {
1513 sc_conn_abort_endp_upgrade(sc);
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001514 return 0;
Christopher Fauletb68f77d2022-06-16 16:24:16 +02001515 }
1516 sc_conn_commit_endp_upgrade(sc);
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001517
Christopher Faulet285f7612022-12-12 08:28:55 +01001518 s->req.flags &= ~(CF_READ_EVENT|CF_AUTO_CONNECT);
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001519 s->req.total = 0;
1520 s->flags |= SF_IGNORE;
Christopher Faulet9b8d7a12022-06-17 09:36:57 +02001521 if (sc_ep_test(sc, SE_FL_DETACHED)) {
1522 /* If stream connector is detached, it means it was not
1523 * reused by the new mux. Son destroy it, disable
1524 * logging, and abort the stream process. Thus the
1525 * stream will be silently destroyed. The new mux will
1526 * create new streams.
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001527 */
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001528 s->logs.logwait = 0;
1529 s->logs.level = 0;
Christopher Faulet7eb837d2023-04-13 15:22:29 +02001530 stream_abort(s);
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001531 s->req.analysers &= AN_REQ_FLT_END;
1532 s->req.analyse_exp = TICK_ETERNITY;
1533 }
1534 }
1535
1536 return 1;
1537}
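/* Note on stream_set_http_mode(): it performs the TCP-to-HTTP stream upgrade.
 * In the destructive case above (endpoint left detached by the new mux),
 * logging is disabled and SF_IGNORE is set so the current stream vanishes
 * silently while the new mux creates its own streams. */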
1538
1539
Willy Tarreau4596fe22022-05-17 19:07:51 +02001540/* Updates at once the channel flags and timers of both stream connectors of the
Christopher Fauletef285c12022-04-01 14:48:06 +02001541 * same stream, to complete the work after the analysers, then updates the data
1542 * layer below. This will ensure that any synchronous update performed at the
Willy Tarreau4596fe22022-05-17 19:07:51 +02001543 * data layer will be reflected in the channel flags and/or stream connector.
1544 * Note that this does not change the stream connector's current state, though
Christopher Fauletef285c12022-04-01 14:48:06 +02001545 * it updates the previous state to the current one.
1546 */
Willy Tarreaub49672d2022-05-27 10:13:37 +02001547static void stream_update_both_sc(struct stream *s)
Christopher Fauletef285c12022-04-01 14:48:06 +02001548{
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02001549 struct stconn *scf = s->scf;
1550 struct stconn *scb = s->scb;
Christopher Fauletef285c12022-04-01 14:48:06 +02001551 struct channel *req = &s->req;
1552 struct channel *res = &s->res;
1553
Christopher Faulet23577182022-12-20 18:47:39 +01001554 req->flags &= ~(CF_READ_EVENT|CF_WRITE_EVENT);
1555 res->flags &= ~(CF_READ_EVENT|CF_WRITE_EVENT);
Christopher Fauletef285c12022-04-01 14:48:06 +02001556
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02001557 s->prev_conn_state = scb->state;
Christopher Fauletef285c12022-04-01 14:48:06 +02001558
1559 /* let's recompute both sides states */
Willy Tarreau74568cf2022-05-27 09:03:30 +02001560 if (sc_state_in(scf->state, SC_SB_RDY|SC_SB_EST))
Willy Tarreau19c65a92022-05-27 08:49:24 +02001561 sc_update(scf);
Christopher Fauletef285c12022-04-01 14:48:06 +02001562
Willy Tarreau74568cf2022-05-27 09:03:30 +02001563 if (sc_state_in(scb->state, SC_SB_RDY|SC_SB_EST))
Willy Tarreau19c65a92022-05-27 08:49:24 +02001564 sc_update(scb);
Christopher Fauletef285c12022-04-01 14:48:06 +02001565
Willy Tarreau4596fe22022-05-17 19:07:51 +02001566 /* stream connectors are processed outside of process_stream() and must be
Christopher Fauletef285c12022-04-01 14:48:06 +02001567 * handled at the latest moment.
1568 */
Willy Tarreau8e7c6e62022-05-18 17:58:02 +02001569 if (sc_appctx(scf)) {
Willy Tarreau13d63af2022-05-25 15:00:44 +02001570 if (sc_is_recv_allowed(scf) || sc_is_send_allowed(scf))
Willy Tarreau8e7c6e62022-05-18 17:58:02 +02001571 appctx_wakeup(__sc_appctx(scf));
Christopher Fauletef285c12022-04-01 14:48:06 +02001572 }
Willy Tarreau8e7c6e62022-05-18 17:58:02 +02001573 if (sc_appctx(scb)) {
Willy Tarreau13d63af2022-05-25 15:00:44 +02001574 if (sc_is_recv_allowed(scb) || sc_is_send_allowed(scb))
Willy Tarreau8e7c6e62022-05-18 17:58:02 +02001575 appctx_wakeup(__sc_appctx(scb));
Christopher Fauletef285c12022-04-01 14:48:06 +02001576 }
1577}
1578
Christopher Faulet85e568f2023-02-27 16:08:31 +01001579/* check SC and channel timeouts, and close the corresponding stream connectors
1580 * for future reads or writes.
1581 * Note: this will also concern upper layers but we do not touch any other
1582 * flag. We must be careful and correctly detect state changes when calling
1583 * them.
1584 */
1585static void stream_handle_timeouts(struct stream *s)
1586{
1587 stream_check_conn_timeout(s);
1588
1589 sc_check_timeouts(s->scf);
1590 channel_check_timeout(&s->req);
Christopher Faulet915ba082023-04-12 18:23:15 +02001591 sc_check_timeouts(s->scb);
1592 channel_check_timeout(&s->res);
1593
Christopher Faulet208c7122023-04-13 16:16:15 +02001594 if (unlikely(!(s->scb->flags & SC_FL_SHUT_DONE) && (s->req.flags & CF_WRITE_TIMEOUT))) {
Christopher Faulet85e568f2023-02-27 16:08:31 +01001595 s->scb->flags |= SC_FL_NOLINGER;
Christopher Fauletb2b1c3a2023-04-13 16:23:48 +02001596 sc_shutdown(s->scb);
Christopher Faulet85e568f2023-02-27 16:08:31 +01001597 }
1598
Christopher Fauletca5309a2023-04-17 16:17:32 +02001599 if (unlikely(!(s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->req.flags & CF_READ_TIMEOUT))) {
Christopher Faulet85e568f2023-02-27 16:08:31 +01001600 if (s->scf->flags & SC_FL_NOHALF)
1601 s->scf->flags |= SC_FL_NOLINGER;
Christopher Fauletcfc11c02023-04-13 16:10:23 +02001602 sc_abort(s->scf);
Christopher Faulet85e568f2023-02-27 16:08:31 +01001603 }
Christopher Faulet208c7122023-04-13 16:16:15 +02001604 if (unlikely(!(s->scf->flags & SC_FL_SHUT_DONE) && (s->res.flags & CF_WRITE_TIMEOUT))) {
Christopher Faulet85e568f2023-02-27 16:08:31 +01001605 s->scf->flags |= SC_FL_NOLINGER;
Christopher Fauletb2b1c3a2023-04-13 16:23:48 +02001606 sc_shutdown(s->scf);
Christopher Faulet85e568f2023-02-27 16:08:31 +01001607 }
1608
Christopher Fauletca5309a2023-04-17 16:17:32 +02001609 if (unlikely(!(s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->res.flags & CF_READ_TIMEOUT))) {
Christopher Faulet85e568f2023-02-27 16:08:31 +01001610 if (s->scb->flags & SC_FL_NOHALF)
1611 s->scb->flags |= SC_FL_NOLINGER;
Christopher Fauletcfc11c02023-04-13 16:10:23 +02001612 sc_abort(s->scb);
Christopher Faulet87633c32023-04-03 18:32:50 +02001613 }
Christopher Faulet85e568f2023-02-27 16:08:31 +01001614
1615 if (HAS_FILTERS(s))
1616 flt_stream_check_timeouts(s);
1617}
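/* In the function above, a write timeout on a channel shuts down the
 * connector on its output side (with SC_FL_NOLINGER forced), while a read
 * timeout aborts the connector on its input side (NOLINGER only when
 * half-closes are not allowed); filter timeouts are checked last when the
 * stream has filters attached. */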
1618
Willy Tarreaubeee6002022-09-07 16:17:49 +02001619/* if the current task's wake_date was set, it's being profiled, thus we may
Willy Tarreau6a28a302022-09-07 09:17:45 +02001620 * report latencies and CPU usages in logs, so it's desirable to update the
1621 * latency when entering process_stream().
1622 */
1623static void stream_cond_update_cpu_latency(struct stream *s)
1624{
1625 uint32_t lat = th_ctx->sched_call_date - th_ctx->sched_wake_date;
1626
1627 s->lat_time += lat;
1628}
1629
1630/* if the current task's wake_date was set, it's being profiled, thus we may
Willy Tarreaubeee6002022-09-07 16:17:49 +02001631 * report latencies and CPU usages in logs, so it's desirable to do that before
1632 * logging in order to report accurate CPU usage. In this case we count that
1633 * final part and reset the wake date so that the scheduler doesn't do it a
1634 * second time, and by doing so we also avoid an extra call to clock_gettime().
1635 * The CPU usage will be off by the little time needed to run over stream_free()
1636 * but that's only marginal.
1637 */
1638static void stream_cond_update_cpu_usage(struct stream *s)
1639{
1640 uint32_t cpu;
1641
1642 /* stats are only registered for non-zero wake dates */
1643 if (likely(!th_ctx->sched_wake_date))
1644 return;
1645
1646 cpu = (uint32_t)now_mono_time() - th_ctx->sched_call_date;
Willy Tarreau6a28a302022-09-07 09:17:45 +02001647 s->cpu_time += cpu;
Willy Tarreaubeee6002022-09-07 16:17:49 +02001648 HA_ATOMIC_ADD(&th_ctx->sched_profile_entry->cpu_time, cpu);
1649 th_ctx->sched_wake_date = 0;
1650}
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001651
Willy Tarreau6a28a302022-09-07 09:17:45 +02001652/* this function is called directly by the scheduler for tasks whose
1653 * ->process points to process_stream(), and is used to keep latencies
1654 * and CPU usage measurements accurate.
1655 */
1656void stream_update_timings(struct task *t, uint64_t lat, uint64_t cpu)
1657{
1658 struct stream *s = t->context;
1659 s->lat_time += lat;
1660 s->cpu_time += cpu;
1661}
1662
1663
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001664/* This macro is very specific to the function below. See the comments in
Willy Tarreau87b09662015-04-03 00:22:06 +02001665 * process_stream() below to understand the logic and the tests.
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001666 */
1667#define UPDATE_ANALYSERS(real, list, back, flag) { \
1668 list = (((list) & ~(flag)) | ~(back)) & (real); \
1669 back = real; \
1670 if (!(list)) \
1671 break; \
1672 if (((list) ^ ((list) & ((list) - 1))) < (flag)) \
1673 continue; \
1674}
1675
Christopher Fauleta9215b72016-05-11 17:06:28 +02001676/* The two following macros call an analyzer for the specified channel if the
1677 * right flag is set. The first one is used for "filterable" analyzers. If a
Christopher Faulet3a394fa2016-05-11 17:13:39 +02001678 * stream has some registered filters, pre and post analyze callbacks are
Christopher Faulet0184ea72017-01-05 14:06:34 +01001679 * called. The second one is used for other analyzers (AN_REQ/RES_FLT_* and
Christopher Fauleta9215b72016-05-11 17:06:28 +02001680 * AN_REQ/RES_HTTP_XFER_BODY). */
1681#define FLT_ANALYZE(strm, chn, fun, list, back, flag, ...) \
1682 { \
1683 if ((list) & (flag)) { \
1684 if (HAS_FILTERS(strm)) { \
Christopher Faulet3a394fa2016-05-11 17:13:39 +02001685 if (!flt_pre_analyze((strm), (chn), (flag))) \
Christopher Fauleta9215b72016-05-11 17:06:28 +02001686 break; \
1687 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1688 break; \
Christopher Faulet3a394fa2016-05-11 17:13:39 +02001689 if (!flt_post_analyze((strm), (chn), (flag))) \
1690 break; \
Christopher Fauleta9215b72016-05-11 17:06:28 +02001691 } \
1692 else { \
1693 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1694 break; \
1695 } \
1696 UPDATE_ANALYSERS((chn)->analysers, (list), \
1697 (back), (flag)); \
1698 } \
1699 }
1700
1701#define ANALYZE(strm, chn, fun, list, back, flag, ...) \
1702 { \
1703 if ((list) & (flag)) { \
1704 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1705 break; \
1706 UPDATE_ANALYSERS((chn)->analysers, (list), \
1707 (back), (flag)); \
1708 } \
1709 }
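/* Both macros above are expanded inside the analyser evaluation loop of
 * process_stream(): the break/continue statements they contain act on that
 * enclosing while (ana_list && max_loops--) loop, which is how they either
 * stop the evaluation (an analyser returned 0) or restart it (an earlier
 * analyser was re-enabled, as detected by UPDATE_ANALYSERS). */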
1710
Willy Tarreau87b09662015-04-03 00:22:06 +02001711/* Processes the client, server, request and response jobs of a stream task,
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001712 * then puts it back to the wait queue in a clean state, or cleans up its
1713 * resources if it must be deleted. Returns in <next> the date the task wants
1714 * to be woken up, or TICK_ETERNITY. In order not to call all functions for
1715 * nothing too many times, the request and response buffers flags are monitored
1716 * and each function is called only if at least another function has changed at
1717 * least one flag it is interested in.
1718 */
Willy Tarreau144f84a2021-03-02 16:09:26 +01001719struct task *process_stream(struct task *t, void *context, unsigned int state)
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001720{
Willy Tarreau827aee92011-03-10 16:55:02 +01001721 struct server *srv;
Olivier Houchard9f6af332018-05-25 14:04:04 +02001722 struct stream *s = context;
Willy Tarreaufb0afa72015-04-03 14:46:27 +02001723 struct session *sess = s->sess;
Christopher Faulet87633c32023-04-03 18:32:50 +02001724 unsigned int scf_flags, scb_flags;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001725 unsigned int rqf_last, rpf_last;
Willy Tarreau815a9b22010-07-27 17:15:12 +02001726 unsigned int rq_prod_last, rq_cons_last;
1727 unsigned int rp_cons_last, rp_prod_last;
Christopher Fauletbd90a162023-05-10 16:40:27 +02001728 unsigned int req_ana_back, res_ana_back;
Willy Tarreau8f128b42014-11-28 15:07:47 +01001729 struct channel *req, *res;
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02001730 struct stconn *scf, *scb;
Willy Tarreau3d07a162019-04-25 19:15:20 +02001731 unsigned int rate;
Willy Tarreau8f128b42014-11-28 15:07:47 +01001732
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001733 DBG_TRACE_ENTER(STRM_EV_STRM_PROC, s);
1734
Willy Tarreau7af4fa92020-06-17 20:49:49 +02001735 activity[tid].stream_calls++;
Willy Tarreau6a28a302022-09-07 09:17:45 +02001736 stream_cond_update_cpu_latency(s);
Willy Tarreaud80cb4e2018-01-20 19:30:13 +01001737
Willy Tarreau8f128b42014-11-28 15:07:47 +01001738 req = &s->req;
1739 res = &s->res;
1740
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02001741 scf = s->scf;
1742 scb = s->scb;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001743
Willy Tarreau0f8d3ab2018-10-25 10:42:39 +02001744 /* First, attempt to receive pending data from I/O layers */
Willy Tarreau462b9892022-05-18 18:06:53 +02001745 sc_conn_sync_recv(scf);
1746 sc_conn_sync_recv(scb);
Willy Tarreau0f8d3ab2018-10-25 10:42:39 +02001747
Willy Tarreau6c539c42022-01-20 18:42:16 +01001748 /* Let's check if we're looping without making any progress, e.g. due
1749 * to a bogus analyser or the fact that we're ignoring a read0. The
1750 * call_rate counter only counts calls with no progress made.
1751 */
Christopher Fauletd8988412022-12-20 18:10:04 +01001752 if (!((req->flags | res->flags) & (CF_READ_EVENT|CF_WRITE_EVENT))) {
Willy Tarreau6c539c42022-01-20 18:42:16 +01001753 rate = update_freq_ctr(&s->call_rate, 1);
1754 if (rate >= 100000 && s->call_rate.prev_ctr) // make sure to wait at least a full second
1755 stream_dump_and_crash(&s->obj_type, read_freq_ctr(&s->call_rate));
Willy Tarreau3d07a162019-04-25 19:15:20 +02001756 }
Olivier Houchardc2aa7112018-09-11 18:27:21 +02001757
Krzysztof Piotr Oledzkif9423ae2010-01-29 19:26:18 +01001758 /* this data may be no longer valid, clear it */
Willy Tarreaueee5b512015-04-03 23:46:31 +02001759 if (s->txn)
1760 memset(&s->txn->auth, 0, sizeof(s->txn->auth));
Krzysztof Piotr Oledzkif9423ae2010-01-29 19:26:18 +01001761
Willy Tarreau6f0a7ba2014-06-23 15:22:31 +02001762 /* This flag must explicitly be set every time */
Christopher Faulet81fdeb82023-02-16 16:47:33 +01001763 req->flags &= ~CF_WAKE_WRITE;
1764 res->flags &= ~CF_WAKE_WRITE;
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001765
1766 /* Keep a copy of req/rep flags so that we can detect shutdowns */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001767 rqf_last = req->flags & ~CF_MASK_ANALYSER;
1768 rpf_last = res->flags & ~CF_MASK_ANALYSER;
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001769
Willy Tarreau4596fe22022-05-17 19:07:51 +02001770 /* we don't want the stream connector functions to recursively wake us up */
Willy Tarreaucb041662022-05-17 19:44:42 +02001771 scf->flags |= SC_FL_DONT_WAKE;
1772 scb->flags |= SC_FL_DONT_WAKE;
Willy Tarreau89f7ef22009-09-05 20:57:35 +02001773
Christopher Faulet87633c32023-04-03 18:32:50 +02001774 /* Keep a copy of SC flags */
1775 scf_flags = scf->flags;
1776 scb_flags = scb->flags;
1777
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001778 /* update pending events */
Olivier Houchard9f6af332018-05-25 14:04:04 +02001779 s->pending_events |= (state & TASK_WOKEN_ANY);
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001780
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001781 /* 1a: Check for low level timeouts if needed. We just set a flag on
Willy Tarreau4596fe22022-05-17 19:07:51 +02001782 * stream connectors when their timeouts have expired.
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001783 */
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001784 if (unlikely(s->pending_events & TASK_WOKEN_TIMER)) {
Christopher Faulet85e568f2023-02-27 16:08:31 +01001785 stream_handle_timeouts(s);
Christopher Fauleta00d8172016-11-10 14:58:05 +01001786
Willy Tarreau798f4322012-11-08 14:49:17 +01001787 /* Once in a while we're woken up because the task expires. But
1788 * this does not necessarily mean that a timeout has been reached.
Willy Tarreau87b09662015-04-03 00:22:06 +02001789 * So let's not run a whole stream processing if only an expiration
Willy Tarreau798f4322012-11-08 14:49:17 +01001790 * timeout needs to be refreshed.
1791 */
Christopher Fauletca5309a2023-04-17 16:17:32 +02001792 if (!((scf->flags | scb->flags) & (SC_FL_ERROR|SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) &&
Christopher Faulet87633c32023-04-03 18:32:50 +02001793 !((req->flags | res->flags) & (CF_READ_EVENT|CF_READ_TIMEOUT|CF_WRITE_EVENT|CF_WRITE_TIMEOUT)) &&
Christopher Fauletae024ce2022-03-29 19:02:31 +02001794 !(s->flags & SF_CONN_EXP) &&
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001795 ((s->pending_events & TASK_WOKEN_ANY) == TASK_WOKEN_TIMER)) {
Willy Tarreaucb041662022-05-17 19:44:42 +02001796 scf->flags &= ~SC_FL_DONT_WAKE;
1797 scb->flags &= ~SC_FL_DONT_WAKE;
Willy Tarreau798f4322012-11-08 14:49:17 +01001798 goto update_exp_and_leave;
Willy Tarreau5fb04712016-05-04 10:18:37 +02001799 }
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001800 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001801
Willy Tarreau4596fe22022-05-17 19:07:51 +02001802 resync_stconns:
Willy Tarreau10fc09e2014-11-25 19:46:36 +01001803 /* below we may emit error messages so we have to ensure that we have
Christopher Faulet686501c2022-02-01 18:53:53 +01001804 * our buffers properly allocated. If the allocation failed, an error is
1805 * triggered.
1806 *
1807 * NOTE: An error is returned because the mechanism to queue entities
1808 * waiting for a buffer is totally broken for now. However, this
1809 * part must be refactored. When it is handled, this part
1810 * must be reviewed too.
Willy Tarreau10fc09e2014-11-25 19:46:36 +01001811 */
Willy Tarreau87b09662015-04-03 00:22:06 +02001812 if (!stream_alloc_work_buffer(s)) {
Christopher Faulet340021b2023-04-14 11:36:29 +02001813 scf->flags |= SC_FL_ERROR;
Christopher Faulet50264b42022-03-30 19:39:30 +02001814 s->conn_err_type = STRM_ET_CONN_RES;
Christopher Faulet686501c2022-02-01 18:53:53 +01001815
Christopher Faulet340021b2023-04-14 11:36:29 +02001816 scb->flags |= SC_FL_ERROR;
Christopher Faulet50264b42022-03-30 19:39:30 +02001817 s->conn_err_type = STRM_ET_CONN_RES;
Christopher Faulet686501c2022-02-01 18:53:53 +01001818
1819 if (!(s->flags & SF_ERR_MASK))
1820 s->flags |= SF_ERR_RESOURCE;
1821 sess_set_term_flags(s);
Willy Tarreau10fc09e2014-11-25 19:46:36 +01001822 }
1823
Willy Tarreau4596fe22022-05-17 19:07:51 +02001824 /* 1b: check for low-level errors reported at the stream connector.
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001825 * First we check if it's a retryable error (in which case we don't
1826 * want to tell the buffer). Otherwise we report the error one level
1827 * upper by setting flags into the buffers. Note that the side towards
1828 * the client cannot have connect (hence retryable) errors. Also, the
1829 * connection setup code must be able to deal with any type of abort.
1830 */
Willy Tarreau3fdb3662012-11-12 00:42:33 +01001831 srv = objt_server(s->target);
Christopher Faulete182a8e2023-04-14 12:07:26 +02001832 if (unlikely(scf->flags & SC_FL_ERROR)) {
Willy Tarreau74568cf2022-05-27 09:03:30 +02001833 if (sc_state_in(scf->state, SC_SB_EST|SC_SB_DIS)) {
Christopher Fauletcfc11c02023-04-13 16:10:23 +02001834 sc_abort(scf);
Christopher Fauletb2b1c3a2023-04-13 16:23:48 +02001835 sc_shutdown(scf);
Christopher Faulet2e56a732023-01-26 16:18:09 +01001836 //sc_report_error(scf); TODO: Be sure it is useless
Willy Tarreau8f128b42014-11-28 15:07:47 +01001837 if (!(req->analysers) && !(res->analysers)) {
Willy Tarreau4781b152021-04-06 13:53:36 +02001838 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
1839 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01001840 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02001841 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01001842 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02001843 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02001844 if (!(s->flags & SF_ERR_MASK))
1845 s->flags |= SF_ERR_CLICL;
1846 if (!(s->flags & SF_FINST_MASK))
1847 s->flags |= SF_FINST_D;
Willy Tarreau05cb29b2008-12-14 11:44:04 +01001848 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001849 }
1850 }
1851
Christopher Faulete182a8e2023-04-14 12:07:26 +02001852 if (unlikely(scb->flags & SC_FL_ERROR)) {
Willy Tarreau74568cf2022-05-27 09:03:30 +02001853 if (sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS)) {
Christopher Fauletcfc11c02023-04-13 16:10:23 +02001854 sc_abort(scb);
Christopher Fauletb2b1c3a2023-04-13 16:23:48 +02001855 sc_shutdown(scb);
Christopher Faulet2e56a732023-01-26 16:18:09 +01001856 //sc_report_error(scb); TODO: Be sure it is useless
Willy Tarreau4781b152021-04-06 13:53:36 +02001857 _HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
Willy Tarreau827aee92011-03-10 16:55:02 +01001858 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02001859 _HA_ATOMIC_INC(&srv->counters.failed_resp);
Willy Tarreau8f128b42014-11-28 15:07:47 +01001860 if (!(req->analysers) && !(res->analysers)) {
Willy Tarreau4781b152021-04-06 13:53:36 +02001861 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
1862 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01001863 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02001864 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01001865 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02001866 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02001867 if (!(s->flags & SF_ERR_MASK))
1868 s->flags |= SF_ERR_SRVCL;
1869 if (!(s->flags & SF_FINST_MASK))
1870 s->flags |= SF_FINST_D;
Willy Tarreau05cb29b2008-12-14 11:44:04 +01001871 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001872 }
1873 /* note: maybe we should process connection errors here ? */
1874 }
1875
Willy Tarreau74568cf2022-05-27 09:03:30 +02001876 if (sc_state_in(scb->state, SC_SB_CON|SC_SB_RDY)) {
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001877 /* we were trying to establish a connection on the server side,
1878 * maybe it succeeded, maybe it failed, maybe we timed out, ...
1879 */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001880 if (scb->state == SC_ST_RDY)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001881 back_handle_st_rdy(s);
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001882 else if (s->scb->state == SC_ST_CON)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001883 back_handle_st_con(s);
Willy Tarreaud66ed882019-06-05 18:02:04 +02001884
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001885 if (scb->state == SC_ST_CER)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001886 back_handle_st_cer(s);
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001887 else if (scb->state == SC_ST_EST)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001888 back_establish(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001889
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001890 /* state is now one of SC_ST_CON (still in progress), SC_ST_EST
1891 * (established), SC_ST_DIS (abort), SC_ST_CLO (last error),
1892 * SC_ST_ASS/SC_ST_TAR/SC_ST_REQ for retryable errors.
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001893 */
1894 }
1895
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02001896 rq_prod_last = scf->state;
1897 rq_cons_last = scb->state;
1898 rp_cons_last = scf->state;
1899 rp_prod_last = scb->state;
Willy Tarreau815a9b22010-07-27 17:15:12 +02001900
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001901 /* Check for connection closure */
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001902 DBG_TRACE_POINT(STRM_EV_STRM_PROC, s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001903
1904 /* nothing special to be done on client side */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001905 if (unlikely(scf->state == SC_ST_DIS)) {
1906 scf->state = SC_ST_CLO;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001907
Christopher Fauleta70a3542022-03-30 17:13:02 +02001908 /* This is needed only when debugging is enabled, to indicate
1909 * client-side close.
1910 */
1911 if (unlikely((global.mode & MODE_DEBUG) &&
1912 (!(global.mode & MODE_QUIET) ||
1913 (global.mode & MODE_VERBOSE)))) {
1914 chunk_printf(&trash, "%08x:%s.clicls[%04x:%04x]\n",
1915 s->uniq_id, s->be->id,
Willy Tarreaufd9417b2022-05-18 16:23:22 +02001916 (unsigned short)conn_fd(sc_conn(scf)),
1917 (unsigned short)conn_fd(sc_conn(scb)));
Christopher Fauleta70a3542022-03-30 17:13:02 +02001918 DISGUISE(write(1, trash.area, trash.data));
1919 }
1920 }
1921
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001922 /* When a server-side connection is released, we have to count it and
1923 * check for pending connections on this server.
1924 */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001925 if (unlikely(scb->state == SC_ST_DIS)) {
1926 scb->state = SC_ST_CLO;
Willy Tarreau3fdb3662012-11-12 00:42:33 +01001927 srv = objt_server(s->target);
Willy Tarreau827aee92011-03-10 16:55:02 +01001928 if (srv) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02001929 if (s->flags & SF_CURR_SESS) {
1930 s->flags &= ~SF_CURR_SESS;
Willy Tarreau4781b152021-04-06 13:53:36 +02001931 _HA_ATOMIC_DEC(&srv->cur_sess);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001932 }
1933 sess_change_server(s, NULL);
Willy Tarreau827aee92011-03-10 16:55:02 +01001934 if (may_dequeue_tasks(srv, s->be))
Willy Tarreau9ab78292021-06-22 18:47:51 +02001935 process_srv_queue(srv);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001936 }
Christopher Fauleta70a3542022-03-30 17:13:02 +02001937
1938 /* This is needed only when debugging is enabled, to indicate
1939 * server-side close.
1940 */
1941 if (unlikely((global.mode & MODE_DEBUG) &&
1942 (!(global.mode & MODE_QUIET) ||
1943 (global.mode & MODE_VERBOSE)))) {
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001944 if (s->prev_conn_state == SC_ST_EST) {
Christopher Fauleta70a3542022-03-30 17:13:02 +02001945 chunk_printf(&trash, "%08x:%s.srvcls[%04x:%04x]\n",
1946 s->uniq_id, s->be->id,
Willy Tarreaufd9417b2022-05-18 16:23:22 +02001947 (unsigned short)conn_fd(sc_conn(scf)),
1948 (unsigned short)conn_fd(sc_conn(scb)));
Christopher Fauleta70a3542022-03-30 17:13:02 +02001949 DISGUISE(write(1, trash.area, trash.data));
1950 }
1951 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001952 }
1953
1954 /*
1955 * Note: of the transient states (REQ, CER, DIS), only REQ may remain
1956 * at this point.
1957 */
1958
Willy Tarreau0be0ef92009-03-08 19:20:25 +01001959 resync_request:
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001960 /* Analyse request */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001961 if (((req->flags & ~rqf_last) & CF_MASK_ANALYSER) ||
Christopher Fauletca5309a2023-04-17 16:17:32 +02001962 ((scf->flags ^ scf_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
Christopher Faulet208c7122023-04-13 16:16:15 +02001963 ((scb->flags ^ scb_flags) & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) ||
Christopher Faulet64350bb2023-04-13 16:37:37 +02001964 (req->analysers && (scb->flags & SC_FL_SHUT_DONE)) ||
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02001965 scf->state != rq_prod_last ||
1966 scb->state != rq_cons_last ||
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001967 s->pending_events & TASK_WOKEN_MSG) {
Christopher Faulet87633c32023-04-03 18:32:50 +02001968 unsigned int scf_flags_ana = scf->flags;
1969 unsigned int scb_flags_ana = scb->flags;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001970
Willy Tarreau74568cf2022-05-27 09:03:30 +02001971 if (sc_state_in(scf->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO)) {
Willy Tarreaue34070e2010-01-08 00:32:27 +01001972 int max_loops = global.tune.maxpollevents;
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001973 unsigned int ana_list;
1974 unsigned int ana_back;
Willy Tarreau1a52dbd2009-06-28 19:37:53 +02001975
Willy Tarreau90deb182010-01-07 00:20:41 +01001976 /* it's up to the analysers to stop new connections,
1977 * disable reading or closing. Note: if an analyser
1978 * disables any of these bits, it is responsible for
1979 * enabling them again when it disables itself, so
1980 * that other analysers are called in similar conditions.
1981 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001982 channel_auto_read(req);
1983 channel_auto_connect(req);
1984 channel_auto_close(req);
Willy Tarreauedcf6682008-11-30 23:15:34 +01001985
1986 /* We will call all analysers for which a bit is set in
Willy Tarreau8f128b42014-11-28 15:07:47 +01001987 * req->analysers, following the bit order from LSB
Willy Tarreauedcf6682008-11-30 23:15:34 +01001988 * to MSB. The analysers must remove themselves from
Willy Tarreau1a52dbd2009-06-28 19:37:53 +02001989 * the list when not needed. Any analyser may return 0
1990 * to break out of the loop, either because of missing
1991 * data to take a decision, or because it decides to
Willy Tarreau87b09662015-04-03 00:22:06 +02001992 * kill the stream. We loop at least once through each
Willy Tarreau1a52dbd2009-06-28 19:37:53 +02001993 * analyser, and we may loop again if other analysers
1994 * are added in the middle.
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001995 *
1996 * We build a list of analysers to run. We evaluate all
1997 * of these analysers in the order of the lower bit to
1998 * the higher bit. This ordering is very important.
1999 * An analyser will often add/remove other analysers,
2000 * including itself. Any changes to itself have no effect
2001 * on the loop. If it removes any other analysers, we
2002 * want those analysers not to be called anymore during
2003 * this loop. If it adds an analyser that is located
2004 * after itself, we want it to be scheduled for being
2005 * processed during the loop. If it adds an analyser
2006 * which is located before it, we want it to switch to
2007 * it immediately, even if it has already been called
2008 * once but removed since.
2009 *
2010 * In order to achieve this, we compare the analyser
2011 * list after the call with a copy of it before the
2012 * call. The work list is fed with analyser bits that
2013 * appeared during the call. Then we compare previous
2014 * work list with the new one, and check the bits that
2015 * appeared. If the lowest of these bits is lower than
2016 * the current bit, it means we have enabled a previous
2017 * analyser and must immediately loop again.
Willy Tarreauedcf6682008-11-30 23:15:34 +01002018 */
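/* Illustrative sketch only (this is NOT the real ANALYZE/FLT_ANALYZE
 * macros, and run_one() is a hypothetical callback): the rule described
 * above boils down to scanning the analyser bits from LSB to MSB and
 * re-scheduling any bit that gets enabled below the current one after a
 * call.
 *
 *   static void run_analysers(unsigned int *analysers,
 *                             int (*run_one)(unsigned int bit))
 *   {
 *       unsigned int ana_list, ana_back;
 *
 *       ana_list = ana_back = *analysers;        // work list + snapshot
 *       while (ana_list) {
 *           unsigned int bit = ana_list & -ana_list; // lowest pending bit
 *
 *           if (!run_one(bit))                   // analyser asked to stop
 *               break;
 *           // drop the bit we just ran, keep bits still requested, and
 *           // pick up any bit enabled since the snapshot (possibly lower)
 *           ana_list = ((ana_list & ~bit) | ~ana_back) & *analysers;
 *           ana_back = *analysers;
 *       }
 *   }
 */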
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002019
Willy Tarreau8f128b42014-11-28 15:07:47 +01002020 ana_list = ana_back = req->analysers;
Willy Tarreaue34070e2010-01-08 00:32:27 +01002021 while (ana_list && max_loops--) {
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002022 /* Warning! ensure that analysers are always placed in ascending order! */
Christopher Faulet0184ea72017-01-05 14:06:34 +01002023 ANALYZE (s, req, flt_start_analyze, ana_list, ana_back, AN_REQ_FLT_START_FE);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002024 FLT_ANALYZE(s, req, tcp_inspect_request, ana_list, ana_back, AN_REQ_INSPECT_FE);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002025 FLT_ANALYZE(s, req, http_wait_for_request, ana_list, ana_back, AN_REQ_WAIT_HTTP);
2026 FLT_ANALYZE(s, req, http_wait_for_request_body, ana_list, ana_back, AN_REQ_HTTP_BODY);
2027 FLT_ANALYZE(s, req, http_process_req_common, ana_list, ana_back, AN_REQ_HTTP_PROCESS_FE, sess->fe);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002028 FLT_ANALYZE(s, req, process_switching_rules, ana_list, ana_back, AN_REQ_SWITCHING_RULES);
Christopher Faulet0184ea72017-01-05 14:06:34 +01002029 ANALYZE (s, req, flt_start_analyze, ana_list, ana_back, AN_REQ_FLT_START_BE);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002030 FLT_ANALYZE(s, req, tcp_inspect_request, ana_list, ana_back, AN_REQ_INSPECT_BE);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002031 FLT_ANALYZE(s, req, http_process_req_common, ana_list, ana_back, AN_REQ_HTTP_PROCESS_BE, s->be);
2032 FLT_ANALYZE(s, req, http_process_tarpit, ana_list, ana_back, AN_REQ_HTTP_TARPIT);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002033 FLT_ANALYZE(s, req, process_server_rules, ana_list, ana_back, AN_REQ_SRV_RULES);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002034 FLT_ANALYZE(s, req, http_process_request, ana_list, ana_back, AN_REQ_HTTP_INNER);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002035 FLT_ANALYZE(s, req, tcp_persist_rdp_cookie, ana_list, ana_back, AN_REQ_PRST_RDP_COOKIE);
2036 FLT_ANALYZE(s, req, process_sticking_rules, ana_list, ana_back, AN_REQ_STICKING_RULES);
Christopher Faulet0184ea72017-01-05 14:06:34 +01002037 ANALYZE (s, req, flt_analyze_http_headers, ana_list, ana_back, AN_REQ_FLT_HTTP_HDRS);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002038 ANALYZE (s, req, http_request_forward_body, ana_list, ana_back, AN_REQ_HTTP_XFER_BODY);
William Lallemandcf62f7e2018-10-26 14:47:40 +02002039 ANALYZE (s, req, pcli_wait_for_request, ana_list, ana_back, AN_REQ_WAIT_CLI);
Christopher Faulet0184ea72017-01-05 14:06:34 +01002040 ANALYZE (s, req, flt_xfer_data, ana_list, ana_back, AN_REQ_FLT_XFER_DATA);
2041 ANALYZE (s, req, flt_end_analyze, ana_list, ana_back, AN_REQ_FLT_END);
Willy Tarreaue34070e2010-01-08 00:32:27 +01002042 break;
2043 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002044 }
Willy Tarreau84455332009-03-15 22:34:05 +01002045
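/* Latch the states and flags the analysers have just worked with: the
 * resync condition at the top of this block compares the live values
 * against these copies to detect any change made in the meantime.
 */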
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02002046 rq_prod_last = scf->state;
2047 rq_cons_last = scb->state;
Willy Tarreau8f128b42014-11-28 15:07:47 +01002048 req->flags &= ~CF_WAKE_ONCE;
2049 rqf_last = req->flags;
Christopher Fauletca5309a2023-04-17 16:17:32 +02002050 scf_flags = (scf_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
Christopher Faulet208c7122023-04-13 16:16:15 +02002051 scb_flags = (scb_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
Willy Tarreau815a9b22010-07-27 17:15:12 +02002052
Christopher Fauletca5309a2023-04-17 16:17:32 +02002053 if (((scf->flags ^ scf_flags_ana) & (SC_FL_EOS|SC_FL_ABRT_DONE)) || ((scb->flags ^ scb_flags_ana) & SC_FL_SHUT_DONE))
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002054 goto resync_request;
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002055 }
2056
Willy Tarreau576507f2010-01-07 00:09:04 +01002057 /* we'll monitor the request analysers while parsing the response,
2058 * because some response analysers may indirectly enable new request
2059 * analysers (eg: HTTP keep-alive).
2060 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002061 req_ana_back = req->analysers;
Willy Tarreau576507f2010-01-07 00:09:04 +01002062
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002063 resync_response:
2064 /* Analyse response */
2065
Willy Tarreau8f128b42014-11-28 15:07:47 +01002066 if (((res->flags & ~rpf_last) & CF_MASK_ANALYSER) ||
Christopher Fauletca5309a2023-04-17 16:17:32 +02002067 ((scb->flags ^ scb_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
Christopher Faulet208c7122023-04-13 16:16:15 +02002068 ((scf->flags ^ scf_flags) & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) ||
Christopher Faulet64350bb2023-04-13 16:37:37 +02002069 (res->analysers && (scf->flags & SC_FL_SHUT_DONE)) ||
Christopher Faulet87633c32023-04-03 18:32:50 +02002070 scf->state != rp_cons_last ||
2071 scb->state != rp_prod_last ||
2072 s->pending_events & TASK_WOKEN_MSG) {
2073 unsigned int scb_flags_ana = scb->flags;
2074 unsigned int scf_flags_ana = scf->flags;
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002075
Willy Tarreau74568cf2022-05-27 09:03:30 +02002076 if (sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO)) {
Willy Tarreaue34070e2010-01-08 00:32:27 +01002077 int max_loops = global.tune.maxpollevents;
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002078 unsigned int ana_list;
2079 unsigned int ana_back;
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002080
Willy Tarreau90deb182010-01-07 00:20:41 +01002081 /* it's up to the analysers to disable reading or
2082 * closing. Note: if an analyser disables any of these
2083 * bits, it is responsible for enabling them again when
2084 * it disables itself, so that other analysers are called
2085 * in similar conditions.
2086 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002087 channel_auto_read(res);
2088 channel_auto_close(res);
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002089
2090 /* We will call all analysers for which a bit is set in
Willy Tarreau8f128b42014-11-28 15:07:47 +01002091 * res->analysers, following the bit order from LSB
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002092 * to MSB. The analysers must remove themselves from
2093 * the list when not needed. Any analyser may return 0
2094 * to break out of the loop, either because of missing
2095 * data to take a decision, or because it decides to
Willy Tarreau87b09662015-04-03 00:22:06 +02002096 * kill the stream. We loop at least once through each
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002097 * analyser, and we may loop again if other analysers
2098 * are added in the middle.
2099 */
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002100
Willy Tarreau8f128b42014-11-28 15:07:47 +01002101 ana_list = ana_back = res->analysers;
Willy Tarreaue34070e2010-01-08 00:32:27 +01002102 while (ana_list && max_loops--) {
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002103 /* Warning! ensure that analysers are always placed in ascending order! */
Christopher Faulet0184ea72017-01-05 14:06:34 +01002104 ANALYZE (s, res, flt_start_analyze, ana_list, ana_back, AN_RES_FLT_START_FE);
2105 ANALYZE (s, res, flt_start_analyze, ana_list, ana_back, AN_RES_FLT_START_BE);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002106 FLT_ANALYZE(s, res, tcp_inspect_response, ana_list, ana_back, AN_RES_INSPECT);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002107 FLT_ANALYZE(s, res, http_wait_for_response, ana_list, ana_back, AN_RES_WAIT_HTTP);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002108 FLT_ANALYZE(s, res, process_store_rules, ana_list, ana_back, AN_RES_STORE_RULES);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002109 FLT_ANALYZE(s, res, http_process_res_common, ana_list, ana_back, AN_RES_HTTP_PROCESS_BE, s->be);
Christopher Faulet0184ea72017-01-05 14:06:34 +01002110 ANALYZE (s, res, flt_analyze_http_headers, ana_list, ana_back, AN_RES_FLT_HTTP_HDRS);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002111 ANALYZE (s, res, http_response_forward_body, ana_list, ana_back, AN_RES_HTTP_XFER_BODY);
William Lallemandcf62f7e2018-10-26 14:47:40 +02002112 ANALYZE (s, res, pcli_wait_for_response, ana_list, ana_back, AN_RES_WAIT_CLI);
Christopher Faulet0184ea72017-01-05 14:06:34 +01002113 ANALYZE (s, res, flt_xfer_data, ana_list, ana_back, AN_RES_FLT_XFER_DATA);
2114 ANALYZE (s, res, flt_end_analyze, ana_list, ana_back, AN_RES_FLT_END);
Willy Tarreaue34070e2010-01-08 00:32:27 +01002115 break;
2116 }
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002117 }
2118
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02002119 rp_cons_last = scf->state;
2120 rp_prod_last = scb->state;
Christopher Fauletcdaea892017-07-06 15:49:30 +02002121 res->flags &= ~CF_WAKE_ONCE;
Willy Tarreau8f128b42014-11-28 15:07:47 +01002122 rpf_last = res->flags;
Christopher Fauletca5309a2023-04-17 16:17:32 +02002123 scb_flags = (scb_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
Christopher Faulet208c7122023-04-13 16:16:15 +02002124 scf_flags = (scf_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
Willy Tarreau815a9b22010-07-27 17:15:12 +02002125
Christopher Fauletca5309a2023-04-17 16:17:32 +02002126 if (((scb->flags ^ scb_flags_ana) & (SC_FL_EOS|SC_FL_ABRT_DONE)) || ((scf->flags ^ scf_flags_ana) & SC_FL_SHUT_DONE))
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002127 goto resync_response;
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002128 }
2129
Christopher Fauletbd90a162023-05-10 16:40:27 +02002130 /* we'll monitor the response analysers because some response analysers
2131 * may be enabled/disabled later
2132 */
2133 res_ana_back = res->analysers;
2134
Willy Tarreau576507f2010-01-07 00:09:04 +01002135 /* maybe someone has added some request analysers, so we must check and loop */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002136 if (req->analysers & ~req_ana_back)
Willy Tarreau576507f2010-01-07 00:09:04 +01002137 goto resync_request;
2138
Willy Tarreau8f128b42014-11-28 15:07:47 +01002139 if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
Willy Tarreau0499e352010-12-17 07:13:42 +01002140 goto resync_request;
2141
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002142 /* FIXME: here we should call protocol handlers which rely on
2143 * both buffers.
2144 */
2145
2146
2147 /*
Willy Tarreau87b09662015-04-03 00:22:06 +02002148 * Now we propagate unhandled errors to the stream. Normally
Willy Tarreauae526782010-03-04 20:34:23 +01002149 * we're just in a data phase here since it means we have not
2150 * seen any analyser that could set an error status.
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002151 */
Willy Tarreau3fdb3662012-11-12 00:42:33 +01002152 srv = objt_server(s->target);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002153 if (unlikely(!(s->flags & SF_ERR_MASK))) {
Christopher Faulete182a8e2023-04-14 12:07:26 +02002154 if ((scf->flags & SC_FL_ERROR) || req->flags & (CF_READ_TIMEOUT|CF_WRITE_TIMEOUT)) {
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002155 /* Report it if the client got an error or a read timeout expired */
Christopher Faulet813f9132021-10-18 15:06:20 +02002156 req->analysers &= AN_REQ_FLT_END;
Christopher Fauletb1368ad2023-05-10 16:28:38 +02002157 channel_auto_close(req);
Christopher Faulete182a8e2023-04-14 12:07:26 +02002158 if (scf->flags & SC_FL_ERROR) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002159 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
2160 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002161 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002162 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002163 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002164 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002165 s->flags |= SF_ERR_CLICL;
Willy Tarreauae526782010-03-04 20:34:23 +01002166 }
Willy Tarreau8f128b42014-11-28 15:07:47 +01002167 else if (req->flags & CF_READ_TIMEOUT) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002168 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
2169 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002170 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002171 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002172 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002173 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002174 s->flags |= SF_ERR_CLITO;
Willy Tarreauae526782010-03-04 20:34:23 +01002175 }
Willy Tarreauae526782010-03-04 20:34:23 +01002176 else {
Willy Tarreau4781b152021-04-06 13:53:36 +02002177 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
2178 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002179 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002180 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002181 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002182 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002183 s->flags |= SF_ERR_SRVTO;
Willy Tarreauae526782010-03-04 20:34:23 +01002184 }
Willy Tarreau84455332009-03-15 22:34:05 +01002185 sess_set_term_flags(s);
Christopher Faulet5e1a9d72019-04-23 17:34:22 +02002186
2187 /* Abort the request if a client error occurred while
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002188 * the backend stream connector is in the SC_ST_INI
2189 * state. It is switched into the SC_ST_CLO state and
Christopher Faulet5e1a9d72019-04-23 17:34:22 +02002190 * the request channel is erased. */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002191 if (scb->state == SC_ST_INI) {
2192 s->scb->state = SC_ST_CLO;
Christopher Faulet5e1a9d72019-04-23 17:34:22 +02002193 channel_abort(req);
2194 if (IS_HTX_STRM(s))
2195 channel_htx_erase(req, htxbuf(&req->buf));
2196 else
2197 channel_erase(req);
2198 }
Willy Tarreau84455332009-03-15 22:34:05 +01002199 }
Christopher Faulete182a8e2023-04-14 12:07:26 +02002200 else if ((scb->flags & SC_FL_ERROR) || res->flags & (CF_READ_TIMEOUT|CF_WRITE_TIMEOUT)) {
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002201 /* Report it if the server got an error or a read timeout expired */
Christopher Faulet813f9132021-10-18 15:06:20 +02002202 res->analysers &= AN_RES_FLT_END;
Christopher Fauletb1368ad2023-05-10 16:28:38 +02002203 channel_auto_close(res);
Christopher Faulete182a8e2023-04-14 12:07:26 +02002204 if (scb->flags & SC_FL_ERROR) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002205 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
2206 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002207 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002208 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002209 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002210 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002211 s->flags |= SF_ERR_SRVCL;
Willy Tarreauae526782010-03-04 20:34:23 +01002212 }
Willy Tarreau8f128b42014-11-28 15:07:47 +01002213 else if (res->flags & CF_READ_TIMEOUT) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002214 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
2215 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002216 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002217 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002218 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002219 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002220 s->flags |= SF_ERR_SRVTO;
Willy Tarreauae526782010-03-04 20:34:23 +01002221 }
Willy Tarreauae526782010-03-04 20:34:23 +01002222 else {
Willy Tarreau4781b152021-04-06 13:53:36 +02002223 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
2224 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002225 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002226 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002227 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002228 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002229 s->flags |= SF_ERR_CLITO;
Willy Tarreauae526782010-03-04 20:34:23 +01002230 }
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002231 sess_set_term_flags(s);
2232 }
Willy Tarreau84455332009-03-15 22:34:05 +01002233 }
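/* Note (mapping assumed from the documented log format, not from this
 * file): the SF_ERR_* codes set above become the first character of the
 * two-letter termination state in the logs, e.g. SF_ERR_CLICL -> 'C',
 * SF_ERR_CLITO -> 'c', SF_ERR_SRVCL -> 'S', SF_ERR_SRVTO -> 's', so the
 * abort counters and the logged termination cause stay consistent.
 */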
2234
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002235 /*
2236 * Here we take care of forwarding unhandled data. This also includes
2237 * connection establishments and shutdown requests.
2238 */
2239
2240
Ilya Shipitsinb8888ab2021-01-06 21:20:16 +05002241 /* If no one is interested in analysing data, it's time to forward
Willy Tarreau31971e52009-09-20 12:07:52 +02002242 * everything. We configure the buffer to forward indefinitely.
Christopher Faulet573ead12023-04-13 15:39:30 +02002243 * Note that we're checking SC_FL_ABRT_WANTED as an indication of a possible
Willy Tarreau8263d2b2012-08-28 00:06:31 +02002244 * recent call to channel_abort().
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002245 */
Christopher Faulet35fe6992017-08-29 16:06:38 +02002246 if (unlikely((!req->analysers || (req->analysers == AN_REQ_FLT_END && !(req->flags & CF_FLT_ANALYZE))) &&
Christopher Faulet208c7122023-04-13 16:16:15 +02002247 !(scf->flags & SC_FL_ABRT_WANTED) && !(scb->flags & SC_FL_SHUT_DONE) &&
Christopher Faulet87633c32023-04-03 18:32:50 +02002248 (sc_state_in(scf->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO)) &&
2249 (req->to_forward != CHN_INFINITE_FORWARD))) {
Willy Tarreaub31c9712012-11-11 23:05:39 +01002250 /* This buffer is freewheeling, there's no analyser
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002251 * attached to it. If any data are left in, we'll permit them to
2252 * move.
2253 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002254 channel_auto_read(req);
2255 channel_auto_connect(req);
2256 channel_auto_close(req);
Willy Tarreau5bd8c372009-01-19 00:32:22 +01002257
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002258 if (IS_HTX_STRM(s)) {
2259 struct htx *htx = htxbuf(&req->buf);
2260
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002261 /* We'll let data flow between the producer (if still connected)
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002262 * to the consumer.
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002263 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002264 co_set_data(req, htx->data);
Christopher Faulet87633c32023-04-03 18:32:50 +02002265 if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
Christopher Fauletca5309a2023-04-17 16:17:32 +02002266 !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002267 channel_htx_forward_forever(req, htx);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002268 }
2269 else {
2270 /* We'll let data flow between the producer (if still connected)
2271 * to the consumer (which might possibly not be connected yet).
2272 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002273 c_adv(req, ci_data(req));
Christopher Faulet87633c32023-04-03 18:32:50 +02002274 if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
Christopher Fauletca5309a2023-04-17 16:17:32 +02002275 !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002276 channel_forward_forever(req);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002277 }
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002278 }
Willy Tarreauf890dc92008-12-13 21:12:26 +01002279
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002280 /* check if it is wise to enable kernel splicing to forward request data */
Christopher Faulet87633c32023-04-03 18:32:50 +02002281 if (!(req->flags & CF_KERN_SPLICING) &&
Christopher Fauletca5309a2023-04-17 16:17:32 +02002282 !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002283 req->to_forward &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002284 (global.tune.options & GTUNE_USE_SPLICE) &&
Willy Tarreaufd9417b2022-05-18 16:23:22 +02002285 (sc_conn(scf) && __sc_conn(scf)->xprt && __sc_conn(scf)->xprt->rcv_pipe &&
2286 __sc_conn(scf)->mux && __sc_conn(scf)->mux->rcv_pipe) &&
2287 (sc_conn(scb) && __sc_conn(scb)->xprt && __sc_conn(scb)->xprt->snd_pipe &&
2288 __sc_conn(scb)->mux && __sc_conn(scb)->mux->snd_pipe) &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002289 (pipes_used < global.maxpipes) &&
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02002290 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_REQ) ||
2291 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002292 (req->flags & CF_STREAMER_FAST)))) {
2293 req->flags |= CF_KERN_SPLICING;
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002294 }
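/* For reference, a minimal configuration sketch assumed to enable this
 * splicing path (keyword names taken from the configuration manual, not
 * from this file); splicing must not be disabled globally and one of the
 * splice options must be set on the frontend or the backend:
 *
 *    global
 *        maxpipes 1024            # checked against pipes_used above
 *        # 'nosplice' would clear GTUNE_USE_SPLICE and disable this path
 *
 *    defaults
 *        mode http
 *        option splice-request    # PR_O2_SPLIC_REQ
 *        option splice-response   # PR_O2_SPLIC_RTR (response direction below)
 *        # or: option splice-auto -> PR_O2_SPLIC_AUT, fast streamers only
 */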
2295
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002296 /* reflect what the L7 analysers have seen last */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002297 rqf_last = req->flags;
Christopher Fauletca5309a2023-04-17 16:17:32 +02002298 scf_flags = (scf_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
Christopher Faulet208c7122023-04-13 16:16:15 +02002299 scb_flags = (scb_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002300
Willy Tarreau520d95e2009-09-19 21:04:57 +02002301 /* it's possible that an upper layer has requested a connection setup or abort.
2302 * There are 2 situations where we decide to establish a new connection :
2303 * - there are data scheduled for emission in the buffer
Willy Tarreau03cdb7c2012-08-27 23:14:58 +02002304 * - the CF_AUTO_CONNECT flag is set (active connection)
Willy Tarreau520d95e2009-09-19 21:04:57 +02002305 */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002306 if (scb->state == SC_ST_INI) {
Christopher Faulet208c7122023-04-13 16:16:15 +02002307 if (!(scb->flags & SC_FL_SHUT_DONE)) {
Willy Tarreau8f128b42014-11-28 15:07:47 +01002308 if ((req->flags & CF_AUTO_CONNECT) || !channel_is_empty(req)) {
Willy Tarreaucf644ed2013-09-29 17:19:56 +02002309 /* If we have an appctx, there is no connect method, so we
2310 * immediately switch to the connected state, otherwise we
2311 * perform a connection request.
Willy Tarreau520d95e2009-09-19 21:04:57 +02002312 */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002313 scb->state = SC_ST_REQ; /* new connection requested */
Christopher Faulet731c8e62022-03-29 16:08:44 +02002314 s->conn_retries = 0;
Christopher Faulet9f5382e2021-05-21 13:46:14 +02002315 if ((s->be->retry_type &~ PR_RE_CONN_FAILED) &&
2316 (s->be->mode == PR_MODE_HTTP) &&
Christopher Faulete05bf9e2022-03-29 15:23:40 +02002317 !(s->txn->flags & TX_D_L7_RETRY))
2318 s->txn->flags |= TX_L7_RETRY;
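/* Illustration (assumed mapping to configuration keywords): a backend
 * such as the sketch below sets more than PR_RE_CONN_FAILED in
 * retry_type, which is what arms layer-7 retries here:
 *
 *    backend app
 *        mode http
 *        retries 3
 *        retry-on conn-failure response-timeout 503
 */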
Christopher Faulet948a5a02023-11-14 07:47:52 +01002319
2320 if (s->be->options & PR_O_ABRT_CLOSE) {
2321 struct connection *conn = sc_conn(scf);
2322
Christopher Faulet8af12382023-11-14 19:18:53 +01002323 if (conn && conn->mux && conn->mux->ctl)
Christopher Faulet948a5a02023-11-14 07:47:52 +01002324 conn->mux->ctl(conn, MUX_SUBS_RECV, NULL);
2325 }
Willy Tarreau520d95e2009-09-19 21:04:57 +02002326 }
Willy Tarreau73201222009-08-16 18:27:24 +02002327 }
Willy Tarreauf41ffdc2009-09-20 08:19:25 +02002328 else {
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002329 s->scb->state = SC_ST_CLO; /* shutw+ini = abort */
Christopher Fauletdf7cd712023-04-13 15:56:26 +02002330 sc_schedule_shutdown(scb);
Christopher Faulet12762f02023-04-13 15:40:10 +02002331 sc_schedule_abort(scb);
Willy Tarreauf41ffdc2009-09-20 08:19:25 +02002332 }
Willy Tarreau92795622009-03-06 12:51:23 +01002333 }
2334
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002335
2336 /* we may have a pending connection request, or a connection waiting
2337 * for completion.
2338 */
Willy Tarreau74568cf2022-05-27 09:03:30 +02002339 if (sc_state_in(scb->state, SC_SB_REQ|SC_SB_QUE|SC_SB_TAR|SC_SB_ASS)) {
Thierry FOURNIER4834bc72015-06-06 19:29:07 +02002340 /* prune the request variables and swap to the response variables. */
2341 if (s->vars_reqres.scope != SCOPE_RES) {
Jerome Magnin2f44e882019-11-09 18:00:47 +01002342 if (!LIST_ISEMPTY(&s->vars_reqres.head))
Willy Tarreaucda7f3f2018-10-28 13:44:36 +01002343 vars_prune(&s->vars_reqres, s->sess, s);
Willy Tarreaub7bfcb32021-08-31 08:13:25 +02002344 vars_init_head(&s->vars_reqres, SCOPE_RES);
Thierry FOURNIER4834bc72015-06-06 19:29:07 +02002345 }
2346
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002347 do {
2348 /* nb: step 1 might switch from QUE to ASS, but we first want
2349 * to give a chance to step 2 to perform a redirect if needed.
2350 */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002351 if (scb->state != SC_ST_REQ)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01002352 back_try_conn_req(s);
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002353 if (scb->state == SC_ST_REQ)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01002354 back_handle_st_req(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002355
Willy Tarreauada4c582020-03-04 16:42:03 +01002356 /* get a chance to complete an immediate connection setup */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002357 if (scb->state == SC_ST_RDY)
Willy Tarreau4596fe22022-05-17 19:07:51 +02002358 goto resync_stconns;
Willy Tarreauada4c582020-03-04 16:42:03 +01002359
Willy Tarreau9e5a3aa2013-12-31 23:32:12 +01002360 /* applets directly go to the ESTABLISHED state. Similarly,
2361 * servers experience the same fate when their connection
2362 * is reused.
2363 */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002364 if (unlikely(scb->state == SC_ST_EST))
Willy Tarreau3a9312a2020-01-09 18:43:15 +01002365 back_establish(s);
Willy Tarreaufac4bd12013-11-30 09:21:49 +01002366
Willy Tarreau3fdb3662012-11-12 00:42:33 +01002367 srv = objt_server(s->target);
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002368 if (scb->state == SC_ST_ASS && srv && srv->rdr_len && (s->flags & SF_REDIRECTABLE))
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02002369 http_perform_server_redirect(s, scb);
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002370 } while (scb->state == SC_ST_ASS);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002371 }
2372
Willy Tarreau829bd472019-06-06 09:17:23 +02002373 /* Let's see if we can send the pending request now */
Willy Tarreau462b9892022-05-18 18:06:53 +02002374 sc_conn_sync_send(scb);
Willy Tarreau829bd472019-06-06 09:17:23 +02002375
2376 /*
2377 * Now forward all shutdown requests between both sides of the request buffer
2378 */
2379
2380 /* first, let's check if the request buffer needs to shutdown(write), which may
2381 * happen either because the input is closed or because we want to force a close
2382 * once the server has begun to respond. If a half-closed timeout is set, we adjust
Willy Tarreaua544c662022-04-14 17:39:48 +02002383 * the other side's timeout as well. However this has no effect during the
2384 * connection setup unless the backend has abortonclose set.
Willy Tarreau829bd472019-06-06 09:17:23 +02002385 */
Christopher Fauletca5309a2023-04-17 16:17:32 +02002386 if (unlikely((req->flags & CF_AUTO_CLOSE) && (scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
Christopher Faulet208c7122023-04-13 16:16:15 +02002387 !(scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) &&
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002388 (scb->state != SC_ST_CON || (s->be->options & PR_O_ABRT_CLOSE)))) {
Christopher Fauletdf7cd712023-04-13 15:56:26 +02002389 sc_schedule_shutdown(scb);
Willy Tarreau829bd472019-06-06 09:17:23 +02002390 }
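/* PR_O_ABRT_CLOSE above is assumed to come from "option abortonclose":
 * with it, a client abort seen while the server connection is still
 * being set up (SC_ST_CON) is propagated immediately instead of waiting
 * for the connection to be established.
 */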
2391
2392 /* shutdown(write) pending */
Christopher Faulet208c7122023-04-13 16:16:15 +02002393 if (unlikely((scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) == SC_FL_SHUT_WANTED &&
Christopher Faulet406b81c2023-09-06 08:59:33 +02002394 (channel_is_empty(req) || (req->flags & CF_WRITE_TIMEOUT)))) {
Christopher Faulete182a8e2023-04-14 12:07:26 +02002395 if (scf->flags & SC_FL_ERROR)
Willy Tarreaucb041662022-05-17 19:44:42 +02002396 scb->flags |= SC_FL_NOLINGER;
Christopher Fauletb2b1c3a2023-04-13 16:23:48 +02002397 sc_shutdown(scb);
Willy Tarreau829bd472019-06-06 09:17:23 +02002398 }
2399
2400 /* shutdown(write) done on server side, we must stop the client too */
Christopher Fauletca5309a2023-04-17 16:17:32 +02002401 if (unlikely((scb->flags & SC_FL_SHUT_DONE) && !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED))) &&
Christopher Faulet87633c32023-04-03 18:32:50 +02002402 !req->analysers)
Christopher Faulet12762f02023-04-13 15:40:10 +02002403 sc_schedule_abort(scf);
Willy Tarreau829bd472019-06-06 09:17:23 +02002404
2405 /* shutdown(read) pending */
Christopher Fauletca5309a2023-04-17 16:17:32 +02002406 if (unlikely((scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) == SC_FL_ABRT_WANTED)) {
Willy Tarreaucb041662022-05-17 19:44:42 +02002407 if (scf->flags & SC_FL_NOHALF)
2408 scf->flags |= SC_FL_NOLINGER;
Christopher Fauletcfc11c02023-04-13 16:10:23 +02002409 sc_abort(scf);
Willy Tarreau829bd472019-06-06 09:17:23 +02002410 }
2411
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002412 /* Benchmarks have shown that it's optimal to do a full resync now */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002413 if (scf->state == SC_ST_DIS ||
Willy Tarreau74568cf2022-05-27 09:03:30 +02002414 sc_state_in(scb->state, SC_SB_RDY|SC_SB_DIS) ||
Christopher Fauletad46e522023-04-14 11:59:15 +02002415 ((scf->flags & SC_FL_ERROR) && scf->state != SC_ST_CLO) ||
2416 ((scb->flags & SC_FL_ERROR) && scb->state != SC_ST_CLO))
Willy Tarreau4596fe22022-05-17 19:07:51 +02002417 goto resync_stconns;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002418
Willy Tarreau815a9b22010-07-27 17:15:12 +02002419 /* otherwise we want to check if we need to resync the req buffer or not */
Christopher Fauletca5309a2023-04-17 16:17:32 +02002420 if (((scf->flags ^ scf_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE)) || ((scb->flags ^ scb_flags) & SC_FL_SHUT_DONE))
Willy Tarreau0be0ef92009-03-08 19:20:25 +01002421 goto resync_request;
2422
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002423 /* perform output updates to the response buffer */
Willy Tarreau84455332009-03-15 22:34:05 +01002424
Ilya Shipitsinb8888ab2021-01-06 21:20:16 +05002425 /* If no one is interested in analysing data, it's time to forward
Willy Tarreau31971e52009-09-20 12:07:52 +02002426 * everything. We configure the buffer to forward indefinitely.
Christopher Faulet573ead12023-04-13 15:39:30 +02002427 * Note that we're checking SC_FL_ABRT_WANTED as an indication of a possible
Willy Tarreau8263d2b2012-08-28 00:06:31 +02002428 * recent call to channel_abort().
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002429 */
Christopher Faulet35fe6992017-08-29 16:06:38 +02002430 if (unlikely((!res->analysers || (res->analysers == AN_RES_FLT_END && !(res->flags & CF_FLT_ANALYZE))) &&
Christopher Faulete38534c2023-04-13 15:45:24 +02002431 !(scf->flags & SC_FL_ABRT_WANTED) && !(scb->flags & SC_FL_SHUT_WANTED) &&
Christopher Faulet87633c32023-04-03 18:32:50 +02002432 sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO) &&
2433 (res->to_forward != CHN_INFINITE_FORWARD))) {
Willy Tarreaub31c9712012-11-11 23:05:39 +01002434 /* This buffer is freewheeling, there's no analyser
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002435 * attached to it. If any data are left in, we'll permit them to
2436 * move.
2437 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002438 channel_auto_read(res);
2439 channel_auto_close(res);
Willy Tarreauda4d9fe2010-11-07 20:26:56 +01002440
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002441 if (IS_HTX_STRM(s)) {
2442 struct htx *htx = htxbuf(&res->buf);
Willy Tarreauce887fd2012-05-12 12:50:00 +02002443
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002444 /* We'll let data flow between the producer (if still connected)
2445 * to the consumer.
2446 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002447 co_set_data(res, htx->data);
Christopher Faulet87633c32023-04-03 18:32:50 +02002448 if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
Christopher Fauletca5309a2023-04-17 16:17:32 +02002449 !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002450 channel_htx_forward_forever(res, htx);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002451 }
2452 else {
2453 /* We'll let data flow between the producer (if still connected)
2454 * to the consumer.
2455 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002456 c_adv(res, ci_data(res));
Christopher Faulet87633c32023-04-03 18:32:50 +02002457 if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
Christopher Fauletca5309a2023-04-17 16:17:32 +02002458 !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002459 channel_forward_forever(res);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002460 }
Willy Tarreau42529c32015-07-09 18:38:57 +02002461
Willy Tarreauce887fd2012-05-12 12:50:00 +02002462 /* if we have no analyser anymore in any direction and have a
Willy Tarreau05cdd962014-05-10 14:30:07 +02002463 * tunnel timeout set, use it now. Note that we must respect
2464 * the half-closed timeouts as well.
Willy Tarreauce887fd2012-05-12 12:50:00 +02002465 */
Amaury Denoyellefb504432020-12-10 13:43:53 +01002466 if (!req->analysers && s->tunnel_timeout) {
Christopher Faulet5aaacfb2023-02-15 08:13:33 +01002467 scf->ioto = scb->ioto = s->tunnel_timeout;
Willy Tarreau05cdd962014-05-10 14:30:07 +02002468
Willy Tarreaud7f1ce42023-06-02 16:19:51 +02002469 if (!IS_HTX_STRM(s)) {
2470 if ((scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) && tick_isset(sess->fe->timeout.clientfin))
2471 scf->ioto = sess->fe->timeout.clientfin;
2472 if ((scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) && tick_isset(s->be->timeout.serverfin))
2473 scb->ioto = s->be->timeout.serverfin;
2474 }
Willy Tarreauce887fd2012-05-12 12:50:00 +02002475 }
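/* For reference, a configuration sketch assumed to feed the values used
 * above (keyword names from the configuration manual); the client-fin /
 * server-fin adjustment above only applies to non-HTX streams:
 *
 *    defaults
 *        timeout tunnel      1h    # s->tunnel_timeout once no analyser remains
 *        timeout client-fin  30s   # sess->fe->timeout.clientfin (half-closed)
 *        timeout server-fin  30s   # s->be->timeout.serverfin (half-closed)
 */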
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002476 }
Willy Tarreauf890dc92008-12-13 21:12:26 +01002477
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002478 /* check if it is wise to enable kernel splicing to forward response data */
Christopher Faulet87633c32023-04-03 18:32:50 +02002479 if (!(res->flags & CF_KERN_SPLICING) &&
Christopher Fauletca5309a2023-04-17 16:17:32 +02002480 !(scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002481 res->to_forward &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002482 (global.tune.options & GTUNE_USE_SPLICE) &&
Willy Tarreaufd9417b2022-05-18 16:23:22 +02002483 (sc_conn(scf) && __sc_conn(scf)->xprt && __sc_conn(scf)->xprt->snd_pipe &&
2484 __sc_conn(scf)->mux && __sc_conn(scf)->mux->snd_pipe) &&
2485 (sc_conn(scb) && __sc_conn(scb)->xprt && __sc_conn(scb)->xprt->rcv_pipe &&
2486 __sc_conn(scb)->mux && __sc_conn(scb)->mux->rcv_pipe) &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002487 (pipes_used < global.maxpipes) &&
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02002488 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_RTR) ||
2489 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002490 (res->flags & CF_STREAMER_FAST)))) {
2491 res->flags |= CF_KERN_SPLICING;
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002492 }
2493
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002494 /* reflect what the L7 analysers have seen last */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002495 rpf_last = res->flags;
Christopher Fauletca5309a2023-04-17 16:17:32 +02002496 scb_flags = (scb_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
Christopher Faulet208c7122023-04-13 16:16:15 +02002497 scf_flags = (scf_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002498
Willy Tarreau829bd472019-06-06 09:17:23 +02002499 /* Let's see if we can send the pending response now */
Willy Tarreau462b9892022-05-18 18:06:53 +02002500 sc_conn_sync_send(scf);
Willy Tarreau829bd472019-06-06 09:17:23 +02002501
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002502 /*
2503 * Now forward all shutdown requests between both sides of the buffer
2504 */
2505
2506 /*
2507 * FIXME: this is probably where we should produce error responses.
2508 */
2509
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002510 /* first, let's check if the response buffer needs to shutdown(write) */
Christopher Fauletca5309a2023-04-17 16:17:32 +02002511 if (unlikely((res->flags & CF_AUTO_CLOSE) && (scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
Christopher Faulet208c7122023-04-13 16:16:15 +02002512 !(scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)))) {
Christopher Fauletdf7cd712023-04-13 15:56:26 +02002513 sc_schedule_shutdown(scf);
Willy Tarreau05cdd962014-05-10 14:30:07 +02002514 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002515
2516 /* shutdown(write) pending */
Christopher Faulet208c7122023-04-13 16:16:15 +02002517 if (unlikely((scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) == SC_FL_SHUT_WANTED &&
Christopher Faulet406b81c2023-09-06 08:59:33 +02002518 (channel_is_empty(res) || (res->flags & CF_WRITE_TIMEOUT)))) {
Christopher Fauletb2b1c3a2023-04-13 16:23:48 +02002519 sc_shutdown(scf);
Willy Tarreau05cdd962014-05-10 14:30:07 +02002520 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002521
2522 /* shutdown(write) done on the client side, we must stop the server too */
Christopher Fauletca5309a2023-04-17 16:17:32 +02002523 if (unlikely((scf->flags & SC_FL_SHUT_DONE) && !(scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED))) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002524 !res->analysers)
Christopher Faulet12762f02023-04-13 15:40:10 +02002525 sc_schedule_abort(scb);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002526
2527 /* shutdown(read) pending */
Christopher Fauletca5309a2023-04-17 16:17:32 +02002528 if (unlikely((scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) == SC_FL_ABRT_WANTED)) {
Willy Tarreaucb041662022-05-17 19:44:42 +02002529 if (scb->flags & SC_FL_NOHALF)
2530 scb->flags |= SC_FL_NOLINGER;
Christopher Fauletcfc11c02023-04-13 16:10:23 +02002531 sc_abort(scb);
Willy Tarreau7bb68ab2012-05-13 14:48:59 +02002532 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002533
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002534 if (scf->state == SC_ST_DIS ||
Willy Tarreau74568cf2022-05-27 09:03:30 +02002535 sc_state_in(scb->state, SC_SB_RDY|SC_SB_DIS) ||
Christopher Fauletad46e522023-04-14 11:59:15 +02002536 ((scf->flags & SC_FL_ERROR) && scf->state != SC_ST_CLO) ||
2537 ((scb->flags & SC_FL_ERROR) && scb->state != SC_ST_CLO))
Willy Tarreau4596fe22022-05-17 19:07:51 +02002538 goto resync_stconns;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002539
Willy Tarreau3c5c0662019-06-06 14:32:49 +02002540 if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
Willy Tarreau0be0ef92009-03-08 19:20:25 +01002541 goto resync_request;
2542
Christopher Fauletca5309a2023-04-17 16:17:32 +02002543 if (((scb->flags ^ scb_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
Christopher Fauletbd90a162023-05-10 16:40:27 +02002544 ((scf->flags ^ scf_flags) & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) ||
2545 (res->analysers ^ res_ana_back))
Willy Tarreau0be0ef92009-03-08 19:20:25 +01002546 goto resync_response;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002547
Christopher Fauletbd90a162023-05-10 16:40:27 +02002548 if ((((req->flags ^ rqf_last) | (res->flags ^ rpf_last)) & CF_MASK_ANALYSER) ||
2549 (req->analysers ^ req_ana_back))
Willy Tarreau829bd472019-06-06 09:17:23 +02002550 goto resync_request;
2551
Willy Tarreau89f7ef22009-09-05 20:57:35 +02002552 /* we're interested in getting wakeups again */
Willy Tarreaucb041662022-05-17 19:44:42 +02002553 scf->flags &= ~SC_FL_DONT_WAKE;
2554 scb->flags &= ~SC_FL_DONT_WAKE;
Willy Tarreau89f7ef22009-09-05 20:57:35 +02002555
Willy Tarreau74568cf2022-05-27 09:03:30 +02002556 if (likely((scf->state != SC_ST_CLO) || !sc_state_in(scb->state, SC_SB_INI|SC_SB_CLO) ||
Christopher Faulet6fcd2d32019-11-13 11:12:32 +01002557 (req->analysers & AN_REQ_FLT_END) || (res->analysers & AN_RES_FLT_END))) {
Olivier Houchard4c18f942019-07-31 18:05:26 +02002558 if ((sess->fe->options & PR_O_CONTSTATS) && (s->flags & SF_BE_ASSIGNED) && !(s->flags & SF_IGNORE))
Willy Tarreau87b09662015-04-03 00:22:06 +02002559 stream_process_counters(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002560
Willy Tarreaub49672d2022-05-27 10:13:37 +02002561 stream_update_both_sc(s);
Olivier Houchard53216e72018-10-10 15:46:36 +02002562
Christopher Faulet9d810ca2016-12-08 22:33:52 +01002563 /* Reset pending events now */
2564 s->pending_events = 0;
2565
Willy Tarreau798f4322012-11-08 14:49:17 +01002566 update_exp_and_leave:
Willy Tarreaucb041662022-05-17 19:44:42 +02002567 /* Note: please ensure that if you branch here you disable SC_FL_DONT_WAKE */
Willy Tarreaudef0d222016-11-08 22:03:00 +01002568 if (!req->analysers)
2569 req->analyse_exp = TICK_ETERNITY;
2570
2571 if ((sess->fe->options & PR_O_CONTSTATS) && (s->flags & SF_BE_ASSIGNED) &&
2572 (!tick_isset(req->analyse_exp) || tick_is_expired(req->analyse_exp, now_ms)))
2573 req->analyse_exp = tick_add(now_ms, 5000);
2574
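/* The next wakeup is the earliest of: the receive/send expirations armed
 * on both stream connectors, the request/response analyser timeouts and
 * the connection establishment deadline (s->conn_exp).
 */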
Christopher Faulet92657802023-11-06 08:45:22 +01002575 t->expire = (tick_is_expired(t->expire, now_ms) ? 0 : t->expire);
2576 t->expire = tick_first(t->expire, sc_ep_rcv_ex(scf));
2577 t->expire = tick_first(t->expire, sc_ep_snd_ex(scf));
2578 t->expire = tick_first(t->expire, sc_ep_rcv_ex(scb));
2579 t->expire = tick_first(t->expire, sc_ep_snd_ex(scb));
Willy Tarreaudef0d222016-11-08 22:03:00 +01002580 t->expire = tick_first(t->expire, req->analyse_exp);
Willy Tarreau9a398be2017-11-10 17:14:23 +01002581 t->expire = tick_first(t->expire, res->analyse_exp);
Christopher Fauletae024ce2022-03-29 19:02:31 +02002582 t->expire = tick_first(t->expire, s->conn_exp);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002583
Christopher Faulete758b5c2023-02-27 16:21:00 +01002584 if (unlikely(tick_is_expired(t->expire, now_ms))) {
2585 /* Some events prevented the timeouts from being handled but nothing evolved.
2586 So handle them now and resync the stconns
2587 */
2588 stream_handle_timeouts(s);
2589 goto resync_stconns;
2590 }
Christopher Fauleta62201d2023-02-20 14:43:49 +01002591
Christopher Faulet9d810ca2016-12-08 22:33:52 +01002592 s->pending_events &= ~(TASK_WOKEN_TIMER | TASK_WOKEN_RES);
Willy Tarreau87b09662015-04-03 00:22:06 +02002593 stream_release_buffers(s);
Christopher Fauleteea8fc72019-11-05 16:18:10 +01002594
2595 DBG_TRACE_DEVEL("queuing", STRM_EV_STRM_PROC, s);
Willy Tarreau26c25062009-03-08 09:38:41 +01002596 return t; /* nothing more to do */
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002597 }
2598
Christopher Fauleteea8fc72019-11-05 16:18:10 +01002599 DBG_TRACE_DEVEL("releasing", STRM_EV_STRM_PROC, s);
2600
Willy Tarreaue7dff022015-04-03 01:14:29 +02002601 if (s->flags & SF_BE_ASSIGNED)
Willy Tarreau4781b152021-04-06 13:53:36 +02002602 _HA_ATOMIC_DEC(&s->be->beconn);
Willy Tarreau6f5e4b92017-09-15 09:07:56 +02002603
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002604 if (unlikely((global.mode & MODE_DEBUG) &&
2605 (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
Willy Tarreau19d14ef2012-10-29 16:51:55 +01002606 chunk_printf(&trash, "%08x:%s.closed[%04x:%04x]\n",
Christopher Faulet0256da12021-12-15 09:50:17 +01002607 s->uniq_id, s->be->id,
Willy Tarreaufd9417b2022-05-18 16:23:22 +02002608 (unsigned short)conn_fd(sc_conn(scf)),
2609 (unsigned short)conn_fd(sc_conn(scb)));
Willy Tarreau2e8ab6b2020-03-14 11:03:20 +01002610 DISGUISE(write(1, trash.area, trash.data));
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002611 }
2612
Christopher Faulet341064e2021-01-21 17:10:44 +01002613 if (!(s->flags & SF_IGNORE)) {
Willy Tarreau69530f52023-04-28 09:16:15 +02002614 s->logs.t_close = ns_to_ms(now_ns - s->logs.accept_ts);
Christopher Faulet341064e2021-01-21 17:10:44 +01002615
Olivier Houchard4c18f942019-07-31 18:05:26 +02002616 stream_process_counters(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002617
Christopher Faulet341064e2021-01-21 17:10:44 +01002618 if (s->txn && s->txn->status) {
2619 int n;
Krzysztof Piotr Oledzkide71d162009-10-24 15:36:15 +02002620
Christopher Faulet341064e2021-01-21 17:10:44 +01002621 n = s->txn->status / 100;
2622 if (n < 1 || n > 5)
2623 n = 0;
Krzysztof Piotr Oledzkide71d162009-10-24 15:36:15 +02002624
Christopher Faulet341064e2021-01-21 17:10:44 +01002625 if (sess->fe->mode == PR_MODE_HTTP) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002626 _HA_ATOMIC_INC(&sess->fe->fe_counters.p.http.rsp[n]);
Christopher Faulet341064e2021-01-21 17:10:44 +01002627 }
2628 if ((s->flags & SF_BE_ASSIGNED) &&
2629 (s->be->mode == PR_MODE_HTTP)) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002630 _HA_ATOMIC_INC(&s->be->be_counters.p.http.rsp[n]);
2631 _HA_ATOMIC_INC(&s->be->be_counters.p.http.cum_req);
Christopher Faulet341064e2021-01-21 17:10:44 +01002632 }
Willy Tarreau5e16cbc2012-11-24 14:54:13 +01002633 }
Christopher Faulet341064e2021-01-21 17:10:44 +01002634
2635 /* let's do a final log if we need it */
2636 if (!LIST_ISEMPTY(&sess->fe->logformat) && s->logs.logwait &&
2637 !(s->flags & SF_MONITOR) &&
2638 (!(sess->fe->options & PR_O_NULLNOLOG) || req->total)) {
2639 /* we may need to know the position in the queue */
2640 pendconn_free(s);
Willy Tarreaubeee6002022-09-07 16:17:49 +02002641
2642 stream_cond_update_cpu_usage(s);
Christopher Faulet341064e2021-01-21 17:10:44 +01002643 s->do_log(s);
Willy Tarreau5e16cbc2012-11-24 14:54:13 +01002644 }
Krzysztof Piotr Oledzkide71d162009-10-24 15:36:15 +02002645
Christopher Faulet341064e2021-01-21 17:10:44 +01002646 /* update time stats for this stream */
2647 stream_update_time_stats(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002648 }
2649
2650 /* the task MUST not be in the run queue anymore */
Willy Tarreau87b09662015-04-03 00:22:06 +02002651 stream_free(s);
Olivier Houchard3f795f72019-04-17 22:51:06 +02002652 task_destroy(t);
Willy Tarreau26c25062009-03-08 09:38:41 +01002653 return NULL;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002654}
2655
Willy Tarreau87b09662015-04-03 00:22:06 +02002656/* Update the stream's backend and server time stats */
2657void stream_update_time_stats(struct stream *s)
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002658{
2659 int t_request;
2660 int t_queue;
2661 int t_connect;
2662 int t_data;
2663 int t_close;
2664 struct server *srv;
Marcin Deraneka8dbdf32020-05-15 20:02:40 +02002665 unsigned int samples_window;
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002666
2667 t_request = 0;
2668 t_queue = s->logs.t_queue;
2669 t_connect = s->logs.t_connect;
2670 t_close = s->logs.t_close;
2671 t_data = s->logs.t_data;
2672
2673 if (s->be->mode != PR_MODE_HTTP)
2674 t_data = t_connect;
2675
2676 if (t_connect < 0 || t_data < 0)
2677 return;
2678
Willy Tarreauad5a5f62023-04-27 09:46:02 +02002679 if ((llong)(s->logs.request_ts - s->logs.accept_ts) >= 0)
2680 t_request = ns_to_ms(s->logs.request_ts - s->logs.accept_ts);
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002681
2682 t_data -= t_connect;
2683 t_connect -= t_queue;
2684 t_queue -= t_request;
2685
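/* Worked example (illustrative numbers only): the log timers are
 * cumulative from the accept date, so with t_request=5, t_queue=7,
 * t_connect=12 and t_data=20 (ms), the subtractions above leave the
 * per-phase durations: 2ms in queue, 5ms to connect, 8ms waiting for
 * data, which is what gets averaged below.
 */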
2686 srv = objt_server(s->target);
2687 if (srv) {
Marcin Deraneka8dbdf32020-05-15 20:02:40 +02002688 samples_window = (((s->be->mode == PR_MODE_HTTP) ?
2689 srv->counters.p.http.cum_req : srv->counters.cum_lbconn) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
2690 swrate_add_dynamic(&srv->counters.q_time, samples_window, t_queue);
2691 swrate_add_dynamic(&srv->counters.c_time, samples_window, t_connect);
2692 swrate_add_dynamic(&srv->counters.d_time, samples_window, t_data);
2693 swrate_add_dynamic(&srv->counters.t_time, samples_window, t_close);
Christopher Fauletefb41f02019-11-08 14:53:15 +01002694 HA_ATOMIC_UPDATE_MAX(&srv->counters.qtime_max, t_queue);
2695 HA_ATOMIC_UPDATE_MAX(&srv->counters.ctime_max, t_connect);
2696 HA_ATOMIC_UPDATE_MAX(&srv->counters.dtime_max, t_data);
2697 HA_ATOMIC_UPDATE_MAX(&srv->counters.ttime_max, t_close);
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002698 }
Marcin Deraneka8dbdf32020-05-15 20:02:40 +02002699 samples_window = (((s->be->mode == PR_MODE_HTTP) ?
2700 s->be->be_counters.p.http.cum_req : s->be->be_counters.cum_lbconn) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
2701 swrate_add_dynamic(&s->be->be_counters.q_time, samples_window, t_queue);
2702 swrate_add_dynamic(&s->be->be_counters.c_time, samples_window, t_connect);
2703 swrate_add_dynamic(&s->be->be_counters.d_time, samples_window, t_data);
2704 swrate_add_dynamic(&s->be->be_counters.t_time, samples_window, t_close);
Christopher Fauletefb41f02019-11-08 14:53:15 +01002705 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.qtime_max, t_queue);
2706 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.ctime_max, t_connect);
2707 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.dtime_max, t_data);
2708 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.ttime_max, t_close);
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002709}
2710
Willy Tarreau7c669d72008-06-20 15:04:11 +02002711/*
2712 * This function adjusts sess->srv_conn and maintains the previous and new
Willy Tarreau87b09662015-04-03 00:22:06 +02002713 * servers' served stream counts. Setting newsrv to NULL is enough to release
Willy Tarreau7c669d72008-06-20 15:04:11 +02002714 * current connection slot. This function also notifies any LB algo which might
Willy Tarreau87b09662015-04-03 00:22:06 +02002715 * expect to be informed about any change in the number of active streams on a
Willy Tarreau7c669d72008-06-20 15:04:11 +02002716 * server.
2717 */
Willy Tarreaue89fae32021-03-09 15:43:32 +01002718void sess_change_server(struct stream *strm, struct server *newsrv)
Willy Tarreau7c669d72008-06-20 15:04:11 +02002719{
Willy Tarreaue89fae32021-03-09 15:43:32 +01002720 struct server *oldsrv = strm->srv_conn;
Willy Tarreau751153e2021-02-17 13:33:24 +01002721
Amaury Denoyelled8514b92024-02-21 15:54:11 +01002722 /* Dynamic servers may be deleted during process lifetime. This
2723 * operation is always conducted under thread isolation. Several
2724 * conditions prevent deletion, one of them is if server streams list
2725 * is not empty. sess_change_server() uses stream_add_srv_conn() to
2726 * ensure the latter condition.
2727 *
2728 * A race condition could exist for stream which referenced a server
2729 * instance (s->target) without registering itself in its server list.
2730 * This is notably the case for SF_DIRECT streams which referenced a
2731 * server earlier during process_stream(). However at this time the
2732 * code is deemed safe as process_stream() cannot be rescheduled before
2733 * invocation of sess_change_server().
2734 */
2735
Willy Tarreau751153e2021-02-17 13:33:24 +01002736 if (oldsrv == newsrv)
Willy Tarreau7c669d72008-06-20 15:04:11 +02002737 return;
2738
Willy Tarreau751153e2021-02-17 13:33:24 +01002739 if (oldsrv) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002740 _HA_ATOMIC_DEC(&oldsrv->served);
2741 _HA_ATOMIC_DEC(&oldsrv->proxy->served);
Olivier Houcharddc6111e2019-03-08 18:54:51 +01002742 __ha_barrier_atomic_store();
Willy Tarreau59b0fec2021-02-17 16:01:37 +01002743 if (oldsrv->proxy->lbprm.server_drop_conn)
Willy Tarreau5941ef02021-06-18 18:29:25 +02002744 oldsrv->proxy->lbprm.server_drop_conn(oldsrv);
Willy Tarreaue89fae32021-03-09 15:43:32 +01002745 stream_del_srv_conn(strm);
Willy Tarreau7c669d72008-06-20 15:04:11 +02002746 }
2747
2748 if (newsrv) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002749 _HA_ATOMIC_INC(&newsrv->served);
2750 _HA_ATOMIC_INC(&newsrv->proxy->served);
Olivier Houcharddc6111e2019-03-08 18:54:51 +01002751 __ha_barrier_atomic_store();
Willy Tarreau59b0fec2021-02-17 16:01:37 +01002752 if (newsrv->proxy->lbprm.server_take_conn)
Willy Tarreau5941ef02021-06-18 18:29:25 +02002753 newsrv->proxy->lbprm.server_take_conn(newsrv);
Willy Tarreaue89fae32021-03-09 15:43:32 +01002754 stream_add_srv_conn(strm, newsrv);
Willy Tarreau7c669d72008-06-20 15:04:11 +02002755 }
2756}
2757
Willy Tarreau84455332009-03-15 22:34:05 +01002758/* Handle server-side errors for default protocols. It is called whenever a
2759 * connection setup is aborted or a request is aborted in queue. It sets the
Willy Tarreau87b09662015-04-03 00:22:06 +02002760 * stream termination flags so that the caller does not have to worry about
Willy Tarreau4596fe22022-05-17 19:07:51 +02002761 * them. It's installed as ->srv_error for the server-side stream connector.
Willy Tarreau84455332009-03-15 22:34:05 +01002762 */
Willy Tarreaub49672d2022-05-27 10:13:37 +02002763void default_srv_error(struct stream *s, struct stconn *sc)
Willy Tarreau84455332009-03-15 22:34:05 +01002764{
Christopher Faulet50264b42022-03-30 19:39:30 +02002765 int err_type = s->conn_err_type;
Willy Tarreau84455332009-03-15 22:34:05 +01002766 int err = 0, fin = 0;
2767
Christopher Faulet50264b42022-03-30 19:39:30 +02002768 if (err_type & STRM_ET_QUEUE_ABRT) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002769 err = SF_ERR_CLICL;
2770 fin = SF_FINST_Q;
Willy Tarreau84455332009-03-15 22:34:05 +01002771 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002772 else if (err_type & STRM_ET_CONN_ABRT) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002773 err = SF_ERR_CLICL;
2774 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002775 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002776 else if (err_type & STRM_ET_QUEUE_TO) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002777 err = SF_ERR_SRVTO;
2778 fin = SF_FINST_Q;
Willy Tarreau84455332009-03-15 22:34:05 +01002779 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002780 else if (err_type & STRM_ET_QUEUE_ERR) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002781 err = SF_ERR_SRVCL;
2782 fin = SF_FINST_Q;
Willy Tarreau84455332009-03-15 22:34:05 +01002783 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002784 else if (err_type & STRM_ET_CONN_TO) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002785 err = SF_ERR_SRVTO;
2786 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002787 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002788 else if (err_type & STRM_ET_CONN_ERR) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002789 err = SF_ERR_SRVCL;
2790 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002791 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002792 else if (err_type & STRM_ET_CONN_RES) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002793 err = SF_ERR_RESOURCE;
2794 fin = SF_FINST_C;
Willy Tarreau2d400bb2012-05-14 12:11:47 +02002795 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002796 else /* STRM_ET_CONN_OTHER and others */ {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002797 err = SF_ERR_INTERNAL;
2798 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002799 }
2800
Willy Tarreaue7dff022015-04-03 01:14:29 +02002801 if (!(s->flags & SF_ERR_MASK))
Willy Tarreau84455332009-03-15 22:34:05 +01002802 s->flags |= err;
Willy Tarreaue7dff022015-04-03 01:14:29 +02002803 if (!(s->flags & SF_FINST_MASK))
Willy Tarreau84455332009-03-15 22:34:05 +01002804 s->flags |= fin;
2805}
Willy Tarreau7c669d72008-06-20 15:04:11 +02002806
Willy Tarreaue7dff022015-04-03 01:14:29 +02002807/* kill a stream and set the termination flags to <why> (one of SF_ERR_*) */
Willy Tarreau87b09662015-04-03 00:22:06 +02002808void stream_shutdown(struct stream *stream, int why)
Willy Tarreaua2a64e92011-09-07 23:01:56 +02002809{
Christopher Faulet208c7122023-04-13 16:16:15 +02002810 if (stream->scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
Willy Tarreaua2a64e92011-09-07 23:01:56 +02002811 return;
2812
Christopher Fauletdf7cd712023-04-13 15:56:26 +02002813 sc_schedule_shutdown(stream->scb);
Christopher Faulet12762f02023-04-13 15:40:10 +02002814 sc_schedule_abort(stream->scb);
Willy Tarreau87b09662015-04-03 00:22:06 +02002815 stream->task->nice = 1024;
Willy Tarreaue7dff022015-04-03 01:14:29 +02002816 if (!(stream->flags & SF_ERR_MASK))
Willy Tarreau87b09662015-04-03 00:22:06 +02002817 stream->flags |= why;
2818 task_wakeup(stream->task, TASK_WOKEN_OTHER);
Willy Tarreaua2a64e92011-09-07 23:01:56 +02002819}
Willy Tarreau9ba2dcc2010-06-14 21:04:55 +02002820
Willy Tarreau5484d582019-05-22 09:33:03 +02002821/* Appends a dump of the state of stream <s> into buffer <buf> which must have
 2822 * been prepared beforehand by its caller, with each line prepended by prefix
2823 * <pfx>, and each line terminated by character <eol>.
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002824 */
Willy Tarreau5484d582019-05-22 09:33:03 +02002825void stream_dump(struct buffer *buf, const struct stream *s, const char *pfx, char eol)
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002826{
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02002827 const struct stconn *scf, *scb;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002828 const struct connection *cof, *cob;
2829 const struct appctx *acf, *acb;
2830 const struct server *srv;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002831 const char *src = "unknown";
2832 const char *dst = "unknown";
2833 char pn[INET6_ADDRSTRLEN];
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002834 const struct channel *req, *res;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002835
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002836 if (!s) {
Willy Tarreau5484d582019-05-22 09:33:03 +02002837 chunk_appendf(buf, "%sstrm=%p%c", pfx, s, eol);
2838 return;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002839 }
2840
Willy Tarreau5484d582019-05-22 09:33:03 +02002841 if (s->obj_type != OBJ_TYPE_STREAM) {
2842 chunk_appendf(buf, "%sstrm=%p [invalid type=%d(%s)]%c",
2843 pfx, s, s->obj_type, obj_type_name(&s->obj_type), eol);
2844 return;
2845 }
2846
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002847 req = &s->req;
2848 res = &s->res;
2849
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02002850 scf = s->scf;
Willy Tarreau46784222023-08-28 17:05:22 +02002851 cof = (scf && scf->sedesc) ? sc_conn(scf) : NULL;
2852 acf = (scf && scf->sedesc) ? sc_appctx(scf) : NULL;
Willy Tarreau71e34c12019-07-17 15:07:06 +02002853 if (cof && cof->src && addr_to_str(cof->src, pn, sizeof(pn)) >= 0)
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002854 src = pn;
2855 else if (acf)
2856 src = acf->applet->name;
2857
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02002858 scb = s->scb;
Willy Tarreau46784222023-08-28 17:05:22 +02002859 cob = (scb && scb->sedesc) ? sc_conn(scb) : NULL;
2860 acb = (scb && scb->sedesc) ? sc_appctx(scb) : NULL;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002861 srv = objt_server(s->target);
2862 if (srv)
2863 dst = srv->id;
2864 else if (acb)
2865 dst = acb->applet->name;
2866
Willy Tarreau5484d582019-05-22 09:33:03 +02002867 chunk_appendf(buf,
Christopher Faulete8f35962021-11-02 17:18:15 +01002868 "%sstrm=%p,%x src=%s fe=%s be=%s dst=%s%c"
2869 "%stxn=%p,%x txn.req=%s,%x txn.rsp=%s,%x%c"
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002870 "%srqf=%x rqa=%x rpf=%x rpa=%x%c"
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02002871 "%sscf=%p,%s,%x scb=%p,%s,%x%c"
Christopher Faulet13a35e52021-12-20 15:34:16 +01002872 "%saf=%p,%u sab=%p,%u%c"
Willy Tarreau5484d582019-05-22 09:33:03 +02002873 "%scof=%p,%x:%s(%p)/%s(%p)/%s(%d)%c"
2874 "%scob=%p,%x:%s(%p)/%s(%p)/%s(%d)%c"
2875 "",
Christopher Faulete8f35962021-11-02 17:18:15 +01002876 pfx, s, s->flags, src, s->sess->fe->id, s->be->id, dst, eol,
2877 pfx, s->txn, (s->txn ? s->txn->flags : 0),
2878 (s->txn ? h1_msg_state_str(s->txn->req.msg_state): "-"), (s->txn ? s->txn->req.flags : 0),
2879 (s->txn ? h1_msg_state_str(s->txn->rsp.msg_state): "-"), (s->txn ? s->txn->rsp.flags : 0), eol,
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002880 pfx, req->flags, req->analysers, res->flags, res->analysers, eol,
Willy Tarreau2e866732023-09-04 15:30:33 +02002881 pfx, scf, scf ? sc_state_str(scf->state) : 0, scf ? scf->flags : 0,
2882 scb, scb ? sc_state_str(scb->state) : 0, scb ? scb->flags : 0, eol,
Christopher Faulet13a35e52021-12-20 15:34:16 +01002883 pfx, acf, acf ? acf->st0 : 0, acb, acb ? acb->st0 : 0, eol,
Willy Tarreau5484d582019-05-22 09:33:03 +02002884 pfx, cof, cof ? cof->flags : 0, conn_get_mux_name(cof), cof?cof->ctx:0, conn_get_xprt_name(cof),
Willy Tarreaua57f3452022-04-11 17:58:06 +02002885 cof ? cof->xprt_ctx : 0, conn_get_ctrl_name(cof), conn_fd(cof), eol,
Willy Tarreau5484d582019-05-22 09:33:03 +02002886 pfx, cob, cob ? cob->flags : 0, conn_get_mux_name(cob), cob?cob->ctx:0, conn_get_xprt_name(cob),
Willy Tarreaua57f3452022-04-11 17:58:06 +02002887 cob ? cob->xprt_ctx : 0, conn_get_ctrl_name(cob), conn_fd(cob), eol);
Willy Tarreau5484d582019-05-22 09:33:03 +02002888}
2889
 2890/* dumps an error message for the stream or appctx designated by <obj>,
Willy Tarreaub106ce12019-05-22 08:57:01 +02002891 * having reached loop rate <rate>, then aborts hoping to retrieve a core.
Willy Tarreau5484d582019-05-22 09:33:03 +02002892 */
2893void stream_dump_and_crash(enum obj_type *obj, int rate)
2894{
2895 const struct stream *s;
Willy Tarreau5484d582019-05-22 09:33:03 +02002896 char *msg = NULL;
2897 const void *ptr;
2898
2899 ptr = s = objt_stream(obj);
2900 if (!s) {
2901 const struct appctx *appctx = objt_appctx(obj);
2902 if (!appctx)
2903 return;
2904 ptr = appctx;
Willy Tarreau0698c802022-05-11 14:09:57 +02002905 s = appctx_strm(appctx);
Willy Tarreau5484d582019-05-22 09:33:03 +02002906 if (!s)
2907 return;
2908 }
2909
Willy Tarreau5484d582019-05-22 09:33:03 +02002910 chunk_reset(&trash);
2911 stream_dump(&trash, s, "", ' ');
Willy Tarreau9753d612020-05-01 16:57:02 +02002912
2913 chunk_appendf(&trash, "filters={");
2914 if (HAS_FILTERS(s)) {
2915 struct filter *filter;
2916
2917 list_for_each_entry(filter, &s->strm_flt.filters, list) {
2918 if (filter->list.p != &s->strm_flt.filters)
2919 chunk_appendf(&trash, ", ");
2920 chunk_appendf(&trash, "%p=\"%s\"", filter, FLT_ID(filter));
2921 }
2922 }
2923 chunk_appendf(&trash, "}");
2924
Willy Tarreau714900a2022-09-02 09:13:12 +02002925 if (ptr != s) { // that's an appctx
2926 const struct appctx *appctx = ptr;
2927
2928 chunk_appendf(&trash, " applet=%p(", appctx->applet);
2929 resolve_sym_name(&trash, NULL, appctx->applet);
2930 chunk_appendf(&trash, ")");
2931
2932 chunk_appendf(&trash, " handler=%p(", appctx->applet->fct);
2933 resolve_sym_name(&trash, NULL, appctx->applet->fct);
2934 chunk_appendf(&trash, ")");
2935 }
2936
Willy Tarreaub106ce12019-05-22 08:57:01 +02002937 memprintf(&msg,
2938 "A bogus %s [%p] is spinning at %d calls per second and refuses to die, "
2939 "aborting now! Please report this error to developers "
2940 "[%s]\n",
Willy Tarreau5484d582019-05-22 09:33:03 +02002941 obj_type_name(obj), ptr, rate, trash.area);
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002942
2943 ha_alert("%s", msg);
2944 send_log(NULL, LOG_EMERG, "%s", msg);
Willy Tarreau2f67e542021-03-02 19:19:41 +01002945 ABORT_NOW();
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002946}
2947
Willy Tarreaua698eb62021-02-24 10:37:01 +01002948/* initialize the required structures */
2949static void init_stream()
2950{
2951 int thr;
2952
2953 for (thr = 0; thr < MAX_THREADS; thr++)
Willy Tarreaub4e34762021-09-30 19:02:18 +02002954 LIST_INIT(&ha_thread_ctx[thr].streams);
Willy Tarreaua698eb62021-02-24 10:37:01 +01002955}
2956INITCALL0(STG_INIT, init_stream);
2957
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002958/* Generates a unique ID based on the given <format>, stores it in the given <strm> and
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002959 * returns the unique ID.
Tim Duesterhus7ad27d42022-05-18 00:22:15 +02002960 *
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002961 * If this function fails to allocate memory, IST_NULL is returned.
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002962 *
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002963 * If an ID is already stored within the stream, nothing happens and the existing unique ID is
2964 * returned.
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002965 */
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002966struct ist stream_generate_unique_id(struct stream *strm, struct list *format)
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002967{
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002968 if (isttest(strm->unique_id)) {
2969 return strm->unique_id;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002970 }
2971 else {
2972 char *unique_id;
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002973 int length;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002974 if ((unique_id = pool_alloc(pool_head_uniqueid)) == NULL)
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002975 return IST_NULL;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002976
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002977 length = build_logline(strm, unique_id, UNIQUEID_LEN, format);
2978 strm->unique_id = ist2(unique_id, length);
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002979
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002980 return strm->unique_id;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002981 }
2982}
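
/* Usage sketch (illustrative, not taken from this file): a caller holding a
 * valid stream <s> and a parsed log-format list <fmt> (for instance the list
 * built from the frontend's "unique-id-format" directive) could retrieve or
 * lazily create the ID as follows; the error handling is an assumption:
 *
 *	struct ist uid = stream_generate_unique_id(s, fmt);
 *	if (!isttest(uid)) {
 *		// allocation from pool_head_uniqueid failed, degrade gracefully
 *	}
 */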
2983
Willy Tarreau8b22a712010-06-18 17:46:06 +02002984/************************************************************************/
2985/* All supported ACL keywords must be declared here. */
2986/************************************************************************/
Christopher Faulet551a6412021-06-25 14:35:29 +02002987static enum act_return stream_action_set_log_level(struct act_rule *rule, struct proxy *px,
2988 struct session *sess, struct stream *s, int flags)
2989{
2990 s->logs.level = (uintptr_t)rule->arg.act.p[0];
2991 return ACT_RET_CONT;
2992}
2993
2994
2995/* Parse a "set-log-level" action. It takes the level value as argument. It
2996 * returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
2997 */
2998static enum act_parse_ret stream_parse_set_log_level(const char **args, int *cur_arg, struct proxy *px,
2999 struct act_rule *rule, char **err)
3000{
3001 int level;
3002
3003 if (!*args[*cur_arg]) {
3004 bad_log_level:
3005 memprintf(err, "expects exactly 1 argument (log level name or 'silent')");
3006 return ACT_RET_PRS_ERR;
3007 }
3008 if (strcmp(args[*cur_arg], "silent") == 0)
3009 level = -1;
3010 else if ((level = get_log_level(args[*cur_arg]) + 1) == 0)
3011 goto bad_log_level;
3012
3013 (*cur_arg)++;
3014
3015 /* Register processing function. */
3016 rule->action_ptr = stream_action_set_log_level;
3017 rule->action = ACT_CUSTOM;
3018 rule->arg.act.p[0] = (void *)(uintptr_t)level;
3019 return ACT_RET_PRS_OK;
3020}
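
/* Illustrative configuration (not from this file): the action registered
 * above is meant to be used from a TCP ruleset, for example:
 *
 *	tcp-request content set-log-level silent if { src 10.0.0.0/8 }
 *	tcp-response content set-log-level debug
 *
 * "silent" maps to level -1 (see the parser above); any other argument must
 * be a log level name accepted by get_log_level().
 */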
3021
Christopher Faulet1da374a2021-06-25 14:46:02 +02003022static enum act_return stream_action_set_nice(struct act_rule *rule, struct proxy *px,
3023 struct session *sess, struct stream *s, int flags)
3024{
3025 s->task->nice = (uintptr_t)rule->arg.act.p[0];
3026 return ACT_RET_CONT;
3027}
3028
3029
3030/* Parse a "set-nice" action. It takes the nice value as argument. It returns
3031 * ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
3032 */
3033static enum act_parse_ret stream_parse_set_nice(const char **args, int *cur_arg, struct proxy *px,
3034 struct act_rule *rule, char **err)
3035{
3036 int nice;
3037
3038 if (!*args[*cur_arg]) {
3039 bad_log_level:
3040 memprintf(err, "expects exactly 1 argument (integer value)");
3041 return ACT_RET_PRS_ERR;
3042 }
3043
3044 nice = atoi(args[*cur_arg]);
3045 if (nice < -1024)
3046 nice = -1024;
3047 else if (nice > 1024)
3048 nice = 1024;
3049
3050 (*cur_arg)++;
3051
3052 /* Register processing function. */
3053 rule->action_ptr = stream_action_set_nice;
3054 rule->action = ACT_CUSTOM;
3055 rule->arg.act.p[0] = (void *)(uintptr_t)nice;
3056 return ACT_RET_PRS_OK;
3057}
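
/* Illustrative configuration (not from this file): "set-nice" adjusts the
 * stream task's priority, clamped to the -1024..1024 range by the parser
 * above, for example:
 *
 *	tcp-request content set-nice -100 if { src 192.168.0.0/16 }
 */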
3058
Christopher Faulet551a6412021-06-25 14:35:29 +02003059
Christopher Fauletae863c62021-03-15 12:03:44 +01003060static enum act_return tcp_action_switch_stream_mode(struct act_rule *rule, struct proxy *px,
3061 struct session *sess, struct stream *s, int flags)
3062{
3063 enum pr_mode mode = (uintptr_t)rule->arg.act.p[0];
3064 const struct mux_proto_list *mux_proto = rule->arg.act.p[1];
3065
3066 if (!IS_HTX_STRM(s) && mode == PR_MODE_HTTP) {
3067 if (!stream_set_http_mode(s, mux_proto)) {
Christopher Faulet7eb837d2023-04-13 15:22:29 +02003068 stream_abort(s);
Christopher Fauletae863c62021-03-15 12:03:44 +01003069 return ACT_RET_ABRT;
3070 }
3071 }
3072 return ACT_RET_STOP;
3073}
3074
3075
3076static int check_tcp_switch_stream_mode(struct act_rule *rule, struct proxy *px, char **err)
3077{
3078 const struct mux_proto_list *mux_ent;
3079 const struct mux_proto_list *mux_proto = rule->arg.act.p[1];
3080 enum pr_mode pr_mode = (uintptr_t)rule->arg.act.p[0];
Aurelien DARRAGON4e49a6f2023-10-19 16:06:03 +02003081 enum proto_proxy_mode mode = conn_pr_mode_to_proto_mode(pr_mode);
Christopher Fauletae863c62021-03-15 12:03:44 +01003082
Christopher Faulet3b6446f2021-03-15 15:10:38 +01003083 if (pr_mode == PR_MODE_HTTP)
3084 px->options |= PR_O_HTTP_UPG;
3085
Christopher Fauletae863c62021-03-15 12:03:44 +01003086 if (mux_proto) {
3087 mux_ent = conn_get_best_mux_entry(mux_proto->token, PROTO_SIDE_FE, mode);
3088 if (!mux_ent || !isteq(mux_ent->token, mux_proto->token)) {
3089 memprintf(err, "MUX protocol '%.*s' is not compatible with the selected mode",
3090 (int)mux_proto->token.len, mux_proto->token.ptr);
3091 return 0;
3092 }
3093 }
3094 else {
3095 mux_ent = conn_get_best_mux_entry(IST_NULL, PROTO_SIDE_FE, mode);
3096 if (!mux_ent) {
3097 memprintf(err, "Unable to find compatible MUX protocol with the selected mode");
3098 return 0;
3099 }
3100 }
3101
3102 /* Update the mux */
3103 rule->arg.act.p[1] = (void *)mux_ent;
3104 return 1;
3105
3106}
3107
3108static enum act_parse_ret stream_parse_switch_mode(const char **args, int *cur_arg,
3109 struct proxy *px, struct act_rule *rule,
3110 char **err)
3111{
3112 const struct mux_proto_list *mux_proto = NULL;
3113 struct ist proto;
3114 enum pr_mode mode;
3115
3116 /* must have at least the mode */
3117 if (*(args[*cur_arg]) == 0) {
3118 memprintf(err, "'%s %s' expects a mode as argument.", args[0], args[*cur_arg-1]);
3119 return ACT_RET_PRS_ERR;
3120 }
3121
3122 if (!(px->cap & PR_CAP_FE)) {
3123 memprintf(err, "'%s %s' not allowed because %s '%s' has no frontend capability",
3124 args[0], args[*cur_arg-1], proxy_type_str(px), px->id);
3125 return ACT_RET_PRS_ERR;
3126 }
 3127	/* Check the mode. For now "tcp" is disabled because downgrade is not
3128 * supported and PT is the only TCP mux.
3129 */
3130 if (strcmp(args[*cur_arg], "http") == 0)
3131 mode = PR_MODE_HTTP;
3132 else {
3133 memprintf(err, "'%s %s' expects a valid mode (got '%s').", args[0], args[*cur_arg-1], args[*cur_arg]);
3134 return ACT_RET_PRS_ERR;
3135 }
3136
3137 /* check the proto, if specified */
3138 if (*(args[*cur_arg+1]) && strcmp(args[*cur_arg+1], "proto") == 0) {
3139 if (*(args[*cur_arg+2]) == 0) {
3140 memprintf(err, "'%s %s': '%s' expects a protocol as argument.",
3141 args[0], args[*cur_arg-1], args[*cur_arg+1]);
3142 return ACT_RET_PRS_ERR;
3143 }
3144
Tim Duesterhusb113b5c2021-09-15 13:58:44 +02003145 proto = ist(args[*cur_arg + 2]);
Christopher Fauletae863c62021-03-15 12:03:44 +01003146 mux_proto = get_mux_proto(proto);
3147 if (!mux_proto) {
3148 memprintf(err, "'%s %s': '%s' expects a valid MUX protocol, if specified (got '%s')",
3149 args[0], args[*cur_arg-1], args[*cur_arg+1], args[*cur_arg+2]);
3150 return ACT_RET_PRS_ERR;
3151 }
3152 *cur_arg += 2;
3153 }
3154
3155 (*cur_arg)++;
3156
3157 /* Register processing function. */
3158 rule->action_ptr = tcp_action_switch_stream_mode;
3159 rule->check_ptr = check_tcp_switch_stream_mode;
3160 rule->action = ACT_CUSTOM;
3161 rule->arg.act.p[0] = (void *)(uintptr_t)mode;
3162 rule->arg.act.p[1] = (void *)mux_proto;
3163 return ACT_RET_PRS_OK;
3164}
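
/* Illustrative configuration (not from this file): upgrade a TCP stream to
 * HTTP processing, optionally forcing the MUX protocol. The ACLs below are
 * assumptions for the example; only the "http" mode is accepted by the
 * parser above:
 *
 *	tcp-request content switch-mode http if HTTP
 *	tcp-request content switch-mode http proto h2 if { req.payload(0,3) -m bin 505249 }
 */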
Willy Tarreau8b22a712010-06-18 17:46:06 +02003165
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003166/* 0=OK, <0=Alert, >0=Warning */
3167static enum act_parse_ret stream_parse_use_service(const char **args, int *cur_arg,
3168 struct proxy *px, struct act_rule *rule,
3169 char **err)
3170{
3171 struct action_kw *kw;
3172
3173 /* Check if the service name exists. */
3174 if (*(args[*cur_arg]) == 0) {
3175 memprintf(err, "'%s' expects a service name.", args[0]);
Thierry FOURNIER337eae12015-11-26 19:48:04 +01003176 return ACT_RET_PRS_ERR;
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003177 }
3178
3179 /* lookup for keyword corresponding to a service. */
3180 kw = action_lookup(&service_keywords, args[*cur_arg]);
3181 if (!kw) {
3182 memprintf(err, "'%s' unknown service name.", args[1]);
3183 return ACT_RET_PRS_ERR;
3184 }
3185 (*cur_arg)++;
3186
3187 /* executes specific rule parser. */
3188 rule->kw = kw;
3189 if (kw->parse((const char **)args, cur_arg, px, rule, err) == ACT_RET_PRS_ERR)
3190 return ACT_RET_PRS_ERR;
3191
3192 /* Register processing function. */
3193 rule->action_ptr = process_use_service;
3194 rule->action = ACT_CUSTOM;
3195
3196 return ACT_RET_PRS_OK;
3197}
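
/* Illustrative configuration (not from this file): "use-service" terminates
 * the stream on a registered service instead of forwarding to a server. The
 * service name below is an assumption; it must match a keyword registered
 * through service_keywords_register(), e.g. a Lua service declared with
 * core.register_service():
 *
 *	tcp-request content use-service lua.my_svc
 */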
3198
3199void service_keywords_register(struct action_kw_list *kw_list)
3200{
Willy Tarreau2b718102021-04-21 07:32:39 +02003201 LIST_APPEND(&service_keywords, &kw_list->list);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003202}
3203
Thierry Fournier87e53992020-11-28 19:32:14 +01003204struct action_kw *service_find(const char *kw)
3205{
3206 return action_lookup(&service_keywords, kw);
3207}
3208
Willy Tarreau5fcc1002022-03-29 15:10:44 +02003209/* Lists the known services on <out>. If <out> is null, emit them on stdout one
3210 * per line.
3211 */
Willy Tarreau679bba12019-03-19 08:08:10 +01003212void list_services(FILE *out)
3213{
Willy Tarreau3f0b2e82022-03-30 12:12:44 +02003214 const struct action_kw *akwp, *akwn;
Willy Tarreau679bba12019-03-19 08:08:10 +01003215 struct action_kw_list *kw_list;
3216 int found = 0;
3217 int i;
3218
Willy Tarreau5fcc1002022-03-29 15:10:44 +02003219 if (out)
3220 fprintf(out, "Available services :");
Willy Tarreau3f0b2e82022-03-30 12:12:44 +02003221
3222 for (akwn = akwp = NULL;; akwp = akwn) {
3223 list_for_each_entry(kw_list, &service_keywords, list) {
3224 for (i = 0; kw_list->kw[i].kw != NULL; i++) {
3225 if (strordered(akwp ? akwp->kw : NULL,
3226 kw_list->kw[i].kw,
3227 akwn != akwp ? akwn->kw : NULL))
3228 akwn = &kw_list->kw[i];
3229 found = 1;
3230 }
Willy Tarreau679bba12019-03-19 08:08:10 +01003231 }
Willy Tarreau3f0b2e82022-03-30 12:12:44 +02003232 if (akwn == akwp)
3233 break;
3234 if (out)
3235 fprintf(out, " %s", akwn->kw);
3236 else
3237 printf("%s\n", akwn->kw);
Willy Tarreau679bba12019-03-19 08:08:10 +01003238 }
Willy Tarreau5fcc1002022-03-29 15:10:44 +02003239 if (!found && out)
Willy Tarreau679bba12019-03-19 08:08:10 +01003240 fprintf(out, " none\n");
3241}
William Lallemand4c5b4d52016-11-21 08:51:11 +01003242
Willy Tarreau39f097d2022-05-03 10:49:00 +02003243/* appctx context used by the "show sess" command */
3244
3245struct show_sess_ctx {
3246 struct bref bref; /* back-reference from the session being dumped */
3247 void *target; /* session we want to dump, or NULL for all */
3248 unsigned int thr; /* the thread number being explored (0..MAX_THREADS-1) */
3249 unsigned int uid; /* if non-null, the uniq_id of the session being dumped */
3250 int section; /* section of the session being dumped */
3251 int pos; /* last position of the current session's buffer */
3252};
3253
Willy Tarreau4596fe22022-05-17 19:07:51 +02003254/* This function dumps a complete stream state onto the stream connector's
William Lallemand4c5b4d52016-11-21 08:51:11 +01003255 * read buffer. The stream has to be set in strm. It returns 0 if the output
3256 * buffer is full and it needs to be called again, otherwise non-zero. It is
3257 * designed to be called from stats_dump_strm_to_buffer() below.
3258 */
Willy Tarreaub49672d2022-05-27 10:13:37 +02003259static int stats_dump_full_strm_to_buffer(struct stconn *sc, struct stream *strm)
William Lallemand4c5b4d52016-11-21 08:51:11 +01003260{
Willy Tarreaub49672d2022-05-27 10:13:37 +02003261 struct appctx *appctx = __sc_appctx(sc);
Willy Tarreau39f097d2022-05-03 10:49:00 +02003262 struct show_sess_ctx *ctx = appctx->svcctx;
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02003263 struct stconn *scf, *scb;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003264 struct tm tm;
3265 extern const char *monthname[12];
3266 char pn[INET6_ADDRSTRLEN];
3267 struct connection *conn;
3268 struct appctx *tmpctx;
3269
3270 chunk_reset(&trash);
3271
Willy Tarreau39f097d2022-05-03 10:49:00 +02003272 if (ctx->section > 0 && ctx->uid != strm->uniq_id) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003273 /* stream changed, no need to go any further */
3274 chunk_appendf(&trash, " *** session terminated while we were watching it ***\n");
Willy Tarreaud0a06d52022-05-18 15:07:19 +02003275 if (applet_putchk(appctx, &trash) == -1)
Willy Tarreaue6e52362019-01-04 17:42:57 +01003276 goto full;
3277 goto done;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003278 }
3279
Willy Tarreau39f097d2022-05-03 10:49:00 +02003280 switch (ctx->section) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003281 case 0: /* main status of the stream */
Willy Tarreau39f097d2022-05-03 10:49:00 +02003282 ctx->uid = strm->uniq_id;
3283 ctx->section = 1;
Willy Tarreau46984792022-11-14 07:09:39 +01003284 __fallthrough;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003285
3286 case 1:
3287 get_localtime(strm->logs.accept_date.tv_sec, &tm);
3288 chunk_appendf(&trash,
3289 "%p: [%02d/%s/%04d:%02d:%02d:%02d.%06d] id=%u proto=%s",
3290 strm,
3291 tm.tm_mday, monthname[tm.tm_mon], tm.tm_year+1900,
3292 tm.tm_hour, tm.tm_min, tm.tm_sec, (int)(strm->logs.accept_date.tv_usec),
3293 strm->uniq_id,
Willy Tarreaub7436612020-08-28 19:51:44 +02003294 strm_li(strm) ? strm_li(strm)->rx.proto->name : "?");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003295
3296 conn = objt_conn(strm_orig(strm));
Willy Tarreau71e34c12019-07-17 15:07:06 +02003297 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003298 case AF_INET:
3299 case AF_INET6:
3300 chunk_appendf(&trash, " source=%s:%d\n",
Erwan Le Goas2a2e46f2022-09-28 17:02:30 +02003301 HA_ANON_CLI(pn), get_host_port(conn->src));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003302 break;
3303 case AF_UNIX:
3304 chunk_appendf(&trash, " source=unix:%d\n", strm_li(strm)->luid);
3305 break;
3306 default:
3307 /* no more information to print right now */
3308 chunk_appendf(&trash, "\n");
3309 break;
3310 }
3311
3312 chunk_appendf(&trash,
Christopher Faulet50264b42022-03-30 19:39:30 +02003313 " flags=0x%x, conn_retries=%d, conn_exp=%s conn_et=0x%03x srv_conn=%p, pend_pos=%p waiting=%d epoch=%#x\n",
Christopher Fauletae024ce2022-03-29 19:02:31 +02003314 strm->flags, strm->conn_retries,
3315 strm->conn_exp ?
3316 tick_is_expired(strm->conn_exp, now_ms) ? "<PAST>" :
3317 human_time(TICKS_TO_MS(strm->conn_exp - now_ms),
3318 TICKS_TO_MS(1000)) : "<NEVER>",
Christopher Faulet50264b42022-03-30 19:39:30 +02003319 strm->conn_err_type, strm->srv_conn, strm->pend_pos,
Willy Tarreau2b718102021-04-21 07:32:39 +02003320 LIST_INLIST(&strm->buffer_wait.list), strm->stream_epoch);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003321
3322 chunk_appendf(&trash,
3323 " frontend=%s (id=%u mode=%s), listener=%s (id=%u)",
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003324 HA_ANON_CLI(strm_fe(strm)->id), strm_fe(strm)->uuid, proxy_mode_str(strm_fe(strm)->mode),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003325 strm_li(strm) ? strm_li(strm)->name ? strm_li(strm)->name : "?" : "?",
3326 strm_li(strm) ? strm_li(strm)->luid : 0);
3327
Willy Tarreau71e34c12019-07-17 15:07:06 +02003328 switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003329 case AF_INET:
3330 case AF_INET6:
3331 chunk_appendf(&trash, " addr=%s:%d\n",
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003332 HA_ANON_CLI(pn), get_host_port(conn->dst));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003333 break;
3334 case AF_UNIX:
3335 chunk_appendf(&trash, " addr=unix:%d\n", strm_li(strm)->luid);
3336 break;
3337 default:
3338 /* no more information to print right now */
3339 chunk_appendf(&trash, "\n");
3340 break;
3341 }
3342
3343 if (strm->be->cap & PR_CAP_BE)
3344 chunk_appendf(&trash,
3345 " backend=%s (id=%u mode=%s)",
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003346 HA_ANON_CLI(strm->be->id),
William Lallemandb0dfd092022-03-08 12:05:31 +01003347 strm->be->uuid, proxy_mode_str(strm->be->mode));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003348 else
3349 chunk_appendf(&trash, " backend=<NONE> (id=-1 mode=-)");
3350
Willy Tarreaufd9417b2022-05-18 16:23:22 +02003351 conn = sc_conn(strm->scb);
Willy Tarreau71e34c12019-07-17 15:07:06 +02003352 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003353 case AF_INET:
3354 case AF_INET6:
3355 chunk_appendf(&trash, " addr=%s:%d\n",
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003356 HA_ANON_CLI(pn), get_host_port(conn->src));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003357 break;
3358 case AF_UNIX:
3359 chunk_appendf(&trash, " addr=unix\n");
3360 break;
3361 default:
3362 /* no more information to print right now */
3363 chunk_appendf(&trash, "\n");
3364 break;
3365 }
3366
3367 if (strm->be->cap & PR_CAP_BE)
3368 chunk_appendf(&trash,
3369 " server=%s (id=%u)",
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003370 objt_server(strm->target) ? HA_ANON_CLI(__objt_server(strm->target)->id) : "<none>",
Willy Tarreau88bc8002021-12-06 07:01:02 +00003371 objt_server(strm->target) ? __objt_server(strm->target)->puid : 0);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003372 else
3373 chunk_appendf(&trash, " server=<NONE> (id=-1)");
3374
Willy Tarreau71e34c12019-07-17 15:07:06 +02003375 switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003376 case AF_INET:
3377 case AF_INET6:
3378 chunk_appendf(&trash, " addr=%s:%d\n",
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003379 HA_ANON_CLI(pn), get_host_port(conn->dst));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003380 break;
3381 case AF_UNIX:
3382 chunk_appendf(&trash, " addr=unix\n");
3383 break;
3384 default:
3385 /* no more information to print right now */
3386 chunk_appendf(&trash, "\n");
3387 break;
3388 }
3389
3390 chunk_appendf(&trash,
Willy Tarreaudd75b642022-07-15 16:18:43 +02003391 " task=%p (state=0x%02x nice=%d calls=%u rate=%u exp=%s tid=%d(%d/%d)%s",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003392 strm->task,
3393 strm->task->state,
Willy Tarreau2e9c1d22019-04-24 08:28:31 +02003394 strm->task->nice, strm->task->calls, read_freq_ctr(&strm->call_rate),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003395 strm->task->expire ?
3396 tick_is_expired(strm->task->expire, now_ms) ? "<PAST>" :
3397 human_time(TICKS_TO_MS(strm->task->expire - now_ms),
3398 TICKS_TO_MS(1000)) : "<NEVER>",
Willy Tarreau6ef52f42022-06-15 14:19:48 +02003399 strm->task->tid,
Willy Tarreaudd75b642022-07-15 16:18:43 +02003400 ha_thread_info[strm->task->tid].tgid,
3401 ha_thread_info[strm->task->tid].ltid,
William Lallemand4c5b4d52016-11-21 08:51:11 +01003402 task_in_rq(strm->task) ? ", running" : "");
3403
3404 chunk_appendf(&trash,
3405 " age=%s)\n",
Willy Tarreau18420142023-11-17 18:51:26 +01003406 human_time(ns_to_sec(now_ns) - ns_to_sec(strm->logs.request_ts), 1));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003407
3408 if (strm->txn)
3409 chunk_appendf(&trash,
Christopher Fauletbcac7862019-07-17 10:46:50 +02003410 " txn=%p flags=0x%x meth=%d status=%d req.st=%s rsp.st=%s req.f=0x%02x rsp.f=0x%02x\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003411 strm->txn, strm->txn->flags, strm->txn->meth, strm->txn->status,
Willy Tarreau7778b592019-01-07 10:38:10 +01003412 h1_msg_state_str(strm->txn->req.msg_state), h1_msg_state_str(strm->txn->rsp.msg_state),
Christopher Fauletbcac7862019-07-17 10:46:50 +02003413 strm->txn->req.flags, strm->txn->rsp.flags);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003414
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02003415 scf = strm->scf;
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003416 chunk_appendf(&trash, " scf=%p flags=0x%08x state=%s endp=%s,%p,0x%08x sub=%d",
Willy Tarreau74568cf2022-05-27 09:03:30 +02003417 scf, scf->flags, sc_state_str(scf->state),
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02003418 (sc_ep_test(scf, SE_FL_T_MUX) ? "CONN" : (sc_ep_test(scf, SE_FL_T_APPLET) ? "APPCTX" : "NONE")),
3419 scf->sedesc->se, sc_ep_get(scf), scf->wait_event.events);
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003420 chunk_appendf(&trash, " rex=%s",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003421 sc_ep_rcv_ex(scf) ? human_time(TICKS_TO_MS(sc_ep_rcv_ex(scf) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003422 chunk_appendf(&trash, " wex=%s\n",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003423 sc_ep_snd_ex(scf) ? human_time(TICKS_TO_MS(sc_ep_snd_ex(scf) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
Olivier Houchard9aaf7782017-09-13 18:30:23 +02003424
Willy Tarreaufd9417b2022-05-18 16:23:22 +02003425 if ((conn = sc_conn(scf)) != NULL) {
Willy Tarreauce577772022-09-02 15:00:48 +02003426 if (conn->mux && conn->mux->show_sd) {
3427 chunk_appendf(&trash, " ");
3428 conn->mux->show_sd(&trash, scf->sedesc, " ");
3429 chunk_appendf(&trash, "\n");
3430 }
3431
William Lallemand4c5b4d52016-11-21 08:51:11 +01003432 chunk_appendf(&trash,
Christopher Faulet13a35e52021-12-20 15:34:16 +01003433 " co0=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003434 conn,
3435 conn_get_ctrl_name(conn),
3436 conn_get_xprt_name(conn),
Willy Tarreau53a47662017-08-28 10:53:00 +02003437 conn_get_mux_name(conn),
Willy Tarreauf8d0ab52022-05-18 18:00:31 +02003438 sc_get_data_name(scf),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003439 obj_type_name(conn->target),
3440 obj_base_ptr(conn->target));
3441
3442 chunk_appendf(&trash,
Willy Tarreau76913d32019-08-30 14:33:11 +02003443 " flags=0x%08x fd=%d fd.state=%02x updt=%d fd.tmask=0x%lx\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003444 conn->flags,
Willy Tarreaua57f3452022-04-11 17:58:06 +02003445 conn_fd(conn),
3446 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].state : 0,
Willy Tarreau6d3c5012022-07-05 19:21:06 +02003447 conn_fd(conn) >= 0 ? !!(fdtab[conn->handle.fd].update_mask & ti->ltid_bit) : 0,
Willy Tarreaua57f3452022-04-11 17:58:06 +02003448 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].thread_mask: 0);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003449 }
Willy Tarreau8e7c6e62022-05-18 17:58:02 +02003450 else if ((tmpctx = sc_appctx(scf)) != NULL) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003451 chunk_appendf(&trash,
Willy Tarreau6a28a302022-09-07 09:17:45 +02003452 " app0=%p st0=%d st1=%d applet=%s tid=%d nice=%d calls=%u rate=%u\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003453 tmpctx,
3454 tmpctx->st0,
3455 tmpctx->st1,
Christopher Fauletf0205062017-11-15 20:56:43 +01003456 tmpctx->applet->name,
Willy Tarreau6ef52f42022-06-15 14:19:48 +02003457 tmpctx->t->tid,
Willy Tarreau6a28a302022-09-07 09:17:45 +02003458 tmpctx->t->nice, tmpctx->t->calls, read_freq_ctr(&tmpctx->call_rate));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003459 }
3460
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02003461 scb = strm->scb;
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003462 chunk_appendf(&trash, " scb=%p flags=0x%08x state=%s endp=%s,%p,0x%08x sub=%d",
Willy Tarreau74568cf2022-05-27 09:03:30 +02003463 scb, scb->flags, sc_state_str(scb->state),
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02003464 (sc_ep_test(scb, SE_FL_T_MUX) ? "CONN" : (sc_ep_test(scb, SE_FL_T_APPLET) ? "APPCTX" : "NONE")),
3465 scb->sedesc->se, sc_ep_get(scb), scb->wait_event.events);
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003466 chunk_appendf(&trash, " rex=%s",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003467 sc_ep_rcv_ex(scb) ? human_time(TICKS_TO_MS(sc_ep_rcv_ex(scb) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003468 chunk_appendf(&trash, " wex=%s\n",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003469 sc_ep_snd_ex(scb) ? human_time(TICKS_TO_MS(sc_ep_snd_ex(scb) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
Willy Tarreaub605c422022-05-17 17:04:55 +02003470
Willy Tarreaufd9417b2022-05-18 16:23:22 +02003471 if ((conn = sc_conn(scb)) != NULL) {
Willy Tarreauce577772022-09-02 15:00:48 +02003472 if (conn->mux && conn->mux->show_sd) {
3473 chunk_appendf(&trash, " ");
3474 conn->mux->show_sd(&trash, scb->sedesc, " ");
3475 chunk_appendf(&trash, "\n");
3476 }
3477
William Lallemand4c5b4d52016-11-21 08:51:11 +01003478 chunk_appendf(&trash,
Christopher Faulet13a35e52021-12-20 15:34:16 +01003479 " co1=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003480 conn,
3481 conn_get_ctrl_name(conn),
3482 conn_get_xprt_name(conn),
Willy Tarreau53a47662017-08-28 10:53:00 +02003483 conn_get_mux_name(conn),
Willy Tarreauf8d0ab52022-05-18 18:00:31 +02003484 sc_get_data_name(scb),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003485 obj_type_name(conn->target),
3486 obj_base_ptr(conn->target));
3487
3488 chunk_appendf(&trash,
Willy Tarreau76913d32019-08-30 14:33:11 +02003489 " flags=0x%08x fd=%d fd.state=%02x updt=%d fd.tmask=0x%lx\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003490 conn->flags,
Willy Tarreaua57f3452022-04-11 17:58:06 +02003491 conn_fd(conn),
3492 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].state : 0,
Willy Tarreau6d3c5012022-07-05 19:21:06 +02003493 conn_fd(conn) >= 0 ? !!(fdtab[conn->handle.fd].update_mask & ti->ltid_bit) : 0,
Willy Tarreaua57f3452022-04-11 17:58:06 +02003494 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].thread_mask: 0);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003495 }
Willy Tarreau8e7c6e62022-05-18 17:58:02 +02003496 else if ((tmpctx = sc_appctx(scb)) != NULL) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003497 chunk_appendf(&trash,
Willy Tarreau6a28a302022-09-07 09:17:45 +02003498 " app1=%p st0=%d st1=%d applet=%s tid=%d nice=%d calls=%u rate=%u\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003499 tmpctx,
3500 tmpctx->st0,
3501 tmpctx->st1,
Christopher Fauletf0205062017-11-15 20:56:43 +01003502 tmpctx->applet->name,
Willy Tarreau6ef52f42022-06-15 14:19:48 +02003503 tmpctx->t->tid,
Willy Tarreau6a28a302022-09-07 09:17:45 +02003504 tmpctx->t->nice, tmpctx->t->calls, read_freq_ctr(&tmpctx->call_rate));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003505 }
3506
3507 chunk_appendf(&trash,
3508 " req=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003509 " an_exp=%s buf=%p data=%p o=%u p=%u i=%u size=%u\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003510 &strm->req,
3511 strm->req.flags, strm->req.analysers,
3512 strm->req.pipe ? strm->req.pipe->data : 0,
3513 strm->req.to_forward, strm->req.total,
3514 strm->req.analyse_exp ?
3515 human_time(TICKS_TO_MS(strm->req.analyse_exp - now_ms),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003516 TICKS_TO_MS(1000)) : "<NEVER>",
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003517 &strm->req.buf,
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003518 b_orig(&strm->req.buf), (unsigned int)co_data(&strm->req),
Christopher Fauletbcac7862019-07-17 10:46:50 +02003519 (unsigned int)ci_head_ofs(&strm->req), (unsigned int)ci_data(&strm->req),
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003520 (unsigned int)strm->req.buf.size);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003521
Christopher Fauletb9af8812019-01-04 14:30:44 +01003522 if (IS_HTX_STRM(strm)) {
3523 struct htx *htx = htxbuf(&strm->req.buf);
3524
3525 chunk_appendf(&trash,
Willy Tarreaub84e67f2019-01-07 10:01:34 +01003526 " htx=%p flags=0x%x size=%u data=%u used=%u wrap=%s extra=%llu\n",
Christopher Faulet192c6a22019-06-11 16:32:24 +02003527 htx, htx->flags, htx->size, htx->data, htx_nbblks(htx),
Christopher Faulet28f29c72019-04-30 17:55:45 +02003528 (htx->tail >= htx->head) ? "NO" : "YES",
Christopher Fauletb9af8812019-01-04 14:30:44 +01003529 (unsigned long long)htx->extra);
3530 }
Christopher Fauletd4762b82021-10-12 11:02:48 +02003531 if (HAS_FILTERS(strm) && strm_flt(strm)->current[0]) {
3532 struct filter *flt = strm_flt(strm)->current[0];
3533
3534 chunk_appendf(&trash, " current_filter=%p (id=\"%s\" flags=0x%x pre=0x%x post=0x%x) \n",
3535 flt, flt->config->id, flt->flags, flt->pre_analyzers, flt->post_analyzers);
3536 }
Christopher Fauletb9af8812019-01-04 14:30:44 +01003537
William Lallemand4c5b4d52016-11-21 08:51:11 +01003538 chunk_appendf(&trash,
3539 " res=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003540 " an_exp=%s buf=%p data=%p o=%u p=%u i=%u size=%u\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003541 &strm->res,
3542 strm->res.flags, strm->res.analysers,
3543 strm->res.pipe ? strm->res.pipe->data : 0,
3544 strm->res.to_forward, strm->res.total,
3545 strm->res.analyse_exp ?
3546 human_time(TICKS_TO_MS(strm->res.analyse_exp - now_ms),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003547 TICKS_TO_MS(1000)) : "<NEVER>",
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003548 &strm->res.buf,
3549 b_orig(&strm->res.buf), (unsigned int)co_data(&strm->res),
Christopher Fauletbcac7862019-07-17 10:46:50 +02003550 (unsigned int)ci_head_ofs(&strm->res), (unsigned int)ci_data(&strm->res),
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003551 (unsigned int)strm->res.buf.size);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003552
Christopher Fauletb9af8812019-01-04 14:30:44 +01003553 if (IS_HTX_STRM(strm)) {
3554 struct htx *htx = htxbuf(&strm->res.buf);
3555
3556 chunk_appendf(&trash,
3557 " htx=%p flags=0x%x size=%u data=%u used=%u wrap=%s extra=%llu\n",
Christopher Faulet192c6a22019-06-11 16:32:24 +02003558 htx, htx->flags, htx->size, htx->data, htx_nbblks(htx),
Christopher Faulet28f29c72019-04-30 17:55:45 +02003559 (htx->tail >= htx->head) ? "NO" : "YES",
Christopher Fauletb9af8812019-01-04 14:30:44 +01003560 (unsigned long long)htx->extra);
3561 }
Christopher Fauletd4762b82021-10-12 11:02:48 +02003562 if (HAS_FILTERS(strm) && strm_flt(strm)->current[1]) {
3563 struct filter *flt = strm_flt(strm)->current[1];
3564
3565 chunk_appendf(&trash, " current_filter=%p (id=\"%s\" flags=0x%x pre=0x%x post=0x%x) \n",
3566 flt, flt->config->id, flt->flags, flt->pre_analyzers, flt->post_analyzers);
3567 }
Christopher Fauletb9af8812019-01-04 14:30:44 +01003568
Willy Tarreau1274e102021-10-11 09:49:03 +02003569 if (strm->current_rule_list && strm->current_rule) {
3570 const struct act_rule *rule = strm->current_rule;
Christopher Faulet8c67ece2021-10-12 11:10:31 +02003571 chunk_appendf(&trash, " current_rule=\"%s\" [%s:%d]\n", rule->kw->kw, rule->conf.file, rule->conf.line);
Willy Tarreau1274e102021-10-11 09:49:03 +02003572 }
3573
Willy Tarreaud0a06d52022-05-18 15:07:19 +02003574 if (applet_putchk(appctx, &trash) == -1)
Willy Tarreaue6e52362019-01-04 17:42:57 +01003575 goto full;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003576
3577 /* use other states to dump the contents */
3578 }
3579 /* end of dump */
Willy Tarreaue6e52362019-01-04 17:42:57 +01003580 done:
Willy Tarreau39f097d2022-05-03 10:49:00 +02003581 ctx->uid = 0;
3582 ctx->section = 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003583 return 1;
Willy Tarreaue6e52362019-01-04 17:42:57 +01003584 full:
3585 return 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003586}
3587
Aurélien Nephtaliabbf6072018-04-18 13:26:46 +02003588static int cli_parse_show_sess(char **args, char *payload, struct appctx *appctx, void *private)
William Lallemand4c5b4d52016-11-21 08:51:11 +01003589{
Willy Tarreau39f097d2022-05-03 10:49:00 +02003590 struct show_sess_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
3591
William Lallemand4c5b4d52016-11-21 08:51:11 +01003592 if (!cli_has_level(appctx, ACCESS_LVL_OPER))
3593 return 1;
3594
3595 if (*args[2] && strcmp(args[2], "all") == 0)
Willy Tarreau39f097d2022-05-03 10:49:00 +02003596 ctx->target = (void *)-1;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003597 else if (*args[2])
Willy Tarreau39f097d2022-05-03 10:49:00 +02003598 ctx->target = (void *)strtoul(args[2], NULL, 0);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003599 else
Willy Tarreau39f097d2022-05-03 10:49:00 +02003600 ctx->target = NULL;
3601 ctx->section = 0; /* start with stream status */
3602 ctx->pos = 0;
3603 ctx->thr = 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003604
Willy Tarreauf3629f82022-05-03 11:05:39 +02003605 /* The back-ref must be reset, it will be detected and set by
3606 * the dump code upon first invocation.
3607 */
3608 LIST_INIT(&ctx->bref.users);
3609
Willy Tarreaub9813182021-02-24 11:29:51 +01003610 /* let's set our own stream's epoch to the current one and increment
3611 * it so that we know which streams were already there before us.
3612 */
Willy Tarreau0698c802022-05-11 14:09:57 +02003613 appctx_strm(appctx)->stream_epoch = _HA_ATOMIC_FETCH_ADD(&stream_epoch, 1);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003614 return 0;
3615}
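
/* Illustrative CLI usage (not from this file): the parser above accepts an
 * optional stream pointer or the keyword "all". Over the stats socket this
 * could look like the following; the socket path and pointer value are
 * assumptions for the example:
 *
 *	$ echo "show sess" | socat stdio /var/run/haproxy.sock
 *	$ echo "show sess all" | socat stdio /var/run/haproxy.sock
 *	$ echo "show sess 0x7f21c0c4a200" | socat stdio /var/run/haproxy.sock
 */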
3616
Willy Tarreau4596fe22022-05-17 19:07:51 +02003617/* This function dumps all streams' states onto the stream connector's
William Lallemand4c5b4d52016-11-21 08:51:11 +01003618 * read buffer. It returns 0 if the output buffer is full and it needs
Willy Tarreaue6e52362019-01-04 17:42:57 +01003619 * to be called again, otherwise non-zero. It proceeds in an isolated
3620 * thread so there is no thread safety issue here.
William Lallemand4c5b4d52016-11-21 08:51:11 +01003621 */
3622static int cli_io_handler_dump_sess(struct appctx *appctx)
3623{
Willy Tarreau39f097d2022-05-03 10:49:00 +02003624 struct show_sess_ctx *ctx = appctx->svcctx;
Willy Tarreauc12b3212022-05-27 11:08:15 +02003625 struct stconn *sc = appctx_sc(appctx);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003626 struct connection *conn;
3627
Willy Tarreaue6e52362019-01-04 17:42:57 +01003628 thread_isolate();
3629
Willy Tarreaubb4e2892022-05-03 11:10:19 +02003630 if (ctx->thr >= global.nbthread) {
3631 /* already terminated */
3632 goto done;
3633 }
3634
Christopher Faulet7faac7c2023-04-04 10:05:27 +02003635 /* FIXME: Don't watch the other side !*/
Christopher Faulet208c7122023-04-13 16:16:15 +02003636 if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE)) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003637 /* If we're forced to shut down, we might have to remove our
3638 * reference to the last stream being dumped.
3639 */
Willy Tarreauf3629f82022-05-03 11:05:39 +02003640 if (!LIST_ISEMPTY(&ctx->bref.users)) {
3641 LIST_DELETE(&ctx->bref.users);
3642 LIST_INIT(&ctx->bref.users);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003643 }
Willy Tarreaue6e52362019-01-04 17:42:57 +01003644 goto done;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003645 }
3646
3647 chunk_reset(&trash);
3648
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003649 /* first, let's detach the back-ref from a possible previous stream */
3650 if (!LIST_ISEMPTY(&ctx->bref.users)) {
3651 LIST_DELETE(&ctx->bref.users);
3652 LIST_INIT(&ctx->bref.users);
3653 } else if (!ctx->bref.ref) {
3654 /* first call, start with first stream */
3655 ctx->bref.ref = ha_thread_ctx[ctx->thr].streams.n;
3656 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003657
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003658 /* and start from where we stopped */
3659 while (1) {
3660 char pn[INET6_ADDRSTRLEN];
3661 struct stream *curr_strm;
 3662		int done = 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003663
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003664 if (ctx->bref.ref == &ha_thread_ctx[ctx->thr].streams)
3665 done = 1;
3666 else {
3667 /* check if we've found a stream created after issuing the "show sess" */
3668 curr_strm = LIST_ELEM(ctx->bref.ref, struct stream *, list);
Willy Tarreau0698c802022-05-11 14:09:57 +02003669 if ((int)(curr_strm->stream_epoch - appctx_strm(appctx)->stream_epoch) > 0)
Willy Tarreaua698eb62021-02-24 10:37:01 +01003670 done = 1;
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003671 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003672
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003673 if (done) {
3674 ctx->thr++;
3675 if (ctx->thr >= global.nbthread)
3676 break;
3677 ctx->bref.ref = ha_thread_ctx[ctx->thr].streams.n;
3678 continue;
3679 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003680
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003681 if (ctx->target) {
3682 if (ctx->target != (void *)-1 && ctx->target != curr_strm)
3683 goto next_sess;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003684
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003685 LIST_APPEND(&curr_strm->back_refs, &ctx->bref.users);
3686 /* call the proper dump() function and return if we're missing space */
Willy Tarreaub49672d2022-05-27 10:13:37 +02003687 if (!stats_dump_full_strm_to_buffer(sc, curr_strm))
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003688 goto full;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003689
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003690 /* stream dump complete */
3691 LIST_DELETE(&ctx->bref.users);
3692 LIST_INIT(&ctx->bref.users);
3693 if (ctx->target != (void *)-1) {
3694 ctx->target = NULL;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003695 break;
3696 }
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003697 else
3698 goto next_sess;
3699 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003700
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003701 chunk_appendf(&trash,
3702 "%p: proto=%s",
3703 curr_strm,
3704 strm_li(curr_strm) ? strm_li(curr_strm)->rx.proto->name : "?");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003705
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003706 conn = objt_conn(strm_orig(curr_strm));
3707 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
3708 case AF_INET:
3709 case AF_INET6:
William Lallemand4c5b4d52016-11-21 08:51:11 +01003710 chunk_appendf(&trash,
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003711 " src=%s:%d fe=%s be=%s srv=%s",
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003712 HA_ANON_CLI(pn),
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003713 get_host_port(conn->src),
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003714 HA_ANON_CLI(strm_fe(curr_strm)->id),
3715 (curr_strm->be->cap & PR_CAP_BE) ? HA_ANON_CLI(curr_strm->be->id) : "<NONE>",
3716 objt_server(curr_strm->target) ? HA_ANON_CLI(__objt_server(curr_strm->target)->id) : "<none>"
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003717 );
3718 break;
3719 case AF_UNIX:
William Lallemand4c5b4d52016-11-21 08:51:11 +01003720 chunk_appendf(&trash,
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003721 " src=unix:%d fe=%s be=%s srv=%s",
3722 strm_li(curr_strm)->luid,
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003723 HA_ANON_CLI(strm_fe(curr_strm)->id),
3724 (curr_strm->be->cap & PR_CAP_BE) ? HA_ANON_CLI(curr_strm->be->id) : "<NONE>",
3725 objt_server(curr_strm->target) ? HA_ANON_CLI(__objt_server(curr_strm->target)->id) : "<none>"
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003726 );
3727 break;
3728 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003729
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003730 chunk_appendf(&trash,
3731 " ts=%02x epoch=%#x age=%s calls=%u rate=%u cpu=%llu lat=%llu",
3732 curr_strm->task->state, curr_strm->stream_epoch,
Willy Tarreau18420142023-11-17 18:51:26 +01003733 human_time(ns_to_sec(now_ns) - ns_to_sec(curr_strm->logs.request_ts), 1),
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003734 curr_strm->task->calls, read_freq_ctr(&curr_strm->call_rate),
Willy Tarreau6a28a302022-09-07 09:17:45 +02003735 (unsigned long long)curr_strm->cpu_time, (unsigned long long)curr_strm->lat_time);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003736
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003737 chunk_appendf(&trash,
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003738 " rq[f=%06xh,i=%u,an=%02xh",
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003739 curr_strm->req.flags,
3740 (unsigned int)ci_data(&curr_strm->req),
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003741 curr_strm->req.analysers);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003742
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003743 chunk_appendf(&trash,
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003744 ",ax=%s]",
3745 curr_strm->req.analyse_exp ?
3746 human_time(TICKS_TO_MS(curr_strm->req.analyse_exp - now_ms),
3747 TICKS_TO_MS(1000)) : "");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003748
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003749 chunk_appendf(&trash,
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003750 " rp[f=%06xh,i=%u,an=%02xh",
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003751 curr_strm->res.flags,
3752 (unsigned int)ci_data(&curr_strm->res),
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003753 curr_strm->res.analysers);
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003754 chunk_appendf(&trash,
3755 ",ax=%s]",
3756 curr_strm->res.analyse_exp ?
3757 human_time(TICKS_TO_MS(curr_strm->res.analyse_exp - now_ms),
3758 TICKS_TO_MS(1000)) : "");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003759
Willy Tarreaufd9417b2022-05-18 16:23:22 +02003760 conn = sc_conn(curr_strm->scf);
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003761 chunk_appendf(&trash," scf=[%d,%1xh,fd=%d",
3762 curr_strm->scf->state, curr_strm->scf->flags, conn_fd(conn));
3763 chunk_appendf(&trash, ",rex=%s",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003764 sc_ep_rcv_ex(curr_strm->scf) ?
3765 human_time(TICKS_TO_MS(sc_ep_rcv_ex(curr_strm->scf) - now_ms),
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003766 TICKS_TO_MS(1000)) : "");
3767 chunk_appendf(&trash,",wex=%s]",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003768 sc_ep_snd_ex(curr_strm->scf) ?
3769 human_time(TICKS_TO_MS(sc_ep_snd_ex(curr_strm->scf) - now_ms),
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003770 TICKS_TO_MS(1000)) : "");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003771
Willy Tarreaufd9417b2022-05-18 16:23:22 +02003772 conn = sc_conn(curr_strm->scb);
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003773 chunk_appendf(&trash, " scb=[%d,%1xh,fd=%d",
3774 curr_strm->scb->state, curr_strm->scb->flags, conn_fd(conn));
3775 chunk_appendf(&trash, ",rex=%s",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003776 sc_ep_rcv_ex(curr_strm->scb) ?
3777 human_time(TICKS_TO_MS(sc_ep_rcv_ex(curr_strm->scb) - now_ms),
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003778 TICKS_TO_MS(1000)) : "");
3779 chunk_appendf(&trash, ",wex=%s]",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003780 sc_ep_snd_ex(curr_strm->scb) ?
3781 human_time(TICKS_TO_MS(sc_ep_snd_ex(curr_strm->scb) - now_ms),
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003782 TICKS_TO_MS(1000)) : "");
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003783
3784 chunk_appendf(&trash,
3785 " exp=%s rc=%d c_exp=%s",
3786 curr_strm->task->expire ?
3787 human_time(TICKS_TO_MS(curr_strm->task->expire - now_ms),
3788 TICKS_TO_MS(1000)) : "",
3789 curr_strm->conn_retries,
3790 curr_strm->conn_exp ?
3791 human_time(TICKS_TO_MS(curr_strm->conn_exp - now_ms),
3792 TICKS_TO_MS(1000)) : "");
3793 if (task_in_rq(curr_strm->task))
3794 chunk_appendf(&trash, " run(nice=%d)", curr_strm->task->nice);
3795
3796 chunk_appendf(&trash, "\n");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003797
Willy Tarreaud0a06d52022-05-18 15:07:19 +02003798 if (applet_putchk(appctx, &trash) == -1) {
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003799 /* let's try again later from this stream. We add ourselves into
3800 * this stream's users so that it can remove us upon termination.
3801 */
3802 LIST_APPEND(&curr_strm->back_refs, &ctx->bref.users);
3803 goto full;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003804 }
3805
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003806 next_sess:
3807 ctx->bref.ref = curr_strm->list.n;
3808 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003809
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003810 if (ctx->target && ctx->target != (void *)-1) {
3811 /* specified stream not found */
3812 if (ctx->section > 0)
3813 chunk_appendf(&trash, " *** session terminated while we were watching it ***\n");
3814 else
3815 chunk_appendf(&trash, "Session not found.\n");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003816
Willy Tarreaud0a06d52022-05-18 15:07:19 +02003817 if (applet_putchk(appctx, &trash) == -1)
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003818 goto full;
3819
3820 ctx->target = NULL;
3821 ctx->uid = 0;
3822 goto done;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003823 }
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003824
Willy Tarreaue6e52362019-01-04 17:42:57 +01003825 done:
3826 thread_release();
3827 return 1;
3828 full:
3829 thread_release();
Willy Tarreaue6e52362019-01-04 17:42:57 +01003830 return 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003831}
3832
3833static void cli_release_show_sess(struct appctx *appctx)
3834{
Willy Tarreau39f097d2022-05-03 10:49:00 +02003835 struct show_sess_ctx *ctx = appctx->svcctx;
3836
Willy Tarreaubb4e2892022-05-03 11:10:19 +02003837 if (ctx->thr < global.nbthread) {
Willy Tarreau49de6852021-02-24 13:46:12 +01003838 /* a dump was aborted, either in error or timeout. We need to
3839 * safely detach from the target stream's list. It's mandatory
3840 * to lock because a stream on the target thread could be moving
3841 * our node.
3842 */
3843 thread_isolate();
Willy Tarreau39f097d2022-05-03 10:49:00 +02003844 if (!LIST_ISEMPTY(&ctx->bref.users))
3845 LIST_DELETE(&ctx->bref.users);
Willy Tarreau49de6852021-02-24 13:46:12 +01003846 thread_release();
William Lallemand4c5b4d52016-11-21 08:51:11 +01003847 }
3848}

/* Parses the "shutdown session" directive; it always returns 1 */
static int cli_parse_shutdown_session(char **args, char *payload, struct appctx *appctx, void *private)
{
        struct stream *strm, *ptr;
        int thr;

        if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
                return 1;

        ptr = (void *)strtoul(args[2], NULL, 0);
        if (!ptr)
                return cli_err(appctx, "Session pointer expected (use 'show sess').\n");

        strm = NULL;

        thread_isolate();

        /* first, look for the requested stream in the per-thread stream lists */
        for (thr = 0; strm != ptr && thr < global.nbthread; thr++) {
                list_for_each_entry(strm, &ha_thread_ctx[thr].streams, list) {
                        if (strm == ptr) {
                                stream_shutdown(strm, SF_ERR_KILLED);
                                break;
                        }
                }
        }

        thread_release();

        /* did we find the stream? */
        if (strm != ptr)
                return cli_err(appctx, "No such session (use 'show sess').\n");

        return 1;
}

/* Parses the "shutdown sessions server" directive; it always returns 1 */
static int cli_parse_shutdown_sessions_server(char **args, char *payload, struct appctx *appctx, void *private)
{
        struct server *sv;

        if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
                return 1;

        sv = cli_find_server(appctx, args[3]);
        if (!sv)
                return 1;

        /* kill all the streams that are on this server */
        HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
        srv_shutdown_streams(sv, SF_ERR_KILLED);
        HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
        return 1;
}

/* register cli keywords */
static struct cli_kw_list cli_kws = {{ },{
        { { "show", "sess", NULL }, "show sess [id] : report the list of current sessions or dump this exact session", cli_parse_show_sess, cli_io_handler_dump_sess, cli_release_show_sess },
        { { "shutdown", "session", NULL }, "shutdown session [id] : kill a specific session", cli_parse_shutdown_session, NULL, NULL },
        { { "shutdown", "sessions", "server" }, "shutdown sessions server <bk>/<srv> : kill sessions on a server", cli_parse_shutdown_sessions_server, NULL, NULL },
        {{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
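
/* Illustrative usage only: the keywords registered above are issued over the
 * stats/CLI socket. The socket path, pointer value and backend/server names
 * below are placeholders, not taken from this file:
 *
 *   $ echo "show sess" | socat stdio /var/run/haproxy.sock
 *   $ echo "shutdown session 0x7f2c3c02e200" | socat stdio /var/run/haproxy.sock
 *   $ echo "shutdown sessions server bk_web/srv1" | socat stdio /var/run/haproxy.sock
 *
 * The pointer passed to "shutdown session" is the one reported in the first
 * column of "show sess" output.
 */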

/* "tcp-request content" action keyword registration. */
static struct action_kw_list stream_tcp_req_keywords = { ILH, {
        { "set-log-level", stream_parse_set_log_level },
        { "set-nice", stream_parse_set_nice },
        { "switch-mode", stream_parse_switch_mode },
        { "use-service", stream_parse_use_service },
        { /* END */ }
}};

INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &stream_tcp_req_keywords);
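
/* Illustrative usage only: the actions registered above are invoked from
 * "tcp-request content" rules. Proxy and service names are placeholders:
 *
 *   frontend fe_main
 *       mode tcp
 *       tcp-request inspect-delay 5s
 *       tcp-request content set-log-level silent if { src 10.0.0.0/8 }
 *       tcp-request content switch-mode http if HTTP
 *       tcp-request content use-service lua.my_service
 */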

/* "tcp-response content" action keyword registration. */
static struct action_kw_list stream_tcp_res_keywords = { ILH, {
        { "set-log-level", stream_parse_set_log_level },
        { "set-nice", stream_parse_set_nice },
        { /* END */ }
}};

INITCALL1(STG_REGISTER, tcp_res_cont_keywords_register, &stream_tcp_res_keywords);

static struct action_kw_list stream_http_req_keywords = { ILH, {
        { "set-log-level", stream_parse_set_log_level },
        { "set-nice", stream_parse_set_nice },
        { "use-service", stream_parse_use_service },
        { /* END */ }
}};

INITCALL1(STG_REGISTER, http_req_keywords_register, &stream_http_req_keywords);

static struct action_kw_list stream_http_res_keywords = { ILH, {
        { "set-log-level", stream_parse_set_log_level },
        { "set-nice", stream_parse_set_nice },
        { /* END */ }
}};

INITCALL1(STG_REGISTER, http_res_keywords_register, &stream_http_res_keywords);

static struct action_kw_list stream_http_after_res_actions = { ILH, {
        { "set-log-level", stream_parse_set_log_level },
        { /* END */ }
}};

INITCALL1(STG_REGISTER, http_after_res_keywords_register, &stream_http_after_res_actions);
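
/* Illustrative usage only: the same actions are exposed to the HTTP rule sets
 * registered above. All names below are placeholders:
 *
 *   frontend fe_main
 *       mode http
 *       http-request set-log-level silent if { path_beg /health }
 *       http-request use-service prometheus-exporter if { path /metrics }
 *       http-response set-nice 100 if { status 404 }
 *       http-after-response set-log-level silent if { status 200 }
 */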

static int smp_fetch_cur_server_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
{
        smp->flags = SMP_F_VOL_TXN;
        smp->data.type = SMP_T_SINT;
        if (!smp->strm)
                return 0;

        smp->data.u.sint = TICKS_TO_MS(smp->strm->scb->ioto);
        return 1;
}

static int smp_fetch_cur_tunnel_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
{
        smp->flags = SMP_F_VOL_TXN;
        smp->data.type = SMP_T_SINT;
        if (!smp->strm)
                return 0;

        smp->data.u.sint = TICKS_TO_MS(smp->strm->tunnel_timeout);
        return 1;
}
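
/* Note (illustrative, not from this file): these two fetches report the
 * per-stream values currently in effect, which normally derive from the
 * "timeout server" and "timeout tunnel" settings but may have been changed
 * at run time, for instance by a "set-timeout" action:
 *
 *   backend be_app
 *       timeout server 30s
 *       timeout tunnel 1h
 *       http-request set-timeout server 5s if { path_beg /fast }
 */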

static int smp_fetch_last_rule_file(const struct arg *args, struct sample *smp, const char *km, void *private)
{
        smp->flags = SMP_F_VOL_TXN;
        smp->data.type = SMP_T_STR;
        if (!smp->strm || !smp->strm->last_rule_file)
                return 0;

        smp->flags |= SMP_F_CONST;
        smp->data.u.str.area = (char *)smp->strm->last_rule_file;
        smp->data.u.str.data = strlen(smp->strm->last_rule_file);
        return 1;
}

static int smp_fetch_last_rule_line(const struct arg *args, struct sample *smp, const char *km, void *private)
{
        smp->flags = SMP_F_VOL_TXN;
        smp->data.type = SMP_T_SINT;
        if (!smp->strm || !smp->strm->last_rule_line)
                return 0;

        smp->data.u.sint = smp->strm->last_rule_line;
        return 1;
}

/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted.
 */
static struct sample_fetch_kw_list smp_kws = {ILH, {
        { "cur_server_timeout", smp_fetch_cur_server_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
        { "cur_tunnel_timeout", smp_fetch_cur_tunnel_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
        { "last_rule_file", smp_fetch_last_rule_file, 0, NULL, SMP_T_STR, SMP_USE_INTRN, },
        { "last_rule_line", smp_fetch_last_rule_line, 0, NULL, SMP_T_SINT, SMP_USE_INTRN, },
        { NULL, NULL, 0, 0, 0 },
}};

INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);
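
/* Illustrative usage only: these fetches can be used wherever sample
 * expressions are accepted, typically for logging or debug headers. The
 * header names below are placeholders:
 *
 *   backend be_app
 *       http-response set-header X-Cur-Srv-Timeout %[cur_server_timeout]
 *       http-after-response set-header X-Last-Rule "%[last_rule_file]:%[last_rule_line]"
 */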

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */