/*
 * Stream management functions.
 *
 * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <stdlib.h>
#include <unistd.h>

#include <import/ebistree.h>

#include <haproxy/acl.h>
#include <haproxy/action.h>
#include <haproxy/activity.h>
#include <haproxy/api.h>
#include <haproxy/applet.h>
#include <haproxy/arg.h>
#include <haproxy/backend.h>
#include <haproxy/capture.h>
#include <haproxy/cfgparse.h>
#include <haproxy/channel.h>
#include <haproxy/check.h>
#include <haproxy/cli.h>
#include <haproxy/connection.h>
#include <haproxy/dict.h>
#include <haproxy/dynbuf.h>
#include <haproxy/fd.h>
#include <haproxy/filters.h>
#include <haproxy/freq_ctr.h>
#include <haproxy/frontend.h>
#include <haproxy/global.h>
#include <haproxy/hlua.h>
#include <haproxy/http_ana.h>
#include <haproxy/http_rules.h>
#include <haproxy/htx.h>
#include <haproxy/istbuf.h>
#include <haproxy/log.h>
#include <haproxy/pipe.h>
#include <haproxy/pool.h>
#include <haproxy/proxy.h>
#include <haproxy/queue.h>
#include <haproxy/sc_strm.h>
#include <haproxy/server.h>
#include <haproxy/resolvers.h>
#include <haproxy/sample.h>
#include <haproxy/session.h>
#include <haproxy/stats-t.h>
#include <haproxy/stconn.h>
#include <haproxy/stick_table.h>
#include <haproxy/stream.h>
#include <haproxy/task.h>
#include <haproxy/tcp_rules.h>
#include <haproxy/thread.h>
#include <haproxy/tools.h>
#include <haproxy/trace.h>
#include <haproxy/vars.h>


DECLARE_POOL(pool_head_stream, "stream", sizeof(struct stream));
DECLARE_POOL(pool_head_uniqueid, "uniqueid", UNIQUEID_LEN);

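/* Illustrative note (not part of the build): the two pools declared above back
 * the fixed-size allocations used further down, roughly:
 *
 *     struct stream *s = pool_alloc(pool_head_stream);   // see stream_new()
 *     ...
 *     pool_free(pool_head_stream, s);                     // see stream_free()
 *
 * DECLARE_POOL() only registers the pool; the memory itself is handled by the
 * generic pool allocator from <haproxy/pool.h>.
 */
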
/* incremented by each "show sess" to set a delimiter between streams */
unsigned stream_epoch = 0;

/* List of all use-service keywords. */
static struct list service_keywords = LIST_HEAD_INIT(service_keywords);


/* trace source and events */
static void strm_trace(enum trace_level level, uint64_t mask,
                       const struct trace_source *src,
                       const struct ist where, const struct ist func,
                       const void *a1, const void *a2, const void *a3, const void *a4);

/* The event representation is split like this :
 *   strm  - stream
 *   sc    - stream connector
 *   http  - http analysis
 *   tcp   - tcp analysis
 *
 * STRM_EV_* macros are defined in <proto/stream.h>
 */
static const struct trace_event strm_trace_events[] = {
	{ .mask = STRM_EV_STRM_NEW,  .name = "strm_new",  .desc = "new stream" },
	{ .mask = STRM_EV_STRM_FREE, .name = "strm_free", .desc = "release stream" },
	{ .mask = STRM_EV_STRM_ERR,  .name = "strm_err",  .desc = "error during stream processing" },
	{ .mask = STRM_EV_STRM_ANA,  .name = "strm_ana",  .desc = "stream analyzers" },
	{ .mask = STRM_EV_STRM_PROC, .name = "strm_proc", .desc = "stream processing" },

	{ .mask = STRM_EV_CS_ST,     .name = "sc_state",  .desc = "processing connector states" },

	{ .mask = STRM_EV_HTTP_ANA,  .name = "http_ana",  .desc = "HTTP analyzers" },
	{ .mask = STRM_EV_HTTP_ERR,  .name = "http_err",  .desc = "error during HTTP analysis" },

	{ .mask = STRM_EV_TCP_ANA,   .name = "tcp_ana",   .desc = "TCP analyzers" },
	{ .mask = STRM_EV_TCP_ERR,   .name = "tcp_err",   .desc = "error during TCP analysis" },

	{ .mask = STRM_EV_FLT_ANA,   .name = "flt_ana",   .desc = "Filter analyzers" },
	{ .mask = STRM_EV_FLT_ERR,   .name = "flt_err",   .desc = "error during filter analysis" },
	{}
};

static const struct name_desc strm_trace_lockon_args[4] = {
	/* arg1 */ { /* already used by the stream */ },
	/* arg2 */ { },
	/* arg3 */ { },
	/* arg4 */ { }
};

static const struct name_desc strm_trace_decoding[] = {
#define STRM_VERB_CLEAN    1
	{ .name="clean",    .desc="only user-friendly stuff, generally suitable for level \"user\"" },
#define STRM_VERB_MINIMAL  2
	{ .name="minimal",  .desc="report info on streams and connectors" },
#define STRM_VERB_SIMPLE   3
	{ .name="simple",   .desc="add info on request and response channels" },
#define STRM_VERB_ADVANCED 4
	{ .name="advanced", .desc="add info on channel's buffer for data and developer levels only" },
#define STRM_VERB_COMPLETE 5
	{ .name="complete", .desc="add info on channel's buffer" },
	{ /* end */ }
};

struct trace_source trace_strm = {
	.name = IST("stream"),
	.desc = "Applicative stream",
	.arg_def = TRC_ARG1_STRM,  // TRACE()'s first argument is always a stream
	.default_cb = strm_trace,
	.known_events = strm_trace_events,
	.lockon_args = strm_trace_lockon_args,
	.decoding = strm_trace_decoding,
	.report_events = ~0,  // report everything by default
};

#define TRACE_SOURCE &trace_strm
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);

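/* Illustrative note (not part of the original file; exact syntax depends on
 * the HAProxy version): once registered by the INITCALL above, the "stream"
 * trace source is typically driven at runtime from the CLI, e.g.:
 *
 *     trace stream sink stderr
 *     trace stream level developer
 *     trace stream verbosity advanced
 *     trace stream start now
 *
 * The verbosity names map to the STRM_VERB_* values defined just above.
 */
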
/* the stream traces always expect that arg1, if non-null, is a stream (from
 * which we can derive everything), that arg2, if non-null, is an http
 * transaction, and that arg3, if non-null, is an http message.
 */
static void strm_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
                       const struct ist where, const struct ist func,
                       const void *a1, const void *a2, const void *a3, const void *a4)
{
	const struct stream *s = a1;
	const struct http_txn *txn = a2;
	const struct http_msg *msg = a3;
	struct task *task;
	const struct channel *req, *res;
	struct htx *htx;

	if (!s || src->verbosity < STRM_VERB_CLEAN)
		return;

	task = s->task;
	req  = &s->req;
	res  = &s->res;
	htx  = (msg ? htxbuf(&msg->chn->buf) : NULL);

	/* General info about the stream (htx/tcp, id...) */
	chunk_appendf(&trace_buf, " : [%u,%s]",
		      s->uniq_id, ((s->flags & SF_HTX) ? "HTX" : "TCP"));
	if (isttest(s->unique_id)) {
		chunk_appendf(&trace_buf, " id=");
		b_putist(&trace_buf, s->unique_id);
	}

	/* Front and back stream connector states */
	chunk_appendf(&trace_buf, " SC=(%s,%s)",
		      sc_state_str(s->scf->state), sc_state_str(s->scb->state));

	/* If txn is defined, HTTP req/rep states */
	if (txn)
		chunk_appendf(&trace_buf, " HTTP=(%s,%s)",
			      h1_msg_state_str(txn->req.msg_state), h1_msg_state_str(txn->rsp.msg_state));
	if (msg)
		chunk_appendf(&trace_buf, " %s", ((msg->chn->flags & CF_ISRESP) ? "RESPONSE" : "REQUEST"));

	if (src->verbosity == STRM_VERB_CLEAN)
		return;

	/* If msg is defined, display the status-line if possible (verbosity > MINIMAL) */
	if (src->verbosity > STRM_VERB_MINIMAL && htx && htx_nbblks(htx)) {
		const struct htx_blk *blk = __htx_get_head_blk(htx);
		const struct htx_sl  *sl  = htx_get_blk_ptr(htx, blk);
		enum htx_blk_type    type = htx_get_blk_type(blk);

		if (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL)
			chunk_appendf(&trace_buf, " - \"%.*s %.*s %.*s\"",
				      HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
				      HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
				      HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
	}

	chunk_appendf(&trace_buf, " - t=%p t.exp=%d s=(%p,0x%08x,0x%x)",
		      task, tick_isset(task->expire) ? TICKS_TO_MS(task->expire - now_ms) : TICK_ETERNITY, s, s->flags, s->conn_err_type);

	/* If txn is defined, show info about the HTTP messages, otherwise about the stream connectors */
	if (txn) {
		chunk_appendf(&trace_buf, " txn.flags=0x%08x, http.flags=(0x%08x,0x%08x) status=%d",
			      txn->flags, txn->req.flags, txn->rsp.flags, txn->status);
	}
	else {
		chunk_appendf(&trace_buf, " scf=(%p,%d,0x%08x,0x%x) scb=(%p,%d,0x%08x,0x%x) scf.exp(r,w)=(%d,%d) scb.exp(r,w)=(%d,%d) retries=%d",
			      s->scf, s->scf->state, s->scf->flags, s->scf->sedesc->flags,
			      s->scb, s->scb->state, s->scb->flags, s->scb->sedesc->flags,
			      tick_isset(sc_ep_rcv_ex(s->scf)) ? TICKS_TO_MS(sc_ep_rcv_ex(s->scf) - now_ms) : TICK_ETERNITY,
			      tick_isset(sc_ep_snd_ex(s->scf)) ? TICKS_TO_MS(sc_ep_snd_ex(s->scf) - now_ms) : TICK_ETERNITY,
			      tick_isset(sc_ep_rcv_ex(s->scb)) ? TICKS_TO_MS(sc_ep_rcv_ex(s->scb) - now_ms) : TICK_ETERNITY,
			      tick_isset(sc_ep_snd_ex(s->scb)) ? TICKS_TO_MS(sc_ep_snd_ex(s->scb) - now_ms) : TICK_ETERNITY,
			      s->conn_retries);
	}

	if (src->verbosity == STRM_VERB_MINIMAL)
		return;


	/* If txn is defined, don't display all channel info */
	if (src->verbosity == STRM_VERB_SIMPLE || txn) {
		chunk_appendf(&trace_buf, " req=(%p .fl=0x%08x .exp=%d)",
			      req, req->flags, tick_isset(req->analyse_exp) ? TICKS_TO_MS(req->analyse_exp - now_ms) : TICK_ETERNITY);
		chunk_appendf(&trace_buf, " res=(%p .fl=0x%08x .exp=%d)",
			      res, res->flags, tick_isset(res->analyse_exp) ? TICKS_TO_MS(res->analyse_exp - now_ms) : TICK_ETERNITY);
	}
	else {
		chunk_appendf(&trace_buf, " req=(%p .fl=0x%08x .ana=0x%08x .exp=%u .o=%lu .tot=%llu .to_fwd=%u)",
			      req, req->flags, req->analysers, req->analyse_exp,
			      (long)req->output, req->total, req->to_forward);
		chunk_appendf(&trace_buf, " res=(%p .fl=0x%08x .ana=0x%08x .exp=%u .o=%lu .tot=%llu .to_fwd=%u)",
			      res, res->flags, res->analysers, res->analyse_exp,
			      (long)res->output, res->total, res->to_forward);
	}

	if (src->verbosity == STRM_VERB_SIMPLE ||
	    (src->verbosity == STRM_VERB_ADVANCED && src->level < TRACE_LEVEL_DATA))
		return;

	/* channels' buffer info */
	if (s->flags & SF_HTX) {
		struct htx *rqhtx = htxbuf(&req->buf);
		struct htx *rphtx = htxbuf(&res->buf);

		chunk_appendf(&trace_buf, " htx=(%u/%u#%u, %u/%u#%u)",
			      rqhtx->data, rqhtx->size, htx_nbblks(rqhtx),
			      rphtx->data, rphtx->size, htx_nbblks(rphtx));
	}
	else {
		chunk_appendf(&trace_buf, " buf=(%u@%p+%u/%u, %u@%p+%u/%u)",
			      (unsigned int)b_data(&req->buf), b_orig(&req->buf),
			      (unsigned int)b_head_ofs(&req->buf), (unsigned int)b_size(&req->buf),
			      (unsigned int)b_data(&res->buf), b_orig(&res->buf),
			      (unsigned int)b_head_ofs(&res->buf), (unsigned int)b_size(&res->buf));
	}

	/* If msg is defined, display htx info if defined (level > USER) */
	if (src->level > TRACE_LEVEL_USER && htx && htx_nbblks(htx)) {
		int full = 0;

		/* Full htx info (level > STATE && verbosity > SIMPLE) */
		if (src->level > TRACE_LEVEL_STATE) {
			if (src->verbosity == STRM_VERB_COMPLETE)
				full = 1;
		}

		chunk_memcat(&trace_buf, "\n\t", 2);
		htx_dump(&trace_buf, htx, full);
	}
}

/* Upgrade an existing stream for stream connector <sc>. Return < 0 on error. This
 * is only valid right after a TCP to H1 upgrade. The stream should be
 * "reactivated" by removing the SF_IGNORE flag, and the right mode must be set. On
 * success, the <input> buffer is transferred to the stream and thus points to
 * BUF_NULL. On error, it is unchanged and it is the caller's responsibility to
 * release it (this never happens for now).
 */
int stream_upgrade_from_sc(struct stconn *sc, struct buffer *input)
{
	struct stream *s = __sc_strm(sc);
	const struct mux_ops *mux = sc_mux_ops(sc);

	if (mux) {
		if (mux->flags & MX_FL_HTX)
			s->flags |= SF_HTX;
	}

	if (!b_is_null(input)) {
		/* Xfer the input buffer to the request channel. <input> will
		 * then point to BUF_NULL. From this point, it is the stream's
		 * responsibility to release it.
		 */
		s->req.buf = *input;
		*input = BUF_NULL;
		s->req.total = (IS_HTX_STRM(s) ? htxbuf(&s->req.buf)->data : b_data(&s->req.buf));
		sc_ep_report_read_activity(s->scf);
	}

	s->req.flags |= CF_READ_EVENT; /* Always report a read event */
	s->flags &= ~SF_IGNORE;

	task_wakeup(s->task, TASK_WOKEN_INIT);
	return 0;
}

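/* Illustrative sketch (hypothetical caller, not part of the original file):
 * a mux performing a destructive TCP->H1 upgrade would typically hand over
 * the data it already buffered, along these lines:
 *
 *     struct buffer input = b_make(area, size, 0, data_len); // pending bytes
 *
 *     if (stream_upgrade_from_sc(sc, &input) < 0)
 *         goto fail;               // <input> is untouched, caller frees it
 *     // here input == BUF_NULL: the stream now owns the buffer
 *
 * <area>, <size> and <data_len> are placeholders, not real variables.
 */
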
/* Callback used to wake up a stream when an input buffer is available. The
 * stream <s>'s stream connectors are checked for a failed buffer allocation
 * as indicated by the presence of the SC_FL_NEED_BUFF flag and the lack of a
 * buffer, and an input buffer is assigned there (at most one). The function
 * returns 1 and wakes the stream up if a buffer was taken, otherwise zero.
 * It's designed to be called from __offer_buffer().
 */
int stream_buf_available(void *arg)
{
	struct stream *s = arg;

	if (!s->req.buf.size && !s->req.pipe && s->scf->flags & SC_FL_NEED_BUFF &&
	    b_alloc(&s->req.buf))
		sc_have_buff(s->scf);
	else if (!s->res.buf.size && !s->res.pipe && s->scb->flags & SC_FL_NEED_BUFF &&
		 b_alloc(&s->res.buf))
		sc_have_buff(s->scb);
	else
		return 0;

	task_wakeup(s->task, TASK_WOKEN_RES);
	return 1;

}

/* This function is called from the session handler which detects the end of
 * handshake, in order to complete initialization of a valid stream. It must be
 * called with a completely initialized session. It returns the pointer to
 * the newly created stream, or NULL in case of fatal error. The client-facing
 * end point is assigned to <sc>, which must be valid. The stream's task
 * is configured with a nice value inherited from the listener's nice if any.
 * The task's context is set to the new stream, and its function is set to
 * process_stream(). Target and analysers are null. <input> is used as input
 * buffer for the request channel and may contain data. On success, it is
 * transferred to the stream and <input> is set to BUF_NULL. On error, the
 * <input> buffer is unchanged and it is the caller's responsibility to release it.
 */
struct stream *stream_new(struct session *sess, struct stconn *sc, struct buffer *input)
{
	struct stream *s;
	struct task *t;

	DBG_TRACE_ENTER(STRM_EV_STRM_NEW);
	if (unlikely((s = pool_alloc(pool_head_stream)) == NULL))
		goto out_fail_alloc;

	/* minimum stream initialization required for an embryonic stream is
	 * fairly low. We need very little to execute L4 ACLs, then we need a
	 * task to make the client-side connection live on its own.
	 *  - flags
	 *  - stick-entry tracking
	 */
	s->flags = 0;
	s->logs.logwait = sess->fe->to_log;
	s->logs.level = 0;
	s->logs.request_ts = 0;
	s->logs.t_queue = -1;
	s->logs.t_connect = -1;
	s->logs.t_data = -1;
	s->logs.t_close = 0;
	s->logs.bytes_in = s->logs.bytes_out = 0;
	s->logs.prx_queue_pos = 0;  /* we get the number of pending conns before us */
	s->logs.srv_queue_pos = 0;  /* we will get this number soon */
	s->obj_type = OBJ_TYPE_STREAM;

	s->logs.accept_date = sess->accept_date;
	s->logs.accept_ts = sess->accept_ts;
	s->logs.t_handshake = sess->t_handshake;
	s->logs.t_idle = sess->t_idle;

	/* default logging function */
	s->do_log = strm_log;

	/* default error reporting function, may be changed by analysers */
	s->srv_error = default_srv_error;

	/* Initialise the current rule list pointer to NULL. We are sure that
	 * no rule list will match the NULL pointer.
	 */
	s->current_rule_list = NULL;
	s->current_rule = NULL;
	s->rules_exp = TICK_ETERNITY;
	s->last_rule_file = NULL;
	s->last_rule_line = 0;

	s->stkctr = NULL;
	if (pool_head_stk_ctr) {
		s->stkctr = pool_alloc(pool_head_stk_ctr);
		if (!s->stkctr)
			goto out_fail_alloc;

		/* Copy SC counters for the stream. We don't touch refcounts because
		 * any reference we have is inherited from the session. Since the stream
		 * doesn't exist without the session, the session's existence guarantees
		 * we don't lose the entry. During the store operation, the stream won't
		 * touch these ones.
		 */
		memcpy(s->stkctr, sess->stkctr, sizeof(s->stkctr[0]) * global.tune.nb_stk_ctr);
	}

	s->sess = sess;

	s->stream_epoch = _HA_ATOMIC_LOAD(&stream_epoch);
	s->uniq_id = _HA_ATOMIC_FETCH_ADD(&global.req_count, 1);

	/* OK, we're keeping the stream, so let's properly initialize the stream */
	LIST_INIT(&s->back_refs);

	LIST_INIT(&s->buffer_wait.list);
	s->buffer_wait.target = s;
	s->buffer_wait.wakeup_cb = stream_buf_available;

	s->lat_time = s->cpu_time = 0;
	s->call_rate.curr_tick = s->call_rate.curr_ctr = s->call_rate.prev_ctr = 0;
	s->pcli_next_pid = 0;
	s->pcli_flags = 0;
	s->unique_id = IST_NULL;

	if ((t = task_new_here()) == NULL)
		goto out_fail_alloc;

	s->task = t;
	s->pending_events = 0;
	s->conn_retries = 0;
	s->conn_exp = TICK_ETERNITY;
	s->conn_err_type = STRM_ET_NONE;
	s->prev_conn_state = SC_ST_INI;
	t->process = process_stream;
	t->context = s;
	t->expire = TICK_ETERNITY;
	if (sess->listener)
		t->nice = sess->listener->bind_conf->nice;

	/* Note: initially, the stream's backend points to the frontend.
	 * This changes later when switching rules are executed or
	 * when the default backend is assigned.
	 */
	s->be  = sess->fe;
	s->req_cap = NULL;
	s->res_cap = NULL;

	/* Initialize all the variables contexts even if not used.
	 * This makes it possible to prune these contexts without errors.
	 *
	 * We need to make sure that those lists are not re-initialized
	 * by stream-dependent underlying code because we could lose
	 * track of already defined variables, leading to data inconsistency
	 * and memory leaks...
	 *
	 * For reference: we had a very old bug caused by vars_txn and
	 * vars_reqres being accidentally re-initialized in http_create_txn()
	 * (https://github.com/haproxy/haproxy/issues/1935)
	 */
	vars_init_head(&s->vars_txn,    SCOPE_TXN);
	vars_init_head(&s->vars_reqres, SCOPE_REQ);

	/* Set SF_HTX flag for HTTP frontends. */
	if (sess->fe->mode == PR_MODE_HTTP)
		s->flags |= SF_HTX;

	s->scf = sc;
	if (sc_attach_strm(s->scf, s) < 0)
		goto out_fail_attach_scf;

	s->scb = sc_new_from_strm(s, SC_FL_ISBACK);
	if (!s->scb)
		goto out_fail_alloc_scb;

	sc_set_state(s->scf, SC_ST_EST);

	if (likely(sess->fe->options2 & PR_O2_INDEPSTR))
		s->scf->flags |= SC_FL_INDEP_STR;

	if (likely(sess->fe->options2 & PR_O2_INDEPSTR))
		s->scb->flags |= SC_FL_INDEP_STR;

	if (sc_ep_test(sc, SE_FL_WEBSOCKET))
		s->flags |= SF_WEBSOCKET;
	if (sc_conn(sc)) {
		const struct mux_ops *mux = sc_mux_ops(sc);

		if (mux && mux->flags & MX_FL_HTX)
			s->flags |= SF_HTX;
	}

	stream_init_srv_conn(s);
	s->target = sess->fe->default_target;

	s->pend_pos = NULL;
	s->priority_class = 0;
	s->priority_offset = 0;

	/* init store persistence */
	s->store_count = 0;

	channel_init(&s->req);
	s->req.flags |= CF_READ_EVENT; /* the producer is already connected */
	s->req.analysers = sess->listener ? sess->listener->bind_conf->analysers : sess->fe->fe_req_ana;

	if (IS_HTX_STRM(s)) {
		/* Be sure to have HTTP analysers because in case of
		 * "destructive" stream upgrade, they may be missing (e.g.
		 * TCP>H2)
		 */
		s->req.analysers |= AN_REQ_WAIT_HTTP|AN_REQ_HTTP_PROCESS_FE;
	}

	if (!sess->fe->fe_req_ana) {
		channel_auto_connect(&s->req);  /* don't wait to establish connection */
		channel_auto_close(&s->req);    /* let the producer forward close requests */
	}

	s->scf->ioto = sess->fe->timeout.client;
	s->req.analyse_exp = TICK_ETERNITY;

	channel_init(&s->res);
	s->res.flags |= CF_ISRESP;
	s->res.analysers = 0;

	if (sess->fe->options2 & PR_O2_NODELAY) {
		s->scf->flags |= SC_FL_SND_NEVERWAIT;
		s->scb->flags |= SC_FL_SND_NEVERWAIT;
	}

	s->scb->ioto = TICK_ETERNITY;
	s->res.analyse_exp = TICK_ETERNITY;

	s->txn = NULL;
	s->hlua[0] = s->hlua[1] = NULL;

	s->resolv_ctx.requester = NULL;
	s->resolv_ctx.hostname_dn = NULL;
	s->resolv_ctx.hostname_dn_len = 0;
	s->resolv_ctx.parent = NULL;

	s->tunnel_timeout = TICK_ETERNITY;

	LIST_APPEND(&th_ctx->streams, &s->list);

	if (flt_stream_init(s) < 0 || flt_stream_start(s) < 0)
		goto out_fail_accept;

	/* just in case the caller would have pre-disabled it */
	se_will_consume(s->scf->sedesc);

	if (sess->fe->accept && sess->fe->accept(s) < 0)
		goto out_fail_accept;

	if (!b_is_null(input)) {
		/* Xfer the input buffer to the request channel. <input> will
		 * then point to BUF_NULL. From this point, it is the stream's
		 * responsibility to release it.
		 */
		s->req.buf = *input;
		*input = BUF_NULL;
		s->req.total = (IS_HTX_STRM(s) ? htxbuf(&s->req.buf)->data : b_data(&s->req.buf));
		sc_ep_report_read_activity(s->scf);
	}

	/* it is important not to call the wakeup function directly but to
	 * pass through task_wakeup(), because this one knows how to apply
	 * priorities to tasks. With multiple threads we must be sure that
	 * the stream is fully initialized before calling task_wakeup(), so
	 * the caller must handle the task_wakeup.
	 */
	DBG_TRACE_LEAVE(STRM_EV_STRM_NEW, s);
	task_wakeup(s->task, TASK_WOKEN_INIT);
	return s;

	/* Error unrolling */
 out_fail_accept:
	flt_stream_release(s, 0);
	LIST_DELETE(&s->list);
	sc_free(s->scb);
 out_fail_alloc_scb:
 out_fail_attach_scf:
	task_destroy(t);
 out_fail_alloc:
	if (s)
		pool_free(pool_head_stk_ctr, s->stkctr);
	pool_free(pool_head_stream, s);
	DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_NEW|STRM_EV_STRM_ERR);
	return NULL;
}

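/* Illustrative sketch (hypothetical caller, not part of the original file):
 * the session layer creates the stream once the handshake is over, roughly:
 *
 *     struct buffer input = BUF_NULL;      // or early data already received
 *     struct stream *strm = stream_new(sess, sc, &input);
 *
 *     if (!strm)
 *         goto fail;                       // <input> is still owned by caller
 *     // on success <input> is BUF_NULL and the stream's task has already
 *     // been woken up with TASK_WOKEN_INIT, as described above.
 */
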
/*
 * frees the context associated with a stream. It must have been removed first.
 */
void stream_free(struct stream *s)
{
	struct session *sess = strm_sess(s);
	struct proxy *fe = sess->fe;
	struct bref *bref, *back;
	int i;

	DBG_TRACE_POINT(STRM_EV_STRM_FREE, s);

	/* detach the stream from its own task before even releasing it so
	 * that walking over a task list never exhibits a dying stream.
	 */
	s->task->context = NULL;
	__ha_barrier_store();

	pendconn_free(s);

	if (objt_server(s->target)) { /* there may be requests left pending in queue */
		if (s->flags & SF_CURR_SESS) {
			s->flags &= ~SF_CURR_SESS;
			_HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
		}
		if (may_dequeue_tasks(__objt_server(s->target), s->be))
			process_srv_queue(__objt_server(s->target));
	}

	if (unlikely(s->srv_conn)) {
		/* the stream still has a reserved slot on a server, but
		 * it should normally be only the same as the one above,
		 * so this should not happen in fact.
		 */
		sess_change_server(s, NULL);
	}

	if (s->req.pipe)
		put_pipe(s->req.pipe);

	if (s->res.pipe)
		put_pipe(s->res.pipe);

	/* We may still be present in the buffer wait queue */
	if (LIST_INLIST(&s->buffer_wait.list))
		LIST_DEL_INIT(&s->buffer_wait.list);

	if (s->req.buf.size || s->res.buf.size) {
		int count = !!s->req.buf.size + !!s->res.buf.size;

		b_free(&s->req.buf);
		b_free(&s->res.buf);
		offer_buffers(NULL, count);
	}

	pool_free(pool_head_uniqueid, s->unique_id.ptr);
	s->unique_id = IST_NULL;

	flt_stream_stop(s);
	flt_stream_release(s, 0);

	hlua_ctx_destroy(s->hlua[0]);
	hlua_ctx_destroy(s->hlua[1]);
	s->hlua[0] = s->hlua[1] = NULL;

	if (s->txn)
		http_destroy_txn(s);

	/* ensure the client-side transport layer is destroyed */
	/* Be sure it is useless !! */
	/* if (cli_cs) */
	/*	cs_close(cli_cs); */

	for (i = 0; i < s->store_count; i++) {
		if (!s->store[i].ts)
			continue;
		stksess_free(s->store[i].table, s->store[i].ts);
		s->store[i].ts = NULL;
	}

	if (s->resolv_ctx.requester) {
		__decl_thread(struct resolvers *resolvers = s->resolv_ctx.parent->arg.resolv.resolvers);

		HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
		ha_free(&s->resolv_ctx.hostname_dn);
		s->resolv_ctx.hostname_dn_len = 0;
		resolv_unlink_resolution(s->resolv_ctx.requester);
		HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);

		pool_free(resolv_requester_pool, s->resolv_ctx.requester);
		s->resolv_ctx.requester = NULL;
	}

	if (fe) {
		if (s->req_cap) {
			struct cap_hdr *h;
			for (h = fe->req_cap; h; h = h->next)
				pool_free(h->pool, s->req_cap[h->index]);
			pool_free(fe->req_cap_pool, s->req_cap);
		}

		if (s->res_cap) {
			struct cap_hdr *h;
			for (h = fe->rsp_cap; h; h = h->next)
				pool_free(h->pool, s->res_cap[h->index]);
			pool_free(fe->rsp_cap_pool, s->res_cap);
		}
	}

	/* Cleanup all variable contexts. */
	if (!LIST_ISEMPTY(&s->vars_txn.head))
		vars_prune(&s->vars_txn, s->sess, s);
	if (!LIST_ISEMPTY(&s->vars_reqres.head))
		vars_prune(&s->vars_reqres, s->sess, s);

	stream_store_counters(s);
	pool_free(pool_head_stk_ctr, s->stkctr);

	list_for_each_entry_safe(bref, back, &s->back_refs, users) {
		/* we have to unlink all watchers. We must not relink them if
		 * this stream was the last one in the list. This is safe to do
		 * here because we're touching our thread's list so we know
		 * that other streams are not active, and the watchers will
		 * only touch their node under thread isolation.
		 */
		LIST_DEL_INIT(&bref->users);
		if (s->list.n != &th_ctx->streams)
			LIST_APPEND(&LIST_ELEM(s->list.n, struct stream *, list)->back_refs, &bref->users);
		bref->ref = s->list.n;
		__ha_barrier_store();
	}
	LIST_DELETE(&s->list);

	sc_destroy(s->scb);
	sc_destroy(s->scf);

	pool_free(pool_head_stream, s);

	/* We may want to free the maximum amount of pools if the proxy is stopping */
	if (fe && unlikely(fe->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
		pool_flush(pool_head_buffer);
		pool_flush(pool_head_http_txn);
		pool_flush(pool_head_requri);
		pool_flush(pool_head_capture);
		pool_flush(pool_head_stream);
		pool_flush(pool_head_session);
		pool_flush(pool_head_connection);
		pool_flush(pool_head_pendconn);
		pool_flush(fe->req_cap_pool);
		pool_flush(fe->rsp_cap_pool);
	}
}


/* Allocates a work buffer for stream <s>. It is meant to be called inside
 * process_stream(). It will only allocate the side needed for the function
 * to work fine, which is the response buffer so that an error message may be
 * built and returned. Response buffers may be allocated from the reserve;
 * this is critical to ensure that a response may always flow and will never
 * block a server from releasing a connection. Returns 0 in case of failure,
 * non-zero otherwise.
 */
static int stream_alloc_work_buffer(struct stream *s)
{
	if (b_alloc(&s->res.buf))
		return 1;
	return 0;
}

/* releases unused buffers after processing. Typically used at the end of the
 * update() functions. It will try to wake up as many tasks/applets as the
 * number of buffers that it releases. In practice, most often streams are
 * blocked on a single buffer, so it makes sense to try to wake two up when two
 * buffers are released at once.
 */
void stream_release_buffers(struct stream *s)
{
	int offer = 0;

	if (c_size(&s->req) && c_empty(&s->req)) {
		offer++;
		b_free(&s->req.buf);
	}
	if (c_size(&s->res) && c_empty(&s->res)) {
		offer++;
		b_free(&s->res.buf);
	}

	/* if we're certain to have at least 1 buffer available, and there is
	 * someone waiting, we can wake up a waiter and offer them.
	 */
	if (offer)
		offer_buffers(s, offer);
}

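/* Illustrative note (not part of the original file): stream_alloc_work_buffer()
 * and stream_release_buffers() work as a pair around process_stream(): the
 * former only guarantees a response buffer so that an error message can always
 * be built, while the latter gives empty buffers back and calls offer_buffers()
 * so that waiters registered on the buffer_wait list (see stream_buf_available()
 * above) get a chance to allocate in turn.
 */
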
void stream_process_counters(struct stream *s)
{
	struct session *sess = s->sess;
	unsigned long long bytes;
	int i;

	bytes = s->req.total - s->logs.bytes_in;
	s->logs.bytes_in = s->req.total;
	if (bytes) {
		_HA_ATOMIC_ADD(&sess->fe->fe_counters.bytes_in, bytes);
		_HA_ATOMIC_ADD(&s->be->be_counters.bytes_in, bytes);

		if (objt_server(s->target))
			_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.bytes_in, bytes);

		if (sess->listener && sess->listener->counters)
			_HA_ATOMIC_ADD(&sess->listener->counters->bytes_in, bytes);

		for (i = 0; i < global.tune.nb_stk_ctr; i++) {
			if (!stkctr_inc_bytes_in_ctr(&s->stkctr[i], bytes))
				stkctr_inc_bytes_in_ctr(&sess->stkctr[i], bytes);
		}
	}

	bytes = s->res.total - s->logs.bytes_out;
	s->logs.bytes_out = s->res.total;
	if (bytes) {
		_HA_ATOMIC_ADD(&sess->fe->fe_counters.bytes_out, bytes);
		_HA_ATOMIC_ADD(&s->be->be_counters.bytes_out, bytes);

		if (objt_server(s->target))
			_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.bytes_out, bytes);

		if (sess->listener && sess->listener->counters)
			_HA_ATOMIC_ADD(&sess->listener->counters->bytes_out, bytes);

		for (i = 0; i < global.tune.nb_stk_ctr; i++) {
			if (!stkctr_inc_bytes_out_ctr(&s->stkctr[i], bytes))
				stkctr_inc_bytes_out_ctr(&sess->stkctr[i], bytes);
		}
	}
}

/* Abort processing on both channels at the same time */
void stream_abort(struct stream *s)
{
	channel_abort(&s->req);
	channel_abort(&s->res);
}

/*
 * Returns a message to the client; the connection is shut down for read,
 * and the request is cleared so that no server connection can be initiated.
 * The buffer is marked for read shutdown on the other side to protect the
 * message, and the buffer write is enabled. The message is contained in a
 * "chunk". If it is null, then an empty message is used. The reply buffer does
 * not need to be empty before this, and its contents will not be overwritten.
 * The primary goal of this function is to return error messages to a client.
 */
void stream_retnclose(struct stream *s, const struct buffer *msg)
{
	struct channel *ic = &s->req;
	struct channel *oc = &s->res;

	channel_auto_read(ic);
	channel_abort(ic);
	channel_erase(ic);
	channel_truncate(oc);

	if (likely(msg && msg->data))
		co_inject(oc, msg->area, msg->data);

	channel_auto_read(oc);
	channel_auto_close(oc);
	sc_schedule_abort(s->scb);
}

int stream_set_timeout(struct stream *s, enum act_timeout_name name, int timeout)
{
	switch (name) {
	case ACT_TIMEOUT_SERVER:
		s->scb->ioto = timeout;
		return 1;

	case ACT_TIMEOUT_TUNNEL:
		s->tunnel_timeout = timeout;
		return 1;

	default:
		return 0;
	}
}

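/* Illustrative sketch (hypothetical action handler, not part of the original
 * file): a "set-timeout" action would typically convert its argument to ticks
 * and call stream_set_timeout(), e.g.:
 *
 *     if (!stream_set_timeout(s, ACT_TIMEOUT_TUNNEL, MS_TO_TICKS(30000)))
 *         return ACT_RET_ERR;   // unknown timeout name, nothing was changed
 *
 * Only the server and tunnel timeouts are handled here; any other value
 * returns 0 and leaves the stream untouched.
 */
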
/*
 * This function handles the transition between the SC_ST_CON state and the
 * SC_ST_EST state. It must only be called after switching from SC_ST_CON (or
 * SC_ST_INI or SC_ST_RDY) to SC_ST_EST, but only when a ->proto is defined.
 * Note that it will switch the interface to SC_ST_DIS if we already have
 * the SC_FL_ABRT_DONE flag, meaning we were able to forward the request and
 * receive the response before process_stream() had the opportunity to
 * make the switch from SC_ST_CON to SC_ST_EST. When that happens, we want
 * to go through back_establish() anyway, to make sure the analysers run.
 * Timeouts are cleared. Errors are reported on the channel so that analysers
 * can handle them.
 */
Willy Tarreau3a9312a2020-01-09 18:43:15 +0100897static void back_establish(struct stream *s)
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100898{
Willy Tarreaufd9417b2022-05-18 16:23:22 +0200899 struct connection *conn = sc_conn(s->scb);
Willy Tarreau7b8c4f92014-11-28 15:15:44 +0100900 struct channel *req = &s->req;
901 struct channel *rep = &s->res;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100902
Christopher Faulet62e75742022-03-31 09:16:34 +0200903 DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
Willy Tarreaud66ed882019-06-05 18:02:04 +0200904 /* First, centralize the timers information, and clear any irrelevant
905 * timeout.
906 */
Willy Tarreau69530f52023-04-28 09:16:15 +0200907 s->logs.t_connect = ns_to_ms(now_ns - s->logs.accept_ts);
Christopher Fauletae024ce2022-03-29 19:02:31 +0200908 s->conn_exp = TICK_ETERNITY;
909 s->flags &= ~SF_CONN_EXP;
Willy Tarreaud66ed882019-06-05 18:02:04 +0200910
911 /* errors faced after sending data need to be reported */
Christopher Faulete182a8e2023-04-14 12:07:26 +0200912 if ((s->scb->flags & SC_FL_ERROR) && req->flags & CF_WROTE_DATA) {
Christopher Faulet2e56a732023-01-26 16:18:09 +0100913 s->req.flags |= CF_WRITE_EVENT;
914 s->res.flags |= CF_READ_EVENT;
Christopher Faulet50264b42022-03-30 19:39:30 +0200915 s->conn_err_type = STRM_ET_DATA_ERR;
Christopher Faulet62e75742022-03-31 09:16:34 +0200916 DBG_TRACE_STATE("read/write error", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
Willy Tarreaud66ed882019-06-05 18:02:04 +0200917 }
918
Willy Tarreau3fdb3662012-11-12 00:42:33 +0100919 if (objt_server(s->target))
Willy Tarreau88bc8002021-12-06 07:01:02 +0000920 health_adjust(__objt_server(s->target), HANA_STATUS_L4_OK);
Krzysztof Piotr Oledzki97f07b82009-12-15 22:31:24 +0100921
Christopher Faulet1bb6afa2021-03-08 17:57:53 +0100922 if (!IS_HTX_STRM(s)) { /* let's allow immediate data connection in this case */
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100923 /* if the user wants to log as soon as possible, without counting
924 * bytes from the server, then this is the right moment. */
Willy Tarreaud0d8da92015-04-04 02:10:38 +0200925 if (!LIST_ISEMPTY(&strm_fe(s)->logformat) && !(s->logs.logwait & LW_BYTES)) {
Willy Tarreau66425e32018-07-25 06:55:12 +0200926 /* note: no pend_pos here, session is established */
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100927 s->logs.t_close = s->logs.t_connect; /* to get a valid end date */
Willy Tarreaua5555ec2008-11-30 19:02:32 +0100928 s->do_log(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100929 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100930 }
931 else {
Christopher Faulet9a790f62023-03-16 14:40:03 +0100932 s->scb->flags |= SC_FL_RCV_ONCE; /* a single read is enough to get response headers */
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100933 }
934
Willy Tarreau0007d0a2018-12-11 18:01:38 +0100935 rep->analysers |= strm_fe(s)->fe_rsp_ana | s->be->be_rsp_ana;
Christopher Faulet309c6412015-12-02 09:57:32 +0100936
Willy Tarreau4164eb92022-05-25 15:42:03 +0200937 se_have_more_data(s->scb->sedesc);
Christopher Faulet23577182022-12-20 18:47:39 +0100938 rep->flags |= CF_READ_EVENT; /* producer is now attached */
Christopher Faulet4c135682023-02-16 11:09:31 +0100939 sc_ep_report_read_activity(s->scb);
Christopher Faulet0256da12021-12-15 09:50:17 +0100940 if (conn) {
Amaury Denoyelle90d3d882020-12-10 13:43:52 +0100941 /* real connections have timeouts
942 * if already defined, it means that a set-timeout rule has
943 * been executed so do not overwrite them
944 */
Christopher Faulet5aaacfb2023-02-15 08:13:33 +0100945 if (!tick_isset(s->scb->ioto))
946 s->scb->ioto = s->be->timeout.server;
Amaury Denoyellefb504432020-12-10 13:43:53 +0100947 if (!tick_isset(s->tunnel_timeout))
948 s->tunnel_timeout = s->be->timeout.tunnel;
Amaury Denoyelle90d3d882020-12-10 13:43:52 +0100949
Olivier Houchard47e9a1a2018-11-07 17:55:19 +0100950 /* The connection is now established, try to read data from the
951 * underlying layer, and subscribe to recv events. We use a
952 * delayed recv here to give a chance to the data to flow back
953 * by the time we process other tasks.
954 */
Willy Tarreauf61dd192022-05-27 09:00:19 +0200955 sc_chk_rcv(s->scb);
Willy Tarreaud04e8582010-05-31 12:31:35 +0200956 }
Olivier Houchard78595262019-07-26 14:54:34 +0200957 /* If we managed to get the whole response, and we don't have anything
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200958 * left to send, or can't, switch to SC_ST_DIS now. */
Christopher Fauletca5309a2023-04-17 16:17:32 +0200959 if ((s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) || (s->scf->flags & SC_FL_SHUT_DONE)) {
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200960 s->scb->state = SC_ST_DIS;
Christopher Faulet62e75742022-03-31 09:16:34 +0200961 DBG_TRACE_STATE("response channel shutdown for read/write", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
Christopher Fauleteea8fc72019-11-05 16:18:10 +0100962 }
963
Christopher Faulet62e75742022-03-31 09:16:34 +0200964 DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100965}
966
Willy Tarreau87b09662015-04-03 00:22:06 +0200967/* Set correct stream termination flags in case no analyser has done it. It
Simon Hormandec5be42011-06-08 09:19:07 +0900968 * also counts a failed request if the server state has not reached the request
969 * stage.
970 */
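/* For reference, the SF_FINST_* flags set below roughly correspond to the
 * second character of the termination state reported in the logs: 'R' while
 * waiting for the request, 'Q' while queued, 'C' while connecting, 'D'
 * during the data phase and 'L' while sending the last data to the client.
 */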
Christopher Fauletdbad8ec2023-04-13 14:46:01 +0200971void sess_set_term_flags(struct stream *s)
Simon Hormandec5be42011-06-08 09:19:07 +0900972{
Willy Tarreaue7dff022015-04-03 01:14:29 +0200973 if (!(s->flags & SF_FINST_MASK)) {
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200974 if (s->scb->state == SC_ST_INI) {
Willy Tarreau7ab22adb2019-06-05 14:53:22 +0200975 /* anything before REQ in fact */
Willy Tarreau4781b152021-04-06 13:53:36 +0200976 _HA_ATOMIC_INC(&strm_fe(s)->fe_counters.failed_req);
Willy Tarreau2c1068c2015-09-23 12:21:21 +0200977 if (strm_li(s) && strm_li(s)->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +0200978 _HA_ATOMIC_INC(&strm_li(s)->counters->failed_req);
Simon Hormandec5be42011-06-08 09:19:07 +0900979
Willy Tarreaue7dff022015-04-03 01:14:29 +0200980 s->flags |= SF_FINST_R;
Simon Hormandec5be42011-06-08 09:19:07 +0900981 }
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200982 else if (s->scb->state == SC_ST_QUE)
Willy Tarreaue7dff022015-04-03 01:14:29 +0200983 s->flags |= SF_FINST_Q;
Willy Tarreau74568cf2022-05-27 09:03:30 +0200984 else if (sc_state_in(s->scb->state, SC_SB_REQ|SC_SB_TAR|SC_SB_ASS|SC_SB_CON|SC_SB_CER|SC_SB_RDY))
Willy Tarreaue7dff022015-04-03 01:14:29 +0200985 s->flags |= SF_FINST_C;
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200986 else if (s->scb->state == SC_ST_EST || s->prev_conn_state == SC_ST_EST)
Willy Tarreaue7dff022015-04-03 01:14:29 +0200987 s->flags |= SF_FINST_D;
Simon Hormandec5be42011-06-08 09:19:07 +0900988 else
Willy Tarreaue7dff022015-04-03 01:14:29 +0200989 s->flags |= SF_FINST_L;
Simon Hormandec5be42011-06-08 09:19:07 +0900990 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100991}
992
Thierry FOURNIER5a363e72015-09-27 19:29:33 +0200993/* This function parses the use-service action ruleset. It executes
994 * the associated ACL and sets an applet as the stream or txn final node.
995 * It returns ACT_RET_ERR if an error occurs, leaving the proxy in a
Ilya Shipitsinc02a23f2020-05-06 00:53:22 +0500996 * consistent state. It returns ACT_RET_STOP on success because
Thierry FOURNIER5a363e72015-09-27 19:29:33 +0200997 * use-service must be a terminal action. It returns ACT_RET_YIELD
998 * if the initialisation function requires more data.
999 */
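/* Illustrative configuration reaching this code (syntax assumed, not part
 * of this file):
 * http-request use-service prometheus-exporter if { path /metrics }
 * tcp-request content use-service lua.my_applet
 */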
1000enum act_return process_use_service(struct act_rule *rule, struct proxy *px,
1001 struct session *sess, struct stream *s, int flags)
1002
1003{
1004 struct appctx *appctx;
1005
1006 /* Initialises the applet if it is required. */
Christopher Faulet105ba6c2019-12-18 14:41:51 +01001007 if (flags & ACT_OPT_FIRST) {
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001008 /* Register the applet. This function schedules the applet. */
1009 s->target = &rule->applet.obj_type;
Willy Tarreaua0b58b52022-05-27 08:33:53 +02001010 appctx = sc_applet_create(s->scb, objt_applet(s->target));
Christopher Faulet2da02ae2022-02-24 13:45:27 +01001011 if (unlikely(!appctx))
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001012 return ACT_RET_ERR;
1013
Christopher Faulet93882042022-01-19 14:56:50 +01001014 /* Finish initialisation of the context. */
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001015 appctx->rule = rule;
Christopher Faulet16c0d9c2022-05-12 14:59:28 +02001016 if (appctx_init(appctx) == -1)
Christopher Faulet4aa1d282022-01-13 16:01:35 +01001017 return ACT_RET_ERR;
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001018 }
1019 else
Willy Tarreau8e7c6e62022-05-18 17:58:02 +02001020 appctx = __sc_appctx(s->scb);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001021
Christopher Faulet2571bc62019-03-01 11:44:26 +01001022 if (rule->from != ACT_F_HTTP_REQ) {
1023 if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
Willy Tarreau4781b152021-04-06 13:53:36 +02001024 _HA_ATOMIC_INC(&sess->fe->fe_counters.intercepted_req);
Christopher Faulet2571bc62019-03-01 11:44:26 +01001025
1026 /* The SF_ASSIGNED flag prevents server assignment. */
1027 s->flags |= SF_ASSIGNED;
1028 }
1029
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001030 /* Now we can schedule the applet. */
Willy Tarreau90e8b452022-05-25 18:21:43 +02001031 applet_need_more_data(appctx);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001032 appctx_wakeup(appctx);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001033 return ACT_RET_STOP;
1034}
1035
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001036/* This stream analyser checks the switching rules and changes the backend
Willy Tarreau4de91492010-01-22 19:10:05 +01001037 * if appropriate. The default_backend rule is also considered, then the
1038 * target backend's forced persistence rules are also evaluated last if any.
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001039 * It returns 1 if the processing can continue on next analysers, or zero if it
1040 * either needs more data or wants to immediately abort the request.
1041 */
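/* Illustrative configuration handled below (syntax assumed, not part of
 * this file):
 * use_backend be_static if { path_beg /static } # static rule
 * use_backend %[req.hdr(x-backend),lower] # dynamic rule, resolved at run time
 * default_backend be_app
 */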
Willy Tarreau87b09662015-04-03 00:22:06 +02001042static int process_switching_rules(struct stream *s, struct channel *req, int an_bit)
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001043{
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001044 struct persist_rule *prst_rule;
Willy Tarreau192252e2015-04-04 01:47:55 +02001045 struct session *sess = s->sess;
1046 struct proxy *fe = sess->fe;
Willy Tarreau4de91492010-01-22 19:10:05 +01001047
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001048 req->analysers &= ~an_bit;
1049 req->analyse_exp = TICK_ETERNITY;
1050
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001051 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001052
1053 /* now check whether we have some switching rules for this request */
Willy Tarreaue7dff022015-04-03 01:14:29 +02001054 if (!(s->flags & SF_BE_ASSIGNED)) {
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001055 struct switching_rule *rule;
1056
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02001057 list_for_each_entry(rule, &fe->switching_rules, list) {
Willy Tarreauf51658d2014-04-23 01:21:56 +02001058 int ret = 1;
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001059
Willy Tarreauf51658d2014-04-23 01:21:56 +02001060 if (rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001061 ret = acl_exec_cond(rule->cond, fe, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Willy Tarreauf51658d2014-04-23 01:21:56 +02001062 ret = acl_pass(ret);
1063 if (rule->cond->pol == ACL_COND_UNLESS)
1064 ret = !ret;
1065 }
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001066
1067 if (ret) {
Bertrand Jacquin702d44f2013-11-19 11:43:06 +01001068 /* If the backend name is dynamic, try to resolve the name.
1069 * If we can't resolve the name, or if any error occurs, break
1070 * the loop and fall back to the default backend.
1071 */
Dragan Dosen2ae327e2017-10-26 11:25:10 +02001072 struct proxy *backend = NULL;
Bertrand Jacquin702d44f2013-11-19 11:43:06 +01001073
1074 if (rule->dynamic) {
Willy Tarreau83061a82018-07-13 11:56:34 +02001075 struct buffer *tmp;
Dragan Dosen2ae327e2017-10-26 11:25:10 +02001076
1077 tmp = alloc_trash_chunk();
1078 if (!tmp)
1079 goto sw_failed;
1080
Willy Tarreau843b7cb2018-07-13 10:54:26 +02001081 if (build_logline(s, tmp->area, tmp->size, &rule->be.expr))
1082 backend = proxy_be_by_name(tmp->area);
Dragan Dosen2ae327e2017-10-26 11:25:10 +02001083
1084 free_trash_chunk(tmp);
1085 tmp = NULL;
1086
Bertrand Jacquin702d44f2013-11-19 11:43:06 +01001087 if (!backend)
1088 break;
1089 }
1090 else
1091 backend = rule->be.backend;
1092
Willy Tarreau87b09662015-04-03 00:22:06 +02001093 if (!stream_set_backend(s, backend))
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001094 goto sw_failed;
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001095 break;
1096 }
1097 }
1098
1099 /* To ensure correct connection accounting on the backend, we
1100 * have to assign one if it was not set (eg: a listen). This
1101 * measure also takes care of correctly setting the default
Christopher Fauletf0d7eb22021-03-22 15:07:51 +01001102 * backend if any. Don't do anything if an upgrade is already in
1103 * progress.
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001104 */
Christopher Fauletf0d7eb22021-03-22 15:07:51 +01001105 if (!(s->flags & (SF_BE_ASSIGNED|SF_IGNORE)))
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02001106 if (!stream_set_backend(s, fe->defbe.be ? fe->defbe.be : s->be))
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001107 goto sw_failed;
Christopher Fauletf0d7eb22021-03-22 15:07:51 +01001108
1109 /* No backend assigned but no error reported. It happens when a
1110 * TCP stream is upgraded to HTTP/2.
1111 */
1112 if ((s->flags & (SF_BE_ASSIGNED|SF_IGNORE)) == SF_IGNORE) {
1113 DBG_TRACE_DEVEL("leaving with no backend because of a destructive upgrade", STRM_EV_STRM_ANA, s);
1114 return 0;
1115 }
1116
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001117 }
1118
Willy Tarreaufb356202010-08-03 14:02:05 +02001119 /* we don't want to run the TCP or HTTP filters again if the backend has not changed */
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02001120 if (fe == s->be) {
Willy Tarreau22ec1ea2014-11-27 20:45:39 +01001121 s->req.analysers &= ~AN_REQ_INSPECT_BE;
1122 s->req.analysers &= ~AN_REQ_HTTP_PROCESS_BE;
Christopher Faulet0184ea72017-01-05 14:06:34 +01001123 s->req.analysers &= ~AN_REQ_FLT_START_BE;
Willy Tarreaufb356202010-08-03 14:02:05 +02001124 }
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001125
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001126 /* as soon as we know the backend, we must check if we have a matching forced or ignored
Willy Tarreau87b09662015-04-03 00:22:06 +02001127 * persistence rule, and report that in the stream.
Willy Tarreau4de91492010-01-22 19:10:05 +01001128 */
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001129 list_for_each_entry(prst_rule, &s->be->persist_rules, list) {
Willy Tarreau4de91492010-01-22 19:10:05 +01001130 int ret = 1;
1131
1132 if (prst_rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001133 ret = acl_exec_cond(prst_rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Willy Tarreau4de91492010-01-22 19:10:05 +01001134 ret = acl_pass(ret);
1135 if (prst_rule->cond->pol == ACL_COND_UNLESS)
1136 ret = !ret;
1137 }
1138
1139 if (ret) {
1140 /* no rule, or the rule matches */
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001141 if (prst_rule->type == PERSIST_TYPE_FORCE) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02001142 s->flags |= SF_FORCE_PRST;
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001143 } else {
Willy Tarreaue7dff022015-04-03 01:14:29 +02001144 s->flags |= SF_IGNORE_PRST;
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001145 }
Willy Tarreau4de91492010-01-22 19:10:05 +01001146 break;
1147 }
1148 }
1149
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001150 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001151 return 1;
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001152
1153 sw_failed:
1154 /* immediately abort this request in case of allocation failure */
Christopher Faulet7eb837d2023-04-13 15:22:29 +02001155 stream_abort(s);
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001156
Willy Tarreaue7dff022015-04-03 01:14:29 +02001157 if (!(s->flags & SF_ERR_MASK))
1158 s->flags |= SF_ERR_RESOURCE;
1159 if (!(s->flags & SF_FINST_MASK))
1160 s->flags |= SF_FINST_R;
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001161
Willy Tarreaueee5b512015-04-03 23:46:31 +02001162 if (s->txn)
1163 s->txn->status = 500;
Christopher Faulet0184ea72017-01-05 14:06:34 +01001164 s->req.analysers &= AN_REQ_FLT_END;
Willy Tarreau22ec1ea2014-11-27 20:45:39 +01001165 s->req.analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001166 DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_ANA|STRM_EV_STRM_ERR, s);
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001167 return 0;
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001168}
1169
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001170/* This stream analyser works on a request. It applies all use-server rules on
1171 * it then returns 1. The data must already be present in the buffer otherwise
1172 * they won't match. It always returns 1.
1173 */
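/* Illustrative configuration handled below (syntax assumed, not part of
 * this file):
 * use-server srv_images if { path_beg /img }
 * use-server %[req.hdr(x-srv)] if { req.hdr(x-srv) -m found } # dynamic rule
 */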
Willy Tarreau87b09662015-04-03 00:22:06 +02001174static int process_server_rules(struct stream *s, struct channel *req, int an_bit)
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001175{
1176 struct proxy *px = s->be;
Willy Tarreau192252e2015-04-04 01:47:55 +02001177 struct session *sess = s->sess;
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001178 struct server_rule *rule;
1179
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001180 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001181
Willy Tarreaue7dff022015-04-03 01:14:29 +02001182 if (!(s->flags & SF_ASSIGNED)) {
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001183 list_for_each_entry(rule, &px->server_rules, list) {
1184 int ret;
1185
Willy Tarreau192252e2015-04-04 01:47:55 +02001186 ret = acl_exec_cond(rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001187 ret = acl_pass(ret);
1188 if (rule->cond->pol == ACL_COND_UNLESS)
1189 ret = !ret;
1190
1191 if (ret) {
Jerome Magnin824186b2020-03-29 09:37:12 +02001192 struct server *srv;
1193
1194 if (rule->dynamic) {
1195 struct buffer *tmp = get_trash_chunk();
1196
1197 if (!build_logline(s, tmp->area, tmp->size, &rule->expr))
1198 break;
1199
1200 srv = findserver(s->be, tmp->area);
1201 if (!srv)
1202 break;
1203 }
1204 else
1205 srv = rule->srv.ptr;
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001206
Emeric Brun52a91d32017-08-31 14:41:55 +02001207 if ((srv->cur_state != SRV_ST_STOPPED) ||
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001208 (px->options & PR_O_PERSIST) ||
Willy Tarreaue7dff022015-04-03 01:14:29 +02001209 (s->flags & SF_FORCE_PRST)) {
1210 s->flags |= SF_DIRECT | SF_ASSIGNED;
Willy Tarreau3fdb3662012-11-12 00:42:33 +01001211 s->target = &srv->obj_type;
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001212 break;
1213 }
1214 /* if the server is not UP, let's go on with next rules
1215 * just in case another one is suited.
1216 */
1217 }
1218 }
1219 }
1220
1221 req->analysers &= ~an_bit;
1222 req->analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001223 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001224 return 1;
1225}
1226
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001227static inline void sticking_rule_find_target(struct stream *s,
1228 struct stktable *t, struct stksess *ts)
1229{
1230 struct proxy *px = s->be;
1231 struct eb32_node *node;
1232 struct dict_entry *de;
1233 void *ptr;
1234 struct server *srv;
1235
1236 /* Look for the server name previously stored in <t> stick-table */
1237 HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
Thayne McCombs92149f92020-11-20 01:28:26 -07001238 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_KEY);
Emeric Brun0e3457b2021-06-30 17:18:28 +02001239 de = stktable_data_cast(ptr, std_t_dict);
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001240 HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
1241
1242 if (de) {
Thayne McCombs92149f92020-11-20 01:28:26 -07001243 struct ebpt_node *node;
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001244
Thayne McCombs92149f92020-11-20 01:28:26 -07001245 if (t->server_key_type == STKTABLE_SRV_NAME) {
1246 node = ebis_lookup(&px->conf.used_server_name, de->value.key);
1247 if (node) {
1248 srv = container_of(node, struct server, conf.name);
1249 goto found;
1250 }
1251 } else if (t->server_key_type == STKTABLE_SRV_ADDR) {
1252 HA_RWLOCK_RDLOCK(PROXY_LOCK, &px->lock);
1253 node = ebis_lookup(&px->used_server_addr, de->value.key);
1254 HA_RWLOCK_RDUNLOCK(PROXY_LOCK, &px->lock);
1255 if (node) {
1256 srv = container_of(node, struct server, addr_node);
1257 goto found;
1258 }
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001259 }
1260 }
1261
1262 /* Look for the server ID */
1263 HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
1264 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_ID);
Emeric Brun0e3457b2021-06-30 17:18:28 +02001265 node = eb32_lookup(&px->conf.used_server_id, stktable_data_cast(ptr, std_t_sint));
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001266 HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
1267
1268 if (!node)
1269 return;
1270
1271 srv = container_of(node, struct server, conf.id);
1272 found:
1273 if ((srv->cur_state != SRV_ST_STOPPED) ||
1274 (px->options & PR_O_PERSIST) || (s->flags & SF_FORCE_PRST)) {
1275 s->flags |= SF_DIRECT | SF_ASSIGNED;
1276 s->target = &srv->obj_type;
1277 }
1278}
1279
Emeric Brun1d33b292010-01-04 15:47:17 +01001280/* This stream analyser works on a request. It applies all sticking rules on
1281 * it then returns 1. The data must already be present in the buffer otherwise
1282 * they won't match. It always returns 1.
1283 */
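/* Illustrative configuration handled below (syntax assumed, not part of
 * this file):
 * stick on src table t # shorthand for "stick match" + "stick store-request"
 */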
Willy Tarreau87b09662015-04-03 00:22:06 +02001284static int process_sticking_rules(struct stream *s, struct channel *req, int an_bit)
Emeric Brun1d33b292010-01-04 15:47:17 +01001285{
1286 struct proxy *px = s->be;
Willy Tarreau192252e2015-04-04 01:47:55 +02001287 struct session *sess = s->sess;
Emeric Brun1d33b292010-01-04 15:47:17 +01001288 struct sticking_rule *rule;
1289
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001290 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001291
1292 list_for_each_entry(rule, &px->sticking_rules, list) {
1293 int ret = 1 ;
1294 int i;
1295
Willy Tarreau9667a802013-12-09 12:52:13 +01001296 /* Only the first stick store-request of each table is applied
1297 * and other ones are ignored. The purpose is to allow complex
1298 * configurations which look for multiple entries by decreasing
1299 * order of precision and to stop at the first which matches.
1300 * An example could be a store of the IP address from an HTTP
1301 * header first, then from the source if not found.
1302 */
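 /* Illustrative configuration matching the example above (syntax
 * assumed, not part of this file):
 * stick store-request req.hdr(x-client-ip) table t
 * stick store-request src table t
 * The second rule is only considered when the first one did not
 * queue an entry for table t.
 */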
Jerome Magninbee00ad2020-01-16 17:37:21 +01001303 if (rule->flags & STK_IS_STORE) {
1304 for (i = 0; i < s->store_count; i++) {
1305 if (rule->table.t == s->store[i].table)
1306 break;
1307 }
Emeric Brun1d33b292010-01-04 15:47:17 +01001308
Jerome Magninbee00ad2020-01-16 17:37:21 +01001309 if (i != s->store_count)
1310 continue;
1311 }
Emeric Brun1d33b292010-01-04 15:47:17 +01001312
1313 if (rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001314 ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001315 ret = acl_pass(ret);
1316 if (rule->cond->pol == ACL_COND_UNLESS)
1317 ret = !ret;
1318 }
1319
1320 if (ret) {
1321 struct stktable_key *key;
1322
Willy Tarreau192252e2015-04-04 01:47:55 +02001323 key = stktable_fetch_key(rule->table.t, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->expr, NULL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001324 if (!key)
1325 continue;
1326
1327 if (rule->flags & STK_IS_MATCH) {
1328 struct stksess *ts;
1329
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001330 if ((ts = stktable_lookup_key(rule->table.t, key)) != NULL) {
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001331 if (!(s->flags & SF_ASSIGNED))
1332 sticking_rule_find_target(s, rule->table.t, ts);
Emeric Brun819fc6f2017-06-13 19:37:32 +02001333 stktable_touch_local(rule->table.t, ts, 1);
Emeric Brun1d33b292010-01-04 15:47:17 +01001334 }
1335 }
1336 if (rule->flags & STK_IS_STORE) {
1337 if (s->store_count < (sizeof(s->store) / sizeof(s->store[0]))) {
1338 struct stksess *ts;
1339
1340 ts = stksess_new(rule->table.t, key);
1341 if (ts) {
1342 s->store[s->store_count].table = rule->table.t;
1343 s->store[s->store_count++].ts = ts;
1344 }
1345 }
1346 }
1347 }
1348 }
1349
1350 req->analysers &= ~an_bit;
1351 req->analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001352 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001353 return 1;
1354}
1355
1356/* This stream analyser works on a response. It applies all store rules on it
1357 * then returns 1. The data must already be present in the buffer otherwise
1358 * they won't match. It always returns 1.
1359 */
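/* Illustrative configuration handled below (syntax assumed, not part of
 * this file): "stick store-response res.cook(SRVID) table t" queues a
 * pending entry here which is committed further down once the server is
 * known.
 */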
Willy Tarreau87b09662015-04-03 00:22:06 +02001360static int process_store_rules(struct stream *s, struct channel *rep, int an_bit)
Emeric Brun1d33b292010-01-04 15:47:17 +01001361{
1362 struct proxy *px = s->be;
Willy Tarreau192252e2015-04-04 01:47:55 +02001363 struct session *sess = s->sess;
Emeric Brun1d33b292010-01-04 15:47:17 +01001364 struct sticking_rule *rule;
1365 int i;
Willy Tarreau9667a802013-12-09 12:52:13 +01001366 int nbreq = s->store_count;
Emeric Brun1d33b292010-01-04 15:47:17 +01001367
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001368 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001369
1370 list_for_each_entry(rule, &px->storersp_rules, list) {
1371 int ret = 1 ;
Emeric Brun1d33b292010-01-04 15:47:17 +01001372
Willy Tarreau9667a802013-12-09 12:52:13 +01001373 /* Only the first stick store-response of each table is applied
1374 * and other ones are ignored. The purpose is to allow complex
1375 * configurations which look for multiple entries by decreasing
1376 * order of precision and to stop at the first which matches.
1377 * An example could be a store of a set-cookie value, with a
1378 * fallback to a parameter found in a 302 redirect.
1379 *
1380 * The store-response rules are not allowed to override the
1381 * store-request rules for the same table, but they may coexist.
1382 * Thus we can have up to one store-request entry and one store-
1383 * response entry for the same table at any time.
1384 */
1385 for (i = nbreq; i < s->store_count; i++) {
1386 if (rule->table.t == s->store[i].table)
1387 break;
1388 }
1389
1390 /* skip existing entries for this table */
1391 if (i < s->store_count)
1392 continue;
1393
Emeric Brun1d33b292010-01-04 15:47:17 +01001394 if (rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001395 ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001396 ret = acl_pass(ret);
1397 if (rule->cond->pol == ACL_COND_UNLESS)
1398 ret = !ret;
1399 }
1400
1401 if (ret) {
1402 struct stktable_key *key;
1403
Willy Tarreau192252e2015-04-04 01:47:55 +02001404 key = stktable_fetch_key(rule->table.t, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL, rule->expr, NULL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001405 if (!key)
1406 continue;
1407
Willy Tarreau37e340c2013-12-06 23:05:21 +01001408 if (s->store_count < (sizeof(s->store) / sizeof(s->store[0]))) {
Emeric Brun1d33b292010-01-04 15:47:17 +01001409 struct stksess *ts;
1410
1411 ts = stksess_new(rule->table.t, key);
1412 if (ts) {
1413 s->store[s->store_count].table = rule->table.t;
Emeric Brun1d33b292010-01-04 15:47:17 +01001414 s->store[s->store_count++].ts = ts;
1415 }
1416 }
1417 }
1418 }
1419
1420 /* process store request and store response */
1421 for (i = 0; i < s->store_count; i++) {
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001422 struct stksess *ts;
Willy Tarreau13c29de2010-06-06 16:40:39 +02001423 void *ptr;
Thayne McCombs92149f92020-11-20 01:28:26 -07001424 char *key;
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001425 struct dict_entry *de;
Thayne McCombs92149f92020-11-20 01:28:26 -07001426 struct stktable *t = s->store[i].table;
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001427
Christopher Fauletb9766402022-10-25 16:45:38 +02001428 if (!objt_server(s->target) || (__objt_server(s->target)->flags & SRV_F_NON_STICK)) {
Simon Hormanfa461682011-06-25 09:39:49 +09001429 stksess_free(s->store[i].table, s->store[i].ts);
1430 s->store[i].ts = NULL;
1431 continue;
1432 }
1433
Thayne McCombs92149f92020-11-20 01:28:26 -07001434 ts = stktable_set_entry(t, s->store[i].ts);
Emeric Brun819fc6f2017-06-13 19:37:32 +02001435 if (ts != s->store[i].ts) {
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001436 /* the entry already existed, we can free ours */
Thayne McCombs92149f92020-11-20 01:28:26 -07001437 stksess_free(t, s->store[i].ts);
Emeric Brun1d33b292010-01-04 15:47:17 +01001438 }
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001439 s->store[i].ts = NULL;
Emeric Brun819fc6f2017-06-13 19:37:32 +02001440
Thayne McCombs92149f92020-11-20 01:28:26 -07001441 if (t->server_key_type == STKTABLE_SRV_NAME)
1442 key = __objt_server(s->target)->id;
1443 else if (t->server_key_type == STKTABLE_SRV_ADDR)
1444 key = __objt_server(s->target)->addr_node.key;
1445 else
Willy Tarreaubc7c2072022-10-12 10:35:41 +02001446 key = NULL;
Thayne McCombs92149f92020-11-20 01:28:26 -07001447
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001448 HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
Willy Tarreaubc7c2072022-10-12 10:35:41 +02001449 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_ID);
1450 stktable_data_cast(ptr, std_t_sint) = __objt_server(s->target)->puid;
1451
1452 if (key) {
1453 de = dict_insert(&server_key_dict, key);
1454 if (de) {
1455 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_KEY);
1456 stktable_data_cast(ptr, std_t_dict) = de;
1457 }
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001458 }
Willy Tarreaubc7c2072022-10-12 10:35:41 +02001459
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001460 HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
1461
Thayne McCombs92149f92020-11-20 01:28:26 -07001462 stktable_touch_local(t, ts, 1);
Emeric Brun1d33b292010-01-04 15:47:17 +01001463 }
Willy Tarreau2a164ee2010-06-18 09:57:45 +02001464 s->store_count = 0; /* everything is stored */
Emeric Brun1d33b292010-01-04 15:47:17 +01001465
1466 rep->analysers &= ~an_bit;
1467 rep->analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001468
1469 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001470 return 1;
1471}
1472
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001473/* Set the stream to HTTP mode, if necessary. The minimal request HTTP analysers
1474 * are set and the client mux is upgraded. It returns 1 if the stream processing
1475 * may continue or 0 if it should be stopped. It happens on error or if the
Christopher Fauletae863c62021-03-15 12:03:44 +01001476 * upgrade required a new stream. The mux protocol may be specified.
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001477 */
Christopher Fauletae863c62021-03-15 12:03:44 +01001478int stream_set_http_mode(struct stream *s, const struct mux_proto_list *mux_proto)
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001479{
Willy Tarreaub49672d2022-05-27 10:13:37 +02001480 struct stconn *sc = s->scf;
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001481 struct connection *conn;
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001482
1483 /* Already an HTTP stream */
1484 if (IS_HTX_STRM(s))
1485 return 1;
1486
1487 s->req.analysers |= AN_REQ_WAIT_HTTP|AN_REQ_HTTP_PROCESS_FE;
1488
1489 if (unlikely(!s->txn && !http_create_txn(s)))
1490 return 0;
1491
Willy Tarreaub49672d2022-05-27 10:13:37 +02001492 conn = sc_conn(sc);
Christopher Faulet13a35e52021-12-20 15:34:16 +01001493 if (conn) {
Willy Tarreau4164eb92022-05-25 15:42:03 +02001494 se_have_more_data(s->scf->sedesc);
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001495 /* Make sure we're unsubscribed, as the new
1496 * mux will probably want to subscribe to
1497 * the underlying XPRT
1498 */
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02001499 if (s->scf->wait_event.events)
Willy Tarreaub49672d2022-05-27 10:13:37 +02001500 conn->mux->unsubscribe(sc, s->scf->wait_event.events, &(s->scf->wait_event));
Christopher Fauletae863c62021-03-15 12:03:44 +01001501
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001502 if (conn->mux->flags & MX_FL_NO_UPG)
1503 return 0;
Christopher Fauletb68f77d2022-06-16 16:24:16 +02001504
1505 sc_conn_prepare_endp_upgrade(sc);
Willy Tarreaub49672d2022-05-27 10:13:37 +02001506 if (conn_upgrade_mux_fe(conn, sc, &s->req.buf,
Christopher Fauletae863c62021-03-15 12:03:44 +01001507 (mux_proto ? mux_proto->token : ist("")),
Christopher Fauletb68f77d2022-06-16 16:24:16 +02001508 PROTO_MODE_HTTP) == -1) {
1509 sc_conn_abort_endp_upgrade(sc);
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001510 return 0;
Christopher Fauletb68f77d2022-06-16 16:24:16 +02001511 }
1512 sc_conn_commit_endp_upgrade(sc);
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001513
Christopher Faulet285f7612022-12-12 08:28:55 +01001514 s->req.flags &= ~(CF_READ_EVENT|CF_AUTO_CONNECT);
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001515 s->req.total = 0;
1516 s->flags |= SF_IGNORE;
Christopher Faulet9b8d7a12022-06-17 09:36:57 +02001517 if (sc_ep_test(sc, SE_FL_DETACHED)) {
1518 /* If the stream connector is detached, it means it was not
1519 * reused by the new mux. So destroy it, disable
1520 * logging, and abort the stream process. Thus the
1521 * stream will be silently destroyed. The new mux will
1522 * create new streams.
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001523 */
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001524 s->logs.logwait = 0;
1525 s->logs.level = 0;
Christopher Faulet7eb837d2023-04-13 15:22:29 +02001526 stream_abort(s);
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001527 s->req.analysers &= AN_REQ_FLT_END;
1528 s->req.analyse_exp = TICK_ETERNITY;
1529 }
1530 }
1531
1532 return 1;
1533}
1534
1535
Willy Tarreau4596fe22022-05-17 19:07:51 +02001536/* Updates at once the channel flags, and timers of both stream connectors of a
Christopher Fauletef285c12022-04-01 14:48:06 +02001537 * same stream, to complete the work after the analysers, then updates the data
1538 * layer below. This will ensure that any synchronous update performed at the
Willy Tarreau4596fe22022-05-17 19:07:51 +02001539 * data layer will be reflected in the channel flags and/or stream connector.
1540 * Note that this does not change the stream connector's current state, though
Christopher Fauletef285c12022-04-01 14:48:06 +02001541 * it updates the previous state to the current one.
1542 */
Willy Tarreaub49672d2022-05-27 10:13:37 +02001543static void stream_update_both_sc(struct stream *s)
Christopher Fauletef285c12022-04-01 14:48:06 +02001544{
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02001545 struct stconn *scf = s->scf;
1546 struct stconn *scb = s->scb;
Christopher Fauletef285c12022-04-01 14:48:06 +02001547 struct channel *req = &s->req;
1548 struct channel *res = &s->res;
1549
Christopher Faulet23577182022-12-20 18:47:39 +01001550 req->flags &= ~(CF_READ_EVENT|CF_WRITE_EVENT);
1551 res->flags &= ~(CF_READ_EVENT|CF_WRITE_EVENT);
Christopher Fauletef285c12022-04-01 14:48:06 +02001552
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02001553 s->prev_conn_state = scb->state;
Christopher Fauletef285c12022-04-01 14:48:06 +02001554
1555 /* let's recompute both sides states */
Willy Tarreau74568cf2022-05-27 09:03:30 +02001556 if (sc_state_in(scf->state, SC_SB_RDY|SC_SB_EST))
Willy Tarreau19c65a92022-05-27 08:49:24 +02001557 sc_update(scf);
Christopher Fauletef285c12022-04-01 14:48:06 +02001558
Willy Tarreau74568cf2022-05-27 09:03:30 +02001559 if (sc_state_in(scb->state, SC_SB_RDY|SC_SB_EST))
Willy Tarreau19c65a92022-05-27 08:49:24 +02001560 sc_update(scb);
Christopher Fauletef285c12022-04-01 14:48:06 +02001561
Willy Tarreau4596fe22022-05-17 19:07:51 +02001562 /* stream connectors are processed outside of process_stream() and must be
Christopher Fauletef285c12022-04-01 14:48:06 +02001563 * handled at the latest moment.
1564 */
Willy Tarreau8e7c6e62022-05-18 17:58:02 +02001565 if (sc_appctx(scf)) {
Willy Tarreau13d63af2022-05-25 15:00:44 +02001566 if (sc_is_recv_allowed(scf) || sc_is_send_allowed(scf))
Willy Tarreau8e7c6e62022-05-18 17:58:02 +02001567 appctx_wakeup(__sc_appctx(scf));
Christopher Fauletef285c12022-04-01 14:48:06 +02001568 }
Willy Tarreau8e7c6e62022-05-18 17:58:02 +02001569 if (sc_appctx(scb)) {
Willy Tarreau13d63af2022-05-25 15:00:44 +02001570 if (sc_is_recv_allowed(scb) || sc_is_send_allowed(scb))
Willy Tarreau8e7c6e62022-05-18 17:58:02 +02001571 appctx_wakeup(__sc_appctx(scb));
Christopher Fauletef285c12022-04-01 14:48:06 +02001572 }
1573}
1574
Christopher Faulet85e568f2023-02-27 16:08:31 +01001575/* check SC and channel timeouts, and close the corresponding stream connectors
1576 * for future reads or writes.
1577 * Note: this will also concern upper layers but we do not touch any other
1578 * flag. We must be careful and correctly detect state changes when calling
1579 * them.
1580 */
1581static void stream_handle_timeouts(struct stream *s)
1582{
1583 stream_check_conn_timeout(s);
1584
1585 sc_check_timeouts(s->scf);
1586 channel_check_timeout(&s->req);
Christopher Faulet915ba082023-04-12 18:23:15 +02001587 sc_check_timeouts(s->scb);
1588 channel_check_timeout(&s->res);
1589
Christopher Faulet208c7122023-04-13 16:16:15 +02001590 if (unlikely(!(s->scb->flags & SC_FL_SHUT_DONE) && (s->req.flags & CF_WRITE_TIMEOUT))) {
Christopher Faulet85e568f2023-02-27 16:08:31 +01001591 s->scb->flags |= SC_FL_NOLINGER;
Christopher Fauletb2b1c3a2023-04-13 16:23:48 +02001592 sc_shutdown(s->scb);
Christopher Faulet85e568f2023-02-27 16:08:31 +01001593 }
1594
Christopher Fauletca5309a2023-04-17 16:17:32 +02001595 if (unlikely(!(s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->req.flags & CF_READ_TIMEOUT))) {
Christopher Faulet85e568f2023-02-27 16:08:31 +01001596 if (s->scf->flags & SC_FL_NOHALF)
1597 s->scf->flags |= SC_FL_NOLINGER;
Christopher Fauletcfc11c02023-04-13 16:10:23 +02001598 sc_abort(s->scf);
Christopher Faulet85e568f2023-02-27 16:08:31 +01001599 }
Christopher Faulet208c7122023-04-13 16:16:15 +02001600 if (unlikely(!(s->scf->flags & SC_FL_SHUT_DONE) && (s->res.flags & CF_WRITE_TIMEOUT))) {
Christopher Faulet85e568f2023-02-27 16:08:31 +01001601 s->scf->flags |= SC_FL_NOLINGER;
Christopher Fauletb2b1c3a2023-04-13 16:23:48 +02001602 sc_shutdown(s->scf);
Christopher Faulet85e568f2023-02-27 16:08:31 +01001603 }
1604
Christopher Fauletca5309a2023-04-17 16:17:32 +02001605 if (unlikely(!(s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->res.flags & CF_READ_TIMEOUT))) {
Christopher Faulet85e568f2023-02-27 16:08:31 +01001606 if (s->scb->flags & SC_FL_NOHALF)
1607 s->scb->flags |= SC_FL_NOLINGER;
Christopher Fauletcfc11c02023-04-13 16:10:23 +02001608 sc_abort(s->scb);
Christopher Faulet87633c32023-04-03 18:32:50 +02001609 }
Christopher Faulet85e568f2023-02-27 16:08:31 +01001610
1611 if (HAS_FILTERS(s))
1612 flt_stream_check_timeouts(s);
1613}
1614
Willy Tarreaubeee6002022-09-07 16:17:49 +02001615/* if the current task's wake_date was set, it's being profiled, thus we may
Willy Tarreau6a28a302022-09-07 09:17:45 +02001616 * report latencies and CPU usages in logs, so it's desirable to update the
1617 * latency when entering process_stream().
1618 */
1619static void stream_cond_update_cpu_latency(struct stream *s)
1620{
1621 uint32_t lat = th_ctx->sched_call_date - th_ctx->sched_wake_date;
1622
1623 s->lat_time += lat;
1624}
1625
1626/* if the current task's wake_date was set, it's being profiled, thus we may
Willy Tarreaubeee6002022-09-07 16:17:49 +02001627 * report latencies and CPU usages in logs, so it's desirable to do that before
1628 * logging in order to report accurate CPU usage. In this case we count that
1629 * final part and reset the wake date so that the scheduler doesn't do it a
1630 * second time, and by doing so we also avoid an extra call to clock_gettime().
1631 * The CPU usage will be off by the little time needed to run over stream_free()
1632 * but that's only marginal.
1633 */
1634static void stream_cond_update_cpu_usage(struct stream *s)
1635{
1636 uint32_t cpu;
1637
1638 /* stats are only registered for non-zero wake dates */
1639 if (likely(!th_ctx->sched_wake_date))
1640 return;
1641
1642 cpu = (uint32_t)now_mono_time() - th_ctx->sched_call_date;
Willy Tarreau6a28a302022-09-07 09:17:45 +02001643 s->cpu_time += cpu;
Willy Tarreaubeee6002022-09-07 16:17:49 +02001644 HA_ATOMIC_ADD(&th_ctx->sched_profile_entry->cpu_time, cpu);
1645 th_ctx->sched_wake_date = 0;
1646}
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001647
Willy Tarreau6a28a302022-09-07 09:17:45 +02001648/* this function is called directly by the scheduler for tasks whose
1649 * ->process points to process_stream(), and is used to keep latencies
1650 * and CPU usage measurements accurate.
1651 */
1652void stream_update_timings(struct task *t, uint64_t lat, uint64_t cpu)
1653{
1654 struct stream *s = t->context;
1655 s->lat_time += lat;
1656 s->cpu_time += cpu;
1657}
1658
1659
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001660/* This macro is very specific to the function below. See the comments in
Willy Tarreau87b09662015-04-03 00:22:06 +02001661 * process_stream() below to understand the logic and the tests.
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001662 */
1663#define UPDATE_ANALYSERS(real, list, back, flag) { \
1664 list = (((list) & ~(flag)) | ~(back)) & (real); \
1665 back = real; \
1666 if (!(list)) \
1667 break; \
1668 if (((list) ^ ((list) & ((list) - 1))) < (flag)) \
1669 continue; \
1670}
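/* Worked example of the test above (values are illustrative): since
 * ((list) & ((list) - 1)) clears the lowest set bit of <list>, XOR-ing the
 * result with <list> isolates that lowest bit. With list = 0x14 (bits 2 and
 * 4 set), list & (list - 1) = 0x10 and the XOR yields 0x04. If this lowest
 * pending bit is lower than <flag> (the analyser which just ran), an earlier
 * analyser was re-enabled and the chain must be restarted, hence the
 * "continue".
 */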
1671
Christopher Fauleta9215b72016-05-11 17:06:28 +02001672/* The two following macros call an analyzer for the specified channel if the
1673 * right flag is set. The first one is used for "filterable" analyzers. If a
Christopher Faulet3a394fa2016-05-11 17:13:39 +02001674 * stream has some registered filters, pre and post analyze callbacks are
Christopher Faulet0184ea72017-01-05 14:06:34 +01001675 * called. The second one is used for other analyzers (AN_REQ/RES_FLT_* and
Christopher Fauleta9215b72016-05-11 17:06:28 +02001676 * AN_REQ/RES_HTTP_XFER_BODY) */
1677#define FLT_ANALYZE(strm, chn, fun, list, back, flag, ...) \
1678 { \
1679 if ((list) & (flag)) { \
1680 if (HAS_FILTERS(strm)) { \
Christopher Faulet3a394fa2016-05-11 17:13:39 +02001681 if (!flt_pre_analyze((strm), (chn), (flag))) \
Christopher Fauleta9215b72016-05-11 17:06:28 +02001682 break; \
1683 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1684 break; \
Christopher Faulet3a394fa2016-05-11 17:13:39 +02001685 if (!flt_post_analyze((strm), (chn), (flag))) \
1686 break; \
Christopher Fauleta9215b72016-05-11 17:06:28 +02001687 } \
1688 else { \
1689 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1690 break; \
1691 } \
1692 UPDATE_ANALYSERS((chn)->analysers, (list), \
1693 (back), (flag)); \
1694 } \
1695 }
1696
1697#define ANALYZE(strm, chn, fun, list, back, flag, ...) \
1698 { \
1699 if ((list) & (flag)) { \
1700 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1701 break; \
1702 UPDATE_ANALYSERS((chn)->analysers, (list), \
1703 (back), (flag)); \
1704 } \
1705 }
1706
Willy Tarreau87b09662015-04-03 00:22:06 +02001707/* Processes the client, server, request and response jobs of a stream task,
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001708 * then puts it back to the wait queue in a clean state, or cleans up its
1709 * resources if it must be deleted. Returns in <next> the date the task wants
1710 * to be woken up, or TICK_ETERNITY. In order not to call all functions for
1711 * nothing too many times, the request and response buffers flags are monitored
1712 * and each function is called only if at least another function has changed at
1713 * least one flag it is interested in.
1714 */
Willy Tarreau144f84a2021-03-02 16:09:26 +01001715struct task *process_stream(struct task *t, void *context, unsigned int state)
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001716{
Willy Tarreau827aee92011-03-10 16:55:02 +01001717 struct server *srv;
Olivier Houchard9f6af332018-05-25 14:04:04 +02001718 struct stream *s = context;
Willy Tarreaufb0afa72015-04-03 14:46:27 +02001719 struct session *sess = s->sess;
Christopher Faulet87633c32023-04-03 18:32:50 +02001720 unsigned int scf_flags, scb_flags;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001721 unsigned int rqf_last, rpf_last;
Willy Tarreau815a9b22010-07-27 17:15:12 +02001722 unsigned int rq_prod_last, rq_cons_last;
1723 unsigned int rp_cons_last, rp_prod_last;
Christopher Fauletbd90a162023-05-10 16:40:27 +02001724 unsigned int req_ana_back, res_ana_back;
Willy Tarreau8f128b42014-11-28 15:07:47 +01001725 struct channel *req, *res;
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02001726 struct stconn *scf, *scb;
Willy Tarreau3d07a162019-04-25 19:15:20 +02001727 unsigned int rate;
Willy Tarreau8f128b42014-11-28 15:07:47 +01001728
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001729 DBG_TRACE_ENTER(STRM_EV_STRM_PROC, s);
1730
Willy Tarreau7af4fa92020-06-17 20:49:49 +02001731 activity[tid].stream_calls++;
Willy Tarreau6a28a302022-09-07 09:17:45 +02001732 stream_cond_update_cpu_latency(s);
Willy Tarreaud80cb4e2018-01-20 19:30:13 +01001733
Willy Tarreau8f128b42014-11-28 15:07:47 +01001734 req = &s->req;
1735 res = &s->res;
1736
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02001737 scf = s->scf;
1738 scb = s->scb;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001739
Willy Tarreau0f8d3ab2018-10-25 10:42:39 +02001740 /* First, attempt to receive pending data from I/O layers */
Willy Tarreau462b9892022-05-18 18:06:53 +02001741 sc_conn_sync_recv(scf);
1742 sc_conn_sync_recv(scb);
Willy Tarreau0f8d3ab2018-10-25 10:42:39 +02001743
Willy Tarreau6c539c42022-01-20 18:42:16 +01001744 /* Let's check if we're looping without making any progress, e.g. due
1745 * to a bogus analyser or the fact that we're ignoring a read0. The
1746 * call_rate counter only counts calls with no progress made.
1747 */
Christopher Fauletd8988412022-12-20 18:10:04 +01001748 if (!((req->flags | res->flags) & (CF_READ_EVENT|CF_WRITE_EVENT))) {
Willy Tarreau6c539c42022-01-20 18:42:16 +01001749 rate = update_freq_ctr(&s->call_rate, 1);
1750 if (rate >= 100000 && s->call_rate.prev_ctr) // make sure to wait at least a full second
1751 stream_dump_and_crash(&s->obj_type, read_freq_ctr(&s->call_rate));
Willy Tarreau3d07a162019-04-25 19:15:20 +02001752 }
Olivier Houchardc2aa7112018-09-11 18:27:21 +02001753
Krzysztof Piotr Oledzkif9423ae2010-01-29 19:26:18 +01001754 /* this data may no longer be valid, clear it */
Willy Tarreaueee5b512015-04-03 23:46:31 +02001755 if (s->txn)
1756 memset(&s->txn->auth, 0, sizeof(s->txn->auth));
Krzysztof Piotr Oledzkif9423ae2010-01-29 19:26:18 +01001757
Willy Tarreau6f0a7ba2014-06-23 15:22:31 +02001758 /* This flag must explicitly be set every time */
Christopher Faulet81fdeb82023-02-16 16:47:33 +01001759 req->flags &= ~CF_WAKE_WRITE;
1760 res->flags &= ~CF_WAKE_WRITE;
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001761
1762 /* Keep a copy of req/rep flags so that we can detect shutdowns */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001763 rqf_last = req->flags & ~CF_MASK_ANALYSER;
1764 rpf_last = res->flags & ~CF_MASK_ANALYSER;
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001765
Willy Tarreau4596fe22022-05-17 19:07:51 +02001766 /* we don't want the stream connector functions to recursively wake us up */
Willy Tarreaucb041662022-05-17 19:44:42 +02001767 scf->flags |= SC_FL_DONT_WAKE;
1768 scb->flags |= SC_FL_DONT_WAKE;
Willy Tarreau89f7ef22009-09-05 20:57:35 +02001769
Christopher Faulet87633c32023-04-03 18:32:50 +02001770 /* Keep a copy of SC flags */
1771 scf_flags = scf->flags;
1772 scb_flags = scb->flags;
1773
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001774 /* update pending events */
Olivier Houchard9f6af332018-05-25 14:04:04 +02001775 s->pending_events |= (state & TASK_WOKEN_ANY);
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001776
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001777 /* 1a: Check for low level timeouts if needed. We just set a flag on
Willy Tarreau4596fe22022-05-17 19:07:51 +02001778 * stream connectors when their timeouts have expired.
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001779 */
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001780 if (unlikely(s->pending_events & TASK_WOKEN_TIMER)) {
Christopher Faulet85e568f2023-02-27 16:08:31 +01001781 stream_handle_timeouts(s);
Christopher Fauleta00d8172016-11-10 14:58:05 +01001782
Willy Tarreau798f4322012-11-08 14:49:17 +01001783 /* Once in a while we're woken up because the task expires. But
1784 * this does not necessarily mean that a timeout has been reached.
Willy Tarreau87b09662015-04-03 00:22:06 +02001785 * So let's not run a whole stream processing if only an expiration
Willy Tarreau798f4322012-11-08 14:49:17 +01001786 * timeout needs to be refreshed.
1787 */
Christopher Fauletca5309a2023-04-17 16:17:32 +02001788 if (!((scf->flags | scb->flags) & (SC_FL_ERROR|SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) &&
Christopher Faulet87633c32023-04-03 18:32:50 +02001789 !((req->flags | res->flags) & (CF_READ_EVENT|CF_READ_TIMEOUT|CF_WRITE_EVENT|CF_WRITE_TIMEOUT)) &&
Christopher Fauletae024ce2022-03-29 19:02:31 +02001790 !(s->flags & SF_CONN_EXP) &&
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001791 ((s->pending_events & TASK_WOKEN_ANY) == TASK_WOKEN_TIMER)) {
Willy Tarreaucb041662022-05-17 19:44:42 +02001792 scf->flags &= ~SC_FL_DONT_WAKE;
1793 scb->flags &= ~SC_FL_DONT_WAKE;
Willy Tarreau798f4322012-11-08 14:49:17 +01001794 goto update_exp_and_leave;
Willy Tarreau5fb04712016-05-04 10:18:37 +02001795 }
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001796 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001797
Willy Tarreau4596fe22022-05-17 19:07:51 +02001798 resync_stconns:
Willy Tarreau10fc09e2014-11-25 19:46:36 +01001799 /* below we may emit error messages so we have to ensure that we have
Christopher Faulet686501c2022-02-01 18:53:53 +01001800 * our buffers properly allocated. If the allocation failed, an error is
1801 * triggered.
1802 *
1803 * NOTE: An error is returned because the mechanism to queue entities
1804 * waiting for a buffer is totally broken for now. However, this
1805 * part must be refactored. When that is done, this part
1806 * must be reviewed too.
Willy Tarreau10fc09e2014-11-25 19:46:36 +01001807 */
Willy Tarreau87b09662015-04-03 00:22:06 +02001808 if (!stream_alloc_work_buffer(s)) {
Christopher Faulet340021b2023-04-14 11:36:29 +02001809 scf->flags |= SC_FL_ERROR;
Christopher Faulet50264b42022-03-30 19:39:30 +02001810 s->conn_err_type = STRM_ET_CONN_RES;
Christopher Faulet686501c2022-02-01 18:53:53 +01001811
Christopher Faulet340021b2023-04-14 11:36:29 +02001812 scb->flags |= SC_FL_ERROR;
Christopher Faulet50264b42022-03-30 19:39:30 +02001813 s->conn_err_type = STRM_ET_CONN_RES;
Christopher Faulet686501c2022-02-01 18:53:53 +01001814
1815 if (!(s->flags & SF_ERR_MASK))
1816 s->flags |= SF_ERR_RESOURCE;
1817 sess_set_term_flags(s);
Willy Tarreau10fc09e2014-11-25 19:46:36 +01001818 }
1819
Willy Tarreau4596fe22022-05-17 19:07:51 +02001820 /* 1b: check for low-level errors reported at the stream connector.
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001821 * First we check if it's a retryable error (in which case we don't
1822 * want to tell the buffer). Otherwise we report the error one level
1823 * upper by setting flags into the buffers. Note that the side towards
1824 * the client cannot have connect (hence retryable) errors. Also, the
1825 * connection setup code must be able to deal with any type of abort.
1826 */
Willy Tarreau3fdb3662012-11-12 00:42:33 +01001827 srv = objt_server(s->target);
Christopher Faulete182a8e2023-04-14 12:07:26 +02001828 if (unlikely(scf->flags & SC_FL_ERROR)) {
Willy Tarreau74568cf2022-05-27 09:03:30 +02001829 if (sc_state_in(scf->state, SC_SB_EST|SC_SB_DIS)) {
Christopher Fauletcfc11c02023-04-13 16:10:23 +02001830 sc_abort(scf);
Christopher Fauletb2b1c3a2023-04-13 16:23:48 +02001831 sc_shutdown(scf);
Christopher Faulet2e56a732023-01-26 16:18:09 +01001832 //sc_report_error(scf); TODO: Be sure it is useless
Willy Tarreau8f128b42014-11-28 15:07:47 +01001833 if (!(req->analysers) && !(res->analysers)) {
Willy Tarreau4781b152021-04-06 13:53:36 +02001834 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
1835 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01001836 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02001837 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01001838 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02001839 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02001840 if (!(s->flags & SF_ERR_MASK))
1841 s->flags |= SF_ERR_CLICL;
1842 if (!(s->flags & SF_FINST_MASK))
1843 s->flags |= SF_FINST_D;
Willy Tarreau05cb29b2008-12-14 11:44:04 +01001844 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001845 }
1846 }
1847
Christopher Faulete182a8e2023-04-14 12:07:26 +02001848 if (unlikely(scb->flags & SC_FL_ERROR)) {
Willy Tarreau74568cf2022-05-27 09:03:30 +02001849 if (sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS)) {
Christopher Fauletcfc11c02023-04-13 16:10:23 +02001850 sc_abort(scb);
Christopher Fauletb2b1c3a2023-04-13 16:23:48 +02001851 sc_shutdown(scb);
Christopher Faulet2e56a732023-01-26 16:18:09 +01001852 //sc_report_error(scb); TODO: Be sure it is useless
Willy Tarreau4781b152021-04-06 13:53:36 +02001853 _HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
Willy Tarreau827aee92011-03-10 16:55:02 +01001854 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02001855 _HA_ATOMIC_INC(&srv->counters.failed_resp);
Willy Tarreau8f128b42014-11-28 15:07:47 +01001856 if (!(req->analysers) && !(res->analysers)) {
Willy Tarreau4781b152021-04-06 13:53:36 +02001857 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
1858 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01001859 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02001860 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01001861 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02001862 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02001863 if (!(s->flags & SF_ERR_MASK))
1864 s->flags |= SF_ERR_SRVCL;
1865 if (!(s->flags & SF_FINST_MASK))
1866 s->flags |= SF_FINST_D;
Willy Tarreau05cb29b2008-12-14 11:44:04 +01001867 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001868 }
1869 /* note: maybe we should process connection errors here ? */
1870 }
1871
Willy Tarreau74568cf2022-05-27 09:03:30 +02001872 if (sc_state_in(scb->state, SC_SB_CON|SC_SB_RDY)) {
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001873 /* we were trying to establish a connection on the server side,
1874 * maybe it succeeded, maybe it failed, maybe we timed out, ...
1875 */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001876 if (scb->state == SC_ST_RDY)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001877 back_handle_st_rdy(s);
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001878 else if (s->scb->state == SC_ST_CON)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001879 back_handle_st_con(s);
Willy Tarreaud66ed882019-06-05 18:02:04 +02001880
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001881 if (scb->state == SC_ST_CER)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001882 back_handle_st_cer(s);
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001883 else if (scb->state == SC_ST_EST)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001884 back_establish(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001885
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001886 /* state is now one of SC_ST_CON (still in progress), SC_ST_EST
1887 * (established), SC_ST_DIS (abort), SC_ST_CLO (last error),
1888 * SC_ST_ASS/SC_ST_TAR/SC_ST_REQ for retryable errors.
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001889 */
1890 }
1891
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02001892 rq_prod_last = scf->state;
1893 rq_cons_last = scb->state;
1894 rp_cons_last = scf->state;
1895 rp_prod_last = scb->state;
Willy Tarreau815a9b22010-07-27 17:15:12 +02001896
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001897 /* Check for connection closure */
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001898 DBG_TRACE_POINT(STRM_EV_STRM_PROC, s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001899
1900 /* nothing special to be done on client side */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001901 if (unlikely(scf->state == SC_ST_DIS)) {
1902 scf->state = SC_ST_CLO;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001903
Christopher Fauleta70a3542022-03-30 17:13:02 +02001904 /* This is needed only when debugging is enabled, to indicate
1905 * client-side close.
1906 */
1907 if (unlikely((global.mode & MODE_DEBUG) &&
1908 (!(global.mode & MODE_QUIET) ||
1909 (global.mode & MODE_VERBOSE)))) {
1910 chunk_printf(&trash, "%08x:%s.clicls[%04x:%04x]\n",
1911 s->uniq_id, s->be->id,
Willy Tarreaufd9417b2022-05-18 16:23:22 +02001912 (unsigned short)conn_fd(sc_conn(scf)),
1913 (unsigned short)conn_fd(sc_conn(scb)));
Christopher Fauleta70a3542022-03-30 17:13:02 +02001914 DISGUISE(write(1, trash.area, trash.data));
1915 }
1916 }
1917
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001918 /* When a server-side connection is released, we have to count it and
1919 * check for pending connections on this server.
1920 */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001921 if (unlikely(scb->state == SC_ST_DIS)) {
1922 scb->state = SC_ST_CLO;
Willy Tarreau3fdb3662012-11-12 00:42:33 +01001923 srv = objt_server(s->target);
Willy Tarreau827aee92011-03-10 16:55:02 +01001924 if (srv) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02001925 if (s->flags & SF_CURR_SESS) {
1926 s->flags &= ~SF_CURR_SESS;
Willy Tarreau4781b152021-04-06 13:53:36 +02001927 _HA_ATOMIC_DEC(&srv->cur_sess);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001928 }
1929 sess_change_server(s, NULL);
Willy Tarreau827aee92011-03-10 16:55:02 +01001930 if (may_dequeue_tasks(srv, s->be))
Willy Tarreau9ab78292021-06-22 18:47:51 +02001931 process_srv_queue(srv);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001932 }
Christopher Fauleta70a3542022-03-30 17:13:02 +02001933
1934 /* This is needed only when debugging is enabled, to indicate
1935 * server-side close.
1936 */
1937 if (unlikely((global.mode & MODE_DEBUG) &&
1938 (!(global.mode & MODE_QUIET) ||
1939 (global.mode & MODE_VERBOSE)))) {
Willy Tarreau026e8fb2022-05-17 19:47:17 +02001940 if (s->prev_conn_state == SC_ST_EST) {
Christopher Fauleta70a3542022-03-30 17:13:02 +02001941 chunk_printf(&trash, "%08x:%s.srvcls[%04x:%04x]\n",
1942 s->uniq_id, s->be->id,
Willy Tarreaufd9417b2022-05-18 16:23:22 +02001943 (unsigned short)conn_fd(sc_conn(scf)),
1944 (unsigned short)conn_fd(sc_conn(scb)));
Christopher Fauleta70a3542022-03-30 17:13:02 +02001945 DISGUISE(write(1, trash.area, trash.data));
1946 }
1947 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001948 }
1949
1950 /*
1951 * Note: of the transient states (REQ, CER, DIS), only REQ may remain
1952 * at this point.
1953 */
1954
Willy Tarreau0be0ef92009-03-08 19:20:25 +01001955 resync_request:
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001956 /* Analyse request */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001957 if (((req->flags & ~rqf_last) & CF_MASK_ANALYSER) ||
Christopher Fauletca5309a2023-04-17 16:17:32 +02001958 ((scf->flags ^ scf_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
Christopher Faulet208c7122023-04-13 16:16:15 +02001959 ((scb->flags ^ scb_flags) & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) ||
Christopher Faulet64350bb2023-04-13 16:37:37 +02001960 (req->analysers && (scb->flags & SC_FL_SHUT_DONE)) ||
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02001961 scf->state != rq_prod_last ||
1962 scb->state != rq_cons_last ||
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001963 s->pending_events & TASK_WOKEN_MSG) {
Christopher Faulet87633c32023-04-03 18:32:50 +02001964 unsigned int scf_flags_ana = scf->flags;
1965 unsigned int scb_flags_ana = scb->flags;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001966
Willy Tarreau74568cf2022-05-27 09:03:30 +02001967 if (sc_state_in(scf->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO)) {
Willy Tarreaue34070e2010-01-08 00:32:27 +01001968 int max_loops = global.tune.maxpollevents;
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001969 unsigned int ana_list;
1970 unsigned int ana_back;
Willy Tarreau1a52dbd2009-06-28 19:37:53 +02001971
Willy Tarreau90deb182010-01-07 00:20:41 +01001972 /* it's up to the analysers to stop new connections,
1973 * disable reading or closing. Note: if an analyser
1974 * disables any of these bits, it is responsible for
1975 * enabling them again when it disables itself, so
1976 * that other analysers are called in similar conditions.
1977 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001978 channel_auto_read(req);
1979 channel_auto_connect(req);
1980 channel_auto_close(req);
Willy Tarreauedcf6682008-11-30 23:15:34 +01001981
1982 /* We will call all analysers for which a bit is set in
Willy Tarreau8f128b42014-11-28 15:07:47 +01001983 * req->analysers, following the bit order from LSB
Willy Tarreauedcf6682008-11-30 23:15:34 +01001984 * to MSB. The analysers must remove themselves from
Willy Tarreau1a52dbd2009-06-28 19:37:53 +02001985 * the list when not needed. Any analyser may return 0
1986 * to break out of the loop, either because of missing
1987 * data to take a decision, or because it decides to
Willy Tarreau87b09662015-04-03 00:22:06 +02001988 * kill the stream. We loop at least once through each
Willy Tarreau1a52dbd2009-06-28 19:37:53 +02001989 * analyser, and we may loop again if other analysers
1990 * are added in the middle.
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001991 *
1992 * We build a list of analysers to run. We evaluate all
1993 * of these analysers in the order of the lower bit to
1994 * the higher bit. This ordering is very important.
1995 * An analyser will often add/remove other analysers,
1996 * including itself. Any changes to itself have no effect
1997 * on the loop. If it removes any other analysers, we
1998 * want those analysers not to be called anymore during
1999 * this loop. If it adds an analyser that is located
2000 * after itself, we want it to be scheduled for being
2001 * processed during the loop. If it adds an analyser
2002 * which is located before it, we want it to switch to
2003 * it immediately, even if it has already been called
2004 * once but removed since.
2005 *
2006 * In order to achieve this, we compare the analyser
2007 * list after the call with a copy of it before the
2008 * call. The work list is fed with analyser bits that
2009 * appeared during the call. Then we compare previous
2010 * work list with the new one, and check the bits that
2011 * appeared. If the lowest of these bits is lower than
2012 * the current bit, it means we have enabled a previous
2013 * analyser and must immediately loop again.
Willy Tarreauedcf6682008-11-30 23:15:34 +01002014 */
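			/* Illustrative sketch only (not part of the original logic): if an
			 * analyser re-enables a bit located before the one currently being
			 * evaluated, the comparison described above conceptually works as:
			 *
			 *   new_bits = req->analysers & ~ana_back;    // bits added by the call
			 *   if (new_bits && (new_bits & -new_bits) < current_bit)
			 *       restart the walk from that lower bit;
			 *
			 * The real mechanism lives in the ANALYZE()/FLT_ANALYZE() macros; the
			 * lines above are only a simplified model of it.
			 */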
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002015
Willy Tarreau8f128b42014-11-28 15:07:47 +01002016 ana_list = ana_back = req->analysers;
Willy Tarreaue34070e2010-01-08 00:32:27 +01002017 while (ana_list && max_loops--) {
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002018 /* Warning! ensure that analysers are always placed in ascending order! */
Christopher Faulet0184ea72017-01-05 14:06:34 +01002019 ANALYZE (s, req, flt_start_analyze, ana_list, ana_back, AN_REQ_FLT_START_FE);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002020 FLT_ANALYZE(s, req, tcp_inspect_request, ana_list, ana_back, AN_REQ_INSPECT_FE);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002021 FLT_ANALYZE(s, req, http_wait_for_request, ana_list, ana_back, AN_REQ_WAIT_HTTP);
2022 FLT_ANALYZE(s, req, http_wait_for_request_body, ana_list, ana_back, AN_REQ_HTTP_BODY);
2023 FLT_ANALYZE(s, req, http_process_req_common, ana_list, ana_back, AN_REQ_HTTP_PROCESS_FE, sess->fe);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002024 FLT_ANALYZE(s, req, process_switching_rules, ana_list, ana_back, AN_REQ_SWITCHING_RULES);
Christopher Faulet0184ea72017-01-05 14:06:34 +01002025 ANALYZE (s, req, flt_start_analyze, ana_list, ana_back, AN_REQ_FLT_START_BE);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002026 FLT_ANALYZE(s, req, tcp_inspect_request, ana_list, ana_back, AN_REQ_INSPECT_BE);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002027 FLT_ANALYZE(s, req, http_process_req_common, ana_list, ana_back, AN_REQ_HTTP_PROCESS_BE, s->be);
2028 FLT_ANALYZE(s, req, http_process_tarpit, ana_list, ana_back, AN_REQ_HTTP_TARPIT);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002029 FLT_ANALYZE(s, req, process_server_rules, ana_list, ana_back, AN_REQ_SRV_RULES);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002030 FLT_ANALYZE(s, req, http_process_request, ana_list, ana_back, AN_REQ_HTTP_INNER);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002031 FLT_ANALYZE(s, req, tcp_persist_rdp_cookie, ana_list, ana_back, AN_REQ_PRST_RDP_COOKIE);
2032 FLT_ANALYZE(s, req, process_sticking_rules, ana_list, ana_back, AN_REQ_STICKING_RULES);
Christopher Faulet0184ea72017-01-05 14:06:34 +01002033 ANALYZE (s, req, flt_analyze_http_headers, ana_list, ana_back, AN_REQ_FLT_HTTP_HDRS);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002034 ANALYZE (s, req, http_request_forward_body, ana_list, ana_back, AN_REQ_HTTP_XFER_BODY);
William Lallemandcf62f7e2018-10-26 14:47:40 +02002035 ANALYZE (s, req, pcli_wait_for_request, ana_list, ana_back, AN_REQ_WAIT_CLI);
Christopher Faulet0184ea72017-01-05 14:06:34 +01002036 ANALYZE (s, req, flt_xfer_data, ana_list, ana_back, AN_REQ_FLT_XFER_DATA);
2037 ANALYZE (s, req, flt_end_analyze, ana_list, ana_back, AN_REQ_FLT_END);
Willy Tarreaue34070e2010-01-08 00:32:27 +01002038 break;
2039 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002040 }
Willy Tarreau84455332009-03-15 22:34:05 +01002041
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02002042 rq_prod_last = scf->state;
2043 rq_cons_last = scb->state;
Willy Tarreau8f128b42014-11-28 15:07:47 +01002044 req->flags &= ~CF_WAKE_ONCE;
2045 rqf_last = req->flags;
Christopher Fauletca5309a2023-04-17 16:17:32 +02002046 scf_flags = (scf_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
Christopher Faulet208c7122023-04-13 16:16:15 +02002047 scb_flags = (scb_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
Willy Tarreau815a9b22010-07-27 17:15:12 +02002048
Christopher Fauletca5309a2023-04-17 16:17:32 +02002049 if (((scf->flags ^ scf_flags_ana) & (SC_FL_EOS|SC_FL_ABRT_DONE)) || ((scb->flags ^ scb_flags_ana) & SC_FL_SHUT_DONE))
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002050 goto resync_request;
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002051 }
2052
Willy Tarreau576507f2010-01-07 00:09:04 +01002053 /* we'll monitor the request analysers while parsing the response,
2054 * because some response analysers may indirectly enable new request
2055 * analysers (eg: HTTP keep-alive).
2056 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002057 req_ana_back = req->analysers;
Willy Tarreau576507f2010-01-07 00:09:04 +01002058
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002059 resync_response:
2060 /* Analyse response */
2061
Willy Tarreau8f128b42014-11-28 15:07:47 +01002062 if (((res->flags & ~rpf_last) & CF_MASK_ANALYSER) ||
Christopher Fauletca5309a2023-04-17 16:17:32 +02002063 ((scb->flags ^ scb_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
Christopher Faulet208c7122023-04-13 16:16:15 +02002064 ((scf->flags ^ scf_flags) & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) ||
Christopher Faulet64350bb2023-04-13 16:37:37 +02002065 (res->analysers && (scf->flags & SC_FL_SHUT_DONE)) ||
Christopher Faulet87633c32023-04-03 18:32:50 +02002066 scf->state != rp_cons_last ||
2067 scb->state != rp_prod_last ||
2068 s->pending_events & TASK_WOKEN_MSG) {
2069 unsigned int scb_flags_ana = scb->flags;
2070 unsigned int scf_flags_ana = scf->flags;
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002071
Willy Tarreau74568cf2022-05-27 09:03:30 +02002072 if (sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO)) {
Willy Tarreaue34070e2010-01-08 00:32:27 +01002073 int max_loops = global.tune.maxpollevents;
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002074 unsigned int ana_list;
2075 unsigned int ana_back;
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002076
Willy Tarreau90deb182010-01-07 00:20:41 +01002077			/* it's up to the analysers to disable reading or
2078 * closing. Note: if an analyser disables any of these
2079 * bits, it is responsible for enabling them again when
2080 * it disables itself, so that other analysers are called
2081 * in similar conditions.
2082 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002083 channel_auto_read(res);
2084 channel_auto_close(res);
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002085
2086 /* We will call all analysers for which a bit is set in
Willy Tarreau8f128b42014-11-28 15:07:47 +01002087 * res->analysers, following the bit order from LSB
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002088 * to MSB. The analysers must remove themselves from
2089 * the list when not needed. Any analyser may return 0
2090 * to break out of the loop, either because of missing
2091 * data to take a decision, or because it decides to
Willy Tarreau87b09662015-04-03 00:22:06 +02002092 * kill the stream. We loop at least once through each
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002093 * analyser, and we may loop again if other analysers
2094 * are added in the middle.
2095 */
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002096
Willy Tarreau8f128b42014-11-28 15:07:47 +01002097 ana_list = ana_back = res->analysers;
Willy Tarreaue34070e2010-01-08 00:32:27 +01002098 while (ana_list && max_loops--) {
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002099 /* Warning! ensure that analysers are always placed in ascending order! */
Christopher Faulet0184ea72017-01-05 14:06:34 +01002100 ANALYZE (s, res, flt_start_analyze, ana_list, ana_back, AN_RES_FLT_START_FE);
2101 ANALYZE (s, res, flt_start_analyze, ana_list, ana_back, AN_RES_FLT_START_BE);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002102 FLT_ANALYZE(s, res, tcp_inspect_response, ana_list, ana_back, AN_RES_INSPECT);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002103 FLT_ANALYZE(s, res, http_wait_for_response, ana_list, ana_back, AN_RES_WAIT_HTTP);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002104 FLT_ANALYZE(s, res, process_store_rules, ana_list, ana_back, AN_RES_STORE_RULES);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002105 FLT_ANALYZE(s, res, http_process_res_common, ana_list, ana_back, AN_RES_HTTP_PROCESS_BE, s->be);
Christopher Faulet0184ea72017-01-05 14:06:34 +01002106 ANALYZE (s, res, flt_analyze_http_headers, ana_list, ana_back, AN_RES_FLT_HTTP_HDRS);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002107 ANALYZE (s, res, http_response_forward_body, ana_list, ana_back, AN_RES_HTTP_XFER_BODY);
William Lallemandcf62f7e2018-10-26 14:47:40 +02002108 ANALYZE (s, res, pcli_wait_for_response, ana_list, ana_back, AN_RES_WAIT_CLI);
Christopher Faulet0184ea72017-01-05 14:06:34 +01002109 ANALYZE (s, res, flt_xfer_data, ana_list, ana_back, AN_RES_FLT_XFER_DATA);
2110 ANALYZE (s, res, flt_end_analyze, ana_list, ana_back, AN_RES_FLT_END);
Willy Tarreaue34070e2010-01-08 00:32:27 +01002111 break;
2112 }
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002113 }
2114
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02002115 rp_cons_last = scf->state;
2116 rp_prod_last = scb->state;
Christopher Fauletcdaea892017-07-06 15:49:30 +02002117 res->flags &= ~CF_WAKE_ONCE;
Willy Tarreau8f128b42014-11-28 15:07:47 +01002118 rpf_last = res->flags;
Christopher Fauletca5309a2023-04-17 16:17:32 +02002119 scb_flags = (scb_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
Christopher Faulet208c7122023-04-13 16:16:15 +02002120 scf_flags = (scf_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
Willy Tarreau815a9b22010-07-27 17:15:12 +02002121
Christopher Fauletca5309a2023-04-17 16:17:32 +02002122 if (((scb->flags ^ scb_flags_ana) & (SC_FL_EOS|SC_FL_ABRT_DONE)) || ((scf->flags ^ scf_flags_ana) & SC_FL_SHUT_DONE))
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002123 goto resync_response;
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002124 }
2125
Christopher Fauletbd90a162023-05-10 16:40:27 +02002126 /* we'll monitor the response analysers because some response analysers
2127 * may be enabled/disabled later
2128 */
2129 res_ana_back = res->analysers;
2130
Willy Tarreau576507f2010-01-07 00:09:04 +01002131 /* maybe someone has added some request analysers, so we must check and loop */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002132 if (req->analysers & ~req_ana_back)
Willy Tarreau576507f2010-01-07 00:09:04 +01002133 goto resync_request;
2134
Willy Tarreau8f128b42014-11-28 15:07:47 +01002135 if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
Willy Tarreau0499e352010-12-17 07:13:42 +01002136 goto resync_request;
2137
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002138 /* FIXME: here we should call protocol handlers which rely on
2139 * both buffers.
2140 */
2141
2142
2143 /*
Willy Tarreau87b09662015-04-03 00:22:06 +02002144 * Now we propagate unhandled errors to the stream. Normally
Willy Tarreauae526782010-03-04 20:34:23 +01002145 * we're just in a data phase here since it means we have not
 2146	 * seen any analyser that could set an error status.
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002147 */
Willy Tarreau3fdb3662012-11-12 00:42:33 +01002148 srv = objt_server(s->target);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002149 if (unlikely(!(s->flags & SF_ERR_MASK))) {
Christopher Faulete182a8e2023-04-14 12:07:26 +02002150 if ((scf->flags & SC_FL_ERROR) || req->flags & (CF_READ_TIMEOUT|CF_WRITE_TIMEOUT)) {
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002151 /* Report it if the client got an error or a read timeout expired */
Christopher Faulet813f9132021-10-18 15:06:20 +02002152 req->analysers &= AN_REQ_FLT_END;
Christopher Fauletb1368ad2023-05-10 16:28:38 +02002153 channel_auto_close(req);
Christopher Faulete182a8e2023-04-14 12:07:26 +02002154 if (scf->flags & SC_FL_ERROR) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002155 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
2156 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002157 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002158 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002159 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002160 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002161 s->flags |= SF_ERR_CLICL;
Willy Tarreauae526782010-03-04 20:34:23 +01002162 }
Willy Tarreau8f128b42014-11-28 15:07:47 +01002163 else if (req->flags & CF_READ_TIMEOUT) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002164 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
2165 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002166 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002167 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002168 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002169 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002170 s->flags |= SF_ERR_CLITO;
Willy Tarreauae526782010-03-04 20:34:23 +01002171 }
Willy Tarreauae526782010-03-04 20:34:23 +01002172 else {
Willy Tarreau4781b152021-04-06 13:53:36 +02002173 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
2174 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002175 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002176 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002177 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002178 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002179 s->flags |= SF_ERR_SRVTO;
Willy Tarreauae526782010-03-04 20:34:23 +01002180 }
Willy Tarreau84455332009-03-15 22:34:05 +01002181 sess_set_term_flags(s);
Christopher Faulet5e1a9d72019-04-23 17:34:22 +02002182
2183 /* Abort the request if a client error occurred while
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002184 * the backend stream connector is in the SC_ST_INI
2185 * state. It is switched into the SC_ST_CLO state and
Christopher Faulet5e1a9d72019-04-23 17:34:22 +02002186 * the request channel is erased. */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002187 if (scb->state == SC_ST_INI) {
2188 s->scb->state = SC_ST_CLO;
Christopher Faulet5e1a9d72019-04-23 17:34:22 +02002189 channel_abort(req);
2190 if (IS_HTX_STRM(s))
2191 channel_htx_erase(req, htxbuf(&req->buf));
2192 else
2193 channel_erase(req);
2194 }
Willy Tarreau84455332009-03-15 22:34:05 +01002195 }
Christopher Faulete182a8e2023-04-14 12:07:26 +02002196 else if ((scb->flags & SC_FL_ERROR) || res->flags & (CF_READ_TIMEOUT|CF_WRITE_TIMEOUT)) {
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002197 /* Report it if the server got an error or a read timeout expired */
Christopher Faulet813f9132021-10-18 15:06:20 +02002198 res->analysers &= AN_RES_FLT_END;
Christopher Fauletb1368ad2023-05-10 16:28:38 +02002199 channel_auto_close(res);
Christopher Faulete182a8e2023-04-14 12:07:26 +02002200 if (scb->flags & SC_FL_ERROR) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002201 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
2202 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002203 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002204 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002205 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002206 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002207 s->flags |= SF_ERR_SRVCL;
Willy Tarreauae526782010-03-04 20:34:23 +01002208 }
Willy Tarreau8f128b42014-11-28 15:07:47 +01002209 else if (res->flags & CF_READ_TIMEOUT) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002210 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
2211 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002212 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002213 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002214 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002215 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002216 s->flags |= SF_ERR_SRVTO;
Willy Tarreauae526782010-03-04 20:34:23 +01002217 }
Willy Tarreauae526782010-03-04 20:34:23 +01002218 else {
Willy Tarreau4781b152021-04-06 13:53:36 +02002219 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
2220 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002221 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002222 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002223 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002224 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002225 s->flags |= SF_ERR_CLITO;
Willy Tarreauae526782010-03-04 20:34:23 +01002226 }
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002227 sess_set_term_flags(s);
2228 }
Willy Tarreau84455332009-03-15 22:34:05 +01002229 }
2230
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002231 /*
2232 * Here we take care of forwarding unhandled data. This also includes
2233 * connection establishments and shutdown requests.
2234 */
2235
2236
Ilya Shipitsinb8888ab2021-01-06 21:20:16 +05002237 /* If no one is interested in analysing data, it's time to forward
Willy Tarreau31971e52009-09-20 12:07:52 +02002238 * everything. We configure the buffer to forward indefinitely.
Christopher Faulet573ead12023-04-13 15:39:30 +02002239 * Note that we're checking SC_FL_ABRT_WANTED as an indication of a possible
Willy Tarreau8263d2b2012-08-28 00:06:31 +02002240 * recent call to channel_abort().
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002241 */
Christopher Faulet35fe6992017-08-29 16:06:38 +02002242 if (unlikely((!req->analysers || (req->analysers == AN_REQ_FLT_END && !(req->flags & CF_FLT_ANALYZE))) &&
Christopher Faulet208c7122023-04-13 16:16:15 +02002243 !(scf->flags & SC_FL_ABRT_WANTED) && !(scb->flags & SC_FL_SHUT_DONE) &&
Christopher Faulet87633c32023-04-03 18:32:50 +02002244 (sc_state_in(scf->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO)) &&
2245 (req->to_forward != CHN_INFINITE_FORWARD))) {
Willy Tarreaub31c9712012-11-11 23:05:39 +01002246 /* This buffer is freewheeling, there's no analyser
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002247 * attached to it. If any data are left in, we'll permit them to
2248 * move.
2249 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002250 channel_auto_read(req);
2251 channel_auto_connect(req);
2252 channel_auto_close(req);
Willy Tarreau5bd8c372009-01-19 00:32:22 +01002253
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002254 if (IS_HTX_STRM(s)) {
2255 struct htx *htx = htxbuf(&req->buf);
2256
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002257 /* We'll let data flow between the producer (if still connected)
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002258 * to the consumer.
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002259 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002260 co_set_data(req, htx->data);
Christopher Faulet87633c32023-04-03 18:32:50 +02002261 if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
Christopher Fauletca5309a2023-04-17 16:17:32 +02002262 !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002263 channel_htx_forward_forever(req, htx);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002264 }
2265 else {
2266 /* We'll let data flow between the producer (if still connected)
2267 * to the consumer (which might possibly not be connected yet).
2268 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002269 c_adv(req, ci_data(req));
Christopher Faulet87633c32023-04-03 18:32:50 +02002270 if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
Christopher Fauletca5309a2023-04-17 16:17:32 +02002271 !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002272 channel_forward_forever(req);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002273 }
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002274 }
Willy Tarreauf890dc92008-12-13 21:12:26 +01002275
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002276 /* check if it is wise to enable kernel splicing to forward request data */
Christopher Faulet87633c32023-04-03 18:32:50 +02002277 if (!(req->flags & CF_KERN_SPLICING) &&
Christopher Fauletca5309a2023-04-17 16:17:32 +02002278 !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002279 req->to_forward &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002280 (global.tune.options & GTUNE_USE_SPLICE) &&
Willy Tarreaufd9417b2022-05-18 16:23:22 +02002281 (sc_conn(scf) && __sc_conn(scf)->xprt && __sc_conn(scf)->xprt->rcv_pipe &&
2282 __sc_conn(scf)->mux && __sc_conn(scf)->mux->rcv_pipe) &&
2283 (sc_conn(scb) && __sc_conn(scb)->xprt && __sc_conn(scb)->xprt->snd_pipe &&
2284 __sc_conn(scb)->mux && __sc_conn(scb)->mux->snd_pipe) &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002285 (pipes_used < global.maxpipes) &&
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02002286 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_REQ) ||
2287 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002288 (req->flags & CF_STREAMER_FAST)))) {
2289 req->flags |= CF_KERN_SPLICING;
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002290 }
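	/* Configuration note (assumed mapping): PR_O2_SPLIC_REQ and PR_O2_SPLIC_AUT
	 * normally correspond to the "option splice-request" and "option splice-auto"
	 * proxy keywords, e.g.:
	 *
	 *   backend app
	 *       option splice-auto      # splice only channels flagged CF_STREAMER_FAST
	 *       option splice-request   # always attempt to splice request data
	 */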
2291
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002292 /* reflect what the L7 analysers have seen last */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002293 rqf_last = req->flags;
Christopher Fauletca5309a2023-04-17 16:17:32 +02002294 scf_flags = (scf_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
Christopher Faulet208c7122023-04-13 16:16:15 +02002295 scb_flags = (scb_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002296
Willy Tarreau520d95e2009-09-19 21:04:57 +02002297 /* it's possible that an upper layer has requested a connection setup or abort.
2298 * There are 2 situations where we decide to establish a new connection :
2299 * - there are data scheduled for emission in the buffer
Willy Tarreau03cdb7c2012-08-27 23:14:58 +02002300 * - the CF_AUTO_CONNECT flag is set (active connection)
Willy Tarreau520d95e2009-09-19 21:04:57 +02002301 */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002302 if (scb->state == SC_ST_INI) {
Christopher Faulet208c7122023-04-13 16:16:15 +02002303 if (!(scb->flags & SC_FL_SHUT_DONE)) {
Willy Tarreau8f128b42014-11-28 15:07:47 +01002304 if ((req->flags & CF_AUTO_CONNECT) || !channel_is_empty(req)) {
Willy Tarreaucf644ed2013-09-29 17:19:56 +02002305 /* If we have an appctx, there is no connect method, so we
2306 * immediately switch to the connected state, otherwise we
2307 * perform a connection request.
Willy Tarreau520d95e2009-09-19 21:04:57 +02002308 */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002309 scb->state = SC_ST_REQ; /* new connection requested */
Christopher Faulet731c8e62022-03-29 16:08:44 +02002310 s->conn_retries = 0;
Christopher Faulet9f5382e2021-05-21 13:46:14 +02002311 if ((s->be->retry_type &~ PR_RE_CONN_FAILED) &&
2312 (s->be->mode == PR_MODE_HTTP) &&
Christopher Faulete05bf9e2022-03-29 15:23:40 +02002313 !(s->txn->flags & TX_D_L7_RETRY))
2314 s->txn->flags |= TX_L7_RETRY;
Christopher Faulet948a5a02023-11-14 07:47:52 +01002315
2316 if (s->be->options & PR_O_ABRT_CLOSE) {
2317 struct connection *conn = sc_conn(scf);
2318
Christopher Faulet8af12382023-11-14 19:18:53 +01002319 if (conn && conn->mux && conn->mux->ctl)
Christopher Faulet948a5a02023-11-14 07:47:52 +01002320 conn->mux->ctl(conn, MUX_SUBS_RECV, NULL);
2321 }
Willy Tarreau520d95e2009-09-19 21:04:57 +02002322 }
Willy Tarreau73201222009-08-16 18:27:24 +02002323 }
Willy Tarreauf41ffdc2009-09-20 08:19:25 +02002324 else {
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002325 s->scb->state = SC_ST_CLO; /* shutw+ini = abort */
Christopher Fauletdf7cd712023-04-13 15:56:26 +02002326 sc_schedule_shutdown(scb);
Christopher Faulet12762f02023-04-13 15:40:10 +02002327 sc_schedule_abort(scb);
Willy Tarreauf41ffdc2009-09-20 08:19:25 +02002328 }
Willy Tarreau92795622009-03-06 12:51:23 +01002329 }
2330
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002331
2332 /* we may have a pending connection request, or a connection waiting
2333 * for completion.
2334 */
Willy Tarreau74568cf2022-05-27 09:03:30 +02002335 if (sc_state_in(scb->state, SC_SB_REQ|SC_SB_QUE|SC_SB_TAR|SC_SB_ASS)) {
Thierry FOURNIER4834bc72015-06-06 19:29:07 +02002336 /* prune the request variables and swap to the response variables. */
2337 if (s->vars_reqres.scope != SCOPE_RES) {
Jerome Magnin2f44e882019-11-09 18:00:47 +01002338 if (!LIST_ISEMPTY(&s->vars_reqres.head))
Willy Tarreaucda7f3f2018-10-28 13:44:36 +01002339 vars_prune(&s->vars_reqres, s->sess, s);
Willy Tarreaub7bfcb32021-08-31 08:13:25 +02002340 vars_init_head(&s->vars_reqres, SCOPE_RES);
Thierry FOURNIER4834bc72015-06-06 19:29:07 +02002341 }
2342
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002343 do {
2344 /* nb: step 1 might switch from QUE to ASS, but we first want
2345 * to give a chance to step 2 to perform a redirect if needed.
2346 */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002347 if (scb->state != SC_ST_REQ)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01002348 back_try_conn_req(s);
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002349 if (scb->state == SC_ST_REQ)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01002350 back_handle_st_req(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002351
Willy Tarreauada4c582020-03-04 16:42:03 +01002352 /* get a chance to complete an immediate connection setup */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002353 if (scb->state == SC_ST_RDY)
Willy Tarreau4596fe22022-05-17 19:07:51 +02002354 goto resync_stconns;
Willy Tarreauada4c582020-03-04 16:42:03 +01002355
Willy Tarreau9e5a3aa2013-12-31 23:32:12 +01002356 /* applets directly go to the ESTABLISHED state. Similarly,
2357 * servers experience the same fate when their connection
2358 * is reused.
2359 */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002360 if (unlikely(scb->state == SC_ST_EST))
Willy Tarreau3a9312a2020-01-09 18:43:15 +01002361 back_establish(s);
Willy Tarreaufac4bd12013-11-30 09:21:49 +01002362
Willy Tarreau3fdb3662012-11-12 00:42:33 +01002363 srv = objt_server(s->target);
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002364 if (scb->state == SC_ST_ASS && srv && srv->rdr_len && (s->flags & SF_REDIRECTABLE))
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02002365 http_perform_server_redirect(s, scb);
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002366 } while (scb->state == SC_ST_ASS);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002367 }
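	/* Illustrative note (assumed ordering): a successful setup typically walks
	 * the backend stream connector through SC_ST_REQ -> (SC_ST_QUE/SC_ST_TAR) ->
	 * SC_ST_ASS -> SC_ST_CON -> SC_ST_RDY -> SC_ST_EST, with back_try_conn_req()
	 * and back_handle_st_req() driving the transitions in the loop above. The
	 * exact path depends on queuing, redirects and connection reuse.
	 */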
2368
Willy Tarreau829bd472019-06-06 09:17:23 +02002369 /* Let's see if we can send the pending request now */
Willy Tarreau462b9892022-05-18 18:06:53 +02002370 sc_conn_sync_send(scb);
Willy Tarreau829bd472019-06-06 09:17:23 +02002371
2372 /*
2373 * Now forward all shutdown requests between both sides of the request buffer
2374 */
2375
2376 /* first, let's check if the request buffer needs to shutdown(write), which may
2377 * happen either because the input is closed or because we want to force a close
2378 * once the server has begun to respond. If a half-closed timeout is set, we adjust
Willy Tarreaua544c662022-04-14 17:39:48 +02002379	 * the other side's timeout as well. However this has no effect during the
2380 * connection setup unless the backend has abortonclose set.
Willy Tarreau829bd472019-06-06 09:17:23 +02002381 */
Christopher Fauletca5309a2023-04-17 16:17:32 +02002382 if (unlikely((req->flags & CF_AUTO_CLOSE) && (scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
Christopher Faulet208c7122023-04-13 16:16:15 +02002383 !(scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) &&
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002384 (scb->state != SC_ST_CON || (s->be->options & PR_O_ABRT_CLOSE)))) {
Christopher Fauletdf7cd712023-04-13 15:56:26 +02002385 sc_schedule_shutdown(scb);
Willy Tarreau829bd472019-06-06 09:17:23 +02002386 }
2387
2388 /* shutdown(write) pending */
Christopher Faulet208c7122023-04-13 16:16:15 +02002389 if (unlikely((scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) == SC_FL_SHUT_WANTED &&
Christopher Faulet406b81c2023-09-06 08:59:33 +02002390 (channel_is_empty(req) || (req->flags & CF_WRITE_TIMEOUT)))) {
Christopher Faulete182a8e2023-04-14 12:07:26 +02002391 if (scf->flags & SC_FL_ERROR)
Willy Tarreaucb041662022-05-17 19:44:42 +02002392 scb->flags |= SC_FL_NOLINGER;
Christopher Fauletb2b1c3a2023-04-13 16:23:48 +02002393 sc_shutdown(scb);
Willy Tarreau829bd472019-06-06 09:17:23 +02002394 }
2395
2396 /* shutdown(write) done on server side, we must stop the client too */
Christopher Fauletca5309a2023-04-17 16:17:32 +02002397 if (unlikely((scb->flags & SC_FL_SHUT_DONE) && !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED))) &&
Christopher Faulet87633c32023-04-03 18:32:50 +02002398 !req->analysers)
Christopher Faulet12762f02023-04-13 15:40:10 +02002399 sc_schedule_abort(scf);
Willy Tarreau829bd472019-06-06 09:17:23 +02002400
2401 /* shutdown(read) pending */
Christopher Fauletca5309a2023-04-17 16:17:32 +02002402 if (unlikely((scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) == SC_FL_ABRT_WANTED)) {
Willy Tarreaucb041662022-05-17 19:44:42 +02002403 if (scf->flags & SC_FL_NOHALF)
2404 scf->flags |= SC_FL_NOLINGER;
Christopher Fauletcfc11c02023-04-13 16:10:23 +02002405 sc_abort(scf);
Willy Tarreau829bd472019-06-06 09:17:23 +02002406 }
2407
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002408 /* Benchmarks have shown that it's optimal to do a full resync now */
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002409 if (scf->state == SC_ST_DIS ||
Willy Tarreau74568cf2022-05-27 09:03:30 +02002410 sc_state_in(scb->state, SC_SB_RDY|SC_SB_DIS) ||
Christopher Fauletad46e522023-04-14 11:59:15 +02002411 ((scf->flags & SC_FL_ERROR) && scf->state != SC_ST_CLO) ||
2412 ((scb->flags & SC_FL_ERROR) && scb->state != SC_ST_CLO))
Willy Tarreau4596fe22022-05-17 19:07:51 +02002413 goto resync_stconns;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002414
Willy Tarreau815a9b22010-07-27 17:15:12 +02002415 /* otherwise we want to check if we need to resync the req buffer or not */
Christopher Fauletca5309a2023-04-17 16:17:32 +02002416 if (((scf->flags ^ scf_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE)) || ((scb->flags ^ scb_flags) & SC_FL_SHUT_DONE))
Willy Tarreau0be0ef92009-03-08 19:20:25 +01002417 goto resync_request;
2418
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002419 /* perform output updates to the response buffer */
Willy Tarreau84455332009-03-15 22:34:05 +01002420
Ilya Shipitsinb8888ab2021-01-06 21:20:16 +05002421 /* If no one is interested in analysing data, it's time to forward
Willy Tarreau31971e52009-09-20 12:07:52 +02002422 * everything. We configure the buffer to forward indefinitely.
Christopher Faulet573ead12023-04-13 15:39:30 +02002423 * Note that we're checking SC_FL_ABRT_WANTED as an indication of a possible
Willy Tarreau8263d2b2012-08-28 00:06:31 +02002424 * recent call to channel_abort().
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002425 */
Christopher Faulet35fe6992017-08-29 16:06:38 +02002426 if (unlikely((!res->analysers || (res->analysers == AN_RES_FLT_END && !(res->flags & CF_FLT_ANALYZE))) &&
Christopher Faulete38534c2023-04-13 15:45:24 +02002427 !(scf->flags & SC_FL_ABRT_WANTED) && !(scb->flags & SC_FL_SHUT_WANTED) &&
Christopher Faulet87633c32023-04-03 18:32:50 +02002428 sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO) &&
2429 (res->to_forward != CHN_INFINITE_FORWARD))) {
Willy Tarreaub31c9712012-11-11 23:05:39 +01002430 /* This buffer is freewheeling, there's no analyser
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002431 * attached to it. If any data are left in, we'll permit them to
2432 * move.
2433 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002434 channel_auto_read(res);
2435 channel_auto_close(res);
Willy Tarreauda4d9fe2010-11-07 20:26:56 +01002436
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002437 if (IS_HTX_STRM(s)) {
2438 struct htx *htx = htxbuf(&res->buf);
Willy Tarreauce887fd2012-05-12 12:50:00 +02002439
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002440 /* We'll let data flow between the producer (if still connected)
2441 * to the consumer.
2442 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002443 co_set_data(res, htx->data);
Christopher Faulet87633c32023-04-03 18:32:50 +02002444 if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
Christopher Fauletca5309a2023-04-17 16:17:32 +02002445 !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002446 channel_htx_forward_forever(res, htx);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002447 }
2448 else {
2449 /* We'll let data flow between the producer (if still connected)
2450 * to the consumer.
2451 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002452 c_adv(res, ci_data(res));
Christopher Faulet87633c32023-04-03 18:32:50 +02002453 if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
Christopher Fauletca5309a2023-04-17 16:17:32 +02002454 !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002455 channel_forward_forever(res);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002456 }
Willy Tarreau42529c32015-07-09 18:38:57 +02002457
Willy Tarreauce887fd2012-05-12 12:50:00 +02002458 /* if we have no analyser anymore in any direction and have a
Willy Tarreau05cdd962014-05-10 14:30:07 +02002459 * tunnel timeout set, use it now. Note that we must respect
2460 * the half-closed timeouts as well.
Willy Tarreauce887fd2012-05-12 12:50:00 +02002461 */
Amaury Denoyellefb504432020-12-10 13:43:53 +01002462 if (!req->analysers && s->tunnel_timeout) {
Christopher Faulet5aaacfb2023-02-15 08:13:33 +01002463 scf->ioto = scb->ioto = s->tunnel_timeout;
Willy Tarreau05cdd962014-05-10 14:30:07 +02002464
Willy Tarreaud7f1ce42023-06-02 16:19:51 +02002465 if (!IS_HTX_STRM(s)) {
2466 if ((scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) && tick_isset(sess->fe->timeout.clientfin))
2467 scf->ioto = sess->fe->timeout.clientfin;
2468 if ((scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) && tick_isset(s->be->timeout.serverfin))
2469 scb->ioto = s->be->timeout.serverfin;
2470 }
Willy Tarreauce887fd2012-05-12 12:50:00 +02002471 }
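		/* Hedged configuration example: the timeouts applied above usually come
		 * from a section such as:
		 *
		 *   defaults
		 *       timeout tunnel     1h    # s->tunnel_timeout
		 *       timeout client-fin 30s   # sess->fe->timeout.clientfin
		 *       timeout server-fin 30s   # s->be->timeout.serverfin
		 */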
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002472 }
Willy Tarreauf890dc92008-12-13 21:12:26 +01002473
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002474 /* check if it is wise to enable kernel splicing to forward response data */
Christopher Faulet87633c32023-04-03 18:32:50 +02002475 if (!(res->flags & CF_KERN_SPLICING) &&
Christopher Fauletca5309a2023-04-17 16:17:32 +02002476 !(scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002477 res->to_forward &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002478 (global.tune.options & GTUNE_USE_SPLICE) &&
Willy Tarreaufd9417b2022-05-18 16:23:22 +02002479 (sc_conn(scf) && __sc_conn(scf)->xprt && __sc_conn(scf)->xprt->snd_pipe &&
2480 __sc_conn(scf)->mux && __sc_conn(scf)->mux->snd_pipe) &&
2481 (sc_conn(scb) && __sc_conn(scb)->xprt && __sc_conn(scb)->xprt->rcv_pipe &&
2482 __sc_conn(scb)->mux && __sc_conn(scb)->mux->rcv_pipe) &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002483 (pipes_used < global.maxpipes) &&
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02002484 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_RTR) ||
2485 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002486 (res->flags & CF_STREAMER_FAST)))) {
2487 res->flags |= CF_KERN_SPLICING;
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002488 }
2489
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002490 /* reflect what the L7 analysers have seen last */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002491 rpf_last = res->flags;
Christopher Fauletca5309a2023-04-17 16:17:32 +02002492 scb_flags = (scb_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
Christopher Faulet208c7122023-04-13 16:16:15 +02002493 scf_flags = (scf_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002494
Willy Tarreau829bd472019-06-06 09:17:23 +02002495 /* Let's see if we can send the pending response now */
Willy Tarreau462b9892022-05-18 18:06:53 +02002496 sc_conn_sync_send(scf);
Willy Tarreau829bd472019-06-06 09:17:23 +02002497
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002498 /*
2499 * Now forward all shutdown requests between both sides of the buffer
2500 */
2501
2502 /*
2503 * FIXME: this is probably where we should produce error responses.
2504 */
2505
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002506 /* first, let's check if the response buffer needs to shutdown(write) */
Christopher Fauletca5309a2023-04-17 16:17:32 +02002507 if (unlikely((res->flags & CF_AUTO_CLOSE) && (scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
Christopher Faulet208c7122023-04-13 16:16:15 +02002508 !(scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)))) {
Christopher Fauletdf7cd712023-04-13 15:56:26 +02002509 sc_schedule_shutdown(scf);
Willy Tarreau05cdd962014-05-10 14:30:07 +02002510 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002511
2512 /* shutdown(write) pending */
Christopher Faulet208c7122023-04-13 16:16:15 +02002513 if (unlikely((scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) == SC_FL_SHUT_WANTED &&
Christopher Faulet406b81c2023-09-06 08:59:33 +02002514 (channel_is_empty(res) || (res->flags & CF_WRITE_TIMEOUT)))) {
Christopher Fauletb2b1c3a2023-04-13 16:23:48 +02002515 sc_shutdown(scf);
Willy Tarreau05cdd962014-05-10 14:30:07 +02002516 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002517
2518 /* shutdown(write) done on the client side, we must stop the server too */
Christopher Fauletca5309a2023-04-17 16:17:32 +02002519 if (unlikely((scf->flags & SC_FL_SHUT_DONE) && !(scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED))) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002520 !res->analysers)
Christopher Faulet12762f02023-04-13 15:40:10 +02002521 sc_schedule_abort(scb);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002522
2523 /* shutdown(read) pending */
Christopher Fauletca5309a2023-04-17 16:17:32 +02002524 if (unlikely((scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) == SC_FL_ABRT_WANTED)) {
Willy Tarreaucb041662022-05-17 19:44:42 +02002525 if (scb->flags & SC_FL_NOHALF)
2526 scb->flags |= SC_FL_NOLINGER;
Christopher Fauletcfc11c02023-04-13 16:10:23 +02002527 sc_abort(scb);
Willy Tarreau7bb68ab2012-05-13 14:48:59 +02002528 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002529
Willy Tarreau026e8fb2022-05-17 19:47:17 +02002530 if (scf->state == SC_ST_DIS ||
Willy Tarreau74568cf2022-05-27 09:03:30 +02002531 sc_state_in(scb->state, SC_SB_RDY|SC_SB_DIS) ||
Christopher Fauletad46e522023-04-14 11:59:15 +02002532 ((scf->flags & SC_FL_ERROR) && scf->state != SC_ST_CLO) ||
2533 ((scb->flags & SC_FL_ERROR) && scb->state != SC_ST_CLO))
Willy Tarreau4596fe22022-05-17 19:07:51 +02002534 goto resync_stconns;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002535
Willy Tarreau3c5c0662019-06-06 14:32:49 +02002536 if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
Willy Tarreau0be0ef92009-03-08 19:20:25 +01002537 goto resync_request;
2538
Christopher Fauletca5309a2023-04-17 16:17:32 +02002539 if (((scb->flags ^ scb_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
Christopher Fauletbd90a162023-05-10 16:40:27 +02002540 ((scf->flags ^ scf_flags) & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) ||
2541 (res->analysers ^ res_ana_back))
Willy Tarreau0be0ef92009-03-08 19:20:25 +01002542 goto resync_response;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002543
Christopher Fauletbd90a162023-05-10 16:40:27 +02002544 if ((((req->flags ^ rqf_last) | (res->flags ^ rpf_last)) & CF_MASK_ANALYSER) ||
2545 (req->analysers ^ req_ana_back))
Willy Tarreau829bd472019-06-06 09:17:23 +02002546 goto resync_request;
2547
Willy Tarreau89f7ef22009-09-05 20:57:35 +02002548 /* we're interested in getting wakeups again */
Willy Tarreaucb041662022-05-17 19:44:42 +02002549 scf->flags &= ~SC_FL_DONT_WAKE;
2550 scb->flags &= ~SC_FL_DONT_WAKE;
Willy Tarreau89f7ef22009-09-05 20:57:35 +02002551
Willy Tarreau74568cf2022-05-27 09:03:30 +02002552 if (likely((scf->state != SC_ST_CLO) || !sc_state_in(scb->state, SC_SB_INI|SC_SB_CLO) ||
Christopher Faulet6fcd2d32019-11-13 11:12:32 +01002553 (req->analysers & AN_REQ_FLT_END) || (res->analysers & AN_RES_FLT_END))) {
Olivier Houchard4c18f942019-07-31 18:05:26 +02002554 if ((sess->fe->options & PR_O_CONTSTATS) && (s->flags & SF_BE_ASSIGNED) && !(s->flags & SF_IGNORE))
Willy Tarreau87b09662015-04-03 00:22:06 +02002555 stream_process_counters(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002556
Willy Tarreaub49672d2022-05-27 10:13:37 +02002557 stream_update_both_sc(s);
Olivier Houchard53216e72018-10-10 15:46:36 +02002558
Christopher Faulet9d810ca2016-12-08 22:33:52 +01002559 /* Reset pending events now */
2560 s->pending_events = 0;
2561
Willy Tarreau798f4322012-11-08 14:49:17 +01002562 update_exp_and_leave:
Willy Tarreaucb041662022-05-17 19:44:42 +02002563 /* Note: please ensure that if you branch here you disable SC_FL_DONT_WAKE */
Willy Tarreaudef0d222016-11-08 22:03:00 +01002564 if (!req->analysers)
2565 req->analyse_exp = TICK_ETERNITY;
2566
2567 if ((sess->fe->options & PR_O_CONTSTATS) && (s->flags & SF_BE_ASSIGNED) &&
2568 (!tick_isset(req->analyse_exp) || tick_is_expired(req->analyse_exp, now_ms)))
2569 req->analyse_exp = tick_add(now_ms, 5000);
2570
Christopher Faulet92657802023-11-06 08:45:22 +01002571 t->expire = (tick_is_expired(t->expire, now_ms) ? 0 : t->expire);
2572 t->expire = tick_first(t->expire, sc_ep_rcv_ex(scf));
2573 t->expire = tick_first(t->expire, sc_ep_snd_ex(scf));
2574 t->expire = tick_first(t->expire, sc_ep_rcv_ex(scb));
2575 t->expire = tick_first(t->expire, sc_ep_snd_ex(scb));
Willy Tarreaudef0d222016-11-08 22:03:00 +01002576 t->expire = tick_first(t->expire, req->analyse_exp);
Willy Tarreau9a398be2017-11-10 17:14:23 +01002577 t->expire = tick_first(t->expire, res->analyse_exp);
Christopher Fauletae024ce2022-03-29 19:02:31 +02002578 t->expire = tick_first(t->expire, s->conn_exp);
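		/* Note (illustrative): tick_first() keeps the earliest non-eternal tick,
		 * so after the chain above t->expire ends up holding the closest of the
		 * I/O expirations, the analyser expirations and the connection expiration;
		 * roughly, tick_first(TICK_ETERNITY, x) == x and tick_first(x, y) picks
		 * the sooner of the two when both are set.
		 */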
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002579
Christopher Faulete758b5c2023-02-27 16:21:00 +01002580 if (unlikely(tick_is_expired(t->expire, now_ms))) {
 2581			/* Some events prevented the timeouts from being handled but nothing evolved.
 2582			   So do it now and resync the stconns.
2583 */
2584 stream_handle_timeouts(s);
2585 goto resync_stconns;
2586 }
Christopher Fauleta62201d2023-02-20 14:43:49 +01002587
Christopher Faulet9d810ca2016-12-08 22:33:52 +01002588 s->pending_events &= ~(TASK_WOKEN_TIMER | TASK_WOKEN_RES);
Willy Tarreau87b09662015-04-03 00:22:06 +02002589 stream_release_buffers(s);
Christopher Fauleteea8fc72019-11-05 16:18:10 +01002590
2591 DBG_TRACE_DEVEL("queuing", STRM_EV_STRM_PROC, s);
Willy Tarreau26c25062009-03-08 09:38:41 +01002592 return t; /* nothing more to do */
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002593 }
2594
Christopher Fauleteea8fc72019-11-05 16:18:10 +01002595 DBG_TRACE_DEVEL("releasing", STRM_EV_STRM_PROC, s);
2596
Willy Tarreaue7dff022015-04-03 01:14:29 +02002597 if (s->flags & SF_BE_ASSIGNED)
Willy Tarreau4781b152021-04-06 13:53:36 +02002598 _HA_ATOMIC_DEC(&s->be->beconn);
Willy Tarreau6f5e4b92017-09-15 09:07:56 +02002599
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002600 if (unlikely((global.mode & MODE_DEBUG) &&
2601 (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
Willy Tarreau19d14ef2012-10-29 16:51:55 +01002602 chunk_printf(&trash, "%08x:%s.closed[%04x:%04x]\n",
Christopher Faulet0256da12021-12-15 09:50:17 +01002603 s->uniq_id, s->be->id,
Willy Tarreaufd9417b2022-05-18 16:23:22 +02002604 (unsigned short)conn_fd(sc_conn(scf)),
2605 (unsigned short)conn_fd(sc_conn(scb)));
Willy Tarreau2e8ab6b2020-03-14 11:03:20 +01002606 DISGUISE(write(1, trash.area, trash.data));
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002607 }
2608
Christopher Faulet341064e2021-01-21 17:10:44 +01002609 if (!(s->flags & SF_IGNORE)) {
Willy Tarreau69530f52023-04-28 09:16:15 +02002610 s->logs.t_close = ns_to_ms(now_ns - s->logs.accept_ts);
Christopher Faulet341064e2021-01-21 17:10:44 +01002611
Olivier Houchard4c18f942019-07-31 18:05:26 +02002612 stream_process_counters(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002613
Christopher Faulet341064e2021-01-21 17:10:44 +01002614 if (s->txn && s->txn->status) {
2615 int n;
Krzysztof Piotr Oledzkide71d162009-10-24 15:36:15 +02002616
Christopher Faulet341064e2021-01-21 17:10:44 +01002617 n = s->txn->status / 100;
2618 if (n < 1 || n > 5)
2619 n = 0;
Krzysztof Piotr Oledzkide71d162009-10-24 15:36:15 +02002620
Christopher Faulet341064e2021-01-21 17:10:44 +01002621 if (sess->fe->mode == PR_MODE_HTTP) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002622 _HA_ATOMIC_INC(&sess->fe->fe_counters.p.http.rsp[n]);
Christopher Faulet341064e2021-01-21 17:10:44 +01002623 }
2624 if ((s->flags & SF_BE_ASSIGNED) &&
2625 (s->be->mode == PR_MODE_HTTP)) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002626 _HA_ATOMIC_INC(&s->be->be_counters.p.http.rsp[n]);
2627 _HA_ATOMIC_INC(&s->be->be_counters.p.http.cum_req);
Christopher Faulet341064e2021-01-21 17:10:44 +01002628 }
Willy Tarreau5e16cbc2012-11-24 14:54:13 +01002629 }
Christopher Faulet341064e2021-01-21 17:10:44 +01002630
2631 /* let's do a final log if we need it */
2632 if (!LIST_ISEMPTY(&sess->fe->logformat) && s->logs.logwait &&
2633 !(s->flags & SF_MONITOR) &&
2634 (!(sess->fe->options & PR_O_NULLNOLOG) || req->total)) {
2635 /* we may need to know the position in the queue */
2636 pendconn_free(s);
Willy Tarreaubeee6002022-09-07 16:17:49 +02002637
2638 stream_cond_update_cpu_usage(s);
Christopher Faulet341064e2021-01-21 17:10:44 +01002639 s->do_log(s);
Willy Tarreau5e16cbc2012-11-24 14:54:13 +01002640 }
Krzysztof Piotr Oledzkide71d162009-10-24 15:36:15 +02002641
Christopher Faulet341064e2021-01-21 17:10:44 +01002642 /* update time stats for this stream */
2643 stream_update_time_stats(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002644 }
2645
2646 /* the task MUST not be in the run queue anymore */
Willy Tarreau87b09662015-04-03 00:22:06 +02002647 stream_free(s);
Olivier Houchard3f795f72019-04-17 22:51:06 +02002648 task_destroy(t);
Willy Tarreau26c25062009-03-08 09:38:41 +01002649 return NULL;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002650}
2651
Willy Tarreau87b09662015-04-03 00:22:06 +02002652/* Update the stream's backend and server time stats */
2653void stream_update_time_stats(struct stream *s)
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002654{
2655 int t_request;
2656 int t_queue;
2657 int t_connect;
2658 int t_data;
2659 int t_close;
2660 struct server *srv;
Marcin Deraneka8dbdf32020-05-15 20:02:40 +02002661 unsigned int samples_window;
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002662
2663 t_request = 0;
2664 t_queue = s->logs.t_queue;
2665 t_connect = s->logs.t_connect;
2666 t_close = s->logs.t_close;
2667 t_data = s->logs.t_data;
2668
2669 if (s->be->mode != PR_MODE_HTTP)
2670 t_data = t_connect;
2671
2672 if (t_connect < 0 || t_data < 0)
2673 return;
2674
Willy Tarreauad5a5f62023-04-27 09:46:02 +02002675 if ((llong)(s->logs.request_ts - s->logs.accept_ts) >= 0)
2676 t_request = ns_to_ms(s->logs.request_ts - s->logs.accept_ts);
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002677
2678 t_data -= t_connect;
2679 t_connect -= t_queue;
2680 t_queue -= t_request;
2681
2682 srv = objt_server(s->target);
2683 if (srv) {
Marcin Deraneka8dbdf32020-05-15 20:02:40 +02002684 samples_window = (((s->be->mode == PR_MODE_HTTP) ?
2685 srv->counters.p.http.cum_req : srv->counters.cum_lbconn) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
2686 swrate_add_dynamic(&srv->counters.q_time, samples_window, t_queue);
2687 swrate_add_dynamic(&srv->counters.c_time, samples_window, t_connect);
2688 swrate_add_dynamic(&srv->counters.d_time, samples_window, t_data);
2689 swrate_add_dynamic(&srv->counters.t_time, samples_window, t_close);
Christopher Fauletefb41f02019-11-08 14:53:15 +01002690 HA_ATOMIC_UPDATE_MAX(&srv->counters.qtime_max, t_queue);
2691 HA_ATOMIC_UPDATE_MAX(&srv->counters.ctime_max, t_connect);
2692 HA_ATOMIC_UPDATE_MAX(&srv->counters.dtime_max, t_data);
2693 HA_ATOMIC_UPDATE_MAX(&srv->counters.ttime_max, t_close);
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002694 }
Marcin Deraneka8dbdf32020-05-15 20:02:40 +02002695 samples_window = (((s->be->mode == PR_MODE_HTTP) ?
2696 s->be->be_counters.p.http.cum_req : s->be->be_counters.cum_lbconn) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
2697 swrate_add_dynamic(&s->be->be_counters.q_time, samples_window, t_queue);
2698 swrate_add_dynamic(&s->be->be_counters.c_time, samples_window, t_connect);
2699 swrate_add_dynamic(&s->be->be_counters.d_time, samples_window, t_data);
2700 swrate_add_dynamic(&s->be->be_counters.t_time, samples_window, t_close);
Christopher Fauletefb41f02019-11-08 14:53:15 +01002701 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.qtime_max, t_queue);
2702 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.ctime_max, t_connect);
2703 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.dtime_max, t_data);
2704 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.ttime_max, t_close);
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002705}
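/* Worked example (illustrative figures only, HTTP mode): with the accept at
 * t=0, request_ts at +2ms, t_queue=5, t_connect=7, t_data=30 and t_close=35,
 * the computations above yield t_request=2, t_queue=3, t_connect=2 and
 * t_data=23, which are then fed into the per-server and per-backend sliding
 * averages (q_time, c_time, d_time, t_time).
 */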
2706
Willy Tarreau7c669d72008-06-20 15:04:11 +02002707/*
2708 * This function adjusts sess->srv_conn and maintains the previous and new
Willy Tarreau87b09662015-04-03 00:22:06 +02002709 * server's served stream counts. Setting newsrv to NULL is enough to release
Willy Tarreau7c669d72008-06-20 15:04:11 +02002710 * current connection slot. This function also notifies any LB algo which might
Willy Tarreau87b09662015-04-03 00:22:06 +02002711 * expect to be informed about any change in the number of active streams on a
Willy Tarreau7c669d72008-06-20 15:04:11 +02002712 * server.
2713 */
Willy Tarreaue89fae32021-03-09 15:43:32 +01002714void sess_change_server(struct stream *strm, struct server *newsrv)
Willy Tarreau7c669d72008-06-20 15:04:11 +02002715{
Willy Tarreaue89fae32021-03-09 15:43:32 +01002716 struct server *oldsrv = strm->srv_conn;
Willy Tarreau751153e2021-02-17 13:33:24 +01002717
Amaury Denoyelled8514b92024-02-21 15:54:11 +01002718 /* Dynamic servers may be deleted during process lifetime. This
2719 * operation is always conducted under thread isolation. Several
2720 * conditions prevent deletion, one of them is if server streams list
 2721	 * conditions prevent deletion, one of them being a non-empty server
 2722	 * streams list. sess_change_server() uses stream_add_srv_conn() to
2723 *
2724 * A race condition could exist for stream which referenced a server
 2725	 * A race condition could exist for a stream which referenced a server
2726 * This is notably the case for SF_DIRECT streams which referenced a
2727 * server earlier during process_stream(). However at this time the
2728 * code is deemed safe as process_stream() cannot be rescheduled before
2729 * invocation of sess_change_server().
2730 */
2731
Willy Tarreau751153e2021-02-17 13:33:24 +01002732 if (oldsrv == newsrv)
Willy Tarreau7c669d72008-06-20 15:04:11 +02002733 return;
2734
Willy Tarreau751153e2021-02-17 13:33:24 +01002735 if (oldsrv) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002736 _HA_ATOMIC_DEC(&oldsrv->served);
2737 _HA_ATOMIC_DEC(&oldsrv->proxy->served);
Olivier Houcharddc6111e2019-03-08 18:54:51 +01002738 __ha_barrier_atomic_store();
Willy Tarreau59b0fec2021-02-17 16:01:37 +01002739 if (oldsrv->proxy->lbprm.server_drop_conn)
Willy Tarreau5941ef02021-06-18 18:29:25 +02002740 oldsrv->proxy->lbprm.server_drop_conn(oldsrv);
Willy Tarreaue89fae32021-03-09 15:43:32 +01002741 stream_del_srv_conn(strm);
Willy Tarreau7c669d72008-06-20 15:04:11 +02002742 }
2743
2744 if (newsrv) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002745 _HA_ATOMIC_INC(&newsrv->served);
2746 _HA_ATOMIC_INC(&newsrv->proxy->served);
Olivier Houcharddc6111e2019-03-08 18:54:51 +01002747 __ha_barrier_atomic_store();
Willy Tarreau59b0fec2021-02-17 16:01:37 +01002748 if (newsrv->proxy->lbprm.server_take_conn)
Willy Tarreau5941ef02021-06-18 18:29:25 +02002749 newsrv->proxy->lbprm.server_take_conn(newsrv);
Willy Tarreaue89fae32021-03-09 15:43:32 +01002750 stream_add_srv_conn(strm, newsrv);
Willy Tarreau7c669d72008-06-20 15:04:11 +02002751 }
2752}
2753
Willy Tarreau84455332009-03-15 22:34:05 +01002754/* Handle server-side errors for default protocols. It is called whenever a
2755 * connection setup is aborted or a request is aborted in queue. It sets the
Willy Tarreau87b09662015-04-03 00:22:06 +02002756 * stream termination flags so that the caller does not have to worry about
Willy Tarreau4596fe22022-05-17 19:07:51 +02002757 * them. It's installed as ->srv_error for the server-side stream connector.
Willy Tarreau84455332009-03-15 22:34:05 +01002758 */
Willy Tarreaub49672d2022-05-27 10:13:37 +02002759void default_srv_error(struct stream *s, struct stconn *sc)
Willy Tarreau84455332009-03-15 22:34:05 +01002760{
Christopher Faulet50264b42022-03-30 19:39:30 +02002761 int err_type = s->conn_err_type;
Willy Tarreau84455332009-03-15 22:34:05 +01002762 int err = 0, fin = 0;
2763
Christopher Faulet50264b42022-03-30 19:39:30 +02002764 if (err_type & STRM_ET_QUEUE_ABRT) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002765 err = SF_ERR_CLICL;
2766 fin = SF_FINST_Q;
Willy Tarreau84455332009-03-15 22:34:05 +01002767 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002768 else if (err_type & STRM_ET_CONN_ABRT) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002769 err = SF_ERR_CLICL;
2770 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002771 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002772 else if (err_type & STRM_ET_QUEUE_TO) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002773 err = SF_ERR_SRVTO;
2774 fin = SF_FINST_Q;
Willy Tarreau84455332009-03-15 22:34:05 +01002775 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002776 else if (err_type & STRM_ET_QUEUE_ERR) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002777 err = SF_ERR_SRVCL;
2778 fin = SF_FINST_Q;
Willy Tarreau84455332009-03-15 22:34:05 +01002779 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002780 else if (err_type & STRM_ET_CONN_TO) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002781 err = SF_ERR_SRVTO;
2782 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002783 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002784 else if (err_type & STRM_ET_CONN_ERR) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002785 err = SF_ERR_SRVCL;
2786 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002787 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002788 else if (err_type & STRM_ET_CONN_RES) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002789 err = SF_ERR_RESOURCE;
2790 fin = SF_FINST_C;
Willy Tarreau2d400bb2012-05-14 12:11:47 +02002791 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002792 else /* STRM_ET_CONN_OTHER and others */ {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002793 err = SF_ERR_INTERNAL;
2794 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002795 }
2796
Willy Tarreaue7dff022015-04-03 01:14:29 +02002797 if (!(s->flags & SF_ERR_MASK))
Willy Tarreau84455332009-03-15 22:34:05 +01002798 s->flags |= err;
Willy Tarreaue7dff022015-04-03 01:14:29 +02002799 if (!(s->flags & SF_FINST_MASK))
Willy Tarreau84455332009-03-15 22:34:05 +01002800 s->flags |= fin;
2801}
Willy Tarreau7c669d72008-06-20 15:04:11 +02002802
Willy Tarreaue7dff022015-04-03 01:14:29 +02002803/* kill a stream and set the termination flags to <why> (one of SF_ERR_*) */
Willy Tarreau87b09662015-04-03 00:22:06 +02002804void stream_shutdown(struct stream *stream, int why)
Willy Tarreaua2a64e92011-09-07 23:01:56 +02002805{
Christopher Faulet208c7122023-04-13 16:16:15 +02002806 if (stream->scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
Willy Tarreaua2a64e92011-09-07 23:01:56 +02002807 return;
2808
Christopher Fauletdf7cd712023-04-13 15:56:26 +02002809 sc_schedule_shutdown(stream->scb);
Christopher Faulet12762f02023-04-13 15:40:10 +02002810 sc_schedule_abort(stream->scb);
Willy Tarreau87b09662015-04-03 00:22:06 +02002811 stream->task->nice = 1024;
Willy Tarreaue7dff022015-04-03 01:14:29 +02002812 if (!(stream->flags & SF_ERR_MASK))
Willy Tarreau87b09662015-04-03 00:22:06 +02002813 stream->flags |= why;
2814 task_wakeup(stream->task, TASK_WOKEN_OTHER);
Willy Tarreaua2a64e92011-09-07 23:01:56 +02002815}
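/* Example: this is how the "shutdown session" CLI handler further down kills
 * a stream it has located:
 *
 *   stream_shutdown(strm, SF_ERR_KILLED);
 */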
Willy Tarreau9ba2dcc2010-06-14 21:04:55 +02002816
Willy Tarreau5484d582019-05-22 09:33:03 +02002817/* Appends a dump of the state of stream <s> into buffer <buf>, which must have
2818 * been prepared beforehand by its caller, with each line prepended by prefix
2819 * <pfx>, and each line terminated by character <eol>.
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002820 */
Willy Tarreau5484d582019-05-22 09:33:03 +02002821void stream_dump(struct buffer *buf, const struct stream *s, const char *pfx, char eol)
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002822{
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02002823 const struct stconn *scf, *scb;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002824 const struct connection *cof, *cob;
2825 const struct appctx *acf, *acb;
2826 const struct server *srv;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002827 const char *src = "unknown";
2828 const char *dst = "unknown";
2829 char pn[INET6_ADDRSTRLEN];
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002830 const struct channel *req, *res;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002831
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002832 if (!s) {
Willy Tarreau5484d582019-05-22 09:33:03 +02002833 chunk_appendf(buf, "%sstrm=%p%c", pfx, s, eol);
2834 return;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002835 }
2836
Willy Tarreau5484d582019-05-22 09:33:03 +02002837 if (s->obj_type != OBJ_TYPE_STREAM) {
2838 chunk_appendf(buf, "%sstrm=%p [invalid type=%d(%s)]%c",
2839 pfx, s, s->obj_type, obj_type_name(&s->obj_type), eol);
2840 return;
2841 }
2842
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002843 req = &s->req;
2844 res = &s->res;
2845
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02002846 scf = s->scf;
Willy Tarreau46784222023-08-28 17:05:22 +02002847 cof = (scf && scf->sedesc) ? sc_conn(scf) : NULL;
2848 acf = (scf && scf->sedesc) ? sc_appctx(scf) : NULL;
Willy Tarreau71e34c12019-07-17 15:07:06 +02002849 if (cof && cof->src && addr_to_str(cof->src, pn, sizeof(pn)) >= 0)
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002850 src = pn;
2851 else if (acf)
2852 src = acf->applet->name;
2853
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02002854 scb = s->scb;
Willy Tarreau46784222023-08-28 17:05:22 +02002855 cob = (scb && scb->sedesc) ? sc_conn(scb) : NULL;
2856 acb = (scb && scb->sedesc) ? sc_appctx(scb) : NULL;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002857 srv = objt_server(s->target);
2858 if (srv)
2859 dst = srv->id;
2860 else if (acb)
2861 dst = acb->applet->name;
2862
Willy Tarreau5484d582019-05-22 09:33:03 +02002863 chunk_appendf(buf,
Christopher Faulete8f35962021-11-02 17:18:15 +01002864 "%sstrm=%p,%x src=%s fe=%s be=%s dst=%s%c"
2865 "%stxn=%p,%x txn.req=%s,%x txn.rsp=%s,%x%c"
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002866 "%srqf=%x rqa=%x rpf=%x rpa=%x%c"
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02002867 "%sscf=%p,%s,%x scb=%p,%s,%x%c"
Christopher Faulet13a35e52021-12-20 15:34:16 +01002868 "%saf=%p,%u sab=%p,%u%c"
Willy Tarreau5484d582019-05-22 09:33:03 +02002869 "%scof=%p,%x:%s(%p)/%s(%p)/%s(%d)%c"
2870 "%scob=%p,%x:%s(%p)/%s(%p)/%s(%d)%c"
2871 "",
Christopher Faulete8f35962021-11-02 17:18:15 +01002872 pfx, s, s->flags, src, s->sess->fe->id, s->be->id, dst, eol,
2873 pfx, s->txn, (s->txn ? s->txn->flags : 0),
2874 (s->txn ? h1_msg_state_str(s->txn->req.msg_state): "-"), (s->txn ? s->txn->req.flags : 0),
2875 (s->txn ? h1_msg_state_str(s->txn->rsp.msg_state): "-"), (s->txn ? s->txn->rsp.flags : 0), eol,
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002876 pfx, req->flags, req->analysers, res->flags, res->analysers, eol,
Willy Tarreau2e866732023-09-04 15:30:33 +02002877 pfx, scf, scf ? sc_state_str(scf->state) : 0, scf ? scf->flags : 0,
2878 scb, scb ? sc_state_str(scb->state) : 0, scb ? scb->flags : 0, eol,
Christopher Faulet13a35e52021-12-20 15:34:16 +01002879 pfx, acf, acf ? acf->st0 : 0, acb, acb ? acb->st0 : 0, eol,
Willy Tarreau5484d582019-05-22 09:33:03 +02002880 pfx, cof, cof ? cof->flags : 0, conn_get_mux_name(cof), cof?cof->ctx:0, conn_get_xprt_name(cof),
Willy Tarreaua57f3452022-04-11 17:58:06 +02002881 cof ? cof->xprt_ctx : 0, conn_get_ctrl_name(cof), conn_fd(cof), eol,
Willy Tarreau5484d582019-05-22 09:33:03 +02002882 pfx, cob, cob ? cob->flags : 0, conn_get_mux_name(cob), cob?cob->ctx:0, conn_get_xprt_name(cob),
Willy Tarreaua57f3452022-04-11 17:58:06 +02002883 cob ? cob->xprt_ctx : 0, conn_get_ctrl_name(cob), conn_fd(cob), eol);
Willy Tarreau5484d582019-05-22 09:33:03 +02002884}
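/* Example: dump a one-line summary of stream <s> into the shared trash chunk,
 * exactly as stream_dump_and_crash() does below:
 *
 *   chunk_reset(&trash);
 *   stream_dump(&trash, s, "", ' ');
 */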
2885
2886/* Dumps an error message for the stream or appctx designated by <obj>,
Willy Tarreaub106ce12019-05-22 08:57:01 +02002887 * having reached loop rate <rate>, then aborts hoping to retrieve a core.
Willy Tarreau5484d582019-05-22 09:33:03 +02002888 */
2889void stream_dump_and_crash(enum obj_type *obj, int rate)
2890{
2891 const struct stream *s;
Willy Tarreau5484d582019-05-22 09:33:03 +02002892 char *msg = NULL;
2893 const void *ptr;
2894
2895 ptr = s = objt_stream(obj);
2896 if (!s) {
2897 const struct appctx *appctx = objt_appctx(obj);
2898 if (!appctx)
2899 return;
2900 ptr = appctx;
Willy Tarreau0698c802022-05-11 14:09:57 +02002901 s = appctx_strm(appctx);
Willy Tarreau5484d582019-05-22 09:33:03 +02002902 if (!s)
2903 return;
2904 }
2905
Willy Tarreau5484d582019-05-22 09:33:03 +02002906 chunk_reset(&trash);
2907 stream_dump(&trash, s, "", ' ');
Willy Tarreau9753d612020-05-01 16:57:02 +02002908
2909 chunk_appendf(&trash, "filters={");
2910 if (HAS_FILTERS(s)) {
2911 struct filter *filter;
2912
2913 list_for_each_entry(filter, &s->strm_flt.filters, list) {
2914 if (filter->list.p != &s->strm_flt.filters)
2915 chunk_appendf(&trash, ", ");
2916 chunk_appendf(&trash, "%p=\"%s\"", filter, FLT_ID(filter));
2917 }
2918 }
2919 chunk_appendf(&trash, "}");
2920
Willy Tarreau714900a2022-09-02 09:13:12 +02002921 if (ptr != s) { // that's an appctx
2922 const struct appctx *appctx = ptr;
2923
2924 chunk_appendf(&trash, " applet=%p(", appctx->applet);
2925 resolve_sym_name(&trash, NULL, appctx->applet);
2926 chunk_appendf(&trash, ")");
2927
2928 chunk_appendf(&trash, " handler=%p(", appctx->applet->fct);
2929 resolve_sym_name(&trash, NULL, appctx->applet->fct);
2930 chunk_appendf(&trash, ")");
2931 }
2932
Willy Tarreaub106ce12019-05-22 08:57:01 +02002933 memprintf(&msg,
2934 "A bogus %s [%p] is spinning at %d calls per second and refuses to die, "
2935 "aborting now! Please report this error to developers "
2936 "[%s]\n",
Willy Tarreau5484d582019-05-22 09:33:03 +02002937 obj_type_name(obj), ptr, rate, trash.area);
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002938
2939 ha_alert("%s", msg);
2940 send_log(NULL, LOG_EMERG, "%s", msg);
Willy Tarreau2f67e542021-03-02 19:19:41 +01002941 ABORT_NOW();
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002942}
2943
Willy Tarreaua698eb62021-02-24 10:37:01 +01002944/* initialize the required structures */
2945static void init_stream()
2946{
2947 int thr;
2948
2949 for (thr = 0; thr < MAX_THREADS; thr++)
Willy Tarreaub4e34762021-09-30 19:02:18 +02002950 LIST_INIT(&ha_thread_ctx[thr].streams);
Willy Tarreaua698eb62021-02-24 10:37:01 +01002951}
2952INITCALL0(STG_INIT, init_stream);
2953
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002954/* Generates a unique ID based on the given <format>, stores it in the given <strm> and
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002955 * returns the unique ID.
Tim Duesterhus7ad27d42022-05-18 00:22:15 +02002956 *
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002957 * If this function fails to allocate memory, IST_NULL is returned.
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002958 *
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002959 * If an ID is already stored within the stream, nothing happens and the existing
2960 * unique ID is returned.
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002961 */
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002962struct ist stream_generate_unique_id(struct stream *strm, struct list *format)
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002963{
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002964 if (isttest(strm->unique_id)) {
2965 return strm->unique_id;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002966 }
2967 else {
2968 char *unique_id;
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002969 int length;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002970 if ((unique_id = pool_alloc(pool_head_uniqueid)) == NULL)
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002971 return IST_NULL;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002972
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002973 length = build_logline(strm, unique_id, UNIQUEID_LEN, format);
2974 strm->unique_id = ist2(unique_id, length);
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002975
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002976 return strm->unique_id;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002977 }
2978}
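/* Example (sketch of a hypothetical caller; it assumes the frontend still
 * exposes its "unique-id-format" rules as a struct list named format_unique_id):
 *
 *   struct ist uid = stream_generate_unique_id(s, &strm_fe(s)->format_unique_id);
 *   if (!isttest(uid))
 *           return 0;
 *
 * A returned IST_NULL means the allocation failed and no unique ID is available.
 */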
2979
Willy Tarreau8b22a712010-06-18 17:46:06 +02002980/************************************************************************/
2981/* All supported ACL keywords must be declared here. */
2982/************************************************************************/
Christopher Faulet551a6412021-06-25 14:35:29 +02002983static enum act_return stream_action_set_log_level(struct act_rule *rule, struct proxy *px,
2984 struct session *sess, struct stream *s, int flags)
2985{
2986 s->logs.level = (uintptr_t)rule->arg.act.p[0];
2987 return ACT_RET_CONT;
2988}
2989
2990
2991/* Parse a "set-log-level" action. It takes the level value as argument. It
2992 * returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
2993 */
2994static enum act_parse_ret stream_parse_set_log_level(const char **args, int *cur_arg, struct proxy *px,
2995 struct act_rule *rule, char **err)
2996{
2997 int level;
2998
2999 if (!*args[*cur_arg]) {
3000 bad_log_level:
3001 memprintf(err, "expects exactly 1 argument (log level name or 'silent')");
3002 return ACT_RET_PRS_ERR;
3003 }
3004 if (strcmp(args[*cur_arg], "silent") == 0)
3005 level = -1;
3006 else if ((level = get_log_level(args[*cur_arg]) + 1) == 0)
3007 goto bad_log_level;
3008
3009 (*cur_arg)++;
3010
3011 /* Register processing function. */
3012 rule->action_ptr = stream_action_set_log_level;
3013 rule->action = ACT_CUSTOM;
3014 rule->arg.act.p[0] = (void *)(uintptr_t)level;
3015 return ACT_RET_PRS_OK;
3016}
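/* Configuration sketch (assuming the usual "tcp-request content" syntax under
 * which this action is registered at the end of this file):
 *
 *   tcp-request content set-log-level silent if { src 10.0.0.0/8 }
 *
 * "silent" disables logging for the stream; any other value must be a valid
 * log level name accepted by get_log_level().
 */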
3017
Christopher Faulet1da374a2021-06-25 14:46:02 +02003018static enum act_return stream_action_set_nice(struct act_rule *rule, struct proxy *px,
3019 struct session *sess, struct stream *s, int flags)
3020{
3021 s->task->nice = (uintptr_t)rule->arg.act.p[0];
3022 return ACT_RET_CONT;
3023}
3024
3025
3026/* Parse a "set-nice" action. It takes the nice value as argument. It returns
3027 * ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
3028 */
3029static enum act_parse_ret stream_parse_set_nice(const char **args, int *cur_arg, struct proxy *px,
3030 struct act_rule *rule, char **err)
3031{
3032 int nice;
3033
3034 if (!*args[*cur_arg]) {
3036 memprintf(err, "expects exactly 1 argument (integer value)");
3037 return ACT_RET_PRS_ERR;
3038 }
3039
3040 nice = atoi(args[*cur_arg]);
3041 if (nice < -1024)
3042 nice = -1024;
3043 else if (nice > 1024)
3044 nice = 1024;
3045
3046 (*cur_arg)++;
3047
3048 /* Register processing function. */
3049 rule->action_ptr = stream_action_set_nice;
3050 rule->action = ACT_CUSTOM;
3051 rule->arg.act.p[0] = (void *)(uintptr_t)nice;
3052 return ACT_RET_PRS_OK;
3053}
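/* Configuration sketch (assuming the usual "tcp-request content" syntax):
 *
 *   tcp-request content set-nice -64 if { src 10.0.0.0/8 }
 *
 * Values outside the [-1024, 1024] range are clamped by the parser above.
 */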
3054
Christopher Faulet551a6412021-06-25 14:35:29 +02003055
Christopher Fauletae863c62021-03-15 12:03:44 +01003056static enum act_return tcp_action_switch_stream_mode(struct act_rule *rule, struct proxy *px,
3057 struct session *sess, struct stream *s, int flags)
3058{
3059 enum pr_mode mode = (uintptr_t)rule->arg.act.p[0];
3060 const struct mux_proto_list *mux_proto = rule->arg.act.p[1];
3061
3062 if (!IS_HTX_STRM(s) && mode == PR_MODE_HTTP) {
3063 if (!stream_set_http_mode(s, mux_proto)) {
Christopher Faulet7eb837d2023-04-13 15:22:29 +02003064 stream_abort(s);
Christopher Fauletae863c62021-03-15 12:03:44 +01003065 return ACT_RET_ABRT;
3066 }
3067 }
3068 return ACT_RET_STOP;
3069}
3070
3071
3072static int check_tcp_switch_stream_mode(struct act_rule *rule, struct proxy *px, char **err)
3073{
3074 const struct mux_proto_list *mux_ent;
3075 const struct mux_proto_list *mux_proto = rule->arg.act.p[1];
3076 enum pr_mode pr_mode = (uintptr_t)rule->arg.act.p[0];
Aurelien DARRAGON4e49a6f2023-10-19 16:06:03 +02003077 enum proto_proxy_mode mode = conn_pr_mode_to_proto_mode(pr_mode);
Christopher Fauletae863c62021-03-15 12:03:44 +01003078
Christopher Faulet3b6446f2021-03-15 15:10:38 +01003079 if (pr_mode == PR_MODE_HTTP)
3080 px->options |= PR_O_HTTP_UPG;
3081
Christopher Fauletae863c62021-03-15 12:03:44 +01003082 if (mux_proto) {
3083 mux_ent = conn_get_best_mux_entry(mux_proto->token, PROTO_SIDE_FE, mode);
3084 if (!mux_ent || !isteq(mux_ent->token, mux_proto->token)) {
3085 memprintf(err, "MUX protocol '%.*s' is not compatible with the selected mode",
3086 (int)mux_proto->token.len, mux_proto->token.ptr);
3087 return 0;
3088 }
3089 }
3090 else {
3091 mux_ent = conn_get_best_mux_entry(IST_NULL, PROTO_SIDE_FE, mode);
3092 if (!mux_ent) {
3093 memprintf(err, "Unable to find compatible MUX protocol with the selected mode");
3094 return 0;
3095 }
3096 }
3097
3098 /* Update the mux */
3099 rule->arg.act.p[1] = (void *)mux_ent;
3100 return 1;
3101
3102}
3103
3104static enum act_parse_ret stream_parse_switch_mode(const char **args, int *cur_arg,
3105 struct proxy *px, struct act_rule *rule,
3106 char **err)
3107{
3108 const struct mux_proto_list *mux_proto = NULL;
3109 struct ist proto;
3110 enum pr_mode mode;
3111
3112 /* must have at least the mode */
3113 if (*(args[*cur_arg]) == 0) {
3114 memprintf(err, "'%s %s' expects a mode as argument.", args[0], args[*cur_arg-1]);
3115 return ACT_RET_PRS_ERR;
3116 }
3117
3118 if (!(px->cap & PR_CAP_FE)) {
3119 memprintf(err, "'%s %s' not allowed because %s '%s' has no frontend capability",
3120 args[0], args[*cur_arg-1], proxy_type_str(px), px->id);
3121 return ACT_RET_PRS_ERR;
3122 }
3123	/* Check the mode. For now "tcp" is disabled because downgrade is not
3124 * supported and PT is the only TCP mux.
3125 */
3126 if (strcmp(args[*cur_arg], "http") == 0)
3127 mode = PR_MODE_HTTP;
3128 else {
3129 memprintf(err, "'%s %s' expects a valid mode (got '%s').", args[0], args[*cur_arg-1], args[*cur_arg]);
3130 return ACT_RET_PRS_ERR;
3131 }
3132
3133 /* check the proto, if specified */
3134 if (*(args[*cur_arg+1]) && strcmp(args[*cur_arg+1], "proto") == 0) {
3135 if (*(args[*cur_arg+2]) == 0) {
3136 memprintf(err, "'%s %s': '%s' expects a protocol as argument.",
3137 args[0], args[*cur_arg-1], args[*cur_arg+1]);
3138 return ACT_RET_PRS_ERR;
3139 }
3140
Tim Duesterhusb113b5c2021-09-15 13:58:44 +02003141 proto = ist(args[*cur_arg + 2]);
Christopher Fauletae863c62021-03-15 12:03:44 +01003142 mux_proto = get_mux_proto(proto);
3143 if (!mux_proto) {
3144 memprintf(err, "'%s %s': '%s' expects a valid MUX protocol, if specified (got '%s')",
3145 args[0], args[*cur_arg-1], args[*cur_arg+1], args[*cur_arg+2]);
3146 return ACT_RET_PRS_ERR;
3147 }
3148 *cur_arg += 2;
3149 }
3150
3151 (*cur_arg)++;
3152
3153 /* Register processing function. */
3154 rule->action_ptr = tcp_action_switch_stream_mode;
3155 rule->check_ptr = check_tcp_switch_stream_mode;
3156 rule->action = ACT_CUSTOM;
3157 rule->arg.act.p[0] = (void *)(uintptr_t)mode;
3158 rule->arg.act.p[1] = (void *)mux_proto;
3159 return ACT_RET_PRS_OK;
3160}
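/* Configuration sketch (assuming the usual "tcp-request content" syntax):
 * upgrade a TCP frontend's stream to HTTP, optionally forcing a mux protocol.
 * "h1" below is only an example of a registered mux token:
 *
 *   tcp-request content switch-mode http proto h1
 */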
Willy Tarreau8b22a712010-06-18 17:46:06 +02003161
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003162/* Parse a "use-service" action. It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error. */
3163static enum act_parse_ret stream_parse_use_service(const char **args, int *cur_arg,
3164 struct proxy *px, struct act_rule *rule,
3165 char **err)
3166{
3167 struct action_kw *kw;
3168
3169 /* Check if the service name exists. */
3170 if (*(args[*cur_arg]) == 0) {
3171 memprintf(err, "'%s' expects a service name.", args[0]);
Thierry FOURNIER337eae12015-11-26 19:48:04 +01003172 return ACT_RET_PRS_ERR;
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003173 }
3174
3175 /* lookup for keyword corresponding to a service. */
3176 kw = action_lookup(&service_keywords, args[*cur_arg]);
3177 if (!kw) {
3178 memprintf(err, "'%s' unknown service name.", args[1]);
3179 return ACT_RET_PRS_ERR;
3180 }
3181 (*cur_arg)++;
3182
3183 /* executes specific rule parser. */
3184 rule->kw = kw;
3185 if (kw->parse((const char **)args, cur_arg, px, rule, err) == ACT_RET_PRS_ERR)
3186 return ACT_RET_PRS_ERR;
3187
3188 /* Register processing function. */
3189 rule->action_ptr = process_use_service;
3190 rule->action = ACT_CUSTOM;
3191
3192 return ACT_RET_PRS_OK;
3193}
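/* Configuration sketch (assuming the usual "tcp-request content" syntax): the
 * service name must match a keyword registered via service_keywords_register(),
 * for instance a Lua service exposed as "lua.<name>":
 *
 *   tcp-request content use-service lua.my_service
 */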
3194
3195void service_keywords_register(struct action_kw_list *kw_list)
3196{
Willy Tarreau2b718102021-04-21 07:32:39 +02003197 LIST_APPEND(&service_keywords, &kw_list->list);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003198}
3199
Thierry Fournier87e53992020-11-28 19:32:14 +01003200struct action_kw *service_find(const char *kw)
3201{
3202 return action_lookup(&service_keywords, kw);
3203}
3204
Willy Tarreau5fcc1002022-03-29 15:10:44 +02003205/* Lists the known services on <out>. If <out> is null, emit them on stdout one
3206 * per line.
3207 */
Willy Tarreau679bba12019-03-19 08:08:10 +01003208void list_services(FILE *out)
3209{
Willy Tarreau3f0b2e82022-03-30 12:12:44 +02003210 const struct action_kw *akwp, *akwn;
Willy Tarreau679bba12019-03-19 08:08:10 +01003211 struct action_kw_list *kw_list;
3212 int found = 0;
3213 int i;
3214
Willy Tarreau5fcc1002022-03-29 15:10:44 +02003215 if (out)
3216 fprintf(out, "Available services :");
Willy Tarreau3f0b2e82022-03-30 12:12:44 +02003217
3218 for (akwn = akwp = NULL;; akwp = akwn) {
3219 list_for_each_entry(kw_list, &service_keywords, list) {
3220 for (i = 0; kw_list->kw[i].kw != NULL; i++) {
3221 if (strordered(akwp ? akwp->kw : NULL,
3222 kw_list->kw[i].kw,
3223 akwn != akwp ? akwn->kw : NULL))
3224 akwn = &kw_list->kw[i];
3225 found = 1;
3226 }
Willy Tarreau679bba12019-03-19 08:08:10 +01003227 }
Willy Tarreau3f0b2e82022-03-30 12:12:44 +02003228 if (akwn == akwp)
3229 break;
3230 if (out)
3231 fprintf(out, " %s", akwn->kw);
3232 else
3233 printf("%s\n", akwn->kw);
Willy Tarreau679bba12019-03-19 08:08:10 +01003234 }
Willy Tarreau5fcc1002022-03-29 15:10:44 +02003235 if (!found && out)
Willy Tarreau679bba12019-03-19 08:08:10 +01003236 fprintf(out, " none\n");
3237}
William Lallemand4c5b4d52016-11-21 08:51:11 +01003238
Willy Tarreau39f097d2022-05-03 10:49:00 +02003239/* appctx context used by the "show sess" command */
3240
3241struct show_sess_ctx {
3242 struct bref bref; /* back-reference from the session being dumped */
3243 void *target; /* session we want to dump, or NULL for all */
3244 unsigned int thr; /* the thread number being explored (0..MAX_THREADS-1) */
3245 unsigned int uid; /* if non-null, the uniq_id of the session being dumped */
3246 int section; /* section of the session being dumped */
3247 int pos; /* last position of the current session's buffer */
3248};
3249
Willy Tarreau4596fe22022-05-17 19:07:51 +02003250/* This function dumps a complete stream state onto the stream connector's
William Lallemand4c5b4d52016-11-21 08:51:11 +01003251 * read buffer. The stream has to be set in strm. It returns 0 if the output
3252 * buffer is full and it needs to be called again, otherwise non-zero. It is
3253 * designed to be called from cli_io_handler_dump_sess() below.
3254 */
Willy Tarreaub49672d2022-05-27 10:13:37 +02003255static int stats_dump_full_strm_to_buffer(struct stconn *sc, struct stream *strm)
William Lallemand4c5b4d52016-11-21 08:51:11 +01003256{
Willy Tarreaub49672d2022-05-27 10:13:37 +02003257 struct appctx *appctx = __sc_appctx(sc);
Willy Tarreau39f097d2022-05-03 10:49:00 +02003258 struct show_sess_ctx *ctx = appctx->svcctx;
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02003259 struct stconn *scf, *scb;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003260 struct tm tm;
3261 extern const char *monthname[12];
3262 char pn[INET6_ADDRSTRLEN];
3263 struct connection *conn;
3264 struct appctx *tmpctx;
3265
3266 chunk_reset(&trash);
3267
Willy Tarreau39f097d2022-05-03 10:49:00 +02003268 if (ctx->section > 0 && ctx->uid != strm->uniq_id) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003269 /* stream changed, no need to go any further */
3270 chunk_appendf(&trash, " *** session terminated while we were watching it ***\n");
Willy Tarreaud0a06d52022-05-18 15:07:19 +02003271 if (applet_putchk(appctx, &trash) == -1)
Willy Tarreaue6e52362019-01-04 17:42:57 +01003272 goto full;
3273 goto done;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003274 }
3275
Willy Tarreau39f097d2022-05-03 10:49:00 +02003276 switch (ctx->section) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003277 case 0: /* main status of the stream */
Willy Tarreau39f097d2022-05-03 10:49:00 +02003278 ctx->uid = strm->uniq_id;
3279 ctx->section = 1;
Willy Tarreau46984792022-11-14 07:09:39 +01003280 __fallthrough;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003281
3282 case 1:
3283 get_localtime(strm->logs.accept_date.tv_sec, &tm);
3284 chunk_appendf(&trash,
3285 "%p: [%02d/%s/%04d:%02d:%02d:%02d.%06d] id=%u proto=%s",
3286 strm,
3287 tm.tm_mday, monthname[tm.tm_mon], tm.tm_year+1900,
3288 tm.tm_hour, tm.tm_min, tm.tm_sec, (int)(strm->logs.accept_date.tv_usec),
3289 strm->uniq_id,
Willy Tarreaub7436612020-08-28 19:51:44 +02003290 strm_li(strm) ? strm_li(strm)->rx.proto->name : "?");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003291
3292 conn = objt_conn(strm_orig(strm));
Willy Tarreau71e34c12019-07-17 15:07:06 +02003293 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003294 case AF_INET:
3295 case AF_INET6:
3296 chunk_appendf(&trash, " source=%s:%d\n",
Erwan Le Goas2a2e46f2022-09-28 17:02:30 +02003297 HA_ANON_CLI(pn), get_host_port(conn->src));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003298 break;
3299 case AF_UNIX:
3300 chunk_appendf(&trash, " source=unix:%d\n", strm_li(strm)->luid);
3301 break;
3302 default:
3303 /* no more information to print right now */
3304 chunk_appendf(&trash, "\n");
3305 break;
3306 }
3307
3308 chunk_appendf(&trash,
Christopher Faulet50264b42022-03-30 19:39:30 +02003309 " flags=0x%x, conn_retries=%d, conn_exp=%s conn_et=0x%03x srv_conn=%p, pend_pos=%p waiting=%d epoch=%#x\n",
Christopher Fauletae024ce2022-03-29 19:02:31 +02003310 strm->flags, strm->conn_retries,
3311 strm->conn_exp ?
3312 tick_is_expired(strm->conn_exp, now_ms) ? "<PAST>" :
3313 human_time(TICKS_TO_MS(strm->conn_exp - now_ms),
3314 TICKS_TO_MS(1000)) : "<NEVER>",
Christopher Faulet50264b42022-03-30 19:39:30 +02003315 strm->conn_err_type, strm->srv_conn, strm->pend_pos,
Willy Tarreau2b718102021-04-21 07:32:39 +02003316 LIST_INLIST(&strm->buffer_wait.list), strm->stream_epoch);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003317
3318 chunk_appendf(&trash,
3319 " frontend=%s (id=%u mode=%s), listener=%s (id=%u)",
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003320 HA_ANON_CLI(strm_fe(strm)->id), strm_fe(strm)->uuid, proxy_mode_str(strm_fe(strm)->mode),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003321 strm_li(strm) ? strm_li(strm)->name ? strm_li(strm)->name : "?" : "?",
3322 strm_li(strm) ? strm_li(strm)->luid : 0);
3323
Willy Tarreau71e34c12019-07-17 15:07:06 +02003324 switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003325 case AF_INET:
3326 case AF_INET6:
3327 chunk_appendf(&trash, " addr=%s:%d\n",
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003328 HA_ANON_CLI(pn), get_host_port(conn->dst));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003329 break;
3330 case AF_UNIX:
3331 chunk_appendf(&trash, " addr=unix:%d\n", strm_li(strm)->luid);
3332 break;
3333 default:
3334 /* no more information to print right now */
3335 chunk_appendf(&trash, "\n");
3336 break;
3337 }
3338
3339 if (strm->be->cap & PR_CAP_BE)
3340 chunk_appendf(&trash,
3341 " backend=%s (id=%u mode=%s)",
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003342 HA_ANON_CLI(strm->be->id),
William Lallemandb0dfd092022-03-08 12:05:31 +01003343 strm->be->uuid, proxy_mode_str(strm->be->mode));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003344 else
3345 chunk_appendf(&trash, " backend=<NONE> (id=-1 mode=-)");
3346
Willy Tarreaufd9417b2022-05-18 16:23:22 +02003347 conn = sc_conn(strm->scb);
Willy Tarreau71e34c12019-07-17 15:07:06 +02003348 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003349 case AF_INET:
3350 case AF_INET6:
3351 chunk_appendf(&trash, " addr=%s:%d\n",
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003352 HA_ANON_CLI(pn), get_host_port(conn->src));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003353 break;
3354 case AF_UNIX:
3355 chunk_appendf(&trash, " addr=unix\n");
3356 break;
3357 default:
3358 /* no more information to print right now */
3359 chunk_appendf(&trash, "\n");
3360 break;
3361 }
3362
3363 if (strm->be->cap & PR_CAP_BE)
3364 chunk_appendf(&trash,
3365 " server=%s (id=%u)",
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003366 objt_server(strm->target) ? HA_ANON_CLI(__objt_server(strm->target)->id) : "<none>",
Willy Tarreau88bc8002021-12-06 07:01:02 +00003367 objt_server(strm->target) ? __objt_server(strm->target)->puid : 0);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003368 else
3369 chunk_appendf(&trash, " server=<NONE> (id=-1)");
3370
Willy Tarreau71e34c12019-07-17 15:07:06 +02003371 switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003372 case AF_INET:
3373 case AF_INET6:
3374 chunk_appendf(&trash, " addr=%s:%d\n",
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003375 HA_ANON_CLI(pn), get_host_port(conn->dst));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003376 break;
3377 case AF_UNIX:
3378 chunk_appendf(&trash, " addr=unix\n");
3379 break;
3380 default:
3381 /* no more information to print right now */
3382 chunk_appendf(&trash, "\n");
3383 break;
3384 }
3385
3386 chunk_appendf(&trash,
Willy Tarreaudd75b642022-07-15 16:18:43 +02003387 " task=%p (state=0x%02x nice=%d calls=%u rate=%u exp=%s tid=%d(%d/%d)%s",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003388 strm->task,
3389 strm->task->state,
Willy Tarreau2e9c1d22019-04-24 08:28:31 +02003390 strm->task->nice, strm->task->calls, read_freq_ctr(&strm->call_rate),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003391 strm->task->expire ?
3392 tick_is_expired(strm->task->expire, now_ms) ? "<PAST>" :
3393 human_time(TICKS_TO_MS(strm->task->expire - now_ms),
3394 TICKS_TO_MS(1000)) : "<NEVER>",
Willy Tarreau6ef52f42022-06-15 14:19:48 +02003395 strm->task->tid,
Willy Tarreaudd75b642022-07-15 16:18:43 +02003396 ha_thread_info[strm->task->tid].tgid,
3397 ha_thread_info[strm->task->tid].ltid,
William Lallemand4c5b4d52016-11-21 08:51:11 +01003398 task_in_rq(strm->task) ? ", running" : "");
3399
3400 chunk_appendf(&trash,
3401 " age=%s)\n",
Willy Tarreau18420142023-11-17 18:51:26 +01003402 human_time(ns_to_sec(now_ns) - ns_to_sec(strm->logs.request_ts), 1));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003403
3404 if (strm->txn)
3405 chunk_appendf(&trash,
Christopher Fauletbcac7862019-07-17 10:46:50 +02003406 " txn=%p flags=0x%x meth=%d status=%d req.st=%s rsp.st=%s req.f=0x%02x rsp.f=0x%02x\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003407 strm->txn, strm->txn->flags, strm->txn->meth, strm->txn->status,
Willy Tarreau7778b592019-01-07 10:38:10 +01003408 h1_msg_state_str(strm->txn->req.msg_state), h1_msg_state_str(strm->txn->rsp.msg_state),
Christopher Fauletbcac7862019-07-17 10:46:50 +02003409 strm->txn->req.flags, strm->txn->rsp.flags);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003410
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02003411 scf = strm->scf;
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003412 chunk_appendf(&trash, " scf=%p flags=0x%08x state=%s endp=%s,%p,0x%08x sub=%d",
Willy Tarreau74568cf2022-05-27 09:03:30 +02003413 scf, scf->flags, sc_state_str(scf->state),
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02003414 (sc_ep_test(scf, SE_FL_T_MUX) ? "CONN" : (sc_ep_test(scf, SE_FL_T_APPLET) ? "APPCTX" : "NONE")),
3415 scf->sedesc->se, sc_ep_get(scf), scf->wait_event.events);
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003416 chunk_appendf(&trash, " rex=%s",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003417 sc_ep_rcv_ex(scf) ? human_time(TICKS_TO_MS(sc_ep_rcv_ex(scf) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003418 chunk_appendf(&trash, " wex=%s\n",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003419 sc_ep_snd_ex(scf) ? human_time(TICKS_TO_MS(sc_ep_snd_ex(scf) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
Olivier Houchard9aaf7782017-09-13 18:30:23 +02003420
Willy Tarreaufd9417b2022-05-18 16:23:22 +02003421 if ((conn = sc_conn(scf)) != NULL) {
Willy Tarreauce577772022-09-02 15:00:48 +02003422 if (conn->mux && conn->mux->show_sd) {
3423 chunk_appendf(&trash, " ");
3424 conn->mux->show_sd(&trash, scf->sedesc, " ");
3425 chunk_appendf(&trash, "\n");
3426 }
3427
William Lallemand4c5b4d52016-11-21 08:51:11 +01003428 chunk_appendf(&trash,
Christopher Faulet13a35e52021-12-20 15:34:16 +01003429 " co0=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003430 conn,
3431 conn_get_ctrl_name(conn),
3432 conn_get_xprt_name(conn),
Willy Tarreau53a47662017-08-28 10:53:00 +02003433 conn_get_mux_name(conn),
Willy Tarreauf8d0ab52022-05-18 18:00:31 +02003434 sc_get_data_name(scf),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003435 obj_type_name(conn->target),
3436 obj_base_ptr(conn->target));
3437
3438 chunk_appendf(&trash,
Willy Tarreau76913d32019-08-30 14:33:11 +02003439 " flags=0x%08x fd=%d fd.state=%02x updt=%d fd.tmask=0x%lx\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003440 conn->flags,
Willy Tarreaua57f3452022-04-11 17:58:06 +02003441 conn_fd(conn),
3442 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].state : 0,
Willy Tarreau6d3c5012022-07-05 19:21:06 +02003443 conn_fd(conn) >= 0 ? !!(fdtab[conn->handle.fd].update_mask & ti->ltid_bit) : 0,
Willy Tarreaua57f3452022-04-11 17:58:06 +02003444 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].thread_mask: 0);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003445 }
Willy Tarreau8e7c6e62022-05-18 17:58:02 +02003446 else if ((tmpctx = sc_appctx(scf)) != NULL) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003447 chunk_appendf(&trash,
Willy Tarreau6a28a302022-09-07 09:17:45 +02003448 " app0=%p st0=%d st1=%d applet=%s tid=%d nice=%d calls=%u rate=%u\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003449 tmpctx,
3450 tmpctx->st0,
3451 tmpctx->st1,
Christopher Fauletf0205062017-11-15 20:56:43 +01003452 tmpctx->applet->name,
Willy Tarreau6ef52f42022-06-15 14:19:48 +02003453 tmpctx->t->tid,
Willy Tarreau6a28a302022-09-07 09:17:45 +02003454 tmpctx->t->nice, tmpctx->t->calls, read_freq_ctr(&tmpctx->call_rate));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003455 }
3456
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02003457 scb = strm->scb;
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003458 chunk_appendf(&trash, " scb=%p flags=0x%08x state=%s endp=%s,%p,0x%08x sub=%d",
Willy Tarreau74568cf2022-05-27 09:03:30 +02003459 scb, scb->flags, sc_state_str(scb->state),
Willy Tarreau7cb9e6c2022-05-17 19:40:40 +02003460 (sc_ep_test(scb, SE_FL_T_MUX) ? "CONN" : (sc_ep_test(scb, SE_FL_T_APPLET) ? "APPCTX" : "NONE")),
3461 scb->sedesc->se, sc_ep_get(scb), scb->wait_event.events);
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003462 chunk_appendf(&trash, " rex=%s",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003463 sc_ep_rcv_ex(scb) ? human_time(TICKS_TO_MS(sc_ep_rcv_ex(scb) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003464 chunk_appendf(&trash, " wex=%s\n",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003465 sc_ep_snd_ex(scb) ? human_time(TICKS_TO_MS(sc_ep_snd_ex(scb) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
Willy Tarreaub605c422022-05-17 17:04:55 +02003466
Willy Tarreaufd9417b2022-05-18 16:23:22 +02003467 if ((conn = sc_conn(scb)) != NULL) {
Willy Tarreauce577772022-09-02 15:00:48 +02003468 if (conn->mux && conn->mux->show_sd) {
3469 chunk_appendf(&trash, " ");
3470 conn->mux->show_sd(&trash, scb->sedesc, " ");
3471 chunk_appendf(&trash, "\n");
3472 }
3473
William Lallemand4c5b4d52016-11-21 08:51:11 +01003474 chunk_appendf(&trash,
Christopher Faulet13a35e52021-12-20 15:34:16 +01003475 " co1=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003476 conn,
3477 conn_get_ctrl_name(conn),
3478 conn_get_xprt_name(conn),
Willy Tarreau53a47662017-08-28 10:53:00 +02003479 conn_get_mux_name(conn),
Willy Tarreauf8d0ab52022-05-18 18:00:31 +02003480 sc_get_data_name(scb),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003481 obj_type_name(conn->target),
3482 obj_base_ptr(conn->target));
3483
3484 chunk_appendf(&trash,
Willy Tarreau76913d32019-08-30 14:33:11 +02003485 " flags=0x%08x fd=%d fd.state=%02x updt=%d fd.tmask=0x%lx\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003486 conn->flags,
Willy Tarreaua57f3452022-04-11 17:58:06 +02003487 conn_fd(conn),
3488 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].state : 0,
Willy Tarreau6d3c5012022-07-05 19:21:06 +02003489 conn_fd(conn) >= 0 ? !!(fdtab[conn->handle.fd].update_mask & ti->ltid_bit) : 0,
Willy Tarreaua57f3452022-04-11 17:58:06 +02003490 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].thread_mask: 0);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003491 }
Willy Tarreau8e7c6e62022-05-18 17:58:02 +02003492 else if ((tmpctx = sc_appctx(scb)) != NULL) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003493 chunk_appendf(&trash,
Willy Tarreau6a28a302022-09-07 09:17:45 +02003494 " app1=%p st0=%d st1=%d applet=%s tid=%d nice=%d calls=%u rate=%u\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003495 tmpctx,
3496 tmpctx->st0,
3497 tmpctx->st1,
Christopher Fauletf0205062017-11-15 20:56:43 +01003498 tmpctx->applet->name,
Willy Tarreau6ef52f42022-06-15 14:19:48 +02003499 tmpctx->t->tid,
Willy Tarreau6a28a302022-09-07 09:17:45 +02003500 tmpctx->t->nice, tmpctx->t->calls, read_freq_ctr(&tmpctx->call_rate));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003501 }
3502
3503 chunk_appendf(&trash,
3504 " req=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003505 " an_exp=%s buf=%p data=%p o=%u p=%u i=%u size=%u\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003506 &strm->req,
3507 strm->req.flags, strm->req.analysers,
3508 strm->req.pipe ? strm->req.pipe->data : 0,
3509 strm->req.to_forward, strm->req.total,
3510 strm->req.analyse_exp ?
3511 human_time(TICKS_TO_MS(strm->req.analyse_exp - now_ms),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003512 TICKS_TO_MS(1000)) : "<NEVER>",
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003513 &strm->req.buf,
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003514 b_orig(&strm->req.buf), (unsigned int)co_data(&strm->req),
Christopher Fauletbcac7862019-07-17 10:46:50 +02003515 (unsigned int)ci_head_ofs(&strm->req), (unsigned int)ci_data(&strm->req),
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003516 (unsigned int)strm->req.buf.size);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003517
Christopher Fauletb9af8812019-01-04 14:30:44 +01003518 if (IS_HTX_STRM(strm)) {
3519 struct htx *htx = htxbuf(&strm->req.buf);
3520
3521 chunk_appendf(&trash,
Willy Tarreaub84e67f2019-01-07 10:01:34 +01003522 " htx=%p flags=0x%x size=%u data=%u used=%u wrap=%s extra=%llu\n",
Christopher Faulet192c6a22019-06-11 16:32:24 +02003523 htx, htx->flags, htx->size, htx->data, htx_nbblks(htx),
Christopher Faulet28f29c72019-04-30 17:55:45 +02003524 (htx->tail >= htx->head) ? "NO" : "YES",
Christopher Fauletb9af8812019-01-04 14:30:44 +01003525 (unsigned long long)htx->extra);
3526 }
Christopher Fauletd4762b82021-10-12 11:02:48 +02003527 if (HAS_FILTERS(strm) && strm_flt(strm)->current[0]) {
3528 struct filter *flt = strm_flt(strm)->current[0];
3529
3530 chunk_appendf(&trash, " current_filter=%p (id=\"%s\" flags=0x%x pre=0x%x post=0x%x) \n",
3531 flt, flt->config->id, flt->flags, flt->pre_analyzers, flt->post_analyzers);
3532 }
Christopher Fauletb9af8812019-01-04 14:30:44 +01003533
William Lallemand4c5b4d52016-11-21 08:51:11 +01003534 chunk_appendf(&trash,
3535 " res=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003536 " an_exp=%s buf=%p data=%p o=%u p=%u i=%u size=%u\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003537 &strm->res,
3538 strm->res.flags, strm->res.analysers,
3539 strm->res.pipe ? strm->res.pipe->data : 0,
3540 strm->res.to_forward, strm->res.total,
3541 strm->res.analyse_exp ?
3542 human_time(TICKS_TO_MS(strm->res.analyse_exp - now_ms),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003543 TICKS_TO_MS(1000)) : "<NEVER>",
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003544 &strm->res.buf,
3545 b_orig(&strm->res.buf), (unsigned int)co_data(&strm->res),
Christopher Fauletbcac7862019-07-17 10:46:50 +02003546 (unsigned int)ci_head_ofs(&strm->res), (unsigned int)ci_data(&strm->res),
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003547 (unsigned int)strm->res.buf.size);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003548
Christopher Fauletb9af8812019-01-04 14:30:44 +01003549 if (IS_HTX_STRM(strm)) {
3550 struct htx *htx = htxbuf(&strm->res.buf);
3551
3552 chunk_appendf(&trash,
3553 " htx=%p flags=0x%x size=%u data=%u used=%u wrap=%s extra=%llu\n",
Christopher Faulet192c6a22019-06-11 16:32:24 +02003554 htx, htx->flags, htx->size, htx->data, htx_nbblks(htx),
Christopher Faulet28f29c72019-04-30 17:55:45 +02003555 (htx->tail >= htx->head) ? "NO" : "YES",
Christopher Fauletb9af8812019-01-04 14:30:44 +01003556 (unsigned long long)htx->extra);
3557 }
Christopher Fauletd4762b82021-10-12 11:02:48 +02003558 if (HAS_FILTERS(strm) && strm_flt(strm)->current[1]) {
3559 struct filter *flt = strm_flt(strm)->current[1];
3560
3561 chunk_appendf(&trash, " current_filter=%p (id=\"%s\" flags=0x%x pre=0x%x post=0x%x) \n",
3562 flt, flt->config->id, flt->flags, flt->pre_analyzers, flt->post_analyzers);
3563 }
Christopher Fauletb9af8812019-01-04 14:30:44 +01003564
Willy Tarreau1274e102021-10-11 09:49:03 +02003565 if (strm->current_rule_list && strm->current_rule) {
3566 const struct act_rule *rule = strm->current_rule;
Christopher Faulet8c67ece2021-10-12 11:10:31 +02003567 chunk_appendf(&trash, " current_rule=\"%s\" [%s:%d]\n", rule->kw->kw, rule->conf.file, rule->conf.line);
Willy Tarreau1274e102021-10-11 09:49:03 +02003568 }
3569
Willy Tarreaud0a06d52022-05-18 15:07:19 +02003570 if (applet_putchk(appctx, &trash) == -1)
Willy Tarreaue6e52362019-01-04 17:42:57 +01003571 goto full;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003572
3573 /* use other states to dump the contents */
3574 }
3575 /* end of dump */
Willy Tarreaue6e52362019-01-04 17:42:57 +01003576 done:
Willy Tarreau39f097d2022-05-03 10:49:00 +02003577 ctx->uid = 0;
3578 ctx->section = 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003579 return 1;
Willy Tarreaue6e52362019-01-04 17:42:57 +01003580 full:
3581 return 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003582}
3583
Aurélien Nephtaliabbf6072018-04-18 13:26:46 +02003584static int cli_parse_show_sess(char **args, char *payload, struct appctx *appctx, void *private)
William Lallemand4c5b4d52016-11-21 08:51:11 +01003585{
Willy Tarreau39f097d2022-05-03 10:49:00 +02003586 struct show_sess_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
3587
William Lallemand4c5b4d52016-11-21 08:51:11 +01003588 if (!cli_has_level(appctx, ACCESS_LVL_OPER))
3589 return 1;
3590
3591 if (*args[2] && strcmp(args[2], "all") == 0)
Willy Tarreau39f097d2022-05-03 10:49:00 +02003592 ctx->target = (void *)-1;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003593 else if (*args[2])
Willy Tarreau39f097d2022-05-03 10:49:00 +02003594 ctx->target = (void *)strtoul(args[2], NULL, 0);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003595 else
Willy Tarreau39f097d2022-05-03 10:49:00 +02003596 ctx->target = NULL;
3597 ctx->section = 0; /* start with stream status */
3598 ctx->pos = 0;
3599 ctx->thr = 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003600
Willy Tarreauf3629f82022-05-03 11:05:39 +02003601	/* The back-ref must be reset; it will be detected and set by
3602 * the dump code upon first invocation.
3603 */
3604 LIST_INIT(&ctx->bref.users);
3605
Willy Tarreaub9813182021-02-24 11:29:51 +01003606 /* let's set our own stream's epoch to the current one and increment
3607 * it so that we know which streams were already there before us.
3608 */
Willy Tarreau0698c802022-05-11 14:09:57 +02003609 appctx_strm(appctx)->stream_epoch = _HA_ATOMIC_FETCH_ADD(&stream_epoch, 1);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003610 return 0;
3611}
3612
Willy Tarreau4596fe22022-05-17 19:07:51 +02003613/* This function dumps all streams' states onto the stream connector's
William Lallemand4c5b4d52016-11-21 08:51:11 +01003614 * read buffer. It returns 0 if the output buffer is full and it needs
Willy Tarreaue6e52362019-01-04 17:42:57 +01003615 * to be called again, otherwise non-zero. It proceeds in an isolated
3616 * thread so there is no thread safety issue here.
William Lallemand4c5b4d52016-11-21 08:51:11 +01003617 */
3618static int cli_io_handler_dump_sess(struct appctx *appctx)
3619{
Willy Tarreau39f097d2022-05-03 10:49:00 +02003620 struct show_sess_ctx *ctx = appctx->svcctx;
Willy Tarreauc12b3212022-05-27 11:08:15 +02003621 struct stconn *sc = appctx_sc(appctx);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003622 struct connection *conn;
3623
Willy Tarreaue6e52362019-01-04 17:42:57 +01003624 thread_isolate();
3625
Willy Tarreaubb4e2892022-05-03 11:10:19 +02003626 if (ctx->thr >= global.nbthread) {
3627 /* already terminated */
3628 goto done;
3629 }
3630
Christopher Faulet7faac7c2023-04-04 10:05:27 +02003631 /* FIXME: Don't watch the other side !*/
Christopher Faulet208c7122023-04-13 16:16:15 +02003632 if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE)) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003633 /* If we're forced to shut down, we might have to remove our
3634 * reference to the last stream being dumped.
3635 */
Willy Tarreauf3629f82022-05-03 11:05:39 +02003636 if (!LIST_ISEMPTY(&ctx->bref.users)) {
3637 LIST_DELETE(&ctx->bref.users);
3638 LIST_INIT(&ctx->bref.users);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003639 }
Willy Tarreaue6e52362019-01-04 17:42:57 +01003640 goto done;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003641 }
3642
3643 chunk_reset(&trash);
3644
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003645 /* first, let's detach the back-ref from a possible previous stream */
3646 if (!LIST_ISEMPTY(&ctx->bref.users)) {
3647 LIST_DELETE(&ctx->bref.users);
3648 LIST_INIT(&ctx->bref.users);
3649 } else if (!ctx->bref.ref) {
3650 /* first call, start with first stream */
3651 ctx->bref.ref = ha_thread_ctx[ctx->thr].streams.n;
3652 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003653
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003654 /* and start from where we stopped */
3655 while (1) {
3656 char pn[INET6_ADDRSTRLEN];
3657 struct stream *curr_strm;
3658		int done = 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003659
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003660 if (ctx->bref.ref == &ha_thread_ctx[ctx->thr].streams)
3661 done = 1;
3662 else {
3663 /* check if we've found a stream created after issuing the "show sess" */
3664 curr_strm = LIST_ELEM(ctx->bref.ref, struct stream *, list);
Willy Tarreau0698c802022-05-11 14:09:57 +02003665 if ((int)(curr_strm->stream_epoch - appctx_strm(appctx)->stream_epoch) > 0)
Willy Tarreaua698eb62021-02-24 10:37:01 +01003666 done = 1;
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003667 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003668
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003669 if (done) {
3670 ctx->thr++;
3671 if (ctx->thr >= global.nbthread)
3672 break;
3673 ctx->bref.ref = ha_thread_ctx[ctx->thr].streams.n;
3674 continue;
3675 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003676
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003677 if (ctx->target) {
3678 if (ctx->target != (void *)-1 && ctx->target != curr_strm)
3679 goto next_sess;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003680
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003681 LIST_APPEND(&curr_strm->back_refs, &ctx->bref.users);
3682 /* call the proper dump() function and return if we're missing space */
Willy Tarreaub49672d2022-05-27 10:13:37 +02003683 if (!stats_dump_full_strm_to_buffer(sc, curr_strm))
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003684 goto full;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003685
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003686 /* stream dump complete */
3687 LIST_DELETE(&ctx->bref.users);
3688 LIST_INIT(&ctx->bref.users);
3689 if (ctx->target != (void *)-1) {
3690 ctx->target = NULL;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003691 break;
3692 }
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003693 else
3694 goto next_sess;
3695 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003696
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003697 chunk_appendf(&trash,
3698 "%p: proto=%s",
3699 curr_strm,
3700 strm_li(curr_strm) ? strm_li(curr_strm)->rx.proto->name : "?");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003701
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003702 conn = objt_conn(strm_orig(curr_strm));
3703 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
3704 case AF_INET:
3705 case AF_INET6:
William Lallemand4c5b4d52016-11-21 08:51:11 +01003706 chunk_appendf(&trash,
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003707 " src=%s:%d fe=%s be=%s srv=%s",
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003708 HA_ANON_CLI(pn),
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003709 get_host_port(conn->src),
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003710 HA_ANON_CLI(strm_fe(curr_strm)->id),
3711 (curr_strm->be->cap & PR_CAP_BE) ? HA_ANON_CLI(curr_strm->be->id) : "<NONE>",
3712 objt_server(curr_strm->target) ? HA_ANON_CLI(__objt_server(curr_strm->target)->id) : "<none>"
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003713 );
3714 break;
3715 case AF_UNIX:
William Lallemand4c5b4d52016-11-21 08:51:11 +01003716 chunk_appendf(&trash,
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003717 " src=unix:%d fe=%s be=%s srv=%s",
3718 strm_li(curr_strm)->luid,
Erwan Le Goas57e35f42022-09-14 17:45:41 +02003719 HA_ANON_CLI(strm_fe(curr_strm)->id),
3720 (curr_strm->be->cap & PR_CAP_BE) ? HA_ANON_CLI(curr_strm->be->id) : "<NONE>",
3721 objt_server(curr_strm->target) ? HA_ANON_CLI(__objt_server(curr_strm->target)->id) : "<none>"
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003722 );
3723 break;
3724 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003725
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003726 chunk_appendf(&trash,
3727 " ts=%02x epoch=%#x age=%s calls=%u rate=%u cpu=%llu lat=%llu",
3728 curr_strm->task->state, curr_strm->stream_epoch,
Willy Tarreau18420142023-11-17 18:51:26 +01003729 human_time(ns_to_sec(now_ns) - ns_to_sec(curr_strm->logs.request_ts), 1),
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003730 curr_strm->task->calls, read_freq_ctr(&curr_strm->call_rate),
Willy Tarreau6a28a302022-09-07 09:17:45 +02003731 (unsigned long long)curr_strm->cpu_time, (unsigned long long)curr_strm->lat_time);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003732
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003733 chunk_appendf(&trash,
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003734 " rq[f=%06xh,i=%u,an=%02xh",
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003735 curr_strm->req.flags,
3736 (unsigned int)ci_data(&curr_strm->req),
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003737 curr_strm->req.analysers);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003738
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003739 chunk_appendf(&trash,
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003740 ",ax=%s]",
3741 curr_strm->req.analyse_exp ?
3742 human_time(TICKS_TO_MS(curr_strm->req.analyse_exp - now_ms),
3743 TICKS_TO_MS(1000)) : "");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003744
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003745 chunk_appendf(&trash,
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003746 " rp[f=%06xh,i=%u,an=%02xh",
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003747 curr_strm->res.flags,
3748 (unsigned int)ci_data(&curr_strm->res),
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003749 curr_strm->res.analysers);
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003750 chunk_appendf(&trash,
3751 ",ax=%s]",
3752 curr_strm->res.analyse_exp ?
3753 human_time(TICKS_TO_MS(curr_strm->res.analyse_exp - now_ms),
3754 TICKS_TO_MS(1000)) : "");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003755
Willy Tarreaufd9417b2022-05-18 16:23:22 +02003756 conn = sc_conn(curr_strm->scf);
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003757 chunk_appendf(&trash," scf=[%d,%1xh,fd=%d",
3758 curr_strm->scf->state, curr_strm->scf->flags, conn_fd(conn));
3759 chunk_appendf(&trash, ",rex=%s",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003760 sc_ep_rcv_ex(curr_strm->scf) ?
3761 human_time(TICKS_TO_MS(sc_ep_rcv_ex(curr_strm->scf) - now_ms),
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003762 TICKS_TO_MS(1000)) : "");
3763 chunk_appendf(&trash,",wex=%s]",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003764 sc_ep_snd_ex(curr_strm->scf) ?
3765 human_time(TICKS_TO_MS(sc_ep_snd_ex(curr_strm->scf) - now_ms),
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003766 TICKS_TO_MS(1000)) : "");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003767
Willy Tarreaufd9417b2022-05-18 16:23:22 +02003768 conn = sc_conn(curr_strm->scb);
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003769 chunk_appendf(&trash, " scb=[%d,%1xh,fd=%d",
3770 curr_strm->scb->state, curr_strm->scb->flags, conn_fd(conn));
3771 chunk_appendf(&trash, ",rex=%s",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003772 sc_ep_rcv_ex(curr_strm->scb) ?
3773 human_time(TICKS_TO_MS(sc_ep_rcv_ex(curr_strm->scb) - now_ms),
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003774 TICKS_TO_MS(1000)) : "");
3775 chunk_appendf(&trash, ",wex=%s]",
Christopher Faulet03d5e622023-02-22 14:43:22 +01003776 sc_ep_snd_ex(curr_strm->scb) ?
3777 human_time(TICKS_TO_MS(sc_ep_snd_ex(curr_strm->scb) - now_ms),
Christopher Fauletf8413cb2023-02-07 16:06:14 +01003778 TICKS_TO_MS(1000)) : "");
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003779
3780 chunk_appendf(&trash,
3781 " exp=%s rc=%d c_exp=%s",
3782 curr_strm->task->expire ?
3783 human_time(TICKS_TO_MS(curr_strm->task->expire - now_ms),
3784 TICKS_TO_MS(1000)) : "",
3785 curr_strm->conn_retries,
3786 curr_strm->conn_exp ?
3787 human_time(TICKS_TO_MS(curr_strm->conn_exp - now_ms),
3788 TICKS_TO_MS(1000)) : "");
3789 if (task_in_rq(curr_strm->task))
3790 chunk_appendf(&trash, " run(nice=%d)", curr_strm->task->nice);
3791
3792 chunk_appendf(&trash, "\n");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003793
Willy Tarreaud0a06d52022-05-18 15:07:19 +02003794 if (applet_putchk(appctx, &trash) == -1) {
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003795 /* let's try again later from this stream. We add ourselves into
3796 * this stream's users so that it can remove us upon termination.
3797 */
3798 LIST_APPEND(&curr_strm->back_refs, &ctx->bref.users);
3799 goto full;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003800 }
3801
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003802 next_sess:
3803 ctx->bref.ref = curr_strm->list.n;
3804 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003805
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003806 if (ctx->target && ctx->target != (void *)-1) {
3807 /* specified stream not found */
3808 if (ctx->section > 0)
3809 chunk_appendf(&trash, " *** session terminated while we were watching it ***\n");
3810 else
3811 chunk_appendf(&trash, "Session not found.\n");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003812
Willy Tarreaud0a06d52022-05-18 15:07:19 +02003813 if (applet_putchk(appctx, &trash) == -1)
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003814 goto full;
3815
3816 ctx->target = NULL;
3817 ctx->uid = 0;
3818 goto done;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003819 }
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003820
Willy Tarreaue6e52362019-01-04 17:42:57 +01003821 done:
3822 thread_release();
3823 return 1;
3824 full:
3825 thread_release();
Willy Tarreaue6e52362019-01-04 17:42:57 +01003826 return 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003827}
3828
3829static void cli_release_show_sess(struct appctx *appctx)
3830{
Willy Tarreau39f097d2022-05-03 10:49:00 +02003831 struct show_sess_ctx *ctx = appctx->svcctx;
3832
Willy Tarreaubb4e2892022-05-03 11:10:19 +02003833 if (ctx->thr < global.nbthread) {
Willy Tarreau49de6852021-02-24 13:46:12 +01003834 /* a dump was aborted, either in error or timeout. We need to
3835 * safely detach from the target stream's list. It's mandatory
3836 * to lock because a stream on the target thread could be moving
3837 * our node.
3838 */
3839 thread_isolate();
Willy Tarreau39f097d2022-05-03 10:49:00 +02003840 if (!LIST_ISEMPTY(&ctx->bref.users))
3841 LIST_DELETE(&ctx->bref.users);
Willy Tarreau49de6852021-02-24 13:46:12 +01003842 thread_release();
William Lallemand4c5b4d52016-11-21 08:51:11 +01003843 }
3844}
3845
Willy Tarreau61b65212016-11-24 11:09:25 +01003846/* Parses the "shutdown session" directive; it always returns 1 */
Aurélien Nephtaliabbf6072018-04-18 13:26:46 +02003847static int cli_parse_shutdown_session(char **args, char *payload, struct appctx *appctx, void *private)
Willy Tarreau61b65212016-11-24 11:09:25 +01003848{
3849 struct stream *strm, *ptr;
Willy Tarreaua698eb62021-02-24 10:37:01 +01003850 int thr;
Willy Tarreau61b65212016-11-24 11:09:25 +01003851
3852 if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
3853 return 1;
3854
Willy Tarreauc40c4072022-03-31 14:49:45 +02003855 ptr = (void *)strtoul(args[2], NULL, 0);
3856 if (!ptr)
Willy Tarreau9d008692019-08-09 11:21:01 +02003857 return cli_err(appctx, "Session pointer expected (use 'show sess').\n");
Willy Tarreau61b65212016-11-24 11:09:25 +01003858
Willy Tarreaua698eb62021-02-24 10:37:01 +01003859 strm = NULL;
Willy Tarreau61b65212016-11-24 11:09:25 +01003860
Willy Tarreau3f5dd292021-02-24 11:11:06 +01003861 thread_isolate();
3862
Willy Tarreau61b65212016-11-24 11:09:25 +01003863 /* first, look for the requested stream in the stream table */
Willy Tarreauc40c4072022-03-31 14:49:45 +02003864 for (thr = 0; strm != ptr && thr < global.nbthread; thr++) {
Willy Tarreaub4e34762021-09-30 19:02:18 +02003865 list_for_each_entry(strm, &ha_thread_ctx[thr].streams, list) {
Willy Tarreaua698eb62021-02-24 10:37:01 +01003866 if (strm == ptr) {
3867 stream_shutdown(strm, SF_ERR_KILLED);
3868 break;
3869 }
Willy Tarreau3f5dd292021-02-24 11:11:06 +01003870 }
Willy Tarreau61b65212016-11-24 11:09:25 +01003871 }
3872
Willy Tarreau3f5dd292021-02-24 11:11:06 +01003873 thread_release();
3874
Willy Tarreau61b65212016-11-24 11:09:25 +01003875 /* do we have the stream ? */
Willy Tarreauc40c4072022-03-31 14:49:45 +02003876 if (strm != ptr)
Willy Tarreau9d008692019-08-09 11:21:01 +02003877 return cli_err(appctx, "No such session (use 'show sess').\n");
Willy Tarreau61b65212016-11-24 11:09:25 +01003878
Willy Tarreau61b65212016-11-24 11:09:25 +01003879 return 1;
3880}
3881
/* Parses the "shutdown sessions server" directive; it always returns 1 */
static int cli_parse_shutdown_sessions_server(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct server *sv;

	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	sv = cli_find_server(appctx, args[3]);
	if (!sv)
		return 1;

	/* kill all the streams that are on this server */
	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
	srv_shutdown_streams(sv, SF_ERR_KILLED);
	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	return 1;
}

/* register cli keywords */
static struct cli_kw_list cli_kws = {{ },{
	{ { "show", "sess", NULL },             "show sess [id]                       : report the list of current sessions or dump this exact session", cli_parse_show_sess, cli_io_handler_dump_sess, cli_release_show_sess },
	{ { "shutdown", "session", NULL },      "shutdown session [id]                : kill a specific session",                                         cli_parse_shutdown_session, NULL, NULL },
	{ { "shutdown", "sessions", "server" }, "shutdown sessions server <bk>/<srv>  : kill sessions on a server",                                       cli_parse_shutdown_sessions_server, NULL, NULL },
	{{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);

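/* Example of use over the stats socket (hypothetical socket path and stream
 * pointer, shown for illustration only):
 *   echo "show sess" | socat stdio /var/run/haproxy.sock
 *   echo "shutdown session 0x55d2a8c0b200" | socat stdio /var/run/haproxy.sock
 * The pointer passed to "shutdown session" is the one reported by "show sess".
 */
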
/* "tcp-request content" action keyword registration. */
static struct action_kw_list stream_tcp_req_keywords = { ILH, {
	{ "set-log-level", stream_parse_set_log_level },
	{ "set-nice",      stream_parse_set_nice },
	{ "switch-mode",   stream_parse_switch_mode },
	{ "use-service",   stream_parse_use_service },
	{ /* END */ }
}};

INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &stream_tcp_req_keywords);

/* "tcp-response content" action keyword registration. */
static struct action_kw_list stream_tcp_res_keywords = { ILH, {
	{ "set-log-level", stream_parse_set_log_level },
	{ "set-nice",      stream_parse_set_nice },
	{ /* END */ }
}};

INITCALL1(STG_REGISTER, tcp_res_cont_keywords_register, &stream_tcp_res_keywords);

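/* "http-request" action keyword registration. */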
static struct action_kw_list stream_http_req_keywords = { ILH, {
	{ "set-log-level", stream_parse_set_log_level },
	{ "set-nice",      stream_parse_set_nice },
	{ "use-service",   stream_parse_use_service },
	{ /* END */ }
}};

INITCALL1(STG_REGISTER, http_req_keywords_register, &stream_http_req_keywords);

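/* "http-response" action keyword registration. */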
static struct action_kw_list stream_http_res_keywords = { ILH, {
	{ "set-log-level", stream_parse_set_log_level },
	{ "set-nice",      stream_parse_set_nice },
	{ /* END */ }
}};

INITCALL1(STG_REGISTER, http_res_keywords_register, &stream_http_res_keywords);

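/* "http-after-response" action keyword registration. */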
static struct action_kw_list stream_http_after_res_actions = { ILH, {
	{ "set-log-level", stream_parse_set_log_level },
	{ /* END */ }
}};

INITCALL1(STG_REGISTER, http_after_res_keywords_register, &stream_http_after_res_actions);

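/* Returns the server timeout currently applied to the stream
 * ("cur_server_timeout"), i.e. the backend stream connector's I/O timeout
 * converted from internal ticks to milliseconds.
 */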
static int smp_fetch_cur_server_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
{
	smp->flags = SMP_F_VOL_TXN;
	smp->data.type = SMP_T_SINT;
	if (!smp->strm)
		return 0;

	smp->data.u.sint = TICKS_TO_MS(smp->strm->scb->ioto);
	return 1;
}

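/* Returns the tunnel timeout configured on the stream ("cur_tunnel_timeout"),
 * converted from internal ticks to milliseconds.
 */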
static int smp_fetch_cur_tunnel_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
{
	smp->flags = SMP_F_VOL_TXN;
	smp->data.type = SMP_T_SINT;
	if (!smp->strm)
		return 0;

	smp->data.u.sint = TICKS_TO_MS(smp->strm->tunnel_timeout);
	return 1;
}

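/* Returns the name of the configuration file holding the last final rule that
 * was matched on the stream ("last_rule_file"). The string belongs to the rule
 * itself, hence the SMP_F_CONST flag.
 */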
static int smp_fetch_last_rule_file(const struct arg *args, struct sample *smp, const char *km, void *private)
{
	smp->flags = SMP_F_VOL_TXN;
	smp->data.type = SMP_T_STR;
	if (!smp->strm || !smp->strm->last_rule_file)
		return 0;

	smp->flags |= SMP_F_CONST;
	smp->data.u.str.area = (char *)smp->strm->last_rule_file;
	smp->data.u.str.data = strlen(smp->strm->last_rule_file);
	return 1;
}

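/* Returns the line, within its configuration file, of the last final rule that
 * was matched on the stream ("last_rule_line").
 */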
static int smp_fetch_last_rule_line(const struct arg *args, struct sample *smp, const char *km, void *private)
{
	smp->flags = SMP_F_VOL_TXN;
	smp->data.type = SMP_T_SINT;
	if (!smp->strm || !smp->strm->last_rule_line)
		return 0;

	smp->data.u.sint = smp->strm->last_rule_line;
	return 1;
}

/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted.
 */
static struct sample_fetch_kw_list smp_kws = {ILH, {
	{ "cur_server_timeout", smp_fetch_cur_server_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
	{ "cur_tunnel_timeout", smp_fetch_cur_tunnel_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
	{ "last_rule_file",     smp_fetch_last_rule_file,     0, NULL, SMP_T_STR,  SMP_USE_INTRN, },
	{ "last_rule_line",     smp_fetch_last_rule_line,     0, NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ NULL, NULL, 0, 0, 0 },
}};

INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);

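/* Example of use in a configuration, assuming an HTTP proxy section (shown for
 * illustration only; the header names are arbitrary):
 *   http-response set-header X-Server-Timeout %[cur_server_timeout]
 *   http-after-response set-header X-Last-Rule %[last_rule_file]:%[last_rule_line]
 */
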
/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */