/*
 * Stream management functions.
 *
 * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
12
13#include <stdlib.h>
Willy Tarreau81f9aa32010-06-01 17:45:26 +020014#include <unistd.h>
Willy Tarreaue3ba5f02006-06-29 18:54:54 +020015
Willy Tarreaua264d962020-06-04 22:29:18 +020016#include <import/ebistree.h>
17
Willy Tarreaudcc048a2020-06-04 19:11:43 +020018#include <haproxy/acl.h>
Willy Tarreau122eba92020-06-04 10:15:32 +020019#include <haproxy/action.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020020#include <haproxy/activity.h>
21#include <haproxy/api.h>
Willy Tarreau3f0f82e2020-06-04 19:42:41 +020022#include <haproxy/applet.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020023#include <haproxy/arg.h>
Willy Tarreau49801602020-06-04 22:50:02 +020024#include <haproxy/backend.h>
Willy Tarreau278161c2020-06-04 11:18:28 +020025#include <haproxy/capture.h>
Willy Tarreau6be78492020-06-05 00:00:29 +020026#include <haproxy/cfgparse.h>
Willy Tarreauf1d32c42020-06-04 21:07:02 +020027#include <haproxy/channel.h>
Willy Tarreau4aa573d2020-06-04 18:21:56 +020028#include <haproxy/check.h>
Willy Tarreau83487a82020-06-04 20:19:54 +020029#include <haproxy/cli.h>
Willy Tarreau7ea393d2020-06-04 18:02:10 +020030#include <haproxy/connection.h>
Christopher Faulet908628c2022-03-25 16:43:49 +010031#include <haproxy/conn_stream.h>
32#include <haproxy/cs_utils.h>
Willy Tarreau3afc4c42020-06-03 18:23:19 +020033#include <haproxy/dict.h>
Willy Tarreau2741c8c2020-06-02 11:28:02 +020034#include <haproxy/dynbuf.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020035#include <haproxy/fd.h>
Willy Tarreauc7babd82020-06-04 21:29:29 +020036#include <haproxy/filters.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020037#include <haproxy/freq_ctr.h>
Willy Tarreau762d7a52020-06-04 11:23:07 +020038#include <haproxy/frontend.h>
Willy Tarreauf268ee82020-06-04 17:05:57 +020039#include <haproxy/global.h>
Willy Tarreau86416052020-06-04 09:20:54 +020040#include <haproxy/hlua.h>
Willy Tarreauc2b1ff02020-06-04 21:21:03 +020041#include <haproxy/http_ana.h>
Willy Tarreauc761f842020-06-04 11:40:28 +020042#include <haproxy/http_rules.h>
Willy Tarreau16f958c2020-06-03 08:44:35 +020043#include <haproxy/htx.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020044#include <haproxy/istbuf.h>
Willy Tarreauaeed4a82020-06-04 22:01:04 +020045#include <haproxy/log.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020046#include <haproxy/pipe.h>
Willy Tarreaud0ef4392020-06-02 09:38:52 +020047#include <haproxy/pool.h>
Willy Tarreaua264d962020-06-04 22:29:18 +020048#include <haproxy/proxy.h>
Willy Tarreaua55c4542020-06-04 22:59:39 +020049#include <haproxy/queue.h>
Willy Tarreau1e56f922020-06-04 23:20:13 +020050#include <haproxy/server.h>
Emeric Brunc9437992021-02-12 19:42:55 +010051#include <haproxy/resolvers.h>
Amaury Denoyelle12bada52020-12-10 13:43:57 +010052#include <haproxy/sample.h>
Willy Tarreau48d25b32020-06-04 18:58:52 +020053#include <haproxy/session.h>
Willy Tarreau2eec9b52020-06-04 19:58:55 +020054#include <haproxy/stats-t.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020055#include <haproxy/stick_table.h>
Willy Tarreaudfd3de82020-06-04 23:46:14 +020056#include <haproxy/stream.h>
Willy Tarreaucea0e1b2020-06-04 17:25:40 +020057#include <haproxy/task.h>
Willy Tarreau8b550af2020-06-04 17:42:48 +020058#include <haproxy/tcp_rules.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020059#include <haproxy/thread.h>
Willy Tarreaudfd3de82020-06-04 23:46:14 +020060#include <haproxy/trace.h>
Willy Tarreaua1718922020-06-04 16:25:31 +020061#include <haproxy/vars.h>
Willy Tarreaubaaee002006-06-26 02:48:02 +020062
Willy Tarreaubaaee002006-06-26 02:48:02 +020063
Willy Tarreau8ceae722018-11-26 11:58:30 +010064DECLARE_POOL(pool_head_stream, "stream", sizeof(struct stream));
Tim Duesterhus127a74d2020-02-28 15:13:33 +010065DECLARE_POOL(pool_head_uniqueid, "uniqueid", UNIQUEID_LEN);
Willy Tarreau8ceae722018-11-26 11:58:30 +010066
Willy Tarreaub9813182021-02-24 11:29:51 +010067/* incremented by each "show sess" to fix a delimiter between streams */
68unsigned stream_epoch = 0;
Willy Tarreaubaaee002006-06-26 02:48:02 +020069
Thierry FOURNIER5a363e72015-09-27 19:29:33 +020070/* List of all use-service keywords. */
71static struct list service_keywords = LIST_HEAD_INIT(service_keywords);
72
Willy Tarreau5790eb02017-08-28 17:18:36 +020073
Christopher Fauleteea8fc72019-11-05 16:18:10 +010074/* trace source and events */
75static void strm_trace(enum trace_level level, uint64_t mask,
76 const struct trace_source *src,
77 const struct ist where, const struct ist func,
78 const void *a1, const void *a2, const void *a3, const void *a4);
79
80/* The event representation is split like this :
81 * strm - stream
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +020082 * cs - conn-stream
Christopher Fauleteea8fc72019-11-05 16:18:10 +010083 * http - http analyzis
84 * tcp - tcp analyzis
85 *
86 * STRM_EV_* macros are defined in <proto/stream.h>
87 */
88static const struct trace_event strm_trace_events[] = {
89 { .mask = STRM_EV_STRM_NEW, .name = "strm_new", .desc = "new stream" },
90 { .mask = STRM_EV_STRM_FREE, .name = "strm_free", .desc = "release stream" },
91 { .mask = STRM_EV_STRM_ERR, .name = "strm_err", .desc = "error during stream processing" },
92 { .mask = STRM_EV_STRM_ANA, .name = "strm_ana", .desc = "stream analyzers" },
93 { .mask = STRM_EV_STRM_PROC, .name = "strm_proc", .desc = "stream processing" },
94
Christopher Faulet62e75742022-03-31 09:16:34 +020095 { .mask = STRM_EV_CS_ST, .name = "cs_state", .desc = "processing conn-stream states" },
Christopher Fauleteea8fc72019-11-05 16:18:10 +010096
97 { .mask = STRM_EV_HTTP_ANA, .name = "http_ana", .desc = "HTTP analyzers" },
98 { .mask = STRM_EV_HTTP_ERR, .name = "http_err", .desc = "error during HTTP analyzis" },
99
100 { .mask = STRM_EV_TCP_ANA, .name = "tcp_ana", .desc = "TCP analyzers" },
101 { .mask = STRM_EV_TCP_ERR, .name = "tcp_err", .desc = "error during TCP analyzis" },
Christopher Faulet50019132022-03-08 15:47:02 +0100102
103 { .mask = STRM_EV_FLT_ANA, .name = "flt_ana", .desc = "Filter analyzers" },
104 { .mask = STRM_EV_FLT_ERR, .name = "flt_err", .desc = "error during filter analyzis" },
Christopher Fauleteea8fc72019-11-05 16:18:10 +0100105 {}
106};
107
108static const struct name_desc strm_trace_lockon_args[4] = {
109 /* arg1 */ { /* already used by the stream */ },
110 /* arg2 */ { },
111 /* arg3 */ { },
112 /* arg4 */ { }
113};
114
115static const struct name_desc strm_trace_decoding[] = {
116#define STRM_VERB_CLEAN 1
117 { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
118#define STRM_VERB_MINIMAL 2
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +0200119 { .name="minimal", .desc="report info on stream and conn-streams" },
Christopher Fauleteea8fc72019-11-05 16:18:10 +0100120#define STRM_VERB_SIMPLE 3
121 { .name="simple", .desc="add info on request and response channels" },
122#define STRM_VERB_ADVANCED 4
123 { .name="advanced", .desc="add info on channel's buffer for data and developer levels only" },
124#define STRM_VERB_COMPLETE 5
125 { .name="complete", .desc="add info on channel's buffer" },
126 { /* end */ }
127};
128
129struct trace_source trace_strm = {
130 .name = IST("stream"),
131 .desc = "Applicative stream",
132 .arg_def = TRC_ARG1_STRM, // TRACE()'s first argument is always a stream
133 .default_cb = strm_trace,
134 .known_events = strm_trace_events,
135 .lockon_args = strm_trace_lockon_args,
136 .decoding = strm_trace_decoding,
137 .report_events = ~0, // report everything by default
138};
139
140#define TRACE_SOURCE &trace_strm
141INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
142
/* the stream traces always expect that arg1, if non-null, is a stream (from
 * which we can derive everything), that arg2, if non-null, is an http
 * transaction, and that arg3, if non-null, is an http message.
 */
static void strm_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
                       const struct ist where, const struct ist func,
                       const void *a1, const void *a2, const void *a3, const void *a4)
{
	const struct stream *s = a1;
	const struct http_txn *txn = a2;
	const struct http_msg *msg = a3;
	struct task *task;
	const struct channel *req, *res;
	struct htx *htx;

	if (!s || src->verbosity < STRM_VERB_CLEAN)
		return;

	task = s->task;
	req  = &s->req;
	res  = &s->res;
	htx  = (msg ? htxbuf(&msg->chn->buf) : NULL);

	/* General info about the stream (htx/tcp, id...) */
	chunk_appendf(&trace_buf, " : [%u,%s]",
		      s->uniq_id, ((s->flags & SF_HTX) ? "HTX" : "TCP"));
	if (isttest(s->unique_id)) {
		chunk_appendf(&trace_buf, " id=");
		b_putist(&trace_buf, s->unique_id);
	}

	/* Front and back conn-stream state */
	chunk_appendf(&trace_buf, " CS=(%s,%s)",
		      cs_state_str(s->csf->state), cs_state_str(s->csb->state));

	/* If txn is defined, HTTP req/rep states */
	if (txn)
		chunk_appendf(&trace_buf, " HTTP=(%s,%s)",
			      h1_msg_state_str(txn->req.msg_state), h1_msg_state_str(txn->rsp.msg_state));
	if (msg)
		chunk_appendf(&trace_buf, " %s", ((msg->chn->flags & CF_ISRESP) ? "RESPONSE" : "REQUEST"));

	if (src->verbosity == STRM_VERB_CLEAN)
		return;

	/* If msg defined, display status-line if possible (verbosity > MINIMAL) */
	if (src->verbosity > STRM_VERB_MINIMAL && htx && htx_nbblks(htx)) {
		const struct htx_blk *blk = htx_get_head_blk(htx);
		const struct htx_sl  *sl  = htx_get_blk_ptr(htx, blk);
		enum htx_blk_type    type = htx_get_blk_type(blk);

		if (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL)
			chunk_appendf(&trace_buf, " - \"%.*s %.*s %.*s\"",
				      HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
				      HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
				      HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
	}


	/* If txn defined info about HTTP msgs, otherwise info about SI. */
	if (txn) {
		chunk_appendf(&trace_buf, " - t=%p s=(%p,0x%08x,0x%x) txn.flags=0x%08x, http.flags=(0x%08x,0x%08x) status=%d",
			      task, s, s->flags, s->conn_err_type, txn->flags, txn->req.flags, txn->rsp.flags, txn->status);
	}
	else {
		chunk_appendf(&trace_buf, " - t=%p s=(%p,0x%08x,0x%x) csf=(%p,%d,0x%08x) csb=(%p,%d,0x%08x) retries=%d",
			      task, s, s->flags, s->conn_err_type,
			      s->csf, s->csf->state, s->csf->flags,
			      s->csb, s->csb->state, s->csb->flags,
			      s->conn_retries);
	}

	if (src->verbosity == STRM_VERB_MINIMAL)
		return;


	/* If txn defined, don't display all channel info */
	if (src->verbosity == STRM_VERB_SIMPLE || txn) {
		chunk_appendf(&trace_buf, " req=(%p .fl=0x%08x .exp(r,w,a)=(%u,%u,%u))",
			      req, req->flags, req->rex, req->wex, req->analyse_exp);
		chunk_appendf(&trace_buf, " res=(%p .fl=0x%08x .exp(r,w,a)=(%u,%u,%u))",
			      res, res->flags, res->rex, res->wex, res->analyse_exp);
	}
	else {
		chunk_appendf(&trace_buf, " req=(%p .fl=0x%08x .ana=0x%08x .exp(r,w,a)=(%u,%u,%u) .o=%lu .tot=%llu .to_fwd=%u)",
			      req, req->flags, req->analysers, req->rex, req->wex, req->analyse_exp,
			      (long)req->output, req->total, req->to_forward);
		chunk_appendf(&trace_buf, " res=(%p .fl=0x%08x .ana=0x%08x .exp(r,w,a)=(%u,%u,%u) .o=%lu .tot=%llu .to_fwd=%u)",
			      res, res->flags, res->analysers, res->rex, res->wex, res->analyse_exp,
			      (long)res->output, res->total, res->to_forward);
	}

	if (src->verbosity == STRM_VERB_SIMPLE ||
	    (src->verbosity == STRM_VERB_ADVANCED && src->level < TRACE_LEVEL_DATA))
		return;

	/* channels' buffer info */
	if (s->flags & SF_HTX) {
		struct htx *rqhtx = htxbuf(&req->buf);
		struct htx *rphtx = htxbuf(&res->buf);

		chunk_appendf(&trace_buf, " htx=(%u/%u#%u, %u/%u#%u)",
			      rqhtx->data, rqhtx->size, htx_nbblks(rqhtx),
			      rphtx->data, rphtx->size, htx_nbblks(rphtx));
	}
	else {
		chunk_appendf(&trace_buf, " buf=(%u@%p+%u/%u, %u@%p+%u/%u)",
			      (unsigned int)b_data(&req->buf), b_orig(&req->buf),
			      (unsigned int)b_head_ofs(&req->buf), (unsigned int)b_size(&req->buf),
			      (unsigned int)b_data(&res->buf), b_orig(&res->buf),
			      (unsigned int)b_head_ofs(&res->buf), (unsigned int)b_size(&res->buf));
	}

	/* If msg defined, display htx info if defined (level > USER) */
	if (src->level > TRACE_LEVEL_USER && htx && htx_nbblks(htx)) {
		int full = 0;

		/* Full htx info (level > STATE && verbosity > SIMPLE) */
		if (src->level > TRACE_LEVEL_STATE) {
			if (src->verbosity == STRM_VERB_COMPLETE)
				full = 1;
		}

		chunk_memcat(&trace_buf, "\n\t", 2);
		htx_dump(&trace_buf, htx, full);
	}
}

/* Upgrade an existing stream for conn-stream <cs>. Returns < 0 on error. This
 * is only valid right after a TCP to H1 upgrade. The stream should be
 * "reactivated" by removing the SF_IGNORE flag, and the right mode must be
 * set. On success, the <input> buffer is transferred to the stream and thus
 * points to BUF_NULL. On error, it is unchanged and it is the caller's
 * responsibility to release it (this never happens for now).
 */
int stream_upgrade_from_cs(struct conn_stream *cs, struct buffer *input)
{
	struct stream *s = __cs_strm(cs);
	const struct mux_ops *mux = cs_conn_mux(cs);

	if (mux) {
		if (mux->flags & MX_FL_HTX)
			s->flags |= SF_HTX;
	}

	if (!b_is_null(input)) {
		/* Xfer the input buffer to the request channel. <input> will
		 * then point to BUF_NULL. From this point, it is the stream's
		 * responsibility to release it.
		 */
		s->req.buf = *input;
		*input = BUF_NULL;
		s->req.total = (IS_HTX_STRM(s) ? htxbuf(&s->req.buf)->data : b_data(&s->req.buf));
		s->req.flags |= (s->req.total ? CF_READ_PARTIAL : 0);
	}

	s->flags &= ~SF_IGNORE;

	task_wakeup(s->task, TASK_WOKEN_INIT);
	return 0;
}
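
/* Illustrative sketch (not an excerpt from a real caller): a mux that has just
 * performed a destructive TCP to H1 upgrade is expected to hand its pending
 * input over roughly like this; <h1c> and its fields are hypothetical names
 * used only for the example:
 *
 *	// the stream already exists in TCP mode with SF_IGNORE set
 *	if (stream_upgrade_from_cs(h1c->cs, &h1c->ibuf) < 0)
 *		goto fail;
 *	// on success <h1c->ibuf> is BUF_NULL and the stream owns the data
 */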

/* Callback used to wake up a stream when an input buffer is available. The
 * stream <s>'s conn-streams are checked for a failed buffer allocation
 * as indicated by the presence of the CS_EP_RXBLK_ROOM flag and the lack of a
 * buffer, and an input buffer is assigned there (at most one). The function
 * returns 1 and wakes the stream up if a buffer was taken, otherwise zero.
 * It's designed to be called from __offer_buffer().
 */
int stream_buf_available(void *arg)
{
	struct stream *s = arg;

	if (!s->req.buf.size && !s->req.pipe && (s->csf->endp->flags & CS_EP_RXBLK_BUFF) &&
	    b_alloc(&s->req.buf))
		cs_rx_buff_rdy(s->csf);
	else if (!s->res.buf.size && !s->res.pipe && (s->csb->endp->flags & CS_EP_RXBLK_BUFF) &&
		 b_alloc(&s->res.buf))
		cs_rx_buff_rdy(s->csb);
	else
		return 0;

	task_wakeup(s->task, TASK_WOKEN_RES);
	return 1;

}
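
/* Note: this callback is not meant to be called directly. It is registered in
 * stream_new() as the stream's buffer-wait wakeup handler, i.e.:
 *
 *	s->buffer_wait.target    = s;
 *	s->buffer_wait.wakeup_cb = stream_buf_available;
 *
 * so that __offer_buffer() can invoke it once buffers become available again.
 */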

/* This function is called from the session handler which detects the end of
 * handshake, in order to complete the initialization of a valid stream. It
 * must be called with a completely initialized session. It returns a pointer
 * to the newly created stream, or NULL in case of fatal error. The client-facing
 * end point is assigned to <origin>, which must be valid. The stream's task
 * is configured with a nice value inherited from the listener's nice if any.
 * The task's context is set to the new stream, and its function is set to
 * process_stream(). Target and analysers are null. <input> is used as input
 * buffer for the request channel and may contain data. On success, it is
 * transferred to the stream and <input> is set to BUF_NULL. On error, the
 * <input> buffer is unchanged and it is the caller's responsibility to
 * release it.
 */
struct stream *stream_new(struct session *sess, struct conn_stream *cs, struct buffer *input)
{
	struct stream *s;
	struct task *t;

	DBG_TRACE_ENTER(STRM_EV_STRM_NEW);
	if (unlikely((s = pool_alloc(pool_head_stream)) == NULL))
		goto out_fail_alloc;

	/* minimum stream initialization required for an embryonic stream is
	 * fairly low. We need very little to execute L4 ACLs, then we need a
	 * task to make the client-side connection live on its own.
	 *  - flags
	 *  - stick-entry tracking
	 */
	s->flags = 0;
	s->logs.logwait = sess->fe->to_log;
	s->logs.level = 0;
	tv_zero(&s->logs.tv_request);
	s->logs.t_queue = -1;
	s->logs.t_connect = -1;
	s->logs.t_data = -1;
	s->logs.t_close = 0;
	s->logs.bytes_in = s->logs.bytes_out = 0;
	s->logs.prx_queue_pos = 0;  /* we get the number of pending conns before us */
	s->logs.srv_queue_pos = 0;  /* we will get this number soon */
	s->obj_type = OBJ_TYPE_STREAM;

	s->logs.accept_date = sess->accept_date;
	s->logs.tv_accept = sess->tv_accept;
	s->logs.t_handshake = sess->t_handshake;
	s->logs.t_idle = sess->t_idle;

	/* default logging function */
	s->do_log = strm_log;

	/* default error reporting function, may be changed by analysers */
	s->srv_error = default_srv_error;

	/* Initialise the current rule list pointer to NULL. We are sure that
	 * any rulelist matches the NULL pointer.
	 */
	s->current_rule_list = NULL;
	s->current_rule = NULL;
	s->rules_exp = TICK_ETERNITY;
	s->last_rule_file = NULL;
	s->last_rule_line = 0;

	/* Copy SC counters for the stream. We don't touch refcounts because
	 * any reference we have is inherited from the session. Since the stream
	 * doesn't exist without the session, the session's existence guarantees
	 * we don't lose the entry. During the store operation, the stream won't
	 * touch these ones.
	 */
	memcpy(s->stkctr, sess->stkctr, sizeof(s->stkctr));

	s->sess = sess;

	s->stream_epoch = _HA_ATOMIC_LOAD(&stream_epoch);
	s->uniq_id = _HA_ATOMIC_FETCH_ADD(&global.req_count, 1);

	/* OK, we're keeping the stream, so let's properly initialize the stream */
	LIST_INIT(&s->back_refs);

	LIST_INIT(&s->buffer_wait.list);
	s->buffer_wait.target = s;
	s->buffer_wait.wakeup_cb = stream_buf_available;

	s->call_rate.curr_tick = s->call_rate.curr_ctr = s->call_rate.prev_ctr = 0;
	s->pcli_next_pid = 0;
	s->pcli_flags = 0;
	s->unique_id = IST_NULL;

	if ((t = task_new_here()) == NULL)
		goto out_fail_alloc;

	s->task = t;
	s->pending_events = 0;
	s->conn_retries = 0;
	s->conn_exp = TICK_ETERNITY;
	s->conn_err_type = STRM_ET_NONE;
	s->prev_conn_state = CS_ST_INI;
	t->process = process_stream;
	t->context = s;
	t->expire = TICK_ETERNITY;
	if (sess->listener)
		t->nice = sess->listener->nice;

	/* Note: initially, the stream's backend points to the frontend.
	 * This changes later when switching rules are executed or
	 * when the default backend is assigned.
	 */
	s->be  = sess->fe;
	s->req_cap = NULL;
	s->res_cap = NULL;

	/* Initialise all the variables contexts even if not used.
	 * This permits pruning these contexts without errors.
	 */
	vars_init_head(&s->vars_txn,    SCOPE_TXN);
	vars_init_head(&s->vars_reqres, SCOPE_REQ);

	/* Set SF_HTX flag for HTTP frontends. */
	if (sess->fe->mode == PR_MODE_HTTP)
		s->flags |= SF_HTX;

	s->csf = cs;
	if (cs_attach_strm(s->csf, s) < 0)
		goto out_fail_attach_csf;

	s->csb = cs_new_from_strm(s, CS_FL_ISBACK);
	if (!s->csb)
		goto out_fail_alloc_csb;

	cs_set_state(s->csf, CS_ST_EST);
	s->csf->hcto = sess->fe->timeout.clientfin;

	if (likely(sess->fe->options2 & PR_O2_INDEPSTR))
		s->csf->flags |= CS_FL_INDEP_STR;

	s->csb->hcto = TICK_ETERNITY;
	if (likely(sess->fe->options2 & PR_O2_INDEPSTR))
		s->csb->flags |= CS_FL_INDEP_STR;

	if (cs->endp->flags & CS_EP_WEBSOCKET)
		s->flags |= SF_WEBSOCKET;
	if (cs_conn(cs)) {
		const struct mux_ops *mux = cs_conn_mux(cs);

		if (mux && mux->flags & MX_FL_HTX)
			s->flags |= SF_HTX;
	}

	stream_init_srv_conn(s);
	s->target = sess->listener ? sess->listener->default_target : NULL;

	s->pend_pos = NULL;
	s->priority_class = 0;
	s->priority_offset = 0;

	/* init store persistence */
	s->store_count = 0;

	channel_init(&s->req);
	s->req.flags |= CF_READ_ATTACHED; /* the producer is already connected */
	s->req.analysers = sess->listener ? sess->listener->analysers : sess->fe->fe_req_ana;

	if (IS_HTX_STRM(s)) {
		/* Be sure to have HTTP analysers because in case of
		 * "destructive" stream upgrade, they may be missing (e.g
		 * TCP>H2)
		 */
		s->req.analysers |= AN_REQ_WAIT_HTTP|AN_REQ_HTTP_PROCESS_FE;
	}

	if (!sess->fe->fe_req_ana) {
		channel_auto_connect(&s->req);  /* don't wait to establish connection */
		channel_auto_close(&s->req);    /* let the producer forward close requests */
	}

	s->req.rto = sess->fe->timeout.client;
	s->req.wto = TICK_ETERNITY;
	s->req.rex = TICK_ETERNITY;
	s->req.wex = TICK_ETERNITY;
	s->req.analyse_exp = TICK_ETERNITY;

	channel_init(&s->res);
	s->res.flags |= CF_ISRESP;
	s->res.analysers = 0;

	if (sess->fe->options2 & PR_O2_NODELAY) {
		s->req.flags |= CF_NEVER_WAIT;
		s->res.flags |= CF_NEVER_WAIT;
	}

	s->res.wto = sess->fe->timeout.client;
	s->res.rto = TICK_ETERNITY;
	s->res.rex = TICK_ETERNITY;
	s->res.wex = TICK_ETERNITY;
	s->res.analyse_exp = TICK_ETERNITY;

	s->txn = NULL;
	s->hlua = NULL;

	s->resolv_ctx.requester = NULL;
	s->resolv_ctx.hostname_dn = NULL;
	s->resolv_ctx.hostname_dn_len = 0;
	s->resolv_ctx.parent = NULL;

	s->tunnel_timeout = TICK_ETERNITY;

	LIST_APPEND(&th_ctx->streams, &s->list);

	if (flt_stream_init(s) < 0 || flt_stream_start(s) < 0)
		goto out_fail_accept;

	/* finish initialization of the accepted file descriptor */
	if (cs_appctx(cs))
		cs_want_get(s->csf);

	if (sess->fe->accept && sess->fe->accept(s) < 0)
		goto out_fail_accept;

	if (!b_is_null(input)) {
		/* Xfer the input buffer to the request channel. <input> will
		 * then point to BUF_NULL. From this point, it is the stream's
		 * responsibility to release it.
		 */
		s->req.buf = *input;
		*input = BUF_NULL;
		s->req.total = (IS_HTX_STRM(s) ? htxbuf(&s->req.buf)->data : b_data(&s->req.buf));
		s->req.flags |= (s->req.total ? CF_READ_PARTIAL : 0);
	}

	/* it is important not to call the wakeup function directly but to
	 * pass through task_wakeup(), because this one knows how to apply
	 * priorities to tasks. When using multiple threads we must be sure
	 * that the stream is fully initialized before calling task_wakeup,
	 * so the caller must handle the task_wakeup.
	 */
	DBG_TRACE_LEAVE(STRM_EV_STRM_NEW, s);
	task_wakeup(s->task, TASK_WOKEN_INIT);
	return s;

	/* Error unrolling */
 out_fail_accept:
	flt_stream_release(s, 0);
	LIST_DELETE(&s->list);
 out_fail_attach_csf:
	cs_free(s->csb);
 out_fail_alloc_csb:
	task_destroy(t);
 out_fail_alloc:
	pool_free(pool_head_stream, s);
	DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_NEW|STRM_EV_STRM_ERR);
	return NULL;
}

/*
 * frees the context associated to a stream. It must have been removed first.
 */
static void stream_free(struct stream *s)
{
	struct session *sess = strm_sess(s);
	struct proxy *fe = sess->fe;
	struct bref *bref, *back;
	int must_free_sess;
	int i;

	DBG_TRACE_POINT(STRM_EV_STRM_FREE, s);

	/* detach the stream from its own task before even releasing it so
	 * that walking over a task list never exhibits a dying stream.
	 */
	s->task->context = NULL;
	__ha_barrier_store();

	pendconn_free(s);

	if (objt_server(s->target)) { /* there may be requests left pending in queue */
		if (s->flags & SF_CURR_SESS) {
			s->flags &= ~SF_CURR_SESS;
			_HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
		}
		if (may_dequeue_tasks(__objt_server(s->target), s->be))
			process_srv_queue(__objt_server(s->target));
	}

	if (unlikely(s->srv_conn)) {
		/* the stream still has a reserved slot on a server, but
		 * it should normally be only the same as the one above,
		 * so this should not happen in fact.
		 */
		sess_change_server(s, NULL);
	}

	if (s->req.pipe)
		put_pipe(s->req.pipe);

	if (s->res.pipe)
		put_pipe(s->res.pipe);

	/* We may still be present in the buffer wait queue */
	if (LIST_INLIST(&s->buffer_wait.list))
		LIST_DEL_INIT(&s->buffer_wait.list);

	if (s->req.buf.size || s->res.buf.size) {
		int count = !!s->req.buf.size + !!s->res.buf.size;

		b_free(&s->req.buf);
		b_free(&s->res.buf);
		offer_buffers(NULL, count);
	}

	pool_free(pool_head_uniqueid, s->unique_id.ptr);
	s->unique_id = IST_NULL;

	flt_stream_stop(s);
	flt_stream_release(s, 0);

	hlua_ctx_destroy(s->hlua);
	s->hlua = NULL;
	if (s->txn)
		http_destroy_txn(s);

	/* ensure the client-side transport layer is destroyed */
	/* Be sure it is useless !! */
	/* if (cli_cs) */
	/*	cs_close(cli_cs); */

	for (i = 0; i < s->store_count; i++) {
		if (!s->store[i].ts)
			continue;
		stksess_free(s->store[i].table, s->store[i].ts);
		s->store[i].ts = NULL;
	}

	if (s->resolv_ctx.requester) {
		__decl_thread(struct resolvers *resolvers = s->resolv_ctx.parent->arg.resolv.resolvers);

		HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
		ha_free(&s->resolv_ctx.hostname_dn);
		s->resolv_ctx.hostname_dn_len = 0;
		resolv_unlink_resolution(s->resolv_ctx.requester);
		HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);

		pool_free(resolv_requester_pool, s->resolv_ctx.requester);
		s->resolv_ctx.requester = NULL;
	}

	if (fe) {
		if (s->req_cap) {
			struct cap_hdr *h;
			for (h = fe->req_cap; h; h = h->next)
				pool_free(h->pool, s->req_cap[h->index]);
		}

		if (s->res_cap) {
			struct cap_hdr *h;
			for (h = fe->rsp_cap; h; h = h->next)
				pool_free(h->pool, s->res_cap[h->index]);
		}

		pool_free(fe->rsp_cap_pool, s->res_cap);
		pool_free(fe->req_cap_pool, s->req_cap);
	}

	/* Cleanup all variable contexts. */
	if (!LIST_ISEMPTY(&s->vars_txn.head))
		vars_prune(&s->vars_txn, s->sess, s);
	if (!LIST_ISEMPTY(&s->vars_reqres.head))
		vars_prune(&s->vars_reqres, s->sess, s);

	stream_store_counters(s);

	list_for_each_entry_safe(bref, back, &s->back_refs, users) {
		/* we have to unlink all watchers. We must not relink them if
		 * this stream was the last one in the list. This is safe to do
		 * here because we're touching our thread's list so we know
		 * that other streams are not active, and the watchers will
		 * only touch their node under thread isolation.
		 */
		LIST_DEL_INIT(&bref->users);
		if (s->list.n != &th_ctx->streams)
			LIST_APPEND(&LIST_ELEM(s->list.n, struct stream *, list)->back_refs, &bref->users);
		bref->ref = s->list.n;
		__ha_barrier_store();
	}
	LIST_DELETE(&s->list);

	/* applets do not release session yet */
	/* FIXME: Handle it in appctx_free ??? */
	must_free_sess = objt_appctx(sess->origin) && sess->origin == __cs_endp_target(s->csf);

	cs_destroy(s->csb);
	cs_destroy(s->csf);

	if (must_free_sess) {
		sess->origin = NULL;
		session_free(sess);
	}

	pool_free(pool_head_stream, s);

	/* We may want to free the maximum amount of pools if the proxy is stopping */
	if (fe && unlikely(fe->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
		pool_flush(pool_head_buffer);
		pool_flush(pool_head_http_txn);
		pool_flush(pool_head_requri);
		pool_flush(pool_head_capture);
		pool_flush(pool_head_stream);
		pool_flush(pool_head_session);
		pool_flush(pool_head_connection);
		pool_flush(pool_head_pendconn);
		pool_flush(fe->req_cap_pool);
		pool_flush(fe->rsp_cap_pool);
	}
}


/* Allocates a work buffer for stream <s>. It is meant to be called inside
 * process_stream(). It will only allocate the side needed for the function
 * to work fine, which is the response buffer so that an error message may be
 * built and returned. Response buffers may be allocated from the reserve, this
 * is critical to ensure that a response may always flow and will never block a
 * server from releasing a connection. Returns 0 in case of failure, non-zero
 * otherwise.
 */
static int stream_alloc_work_buffer(struct stream *s)
{
	if (b_alloc(&s->res.buf))
		return 1;
	return 0;
}

/* releases unused buffers after processing. Typically used at the end of the
 * update() functions. It will try to wake up as many tasks/applets as the
 * number of buffers that it releases. In practice, most often streams are
 * blocked on a single buffer, so it makes sense to try to wake two up when two
 * buffers are released at once.
 */
void stream_release_buffers(struct stream *s)
{
	int offer = 0;

	if (c_size(&s->req) && c_empty(&s->req)) {
		offer++;
		b_free(&s->req.buf);
	}
	if (c_size(&s->res) && c_empty(&s->res)) {
		offer++;
		b_free(&s->res.buf);
	}

	/* if we're certain to have at least 1 buffer available, and there is
	 * someone waiting, we can wake up a waiter and offer them.
	 */
	if (offer)
		offer_buffers(s, offer);
}
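
/* Note: this release path is the counterpart of stream_alloc_work_buffer()
 * above and of the buffer_wait registration made in stream_new(): emptied
 * channel buffers are returned to the pool, then offer_buffers() gives
 * waiting tasks a chance to grab one and be woken up again.
 */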

void stream_process_counters(struct stream *s)
{
	struct session *sess = s->sess;
	unsigned long long bytes;
	int i;

	bytes = s->req.total - s->logs.bytes_in;
	s->logs.bytes_in = s->req.total;
	if (bytes) {
		_HA_ATOMIC_ADD(&sess->fe->fe_counters.bytes_in, bytes);
		_HA_ATOMIC_ADD(&s->be->be_counters.bytes_in, bytes);

		if (objt_server(s->target))
			_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.bytes_in, bytes);

		if (sess->listener && sess->listener->counters)
			_HA_ATOMIC_ADD(&sess->listener->counters->bytes_in, bytes);

		for (i = 0; i < MAX_SESS_STKCTR; i++) {
			if (!stkctr_inc_bytes_in_ctr(&s->stkctr[i], bytes))
				stkctr_inc_bytes_in_ctr(&sess->stkctr[i], bytes);
		}
	}

	bytes = s->res.total - s->logs.bytes_out;
	s->logs.bytes_out = s->res.total;
	if (bytes) {
		_HA_ATOMIC_ADD(&sess->fe->fe_counters.bytes_out, bytes);
		_HA_ATOMIC_ADD(&s->be->be_counters.bytes_out, bytes);

		if (objt_server(s->target))
			_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.bytes_out, bytes);

		if (sess->listener && sess->listener->counters)
			_HA_ATOMIC_ADD(&sess->listener->counters->bytes_out, bytes);

		for (i = 0; i < MAX_SESS_STKCTR; i++) {
			if (!stkctr_inc_bytes_out_ctr(&s->stkctr[i], bytes))
				stkctr_inc_bytes_out_ctr(&sess->stkctr[i], bytes);
		}
	}
}

/*
 * Returns a message to the client; the connection is shut down for read,
 * and the request is cleared so that no server connection can be initiated.
 * The buffer is marked for read shutdown on the other side to protect the
 * message, and the buffer write is enabled. The message is contained in a
 * "chunk". If it is null, then an empty message is used. The reply buffer does
 * not need to be empty before this, and its contents will not be overwritten.
 * The primary goal of this function is to return error messages to a client.
 */
void stream_retnclose(struct stream *s, const struct buffer *msg)
{
	struct channel *ic = &s->req;
	struct channel *oc = &s->res;

	channel_auto_read(ic);
	channel_abort(ic);
	channel_auto_close(ic);
	channel_erase(ic);
	channel_truncate(oc);

	if (likely(msg && msg->data))
		co_inject(oc, msg->area, msg->data);

	oc->wex = tick_add_ifset(now_ms, oc->wto);
	channel_auto_read(oc);
	channel_auto_close(oc);
	channel_shutr_now(oc);
}
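
/* Illustrative usage (a sketch, not taken from an actual call site): an
 * analyser that wants to send a prebuilt reply and prevent any server
 * connection could do the following, assuming <err_msg> is a hypothetical
 * struct buffer holding the message:
 *
 *	stream_retnclose(s, err_msg);	// reply, then shut the response side down
 *	stream_retnclose(s, NULL);	// or, with no payload at all
 */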

int stream_set_timeout(struct stream *s, enum act_timeout_name name, int timeout)
{
	switch (name) {
	case ACT_TIMEOUT_SERVER:
		s->req.wto = timeout;
		s->res.rto = timeout;
		return 1;

	case ACT_TIMEOUT_TUNNEL:
		s->tunnel_timeout = timeout;
		return 1;

	default:
		return 0;
	}
}
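
/* Illustrative usage (a sketch, assuming the usual MS_TO_TICKS() conversion is
 * applied by the caller): a "set-timeout server 5s" action would conceptually
 * end up doing:
 *
 *	stream_set_timeout(s, ACT_TIMEOUT_SERVER, MS_TO_TICKS(5000));
 *
 * which refreshes both the request write timeout and the response read
 * timeout, as done above.
 */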

/*
 * This function handles the transition between the CS_ST_CON state and the
 * CS_ST_EST state. It must only be called after switching from CS_ST_CON (or
 * CS_ST_INI or CS_ST_RDY) to CS_ST_EST, but only when a ->proto is defined.
 * Note that it will switch the interface to CS_ST_DIS if we already have
 * the CF_SHUTR flag, which means we were able to forward the request and
 * receive the response before process_stream() had the opportunity to
 * make the switch from CS_ST_CON to CS_ST_EST. When that happens, we want
 * to go through back_establish() anyway, to make sure the analysers run.
 * Timeouts are cleared. Errors are reported on the channel so that analysers
 * can handle them.
 */
static void back_establish(struct stream *s)
{
	struct connection *conn = cs_conn(s->csb);
	struct channel *req = &s->req;
	struct channel *rep = &s->res;

	DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
	/* First, centralize the timers information, and clear any irrelevant
	 * timeout.
	 */
	s->logs.t_connect = tv_ms_elapsed(&s->logs.tv_accept, &now);
	s->conn_exp = TICK_ETERNITY;
	s->flags &= ~SF_CONN_EXP;

	/* errors faced after sending data need to be reported */
	if (s->csb->endp->flags & CS_EP_ERROR && req->flags & CF_WROTE_DATA) {
		/* Don't add CF_WRITE_ERROR if we're here because
		 * early data were rejected by the server, or
		 * http_wait_for_response() will never be called
		 * to send a 425.
		 */
		if (conn && conn->err_code != CO_ER_SSL_EARLY_FAILED)
			req->flags |= CF_WRITE_ERROR;
		rep->flags |= CF_READ_ERROR;
		s->conn_err_type = STRM_ET_DATA_ERR;
		DBG_TRACE_STATE("read/write error", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
	}

	if (objt_server(s->target))
		health_adjust(__objt_server(s->target), HANA_STATUS_L4_OK);

	if (!IS_HTX_STRM(s)) { /* let's allow immediate data connection in this case */
		/* if the user wants to log as soon as possible, without counting
		 * bytes from the server, then this is the right moment. */
		if (!LIST_ISEMPTY(&strm_fe(s)->logformat) && !(s->logs.logwait & LW_BYTES)) {
			/* note: no pend_pos here, session is established */
			s->logs.t_close = s->logs.t_connect; /* to get a valid end date */
			s->do_log(s);
		}
	}
	else {
		rep->flags |= CF_READ_DONTWAIT; /* a single read is enough to get response headers */
	}

	rep->analysers |= strm_fe(s)->fe_rsp_ana | s->be->be_rsp_ana;

	cs_rx_endp_more(s->csb);
	rep->flags |= CF_READ_ATTACHED; /* producer is now attached */
	if (conn) {
		/* real connections have timeouts
		 * if already defined, it means that a set-timeout rule has
		 * been executed so do not overwrite them
		 */
		if (!tick_isset(req->wto))
			req->wto = s->be->timeout.server;
		if (!tick_isset(rep->rto))
			rep->rto = s->be->timeout.server;
		if (!tick_isset(s->tunnel_timeout))
			s->tunnel_timeout = s->be->timeout.tunnel;

		/* The connection is now established, try to read data from the
		 * underlying layer, and subscribe to recv events. We use a
		 * delayed recv here to give a chance to the data to flow back
		 * by the time we process other tasks.
		 */
		cs_chk_rcv(s->csb);
	}
	req->wex = TICK_ETERNITY;
	/* If we managed to get the whole response, and we don't have anything
	 * left to send, or can't, switch to CS_ST_DIS now. */
	if (rep->flags & (CF_SHUTR | CF_SHUTW)) {
		s->csb->state = CS_ST_DIS;
		DBG_TRACE_STATE("response channel shutdwn for read/write", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
	}

	DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
}
961
Willy Tarreau87b09662015-04-03 00:22:06 +0200962/* Set correct stream termination flags in case no analyser has done it. It
Simon Hormandec5be42011-06-08 09:19:07 +0900963 * also counts a failed request if the server state has not reached the request
964 * stage.
965 */
Willy Tarreau87b09662015-04-03 00:22:06 +0200966static void sess_set_term_flags(struct stream *s)
Simon Hormandec5be42011-06-08 09:19:07 +0900967{
Willy Tarreaue7dff022015-04-03 01:14:29 +0200968 if (!(s->flags & SF_FINST_MASK)) {
Christopher Faulet62e75742022-03-31 09:16:34 +0200969 if (s->csb->state == CS_ST_INI) {
Willy Tarreau7ab22adb2019-06-05 14:53:22 +0200970 /* anything before REQ in fact */
Willy Tarreau4781b152021-04-06 13:53:36 +0200971 _HA_ATOMIC_INC(&strm_fe(s)->fe_counters.failed_req);
Willy Tarreau2c1068c2015-09-23 12:21:21 +0200972 if (strm_li(s) && strm_li(s)->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +0200973 _HA_ATOMIC_INC(&strm_li(s)->counters->failed_req);
Simon Hormandec5be42011-06-08 09:19:07 +0900974
Willy Tarreaue7dff022015-04-03 01:14:29 +0200975 s->flags |= SF_FINST_R;
Simon Hormandec5be42011-06-08 09:19:07 +0900976 }
Christopher Faulet62e75742022-03-31 09:16:34 +0200977 else if (s->csb->state == CS_ST_QUE)
Willy Tarreaue7dff022015-04-03 01:14:29 +0200978 s->flags |= SF_FINST_Q;
Christopher Faulet62e75742022-03-31 09:16:34 +0200979 else if (cs_state_in(s->csb->state, CS_SB_REQ|CS_SB_TAR|CS_SB_ASS|CS_SB_CON|CS_SB_CER|CS_SB_RDY))
Willy Tarreaue7dff022015-04-03 01:14:29 +0200980 s->flags |= SF_FINST_C;
Christopher Faulet62e75742022-03-31 09:16:34 +0200981 else if (s->csb->state == CS_ST_EST || s->prev_conn_state == CS_ST_EST)
Willy Tarreaue7dff022015-04-03 01:14:29 +0200982 s->flags |= SF_FINST_D;
Simon Hormandec5be42011-06-08 09:19:07 +0900983 else
Willy Tarreaue7dff022015-04-03 01:14:29 +0200984 s->flags |= SF_FINST_L;
Simon Hormandec5be42011-06-08 09:19:07 +0900985 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100986}
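/* For illustration only: the SF_FINST_* value chosen above becomes the second
 * character of the termination state shown in the logs (the "SC", "CD", ...
 * pairs). A minimal sketch of that mapping, limited to the flags handled by
 * sess_set_term_flags(), could look like this:
 *
 *     static inline char finst_to_log_char(unsigned int flags)
 *     {
 *             switch (flags & SF_FINST_MASK) {
 *             case SF_FINST_R: return 'R';  // still waiting for the request
 *             case SF_FINST_Q: return 'Q';  // waiting in the server queue
 *             case SF_FINST_C: return 'C';  // waiting for the connection
 *             case SF_FINST_D: return 'D';  // during the data transfer
 *             default:         return 'L';  // handled locally (SF_FINST_L)
 *             }
 *     }
 *
 * The authoritative table lives in the logging code; this sketch only shows
 * the intent of the flags set here.
 */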
987
Thierry FOURNIER5a363e72015-09-27 19:29:33 +0200988/* This function processes the use-service action ruleset. It executes
 989 * the associated ACL and sets an applet as the stream or txn final node.
 990 * It returns ACT_RET_ERR if an error occurs, leaving the proxy in a
Ilya Shipitsinc02a23f2020-05-06 00:53:22 +0500991 * consistent state. It returns ACT_RET_STOP on success because
Thierry FOURNIER5a363e72015-09-27 19:29:33 +0200992 * use-service must be a terminal action. It returns ACT_RET_YIELD
 993 * if the initialisation function requires more data.
994 */
995enum act_return process_use_service(struct act_rule *rule, struct proxy *px,
996 struct session *sess, struct stream *s, int flags)
997
998{
999 struct appctx *appctx;
1000
1001 /* Initialises the applet if it is required. */
Christopher Faulet105ba6c2019-12-18 14:41:51 +01001002 if (flags & ACT_OPT_FIRST) {
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001003		/* Register the applet. This function schedules the applet. */
1004 s->target = &rule->applet.obj_type;
Christopher Faulet1336ccf2022-04-12 18:15:16 +02001005 appctx = cs_applet_create(s->csb, objt_applet(s->target));
Christopher Faulet2da02ae2022-02-24 13:45:27 +01001006 if (unlikely(!appctx))
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001007 return ACT_RET_ERR;
1008
Christopher Faulet93882042022-01-19 14:56:50 +01001009 /* Finish initialisation of the context. */
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001010 memset(&appctx->ctx, 0, sizeof(appctx->ctx));
1011 appctx->rule = rule;
Christopher Faulet4aa1d282022-01-13 16:01:35 +01001012 if (appctx->applet->init && !appctx->applet->init(appctx))
1013 return ACT_RET_ERR;
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001014 }
1015 else
Christopher Faulet693b23b2022-02-28 09:09:05 +01001016 appctx = __cs_appctx(s->csb);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001017
Christopher Faulet2571bc62019-03-01 11:44:26 +01001018 if (rule->from != ACT_F_HTTP_REQ) {
1019 if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
Willy Tarreau4781b152021-04-06 13:53:36 +02001020 _HA_ATOMIC_INC(&sess->fe->fe_counters.intercepted_req);
Christopher Faulet2571bc62019-03-01 11:44:26 +01001021
 1022		/* The SF_ASSIGNED flag prevents server assignment. */
1023 s->flags |= SF_ASSIGNED;
1024 }
1025
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001026 /* Now we can schedule the applet. */
Christopher Fauleta0bdec32022-04-04 07:51:21 +02001027 cs_cant_get(s->csb);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001028 appctx_wakeup(appctx);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001029 return ACT_RET_STOP;
1030}
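/* For illustration: this code path is normally reached through an
 * "http-request use-service" or "tcp-request content use-service" rule,
 * e.g. with a hypothetical Lua service registered as "hello":
 *
 *     frontend fe
 *         bind :8080
 *         http-request use-service lua.hello if { path /hello }
 *
 * On the first call ACT_OPT_FIRST is set, so the applet is created and
 * initialised exactly once; later calls simply retrieve the existing appctx
 * from the conn-stream and wake it up again.
 */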
1031
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001032/* This stream analyser checks the switching rules and changes the backend
Willy Tarreau4de91492010-01-22 19:10:05 +01001033 * if appropriate. The default_backend rule is also considered, and the
 1034 * target backend's forced persistence rules are evaluated last, if any.
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001035 * It returns 1 if the processing can continue with the next analysers, or zero if it
1036 * either needs more data or wants to immediately abort the request.
1037 */
Willy Tarreau87b09662015-04-03 00:22:06 +02001038static int process_switching_rules(struct stream *s, struct channel *req, int an_bit)
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001039{
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001040 struct persist_rule *prst_rule;
Willy Tarreau192252e2015-04-04 01:47:55 +02001041 struct session *sess = s->sess;
1042 struct proxy *fe = sess->fe;
Willy Tarreau4de91492010-01-22 19:10:05 +01001043
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001044 req->analysers &= ~an_bit;
1045 req->analyse_exp = TICK_ETERNITY;
1046
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001047 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001048
1049 /* now check whether we have some switching rules for this request */
Willy Tarreaue7dff022015-04-03 01:14:29 +02001050 if (!(s->flags & SF_BE_ASSIGNED)) {
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001051 struct switching_rule *rule;
1052
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02001053 list_for_each_entry(rule, &fe->switching_rules, list) {
Willy Tarreauf51658d2014-04-23 01:21:56 +02001054 int ret = 1;
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001055
Willy Tarreauf51658d2014-04-23 01:21:56 +02001056 if (rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001057 ret = acl_exec_cond(rule->cond, fe, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Willy Tarreauf51658d2014-04-23 01:21:56 +02001058 ret = acl_pass(ret);
1059 if (rule->cond->pol == ACL_COND_UNLESS)
1060 ret = !ret;
1061 }
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001062
1063 if (ret) {
Bertrand Jacquin702d44f2013-11-19 11:43:06 +01001064 /* If the backend name is dynamic, try to resolve the name.
1065 * If we can't resolve the name, or if any error occurs, break
 1066					 * the loop and fall back to the default backend.
1067 */
Dragan Dosen2ae327e2017-10-26 11:25:10 +02001068 struct proxy *backend = NULL;
Bertrand Jacquin702d44f2013-11-19 11:43:06 +01001069
1070 if (rule->dynamic) {
Willy Tarreau83061a82018-07-13 11:56:34 +02001071 struct buffer *tmp;
Dragan Dosen2ae327e2017-10-26 11:25:10 +02001072
1073 tmp = alloc_trash_chunk();
1074 if (!tmp)
1075 goto sw_failed;
1076
Willy Tarreau843b7cb2018-07-13 10:54:26 +02001077 if (build_logline(s, tmp->area, tmp->size, &rule->be.expr))
1078 backend = proxy_be_by_name(tmp->area);
Dragan Dosen2ae327e2017-10-26 11:25:10 +02001079
1080 free_trash_chunk(tmp);
1081 tmp = NULL;
1082
Bertrand Jacquin702d44f2013-11-19 11:43:06 +01001083 if (!backend)
1084 break;
1085 }
1086 else
1087 backend = rule->be.backend;
1088
Willy Tarreau87b09662015-04-03 00:22:06 +02001089 if (!stream_set_backend(s, backend))
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001090 goto sw_failed;
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001091 break;
1092 }
1093 }
1094
1095 /* To ensure correct connection accounting on the backend, we
1096 * have to assign one if it was not set (eg: a listen). This
1097 * measure also takes care of correctly setting the default
Christopher Fauletf0d7eb22021-03-22 15:07:51 +01001098 * backend if any. Don't do anything if an upgrade is already in
1099 * progress.
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001100 */
Christopher Fauletf0d7eb22021-03-22 15:07:51 +01001101 if (!(s->flags & (SF_BE_ASSIGNED|SF_IGNORE)))
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02001102 if (!stream_set_backend(s, fe->defbe.be ? fe->defbe.be : s->be))
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001103 goto sw_failed;
Christopher Fauletf0d7eb22021-03-22 15:07:51 +01001104
1105 /* No backend assigned but no error reported. It happens when a
1106 * TCP stream is upgraded to HTTP/2.
1107 */
1108 if ((s->flags & (SF_BE_ASSIGNED|SF_IGNORE)) == SF_IGNORE) {
1109 DBG_TRACE_DEVEL("leaving with no backend because of a destructive upgrade", STRM_EV_STRM_ANA, s);
1110 return 0;
1111 }
1112
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001113 }
1114
Willy Tarreaufb356202010-08-03 14:02:05 +02001115 /* we don't want to run the TCP or HTTP filters again if the backend has not changed */
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02001116 if (fe == s->be) {
Willy Tarreau22ec1ea2014-11-27 20:45:39 +01001117 s->req.analysers &= ~AN_REQ_INSPECT_BE;
1118 s->req.analysers &= ~AN_REQ_HTTP_PROCESS_BE;
Christopher Faulet0184ea72017-01-05 14:06:34 +01001119 s->req.analysers &= ~AN_REQ_FLT_START_BE;
Willy Tarreaufb356202010-08-03 14:02:05 +02001120 }
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001121
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001122 /* as soon as we know the backend, we must check if we have a matching forced or ignored
Willy Tarreau87b09662015-04-03 00:22:06 +02001123 * persistence rule, and report that in the stream.
Willy Tarreau4de91492010-01-22 19:10:05 +01001124 */
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001125 list_for_each_entry(prst_rule, &s->be->persist_rules, list) {
Willy Tarreau4de91492010-01-22 19:10:05 +01001126 int ret = 1;
1127
1128 if (prst_rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001129 ret = acl_exec_cond(prst_rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Willy Tarreau4de91492010-01-22 19:10:05 +01001130 ret = acl_pass(ret);
1131 if (prst_rule->cond->pol == ACL_COND_UNLESS)
1132 ret = !ret;
1133 }
1134
1135 if (ret) {
1136 /* no rule, or the rule matches */
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001137 if (prst_rule->type == PERSIST_TYPE_FORCE) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02001138 s->flags |= SF_FORCE_PRST;
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001139 } else {
Willy Tarreaue7dff022015-04-03 01:14:29 +02001140 s->flags |= SF_IGNORE_PRST;
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001141 }
Willy Tarreau4de91492010-01-22 19:10:05 +01001142 break;
1143 }
1144 }
1145
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001146 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001147 return 1;
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001148
1149 sw_failed:
1150 /* immediately abort this request in case of allocation failure */
Willy Tarreau22ec1ea2014-11-27 20:45:39 +01001151 channel_abort(&s->req);
1152 channel_abort(&s->res);
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001153
Willy Tarreaue7dff022015-04-03 01:14:29 +02001154 if (!(s->flags & SF_ERR_MASK))
1155 s->flags |= SF_ERR_RESOURCE;
1156 if (!(s->flags & SF_FINST_MASK))
1157 s->flags |= SF_FINST_R;
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001158
Willy Tarreaueee5b512015-04-03 23:46:31 +02001159 if (s->txn)
1160 s->txn->status = 500;
Christopher Faulet0184ea72017-01-05 14:06:34 +01001161 s->req.analysers &= AN_REQ_FLT_END;
Willy Tarreau22ec1ea2014-11-27 20:45:39 +01001162 s->req.analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001163 DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_ANA|STRM_EV_STRM_ERR, s);
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001164 return 0;
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001165}
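/* For illustration: the switching rules walked above come from "use_backend"
 * and "default_backend" statements. A minimal configuration exercising both
 * the static and the dynamic (log-format) forms could be (hypothetical
 * backend names):
 *
 *     frontend fe
 *         use_backend static if { path_beg /static }
 *         use_backend %[req.hdr(host),lower]    # dynamic: resolved at run
 *                                               # time via build_logline()
 *         default_backend app
 *
 * When a dynamic name does not resolve to an existing backend, the loop
 * breaks and the default backend is used instead.
 */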
1166
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001167/* This stream analyser works on a request. It applies all use-server rules to
 1168 * it. The data must already be present in the buffer, otherwise they won't
 1169 * match. It always returns 1.
1170 */
Willy Tarreau87b09662015-04-03 00:22:06 +02001171static int process_server_rules(struct stream *s, struct channel *req, int an_bit)
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001172{
1173 struct proxy *px = s->be;
Willy Tarreau192252e2015-04-04 01:47:55 +02001174 struct session *sess = s->sess;
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001175 struct server_rule *rule;
1176
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001177 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001178
Willy Tarreaue7dff022015-04-03 01:14:29 +02001179 if (!(s->flags & SF_ASSIGNED)) {
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001180 list_for_each_entry(rule, &px->server_rules, list) {
1181 int ret;
1182
Willy Tarreau192252e2015-04-04 01:47:55 +02001183 ret = acl_exec_cond(rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001184 ret = acl_pass(ret);
1185 if (rule->cond->pol == ACL_COND_UNLESS)
1186 ret = !ret;
1187
1188 if (ret) {
Jerome Magnin824186b2020-03-29 09:37:12 +02001189 struct server *srv;
1190
1191 if (rule->dynamic) {
1192 struct buffer *tmp = get_trash_chunk();
1193
1194 if (!build_logline(s, tmp->area, tmp->size, &rule->expr))
1195 break;
1196
1197 srv = findserver(s->be, tmp->area);
1198 if (!srv)
1199 break;
1200 }
1201 else
1202 srv = rule->srv.ptr;
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001203
Emeric Brun52a91d32017-08-31 14:41:55 +02001204 if ((srv->cur_state != SRV_ST_STOPPED) ||
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001205 (px->options & PR_O_PERSIST) ||
Willy Tarreaue7dff022015-04-03 01:14:29 +02001206 (s->flags & SF_FORCE_PRST)) {
1207 s->flags |= SF_DIRECT | SF_ASSIGNED;
Willy Tarreau3fdb3662012-11-12 00:42:33 +01001208 s->target = &srv->obj_type;
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001209 break;
1210 }
 1211				/* if the server is not UP, let's go on with the next rules
 1212				 * just in case another one is suited.
1213 */
1214 }
1215 }
1216 }
1217
1218 req->analysers &= ~an_bit;
1219 req->analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001220 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001221 return 1;
1222}
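/* For illustration: the rules evaluated above come from "use-server"
 * statements in a backend, e.g. (hypothetical names):
 *
 *     backend app
 *         use-server srv_static if { path_beg /static }
 *         use-server %[req.hdr(x-srv)] if { req.hdr(x-srv) -m found }
 *         server srv_static 192.0.2.10:80 check
 *
 * A matching rule only forces the server when it is not stopped, or when
 * persistence is enforced (PR_O_PERSIST / SF_FORCE_PRST), as checked above.
 */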
1223
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001224static inline void sticking_rule_find_target(struct stream *s,
1225 struct stktable *t, struct stksess *ts)
1226{
1227 struct proxy *px = s->be;
1228 struct eb32_node *node;
1229 struct dict_entry *de;
1230 void *ptr;
1231 struct server *srv;
1232
1233 /* Look for the server name previously stored in <t> stick-table */
1234 HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
Thayne McCombs92149f92020-11-20 01:28:26 -07001235 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_KEY);
Emeric Brun0e3457b2021-06-30 17:18:28 +02001236 de = stktable_data_cast(ptr, std_t_dict);
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001237 HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
1238
1239 if (de) {
Thayne McCombs92149f92020-11-20 01:28:26 -07001240 struct ebpt_node *node;
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001241
Thayne McCombs92149f92020-11-20 01:28:26 -07001242 if (t->server_key_type == STKTABLE_SRV_NAME) {
1243 node = ebis_lookup(&px->conf.used_server_name, de->value.key);
1244 if (node) {
1245 srv = container_of(node, struct server, conf.name);
1246 goto found;
1247 }
1248 } else if (t->server_key_type == STKTABLE_SRV_ADDR) {
1249 HA_RWLOCK_RDLOCK(PROXY_LOCK, &px->lock);
1250 node = ebis_lookup(&px->used_server_addr, de->value.key);
1251 HA_RWLOCK_RDUNLOCK(PROXY_LOCK, &px->lock);
1252 if (node) {
1253 srv = container_of(node, struct server, addr_node);
1254 goto found;
1255 }
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001256 }
1257 }
1258
1259 /* Look for the server ID */
1260 HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
1261 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_ID);
Emeric Brun0e3457b2021-06-30 17:18:28 +02001262 node = eb32_lookup(&px->conf.used_server_id, stktable_data_cast(ptr, std_t_sint));
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001263 HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
1264
1265 if (!node)
1266 return;
1267
1268 srv = container_of(node, struct server, conf.id);
1269 found:
1270 if ((srv->cur_state != SRV_ST_STOPPED) ||
1271 (px->options & PR_O_PERSIST) || (s->flags & SF_FORCE_PRST)) {
1272 s->flags |= SF_DIRECT | SF_ASSIGNED;
1273 s->target = &srv->obj_type;
1274 }
1275}
1276
Emeric Brun1d33b292010-01-04 15:47:17 +01001277/* This stream analyser works on a request. It applies all sticking rules to
 1278 * it. The data must already be present in the buffer, otherwise they won't
 1279 * match. It always returns 1.
1280 */
Willy Tarreau87b09662015-04-03 00:22:06 +02001281static int process_sticking_rules(struct stream *s, struct channel *req, int an_bit)
Emeric Brun1d33b292010-01-04 15:47:17 +01001282{
1283 struct proxy *px = s->be;
Willy Tarreau192252e2015-04-04 01:47:55 +02001284 struct session *sess = s->sess;
Emeric Brun1d33b292010-01-04 15:47:17 +01001285 struct sticking_rule *rule;
1286
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001287 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001288
1289 list_for_each_entry(rule, &px->sticking_rules, list) {
1290 int ret = 1 ;
1291 int i;
1292
Willy Tarreau9667a802013-12-09 12:52:13 +01001293 /* Only the first stick store-request of each table is applied
1294 * and other ones are ignored. The purpose is to allow complex
1295 * configurations which look for multiple entries by decreasing
1296 * order of precision and to stop at the first which matches.
1297 * An example could be a store of the IP address from an HTTP
1298 * header first, then from the source if not found.
1299 */
Jerome Magninbee00ad2020-01-16 17:37:21 +01001300 if (rule->flags & STK_IS_STORE) {
1301 for (i = 0; i < s->store_count; i++) {
1302 if (rule->table.t == s->store[i].table)
1303 break;
1304 }
Emeric Brun1d33b292010-01-04 15:47:17 +01001305
Jerome Magninbee00ad2020-01-16 17:37:21 +01001306 if (i != s->store_count)
1307 continue;
1308 }
Emeric Brun1d33b292010-01-04 15:47:17 +01001309
1310 if (rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001311 ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001312 ret = acl_pass(ret);
1313 if (rule->cond->pol == ACL_COND_UNLESS)
1314 ret = !ret;
1315 }
1316
1317 if (ret) {
1318 struct stktable_key *key;
1319
Willy Tarreau192252e2015-04-04 01:47:55 +02001320 key = stktable_fetch_key(rule->table.t, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->expr, NULL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001321 if (!key)
1322 continue;
1323
1324 if (rule->flags & STK_IS_MATCH) {
1325 struct stksess *ts;
1326
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001327 if ((ts = stktable_lookup_key(rule->table.t, key)) != NULL) {
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001328 if (!(s->flags & SF_ASSIGNED))
1329 sticking_rule_find_target(s, rule->table.t, ts);
Emeric Brun819fc6f2017-06-13 19:37:32 +02001330 stktable_touch_local(rule->table.t, ts, 1);
Emeric Brun1d33b292010-01-04 15:47:17 +01001331 }
1332 }
1333 if (rule->flags & STK_IS_STORE) {
1334 if (s->store_count < (sizeof(s->store) / sizeof(s->store[0]))) {
1335 struct stksess *ts;
1336
1337 ts = stksess_new(rule->table.t, key);
1338 if (ts) {
1339 s->store[s->store_count].table = rule->table.t;
1340 s->store[s->store_count++].ts = ts;
1341 }
1342 }
1343 }
1344 }
1345 }
1346
1347 req->analysers &= ~an_bit;
1348 req->analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001349 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001350 return 1;
1351}
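/* For illustration: "stick match" and "stick store-request" rules feed this
 * analyser. A minimal sketch:
 *
 *     backend app
 *         stick-table type ip size 1m expire 30m
 *         stick on src        # shorthand for "stick match src" plus
 *                             # "stick store-request src"
 *         server s1 192.0.2.10:80
 *         server s2 192.0.2.11:80
 *
 * The match part is applied here; the store part is only recorded in
 * s->store[] and committed later by process_store_rules().
 */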
1352
 1353/* This stream analyser works on a response. It applies all store rules to
 1354 * it. The data must already be present in the buffer, otherwise they won't
 1355 * match. It always returns 1.
1356 */
Willy Tarreau87b09662015-04-03 00:22:06 +02001357static int process_store_rules(struct stream *s, struct channel *rep, int an_bit)
Emeric Brun1d33b292010-01-04 15:47:17 +01001358{
1359 struct proxy *px = s->be;
Willy Tarreau192252e2015-04-04 01:47:55 +02001360 struct session *sess = s->sess;
Emeric Brun1d33b292010-01-04 15:47:17 +01001361 struct sticking_rule *rule;
1362 int i;
Willy Tarreau9667a802013-12-09 12:52:13 +01001363 int nbreq = s->store_count;
Emeric Brun1d33b292010-01-04 15:47:17 +01001364
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001365 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001366
1367 list_for_each_entry(rule, &px->storersp_rules, list) {
1368 int ret = 1 ;
Emeric Brun1d33b292010-01-04 15:47:17 +01001369
Willy Tarreau9667a802013-12-09 12:52:13 +01001370 /* Only the first stick store-response of each table is applied
1371 * and other ones are ignored. The purpose is to allow complex
1372 * configurations which look for multiple entries by decreasing
1373 * order of precision and to stop at the first which matches.
1374 * An example could be a store of a set-cookie value, with a
1375 * fallback to a parameter found in a 302 redirect.
1376 *
1377 * The store-response rules are not allowed to override the
1378 * store-request rules for the same table, but they may coexist.
1379 * Thus we can have up to one store-request entry and one store-
1380 * response entry for the same table at any time.
1381 */
1382 for (i = nbreq; i < s->store_count; i++) {
1383 if (rule->table.t == s->store[i].table)
1384 break;
1385 }
1386
1387 /* skip existing entries for this table */
1388 if (i < s->store_count)
1389 continue;
1390
Emeric Brun1d33b292010-01-04 15:47:17 +01001391 if (rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001392 ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001393 ret = acl_pass(ret);
1394 if (rule->cond->pol == ACL_COND_UNLESS)
1395 ret = !ret;
1396 }
1397
1398 if (ret) {
1399 struct stktable_key *key;
1400
Willy Tarreau192252e2015-04-04 01:47:55 +02001401 key = stktable_fetch_key(rule->table.t, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL, rule->expr, NULL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001402 if (!key)
1403 continue;
1404
Willy Tarreau37e340c2013-12-06 23:05:21 +01001405 if (s->store_count < (sizeof(s->store) / sizeof(s->store[0]))) {
Emeric Brun1d33b292010-01-04 15:47:17 +01001406 struct stksess *ts;
1407
1408 ts = stksess_new(rule->table.t, key);
1409 if (ts) {
1410 s->store[s->store_count].table = rule->table.t;
Emeric Brun1d33b292010-01-04 15:47:17 +01001411 s->store[s->store_count++].ts = ts;
1412 }
1413 }
1414 }
1415 }
1416
1417 /* process store request and store response */
1418 for (i = 0; i < s->store_count; i++) {
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001419 struct stksess *ts;
Willy Tarreau13c29de2010-06-06 16:40:39 +02001420 void *ptr;
Thayne McCombs92149f92020-11-20 01:28:26 -07001421 char *key;
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001422 struct dict_entry *de;
Thayne McCombs92149f92020-11-20 01:28:26 -07001423 struct stktable *t = s->store[i].table;
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001424
Willy Tarreau88bc8002021-12-06 07:01:02 +00001425 if (objt_server(s->target) && __objt_server(s->target)->flags & SRV_F_NON_STICK) {
Simon Hormanfa461682011-06-25 09:39:49 +09001426 stksess_free(s->store[i].table, s->store[i].ts);
1427 s->store[i].ts = NULL;
1428 continue;
1429 }
1430
Thayne McCombs92149f92020-11-20 01:28:26 -07001431 ts = stktable_set_entry(t, s->store[i].ts);
Emeric Brun819fc6f2017-06-13 19:37:32 +02001432 if (ts != s->store[i].ts) {
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001433 /* the entry already existed, we can free ours */
Thayne McCombs92149f92020-11-20 01:28:26 -07001434 stksess_free(t, s->store[i].ts);
Emeric Brun1d33b292010-01-04 15:47:17 +01001435 }
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001436 s->store[i].ts = NULL;
Emeric Brun819fc6f2017-06-13 19:37:32 +02001437
Christopher Faulet2a944ee2017-11-07 10:42:54 +01001438 HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
Thayne McCombs92149f92020-11-20 01:28:26 -07001439 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_ID);
Emeric Brun0e3457b2021-06-30 17:18:28 +02001440 stktable_data_cast(ptr, std_t_sint) = __objt_server(s->target)->puid;
Christopher Faulet2a944ee2017-11-07 10:42:54 +01001441 HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001442
Thayne McCombs92149f92020-11-20 01:28:26 -07001443 if (t->server_key_type == STKTABLE_SRV_NAME)
1444 key = __objt_server(s->target)->id;
1445 else if (t->server_key_type == STKTABLE_SRV_ADDR)
1446 key = __objt_server(s->target)->addr_node.key;
1447 else
1448 continue;
1449
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001450 HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
Thayne McCombs92149f92020-11-20 01:28:26 -07001451 de = dict_insert(&server_key_dict, key);
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001452 if (de) {
Thayne McCombs92149f92020-11-20 01:28:26 -07001453 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_KEY);
Emeric Brun0e3457b2021-06-30 17:18:28 +02001454 stktable_data_cast(ptr, std_t_dict) = de;
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001455 }
1456 HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
1457
Thayne McCombs92149f92020-11-20 01:28:26 -07001458 stktable_touch_local(t, ts, 1);
Emeric Brun1d33b292010-01-04 15:47:17 +01001459 }
Willy Tarreau2a164ee2010-06-18 09:57:45 +02001460 s->store_count = 0; /* everything is stored */
Emeric Brun1d33b292010-01-04 15:47:17 +01001461
1462 rep->analysers &= ~an_bit;
1463 rep->analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001464
1465 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001466 return 1;
1467}
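/* For illustration: "stick store-response" rules are evaluated here, for
 * instance to learn a server-assigned cookie (hypothetical cookie name):
 *
 *     backend app
 *         stick-table type string len 32 size 1m expire 30m
 *         stick store-response res.cook(SRVID)
 *         stick match req.cook(SRVID)
 *
 * As explained above, at most one store-request and one store-response
 * entry may coexist per table for a given stream, and both are committed
 * to the table in the final loop of this function.
 */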
1468
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001469/* Set the stream to HTTP mode, if necessary. The minimal request HTTP analysers
1470 * are set and the client mux is upgraded. It returns 1 if the stream processing
 1471 * may continue or 0 if it should be stopped. This happens on error or when the
Christopher Fauletae863c62021-03-15 12:03:44 +01001472 * upgrade requires a new stream. The mux protocol may be specified.
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001473 */
Christopher Fauletae863c62021-03-15 12:03:44 +01001474int stream_set_http_mode(struct stream *s, const struct mux_proto_list *mux_proto)
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001475{
Christopher Faulet95a61e82021-12-22 14:22:03 +01001476 struct conn_stream *cs = s->csf;
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001477 struct connection *conn;
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001478
1479 /* Already an HTTP stream */
1480 if (IS_HTX_STRM(s))
1481 return 1;
1482
1483 s->req.analysers |= AN_REQ_WAIT_HTTP|AN_REQ_HTTP_PROCESS_FE;
1484
1485 if (unlikely(!s->txn && !http_create_txn(s)))
1486 return 0;
1487
Christopher Faulet13a35e52021-12-20 15:34:16 +01001488 conn = cs_conn(cs);
1489 if (conn) {
Christopher Fauleta0bdec32022-04-04 07:51:21 +02001490 cs_rx_endp_more(s->csf);
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001491		/* Make sure we're unsubscribed, the new
1492 * mux will probably want to subscribe to
1493 * the underlying XPRT
1494 */
Christopher Faulet2f35e7b2022-03-31 11:09:28 +02001495 if (s->csf->wait_event.events)
1496 conn->mux->unsubscribe(cs, s->csf->wait_event.events, &(s->csf->wait_event));
Christopher Fauletae863c62021-03-15 12:03:44 +01001497
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001498 if (conn->mux->flags & MX_FL_NO_UPG)
1499 return 0;
Christopher Fauletae863c62021-03-15 12:03:44 +01001500 if (conn_upgrade_mux_fe(conn, cs, &s->req.buf,
1501 (mux_proto ? mux_proto->token : ist("")),
1502 PROTO_MODE_HTTP) == -1)
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001503 return 0;
1504
1505 s->req.flags &= ~(CF_READ_PARTIAL|CF_AUTO_CONNECT);
1506 s->req.total = 0;
1507 s->flags |= SF_IGNORE;
1508 if (strcmp(conn->mux->name, "H2") == 0) {
1509 /* For HTTP/2, destroy the conn_stream, disable logging,
1510 * and abort the stream process. Thus it will be
1511 * silently destroyed. The new mux will create new
1512 * streams.
1513 */
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001514 s->logs.logwait = 0;
1515 s->logs.level = 0;
1516 channel_abort(&s->req);
1517 channel_abort(&s->res);
1518 s->req.analysers &= AN_REQ_FLT_END;
1519 s->req.analyse_exp = TICK_ETERNITY;
1520 }
1521 }
1522
1523 return 1;
1524}
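/* For illustration: this upgrade path is typically taken when a stream that
 * started in TCP mode must become HTTP, e.g. a TCP frontend routing to an
 * HTTP backend, or an explicit rule such as:
 *
 *     frontend fe
 *         mode tcp
 *         tcp-request inspect-delay 5s
 *         tcp-request content switch-mode http if HTTP
 *
 * For HTTP/1 the same stream continues with the HTX analysers set above;
 * for HTTP/2 the stream is silently aborted (SF_IGNORE) and the new mux
 * creates its own streams.
 */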
1525
1526
Christopher Fauletef285c12022-04-01 14:48:06 +02001527/* Updates at once the channel flags and timers of both conn-streams of the
1528 * same stream, to complete the work after the analysers, then updates the data
1529 * layer below. This will ensure that any synchronous update performed at the
1530 * data layer will be reflected in the channel flags and/or conn-stream.
1531 * Note that this does not change the conn-stream's current state, though
1532 * it updates the previous state to the current one.
1533 */
1534static void stream_update_both_cs(struct stream *s)
1535{
1536 struct conn_stream *csf = s->csf;
1537 struct conn_stream *csb = s->csb;
1538 struct channel *req = &s->req;
1539 struct channel *res = &s->res;
1540
1541 req->flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_READ_ATTACHED|CF_WRITE_NULL|CF_WRITE_PARTIAL);
1542 res->flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_READ_ATTACHED|CF_WRITE_NULL|CF_WRITE_PARTIAL);
1543
1544 s->prev_conn_state = csb->state;
1545
1546 /* let's recompute both sides states */
1547 if (cs_state_in(csf->state, CS_SB_RDY|CS_SB_EST))
1548 cs_update(csf);
1549
1550 if (cs_state_in(csb->state, CS_SB_RDY|CS_SB_EST))
1551 cs_update(csb);
1552
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +02001553 /* conn-streams are processed outside of process_stream() and must be
Christopher Fauletef285c12022-04-01 14:48:06 +02001554 * handled at the latest moment.
1555 */
1556 if (cs_appctx(csf)) {
Christopher Fauleta0bdec32022-04-04 07:51:21 +02001557 if ((cs_rx_endp_ready(csf) && !cs_rx_blocked(csf)) ||
1558 (cs_tx_endp_ready(csf) && !cs_tx_blocked(csf)))
Christopher Fauletef285c12022-04-01 14:48:06 +02001559 appctx_wakeup(__cs_appctx(csf));
1560 }
1561 if (cs_appctx(csb)) {
Christopher Fauleta0bdec32022-04-04 07:51:21 +02001562 if ((cs_rx_endp_ready(csb) && !cs_rx_blocked(csb)) ||
1563 (cs_tx_endp_ready(csb) && !cs_tx_blocked(csb)))
Christopher Fauletef285c12022-04-01 14:48:06 +02001564 appctx_wakeup(__cs_appctx(csb));
1565 }
1566}
1567
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001568
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001569/* This macro is very specific to the function below. See the comments in
Willy Tarreau87b09662015-04-03 00:22:06 +02001570 * process_stream() below to understand the logic and the tests.
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001571 */
1572#define UPDATE_ANALYSERS(real, list, back, flag) { \
1573 list = (((list) & ~(flag)) | ~(back)) & (real); \
1574 back = real; \
1575 if (!(list)) \
1576 break; \
1577 if (((list) ^ ((list) & ((list) - 1))) < (flag)) \
1578 continue; \
1579}
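/* Worked example with illustrative values: the first line of the macro drops
 * the analyser that just ran (<flag>), picks up any analysers enabled since
 * the previous iteration (bits absent from <back>), and keeps only those
 * still present on the channel (<real>). Suppose flag = 0x10 and the
 * resulting list = 0x21 (bits 0 and 5 set). Then:
 *
 *     list & (list - 1)          = 0x20   -> clears the lowest set bit
 *     list ^ (list & (list - 1)) = 0x01   -> isolates the lowest set bit
 *
 * Since 0x01 < 0x10 (the flag that was just processed), an analyser located
 * before the current one was re-enabled, so the loop restarts ("continue");
 * "break" only happens once the list becomes empty.
 */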
1580
Christopher Fauleta9215b72016-05-11 17:06:28 +02001581/* The two following macros call an analyzer for the specified channel if the
 1582 * right flag is set. The first one is used for "filterable" analyzers. If a
Christopher Faulet3a394fa2016-05-11 17:13:39 +02001583 * stream has some registered filters, pre and post analyze callbacks are
Christopher Faulet0184ea72017-01-05 14:06:34 +01001584 * called. The second one is used for other analyzers (AN_REQ/RES_FLT_* and
Christopher Fauleta9215b72016-05-11 17:06:28 +02001585 * AN_REQ/RES_HTTP_XFER_BODY) */
1586#define FLT_ANALYZE(strm, chn, fun, list, back, flag, ...) \
1587 { \
1588 if ((list) & (flag)) { \
1589 if (HAS_FILTERS(strm)) { \
Christopher Faulet3a394fa2016-05-11 17:13:39 +02001590 if (!flt_pre_analyze((strm), (chn), (flag))) \
Christopher Fauleta9215b72016-05-11 17:06:28 +02001591 break; \
1592 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1593 break; \
Christopher Faulet3a394fa2016-05-11 17:13:39 +02001594 if (!flt_post_analyze((strm), (chn), (flag))) \
1595 break; \
Christopher Fauleta9215b72016-05-11 17:06:28 +02001596 } \
1597 else { \
1598 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1599 break; \
1600 } \
1601 UPDATE_ANALYSERS((chn)->analysers, (list), \
1602 (back), (flag)); \
1603 } \
1604 }
1605
1606#define ANALYZE(strm, chn, fun, list, back, flag, ...) \
1607 { \
1608 if ((list) & (flag)) { \
1609 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1610 break; \
1611 UPDATE_ANALYSERS((chn)->analysers, (list), \
1612 (back), (flag)); \
1613 } \
1614 }
1615
Willy Tarreau87b09662015-04-03 00:22:06 +02001616/* Processes the client, server, request and response jobs of a stream task,
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001617 * then puts it back to the wait queue in a clean state, or cleans up its
1618 * resources if it must be deleted. Returns in <next> the date the task wants
1619 * to be woken up, or TICK_ETERNITY. In order not to call all functions for
1620 * nothing too many times, the request and response buffers flags are monitored
1621 * and each function is called only if at least another function has changed at
1622 * least one flag it is interested in.
1623 */
Willy Tarreau144f84a2021-03-02 16:09:26 +01001624struct task *process_stream(struct task *t, void *context, unsigned int state)
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001625{
Willy Tarreau827aee92011-03-10 16:55:02 +01001626 struct server *srv;
Olivier Houchard9f6af332018-05-25 14:04:04 +02001627 struct stream *s = context;
Willy Tarreaufb0afa72015-04-03 14:46:27 +02001628 struct session *sess = s->sess;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001629 unsigned int rqf_last, rpf_last;
Willy Tarreau815a9b22010-07-27 17:15:12 +02001630 unsigned int rq_prod_last, rq_cons_last;
1631 unsigned int rp_cons_last, rp_prod_last;
Willy Tarreau576507f2010-01-07 00:09:04 +01001632 unsigned int req_ana_back;
Willy Tarreau8f128b42014-11-28 15:07:47 +01001633 struct channel *req, *res;
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001634 struct conn_stream *csf, *csb;
Willy Tarreau3d07a162019-04-25 19:15:20 +02001635 unsigned int rate;
Willy Tarreau8f128b42014-11-28 15:07:47 +01001636
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001637 DBG_TRACE_ENTER(STRM_EV_STRM_PROC, s);
1638
Willy Tarreau7af4fa92020-06-17 20:49:49 +02001639 activity[tid].stream_calls++;
Willy Tarreaud80cb4e2018-01-20 19:30:13 +01001640
Willy Tarreau8f128b42014-11-28 15:07:47 +01001641 req = &s->req;
1642 res = &s->res;
1643
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001644 csf = s->csf;
1645 csb = s->csb;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001646
Willy Tarreau0f8d3ab2018-10-25 10:42:39 +02001647 /* First, attempt to receive pending data from I/O layers */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001648 cs_conn_sync_recv(csf);
1649 cs_conn_sync_recv(csb);
Willy Tarreau0f8d3ab2018-10-25 10:42:39 +02001650
Willy Tarreau6c539c42022-01-20 18:42:16 +01001651 /* Let's check if we're looping without making any progress, e.g. due
1652 * to a bogus analyser or the fact that we're ignoring a read0. The
1653 * call_rate counter only counts calls with no progress made.
1654 */
1655 if (!((req->flags | res->flags) & (CF_READ_PARTIAL|CF_WRITE_PARTIAL))) {
1656 rate = update_freq_ctr(&s->call_rate, 1);
1657 if (rate >= 100000 && s->call_rate.prev_ctr) // make sure to wait at least a full second
1658 stream_dump_and_crash(&s->obj_type, read_freq_ctr(&s->call_rate));
Willy Tarreau3d07a162019-04-25 19:15:20 +02001659 }
Olivier Houchardc2aa7112018-09-11 18:27:21 +02001660
Krzysztof Piotr Oledzkif9423ae2010-01-29 19:26:18 +01001661 /* this data may be no longer valid, clear it */
Willy Tarreaueee5b512015-04-03 23:46:31 +02001662 if (s->txn)
1663 memset(&s->txn->auth, 0, sizeof(s->txn->auth));
Krzysztof Piotr Oledzkif9423ae2010-01-29 19:26:18 +01001664
Willy Tarreau6f0a7ba2014-06-23 15:22:31 +02001665	/* These flags must explicitly be set every time */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001666 req->flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
1667 res->flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001668
1669 /* Keep a copy of req/rep flags so that we can detect shutdowns */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001670 rqf_last = req->flags & ~CF_MASK_ANALYSER;
1671 rpf_last = res->flags & ~CF_MASK_ANALYSER;
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001672
Christopher Faulet974da9f2022-03-30 15:30:03 +02001673 /* we don't want the conn-stream functions to recursively wake us up */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001674 csf->flags |= CS_FL_DONT_WAKE;
1675 csb->flags |= CS_FL_DONT_WAKE;
Willy Tarreau89f7ef22009-09-05 20:57:35 +02001676
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001677 /* update pending events */
Olivier Houchard9f6af332018-05-25 14:04:04 +02001678 s->pending_events |= (state & TASK_WOKEN_ANY);
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001679
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001680 /* 1a: Check for low level timeouts if needed. We just set a flag on
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +02001681 * conn-streams when their timeouts have expired.
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001682 */
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001683 if (unlikely(s->pending_events & TASK_WOKEN_TIMER)) {
Christopher Fauletae024ce2022-03-29 19:02:31 +02001684 stream_check_conn_timeout(s);
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001685
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +02001686 /* check channel timeouts, and close the corresponding conn-streams
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001687 * for future reads or writes. Note: this will also concern upper layers
1688 * but we do not touch any other flag. We must be careful and correctly
1689 * detect state changes when calling them.
1690 */
1691
Willy Tarreau8f128b42014-11-28 15:07:47 +01001692 channel_check_timeouts(req);
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001693
Willy Tarreau8f128b42014-11-28 15:07:47 +01001694 if (unlikely((req->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001695 csb->flags |= CS_FL_NOLINGER;
1696 cs_shutw(csb);
Willy Tarreau14641402009-12-29 14:49:56 +01001697 }
1698
Willy Tarreau8f128b42014-11-28 15:07:47 +01001699 if (unlikely((req->flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001700 if (csf->flags & CS_FL_NOHALF)
1701 csf->flags |= CS_FL_NOLINGER;
1702 cs_shutr(csf);
Willy Tarreau7bb68ab2012-05-13 14:48:59 +02001703 }
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001704
Willy Tarreau8f128b42014-11-28 15:07:47 +01001705 channel_check_timeouts(res);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001706
Willy Tarreau8f128b42014-11-28 15:07:47 +01001707 if (unlikely((res->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001708 csf->flags |= CS_FL_NOLINGER;
1709 cs_shutw(csf);
Willy Tarreau14641402009-12-29 14:49:56 +01001710 }
1711
Willy Tarreau8f128b42014-11-28 15:07:47 +01001712 if (unlikely((res->flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001713 if (csb->flags & CS_FL_NOHALF)
1714 csb->flags |= CS_FL_NOLINGER;
1715 cs_shutr(csb);
Willy Tarreau7bb68ab2012-05-13 14:48:59 +02001716 }
Willy Tarreau798f4322012-11-08 14:49:17 +01001717
Christopher Fauleta00d8172016-11-10 14:58:05 +01001718 if (HAS_FILTERS(s))
1719 flt_stream_check_timeouts(s);
1720
Willy Tarreau798f4322012-11-08 14:49:17 +01001721 /* Once in a while we're woken up because the task expires. But
1722 * this does not necessarily mean that a timeout has been reached.
Willy Tarreau87b09662015-04-03 00:22:06 +02001723 * So let's not run a whole stream processing if only an expiration
Willy Tarreau798f4322012-11-08 14:49:17 +01001724 * timeout needs to be refreshed.
1725 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001726 if (!((req->flags | res->flags) &
Willy Tarreau798f4322012-11-08 14:49:17 +01001727 (CF_SHUTR|CF_READ_ACTIVITY|CF_READ_TIMEOUT|CF_SHUTW|
Willy Tarreauede3d882018-10-24 17:17:56 +02001728 CF_WRITE_ACTIVITY|CF_WRITE_TIMEOUT|CF_ANA_TIMEOUT)) &&
Christopher Fauletae024ce2022-03-29 19:02:31 +02001729 !(s->flags & SF_CONN_EXP) &&
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001730 !((csf->endp->flags | csb->flags) & CS_EP_ERROR) &&
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001731 ((s->pending_events & TASK_WOKEN_ANY) == TASK_WOKEN_TIMER)) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001732 csf->flags &= ~CS_FL_DONT_WAKE;
1733 csb->flags &= ~CS_FL_DONT_WAKE;
Willy Tarreau798f4322012-11-08 14:49:17 +01001734 goto update_exp_and_leave;
Willy Tarreau5fb04712016-05-04 10:18:37 +02001735 }
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001736 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001737
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001738 resync_conn_stream:
Willy Tarreau10fc09e2014-11-25 19:46:36 +01001739 /* below we may emit error messages so we have to ensure that we have
Christopher Faulet686501c2022-02-01 18:53:53 +01001740	 * our buffers properly allocated. If the allocation fails, an error is
1741 * triggered.
1742 *
1743 * NOTE: An error is returned because the mechanism to queue entities
1744 * waiting for a buffer is totally broken for now. However, this
 1745	 *       part must be refactored. When it is handled, this part
 1746	 *       must be reviewed too.
Willy Tarreau10fc09e2014-11-25 19:46:36 +01001747 */
Willy Tarreau87b09662015-04-03 00:22:06 +02001748 if (!stream_alloc_work_buffer(s)) {
Christopher Faulet6cd56d52022-03-30 10:47:32 +02001749 s->csf->endp->flags |= CS_EP_ERROR;
Christopher Faulet50264b42022-03-30 19:39:30 +02001750 s->conn_err_type = STRM_ET_CONN_RES;
Christopher Faulet686501c2022-02-01 18:53:53 +01001751
Christopher Faulet6cd56d52022-03-30 10:47:32 +02001752 s->csb->endp->flags |= CS_EP_ERROR;
Christopher Faulet50264b42022-03-30 19:39:30 +02001753 s->conn_err_type = STRM_ET_CONN_RES;
Christopher Faulet686501c2022-02-01 18:53:53 +01001754
1755 if (!(s->flags & SF_ERR_MASK))
1756 s->flags |= SF_ERR_RESOURCE;
1757 sess_set_term_flags(s);
Willy Tarreau10fc09e2014-11-25 19:46:36 +01001758 }
1759
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +02001760 /* 1b: check for low-level errors reported at the conn-stream.
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001761 * First we check if it's a retryable error (in which case we don't
1762 * want to tell the buffer). Otherwise we report the error one level
1763 * upper by setting flags into the buffers. Note that the side towards
1764 * the client cannot have connect (hence retryable) errors. Also, the
1765 * connection setup code must be able to deal with any type of abort.
1766 */
Willy Tarreau3fdb3662012-11-12 00:42:33 +01001767 srv = objt_server(s->target);
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001768 if (unlikely(csf->endp->flags & CS_EP_ERROR)) {
1769 if (cs_state_in(csf->state, CS_SB_EST|CS_SB_DIS)) {
1770 cs_shutr(csf);
1771 cs_shutw(csf);
1772 cs_report_error(csf);
Willy Tarreau8f128b42014-11-28 15:07:47 +01001773 if (!(req->analysers) && !(res->analysers)) {
Willy Tarreau4781b152021-04-06 13:53:36 +02001774 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
1775 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01001776 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02001777 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01001778 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02001779 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02001780 if (!(s->flags & SF_ERR_MASK))
1781 s->flags |= SF_ERR_CLICL;
1782 if (!(s->flags & SF_FINST_MASK))
1783 s->flags |= SF_FINST_D;
Willy Tarreau05cb29b2008-12-14 11:44:04 +01001784 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001785 }
1786 }
1787
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001788 if (unlikely(csb->endp->flags & CS_EP_ERROR)) {
1789 if (cs_state_in(csb->state, CS_SB_EST|CS_SB_DIS)) {
1790 cs_shutr(csb);
1791 cs_shutw(csb);
1792 cs_report_error(csb);
Willy Tarreau4781b152021-04-06 13:53:36 +02001793 _HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
Willy Tarreau827aee92011-03-10 16:55:02 +01001794 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02001795 _HA_ATOMIC_INC(&srv->counters.failed_resp);
Willy Tarreau8f128b42014-11-28 15:07:47 +01001796 if (!(req->analysers) && !(res->analysers)) {
Willy Tarreau4781b152021-04-06 13:53:36 +02001797 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
1798 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01001799 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02001800 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01001801 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02001802 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02001803 if (!(s->flags & SF_ERR_MASK))
1804 s->flags |= SF_ERR_SRVCL;
1805 if (!(s->flags & SF_FINST_MASK))
1806 s->flags |= SF_FINST_D;
Willy Tarreau05cb29b2008-12-14 11:44:04 +01001807 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001808 }
1809 /* note: maybe we should process connection errors here ? */
1810 }
1811
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001812 if (cs_state_in(csb->state, CS_SB_CON|CS_SB_RDY)) {
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001813 /* we were trying to establish a connection on the server side,
1814 * maybe it succeeded, maybe it failed, maybe we timed out, ...
1815 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001816 if (csb->state == CS_ST_RDY)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001817 back_handle_st_rdy(s);
Christopher Faulet62e75742022-03-31 09:16:34 +02001818 else if (s->csb->state == CS_ST_CON)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001819 back_handle_st_con(s);
Willy Tarreaud66ed882019-06-05 18:02:04 +02001820
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001821 if (csb->state == CS_ST_CER)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001822 back_handle_st_cer(s);
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001823 else if (csb->state == CS_ST_EST)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001824 back_establish(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001825
Christopher Faulet62e75742022-03-31 09:16:34 +02001826 /* state is now one of CS_ST_CON (still in progress), CS_ST_EST
1827 * (established), CS_ST_DIS (abort), CS_ST_CLO (last error),
1828 * CS_ST_ASS/CS_ST_TAR/CS_ST_REQ for retryable errors.
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001829 */
1830 }
1831
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001832 rq_prod_last = csf->state;
1833 rq_cons_last = csb->state;
1834 rp_cons_last = csf->state;
1835 rp_prod_last = csb->state;
Willy Tarreau815a9b22010-07-27 17:15:12 +02001836
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001837 /* Check for connection closure */
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001838 DBG_TRACE_POINT(STRM_EV_STRM_PROC, s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001839
1840 /* nothing special to be done on client side */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001841 if (unlikely(csf->state == CS_ST_DIS)) {
1842 csf->state = CS_ST_CLO;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001843
Christopher Fauleta70a3542022-03-30 17:13:02 +02001844 /* This is needed only when debugging is enabled, to indicate
1845 * client-side close.
1846 */
1847 if (unlikely((global.mode & MODE_DEBUG) &&
1848 (!(global.mode & MODE_QUIET) ||
1849 (global.mode & MODE_VERBOSE)))) {
1850 chunk_printf(&trash, "%08x:%s.clicls[%04x:%04x]\n",
1851 s->uniq_id, s->be->id,
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001852 (unsigned short)conn_fd(cs_conn(csf)),
1853 (unsigned short)conn_fd(cs_conn(csb)));
Christopher Fauleta70a3542022-03-30 17:13:02 +02001854 DISGUISE(write(1, trash.area, trash.data));
1855 }
1856 }
1857
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001858 /* When a server-side connection is released, we have to count it and
1859 * check for pending connections on this server.
1860 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001861 if (unlikely(csb->state == CS_ST_DIS)) {
1862 csb->state = CS_ST_CLO;
Willy Tarreau3fdb3662012-11-12 00:42:33 +01001863 srv = objt_server(s->target);
Willy Tarreau827aee92011-03-10 16:55:02 +01001864 if (srv) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02001865 if (s->flags & SF_CURR_SESS) {
1866 s->flags &= ~SF_CURR_SESS;
Willy Tarreau4781b152021-04-06 13:53:36 +02001867 _HA_ATOMIC_DEC(&srv->cur_sess);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001868 }
1869 sess_change_server(s, NULL);
Willy Tarreau827aee92011-03-10 16:55:02 +01001870 if (may_dequeue_tasks(srv, s->be))
Willy Tarreau9ab78292021-06-22 18:47:51 +02001871 process_srv_queue(srv);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001872 }
Christopher Fauleta70a3542022-03-30 17:13:02 +02001873
1874 /* This is needed only when debugging is enabled, to indicate
1875 * server-side close.
1876 */
1877 if (unlikely((global.mode & MODE_DEBUG) &&
1878 (!(global.mode & MODE_QUIET) ||
1879 (global.mode & MODE_VERBOSE)))) {
Christopher Faulet62e75742022-03-31 09:16:34 +02001880 if (s->prev_conn_state == CS_ST_EST) {
Christopher Fauleta70a3542022-03-30 17:13:02 +02001881 chunk_printf(&trash, "%08x:%s.srvcls[%04x:%04x]\n",
1882 s->uniq_id, s->be->id,
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001883 (unsigned short)conn_fd(cs_conn(csf)),
1884 (unsigned short)conn_fd(cs_conn(csb)));
Christopher Fauleta70a3542022-03-30 17:13:02 +02001885 DISGUISE(write(1, trash.area, trash.data));
1886 }
1887 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001888 }
1889
1890 /*
1891 * Note: of the transient states (REQ, CER, DIS), only REQ may remain
1892 * at this point.
1893 */
1894
Willy Tarreau0be0ef92009-03-08 19:20:25 +01001895 resync_request:
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001896 /* Analyse request */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001897 if (((req->flags & ~rqf_last) & CF_MASK_ANALYSER) ||
1898 ((req->flags ^ rqf_last) & CF_MASK_STATIC) ||
Willy Tarreau33982cb2017-11-20 15:37:13 +01001899 (req->analysers && (req->flags & CF_SHUTW)) ||
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001900 csf->state != rq_prod_last ||
1901 csb->state != rq_cons_last ||
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001902 s->pending_events & TASK_WOKEN_MSG) {
Willy Tarreau8f128b42014-11-28 15:07:47 +01001903 unsigned int flags = req->flags;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001904
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001905 if (cs_state_in(csf->state, CS_SB_EST|CS_SB_DIS|CS_SB_CLO)) {
Willy Tarreaue34070e2010-01-08 00:32:27 +01001906 int max_loops = global.tune.maxpollevents;
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001907 unsigned int ana_list;
1908 unsigned int ana_back;
Willy Tarreau1a52dbd2009-06-28 19:37:53 +02001909
Willy Tarreau90deb182010-01-07 00:20:41 +01001910 /* it's up to the analysers to stop new connections,
1911 * disable reading or closing. Note: if an analyser
1912 * disables any of these bits, it is responsible for
1913 * enabling them again when it disables itself, so
1914 * that other analysers are called in similar conditions.
1915 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001916 channel_auto_read(req);
1917 channel_auto_connect(req);
1918 channel_auto_close(req);
Willy Tarreauedcf6682008-11-30 23:15:34 +01001919
1920 /* We will call all analysers for which a bit is set in
Willy Tarreau8f128b42014-11-28 15:07:47 +01001921 * req->analysers, following the bit order from LSB
Willy Tarreauedcf6682008-11-30 23:15:34 +01001922 * to MSB. The analysers must remove themselves from
Willy Tarreau1a52dbd2009-06-28 19:37:53 +02001923 * the list when not needed. Any analyser may return 0
1924 * to break out of the loop, either because of missing
1925 * data to take a decision, or because it decides to
Willy Tarreau87b09662015-04-03 00:22:06 +02001926 * kill the stream. We loop at least once through each
Willy Tarreau1a52dbd2009-06-28 19:37:53 +02001927 * analyser, and we may loop again if other analysers
1928 * are added in the middle.
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001929 *
1930 * We build a list of analysers to run. We evaluate all
1931 * of these analysers in the order of the lower bit to
1932 * the higher bit. This ordering is very important.
1933 * An analyser will often add/remove other analysers,
1934 * including itself. Any changes to itself have no effect
1935 * on the loop. If it removes any other analysers, we
1936 * want those analysers not to be called anymore during
1937 * this loop. If it adds an analyser that is located
1938 * after itself, we want it to be scheduled for being
1939 * processed during the loop. If it adds an analyser
1940 * which is located before it, we want it to switch to
1941 * it immediately, even if it has already been called
1942 * once but removed since.
1943 *
1944 * In order to achieve this, we compare the analyser
1945 * list after the call with a copy of it before the
1946 * call. The work list is fed with analyser bits that
1947 * appeared during the call. Then we compare previous
1948 * work list with the new one, and check the bits that
1949 * appeared. If the lowest of these bits is lower than
1950 * the current bit, it means we have enabled a previous
1951 * analyser and must immediately loop again.
Willy Tarreauedcf6682008-11-30 23:15:34 +01001952 */
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001953
Willy Tarreau8f128b42014-11-28 15:07:47 +01001954 ana_list = ana_back = req->analysers;
Willy Tarreaue34070e2010-01-08 00:32:27 +01001955 while (ana_list && max_loops--) {
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001956 /* Warning! ensure that analysers are always placed in ascending order! */
Christopher Faulet0184ea72017-01-05 14:06:34 +01001957 ANALYZE (s, req, flt_start_analyze, ana_list, ana_back, AN_REQ_FLT_START_FE);
Christopher Fauleta9215b72016-05-11 17:06:28 +02001958 FLT_ANALYZE(s, req, tcp_inspect_request, ana_list, ana_back, AN_REQ_INSPECT_FE);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02001959 FLT_ANALYZE(s, req, http_wait_for_request, ana_list, ana_back, AN_REQ_WAIT_HTTP);
1960 FLT_ANALYZE(s, req, http_wait_for_request_body, ana_list, ana_back, AN_REQ_HTTP_BODY);
1961 FLT_ANALYZE(s, req, http_process_req_common, ana_list, ana_back, AN_REQ_HTTP_PROCESS_FE, sess->fe);
Christopher Fauleta9215b72016-05-11 17:06:28 +02001962 FLT_ANALYZE(s, req, process_switching_rules, ana_list, ana_back, AN_REQ_SWITCHING_RULES);
Christopher Faulet0184ea72017-01-05 14:06:34 +01001963 ANALYZE (s, req, flt_start_analyze, ana_list, ana_back, AN_REQ_FLT_START_BE);
Christopher Fauleta9215b72016-05-11 17:06:28 +02001964 FLT_ANALYZE(s, req, tcp_inspect_request, ana_list, ana_back, AN_REQ_INSPECT_BE);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02001965 FLT_ANALYZE(s, req, http_process_req_common, ana_list, ana_back, AN_REQ_HTTP_PROCESS_BE, s->be);
1966 FLT_ANALYZE(s, req, http_process_tarpit, ana_list, ana_back, AN_REQ_HTTP_TARPIT);
Christopher Fauleta9215b72016-05-11 17:06:28 +02001967 FLT_ANALYZE(s, req, process_server_rules, ana_list, ana_back, AN_REQ_SRV_RULES);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02001968 FLT_ANALYZE(s, req, http_process_request, ana_list, ana_back, AN_REQ_HTTP_INNER);
Christopher Fauleta9215b72016-05-11 17:06:28 +02001969 FLT_ANALYZE(s, req, tcp_persist_rdp_cookie, ana_list, ana_back, AN_REQ_PRST_RDP_COOKIE);
1970 FLT_ANALYZE(s, req, process_sticking_rules, ana_list, ana_back, AN_REQ_STICKING_RULES);
Christopher Faulet0184ea72017-01-05 14:06:34 +01001971 ANALYZE (s, req, flt_analyze_http_headers, ana_list, ana_back, AN_REQ_FLT_HTTP_HDRS);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02001972 ANALYZE (s, req, http_request_forward_body, ana_list, ana_back, AN_REQ_HTTP_XFER_BODY);
William Lallemandcf62f7e2018-10-26 14:47:40 +02001973 ANALYZE (s, req, pcli_wait_for_request, ana_list, ana_back, AN_REQ_WAIT_CLI);
Christopher Faulet0184ea72017-01-05 14:06:34 +01001974 ANALYZE (s, req, flt_xfer_data, ana_list, ana_back, AN_REQ_FLT_XFER_DATA);
1975 ANALYZE (s, req, flt_end_analyze, ana_list, ana_back, AN_REQ_FLT_END);
Willy Tarreaue34070e2010-01-08 00:32:27 +01001976 break;
1977 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001978 }
Willy Tarreau84455332009-03-15 22:34:05 +01001979
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001980 rq_prod_last = csf->state;
1981 rq_cons_last = csb->state;
Willy Tarreau8f128b42014-11-28 15:07:47 +01001982 req->flags &= ~CF_WAKE_ONCE;
1983 rqf_last = req->flags;
Willy Tarreau815a9b22010-07-27 17:15:12 +02001984
Willy Tarreau1ec9bb52019-06-06 14:45:26 +02001985 if ((req->flags ^ flags) & (CF_SHUTR|CF_SHUTW))
Willy Tarreau3deb3d02009-06-21 22:43:05 +02001986 goto resync_request;
Willy Tarreau3deb3d02009-06-21 22:43:05 +02001987 }
1988
Willy Tarreau576507f2010-01-07 00:09:04 +01001989 /* we'll monitor the request analysers while parsing the response,
1990 * because some response analysers may indirectly enable new request
1991 * analysers (eg: HTTP keep-alive).
1992 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001993 req_ana_back = req->analysers;
Willy Tarreau576507f2010-01-07 00:09:04 +01001994
Willy Tarreau3deb3d02009-06-21 22:43:05 +02001995 resync_response:
1996 /* Analyse response */
1997
Willy Tarreau8f128b42014-11-28 15:07:47 +01001998 if (((res->flags & ~rpf_last) & CF_MASK_ANALYSER) ||
1999 (res->flags ^ rpf_last) & CF_MASK_STATIC ||
Willy Tarreau33982cb2017-11-20 15:37:13 +01002000 (res->analysers && (res->flags & CF_SHUTW)) ||
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002001 csf->state != rp_cons_last ||
2002 csb->state != rp_prod_last ||
Christopher Faulet9d810ca2016-12-08 22:33:52 +01002003 s->pending_events & TASK_WOKEN_MSG) {
Willy Tarreau8f128b42014-11-28 15:07:47 +01002004 unsigned int flags = res->flags;
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002005
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002006 if (cs_state_in(csb->state, CS_SB_EST|CS_SB_DIS|CS_SB_CLO)) {
Willy Tarreaue34070e2010-01-08 00:32:27 +01002007 int max_loops = global.tune.maxpollevents;
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002008 unsigned int ana_list;
2009 unsigned int ana_back;
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002010
Willy Tarreau90deb182010-01-07 00:20:41 +01002011			/* it's up to the analysers to stop or disable reading or
2012 * closing. Note: if an analyser disables any of these
2013 * bits, it is responsible for enabling them again when
2014 * it disables itself, so that other analysers are called
2015 * in similar conditions.
2016 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002017 channel_auto_read(res);
2018 channel_auto_close(res);
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002019
2020 /* We will call all analysers for which a bit is set in
Willy Tarreau8f128b42014-11-28 15:07:47 +01002021 * res->analysers, following the bit order from LSB
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002022 * to MSB. The analysers must remove themselves from
2023 * the list when not needed. Any analyser may return 0
2024 * to break out of the loop, either because of missing
2025 * data to take a decision, or because it decides to
Willy Tarreau87b09662015-04-03 00:22:06 +02002026 * kill the stream. We loop at least once through each
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002027 * analyser, and we may loop again if other analysers
2028 * are added in the middle.
2029 */
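			/* Illustrative sketch (an assumption about the macros, not their
			 * actual definition): each ANALYZE()/FLT_ANALYZE() line below
			 * conceptually behaves like
			 *
			 *     if (ana_list & AN_RES_XXX) {
			 *         if (!analyser_fct(s, res, AN_RES_XXX))
			 *             break;                  // needs more data or killed the stream
			 *         ana_list &= res->analysers; // drop analysers that removed themselves
			 *     }
			 *
			 * so the while() below keeps rescanning the list until it is
			 * empty or max_loops is exhausted.
			 */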
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002030
Willy Tarreau8f128b42014-11-28 15:07:47 +01002031 ana_list = ana_back = res->analysers;
Willy Tarreaue34070e2010-01-08 00:32:27 +01002032 while (ana_list && max_loops--) {
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002033 /* Warning! ensure that analysers are always placed in ascending order! */
Christopher Faulet0184ea72017-01-05 14:06:34 +01002034 ANALYZE (s, res, flt_start_analyze, ana_list, ana_back, AN_RES_FLT_START_FE);
2035 ANALYZE (s, res, flt_start_analyze, ana_list, ana_back, AN_RES_FLT_START_BE);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002036 FLT_ANALYZE(s, res, tcp_inspect_response, ana_list, ana_back, AN_RES_INSPECT);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002037 FLT_ANALYZE(s, res, http_wait_for_response, ana_list, ana_back, AN_RES_WAIT_HTTP);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002038 FLT_ANALYZE(s, res, process_store_rules, ana_list, ana_back, AN_RES_STORE_RULES);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002039 FLT_ANALYZE(s, res, http_process_res_common, ana_list, ana_back, AN_RES_HTTP_PROCESS_BE, s->be);
Christopher Faulet0184ea72017-01-05 14:06:34 +01002040 ANALYZE (s, res, flt_analyze_http_headers, ana_list, ana_back, AN_RES_FLT_HTTP_HDRS);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002041 ANALYZE (s, res, http_response_forward_body, ana_list, ana_back, AN_RES_HTTP_XFER_BODY);
William Lallemandcf62f7e2018-10-26 14:47:40 +02002042 ANALYZE (s, res, pcli_wait_for_response, ana_list, ana_back, AN_RES_WAIT_CLI);
Christopher Faulet0184ea72017-01-05 14:06:34 +01002043 ANALYZE (s, res, flt_xfer_data, ana_list, ana_back, AN_RES_FLT_XFER_DATA);
2044 ANALYZE (s, res, flt_end_analyze, ana_list, ana_back, AN_RES_FLT_END);
Willy Tarreaue34070e2010-01-08 00:32:27 +01002045 break;
2046 }
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002047 }
2048
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002049 rp_cons_last = csf->state;
2050 rp_prod_last = csb->state;
Christopher Fauletcdaea892017-07-06 15:49:30 +02002051 res->flags &= ~CF_WAKE_ONCE;
Willy Tarreau8f128b42014-11-28 15:07:47 +01002052 rpf_last = res->flags;
Willy Tarreau815a9b22010-07-27 17:15:12 +02002053
Willy Tarreau1ec9bb52019-06-06 14:45:26 +02002054 if ((res->flags ^ flags) & (CF_SHUTR|CF_SHUTW))
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002055 goto resync_response;
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002056 }
2057
Willy Tarreau576507f2010-01-07 00:09:04 +01002058 /* maybe someone has added some request analysers, so we must check and loop */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002059 if (req->analysers & ~req_ana_back)
Willy Tarreau576507f2010-01-07 00:09:04 +01002060 goto resync_request;
2061
Willy Tarreau8f128b42014-11-28 15:07:47 +01002062 if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
Willy Tarreau0499e352010-12-17 07:13:42 +01002063 goto resync_request;
2064
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002065 /* FIXME: here we should call protocol handlers which rely on
2066 * both buffers.
2067 */
2068
2069
2070 /*
Willy Tarreau87b09662015-04-03 00:22:06 +02002071 * Now we propagate unhandled errors to the stream. Normally
Willy Tarreauae526782010-03-04 20:34:23 +01002072 * we're just in a data phase here since it means we have not
2073 * seen any analyser who could set an error status.
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002074 */
Willy Tarreau3fdb3662012-11-12 00:42:33 +01002075 srv = objt_server(s->target);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002076 if (unlikely(!(s->flags & SF_ERR_MASK))) {
Willy Tarreau8f128b42014-11-28 15:07:47 +01002077 if (req->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002078 /* Report it if the client got an error or a read timeout expired */
Christopher Faulet813f9132021-10-18 15:06:20 +02002079 req->analysers &= AN_REQ_FLT_END;
Willy Tarreau8f128b42014-11-28 15:07:47 +01002080 if (req->flags & CF_READ_ERROR) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002081 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
2082 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002083 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002084 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002085 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002086 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002087 s->flags |= SF_ERR_CLICL;
Willy Tarreauae526782010-03-04 20:34:23 +01002088 }
Willy Tarreau8f128b42014-11-28 15:07:47 +01002089 else if (req->flags & CF_READ_TIMEOUT) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002090 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
2091 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002092 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002093 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002094 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002095 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002096 s->flags |= SF_ERR_CLITO;
Willy Tarreauae526782010-03-04 20:34:23 +01002097 }
Willy Tarreau8f128b42014-11-28 15:07:47 +01002098 else if (req->flags & CF_WRITE_ERROR) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002099 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
2100 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002101 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002102 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002103 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002104 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002105 s->flags |= SF_ERR_SRVCL;
Willy Tarreauae526782010-03-04 20:34:23 +01002106 }
2107 else {
Willy Tarreau4781b152021-04-06 13:53:36 +02002108 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
2109 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002110 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002111 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002112 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002113 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002114 s->flags |= SF_ERR_SRVTO;
Willy Tarreauae526782010-03-04 20:34:23 +01002115 }
Willy Tarreau84455332009-03-15 22:34:05 +01002116 sess_set_term_flags(s);
Christopher Faulet5e1a9d72019-04-23 17:34:22 +02002117
2118 /* Abort the request if a client error occurred while
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +02002119 * the backend conn-stream is in the CS_ST_INI
Christopher Faulet62e75742022-03-31 09:16:34 +02002120 * state. It is switched into the CS_ST_CLO state and
Christopher Faulet5e1a9d72019-04-23 17:34:22 +02002121 * the request channel is erased. */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002122 if (csb->state == CS_ST_INI) {
Christopher Faulet62e75742022-03-31 09:16:34 +02002123 s->csb->state = CS_ST_CLO;
Christopher Faulet5e1a9d72019-04-23 17:34:22 +02002124 channel_abort(req);
2125 if (IS_HTX_STRM(s))
2126 channel_htx_erase(req, htxbuf(&req->buf));
2127 else
2128 channel_erase(req);
2129 }
Willy Tarreau84455332009-03-15 22:34:05 +01002130 }
Willy Tarreau8f128b42014-11-28 15:07:47 +01002131 else if (res->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002132 /* Report it if the server got an error or a read timeout expired */
Christopher Faulet813f9132021-10-18 15:06:20 +02002133 res->analysers &= AN_RES_FLT_END;
Willy Tarreau8f128b42014-11-28 15:07:47 +01002134 if (res->flags & CF_READ_ERROR) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002135 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
2136 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002137 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002138 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002139 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002140 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002141 s->flags |= SF_ERR_SRVCL;
Willy Tarreauae526782010-03-04 20:34:23 +01002142 }
Willy Tarreau8f128b42014-11-28 15:07:47 +01002143 else if (res->flags & CF_READ_TIMEOUT) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002144 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
2145 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002146 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002147 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002148 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002149 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002150 s->flags |= SF_ERR_SRVTO;
Willy Tarreauae526782010-03-04 20:34:23 +01002151 }
Willy Tarreau8f128b42014-11-28 15:07:47 +01002152 else if (res->flags & CF_WRITE_ERROR) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002153 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
2154 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002155 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002156 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002157 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002158 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002159 s->flags |= SF_ERR_CLICL;
Willy Tarreauae526782010-03-04 20:34:23 +01002160 }
2161 else {
Willy Tarreau4781b152021-04-06 13:53:36 +02002162 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
2163 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002164 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002165 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002166 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002167 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002168 s->flags |= SF_ERR_CLITO;
Willy Tarreauae526782010-03-04 20:34:23 +01002169 }
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002170 sess_set_term_flags(s);
2171 }
Willy Tarreau84455332009-03-15 22:34:05 +01002172 }
2173
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002174 /*
2175 * Here we take care of forwarding unhandled data. This also includes
2176 * connection establishments and shutdown requests.
2177 */
2178
2179
Ilya Shipitsinb8888ab2021-01-06 21:20:16 +05002180 /* If no one is interested in analysing data, it's time to forward
Willy Tarreau31971e52009-09-20 12:07:52 +02002181 * everything. We configure the buffer to forward indefinitely.
Willy Tarreau03cdb7c2012-08-27 23:14:58 +02002182 * Note that we're checking CF_SHUTR_NOW as an indication of a possible
Willy Tarreau8263d2b2012-08-28 00:06:31 +02002183 * recent call to channel_abort().
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002184 */
Christopher Faulet35fe6992017-08-29 16:06:38 +02002185 if (unlikely((!req->analysers || (req->analysers == AN_REQ_FLT_END && !(req->flags & CF_FLT_ANALYZE))) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002186 !(req->flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002187 (cs_state_in(csf->state, CS_SB_EST|CS_SB_DIS|CS_SB_CLO)) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002188 (req->to_forward != CHN_INFINITE_FORWARD))) {
Willy Tarreaub31c9712012-11-11 23:05:39 +01002189 /* This buffer is freewheeling, there's no analyser
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002190		 * attached to it. If any data are left in it, we'll permit them to move.
2191 * move.
2192 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002193 channel_auto_read(req);
2194 channel_auto_connect(req);
2195 channel_auto_close(req);
Willy Tarreau5bd8c372009-01-19 00:32:22 +01002196
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002197 if (IS_HTX_STRM(s)) {
2198 struct htx *htx = htxbuf(&req->buf);
2199
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002200			/* We'll let data flow from the producer (if still connected)
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002201 * to the consumer.
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002202 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002203 co_set_data(req, htx->data);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002204 if (!(req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002205 channel_htx_forward_forever(req, htx);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002206 }
2207 else {
2208			/* We'll let data flow from the producer (if still connected)
2209 * to the consumer (which might possibly not be connected yet).
2210 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002211 c_adv(req, ci_data(req));
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002212 if (!(req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
2213 channel_forward_forever(req);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002214 }
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002215 }
Willy Tarreauf890dc92008-12-13 21:12:26 +01002216
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002217 /* check if it is wise to enable kernel splicing to forward request data */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002218 if (!(req->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
2219 req->to_forward &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002220 (global.tune.options & GTUNE_USE_SPLICE) &&
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002221 (cs_conn(csf) && __cs_conn(csf)->xprt && __cs_conn(csf)->xprt->rcv_pipe &&
2222 __cs_conn(csf)->mux && __cs_conn(csf)->mux->rcv_pipe) &&
2223 (cs_conn(csb) && __cs_conn(csb)->xprt && __cs_conn(csb)->xprt->snd_pipe &&
2224 __cs_conn(csb)->mux && __cs_conn(csb)->mux->snd_pipe) &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002225 (pipes_used < global.maxpipes) &&
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02002226 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_REQ) ||
2227 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002228 (req->flags & CF_STREAMER_FAST)))) {
2229 req->flags |= CF_KERN_SPLICING;
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002230 }
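	/* Configuration sketch (directive names assumed from the standard
	 * keywords; check the manual of the version in use): the conditions
	 * above are typically satisfied with something like
	 *
	 *     global
	 *         maxpipes 1024
	 *     defaults
	 *         option splice-auto        # or splice-request / splice-response
	 *
	 * on a build where kernel splicing (GTUNE_USE_SPLICE) is available.
	 */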
2231
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002232 /* reflect what the L7 analysers have seen last */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002233 rqf_last = req->flags;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002234
Willy Tarreau520d95e2009-09-19 21:04:57 +02002235 /* it's possible that an upper layer has requested a connection setup or abort.
2236 * There are 2 situations where we decide to establish a new connection :
2237 * - there are data scheduled for emission in the buffer
Willy Tarreau03cdb7c2012-08-27 23:14:58 +02002238 * - the CF_AUTO_CONNECT flag is set (active connection)
Willy Tarreau520d95e2009-09-19 21:04:57 +02002239 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002240 if (csb->state == CS_ST_INI) {
Christopher Faulet5e1a9d72019-04-23 17:34:22 +02002241 if (!(req->flags & CF_SHUTW)) {
Willy Tarreau8f128b42014-11-28 15:07:47 +01002242 if ((req->flags & CF_AUTO_CONNECT) || !channel_is_empty(req)) {
Willy Tarreaucf644ed2013-09-29 17:19:56 +02002243 /* If we have an appctx, there is no connect method, so we
2244 * immediately switch to the connected state, otherwise we
2245 * perform a connection request.
Willy Tarreau520d95e2009-09-19 21:04:57 +02002246 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002247 csb->state = CS_ST_REQ; /* new connection requested */
Christopher Faulet731c8e62022-03-29 16:08:44 +02002248 s->conn_retries = 0;
Christopher Faulet9f5382e2021-05-21 13:46:14 +02002249 if ((s->be->retry_type &~ PR_RE_CONN_FAILED) &&
2250 (s->be->mode == PR_MODE_HTTP) &&
Christopher Faulete05bf9e2022-03-29 15:23:40 +02002251 !(s->txn->flags & TX_D_L7_RETRY))
2252 s->txn->flags |= TX_L7_RETRY;
Willy Tarreau520d95e2009-09-19 21:04:57 +02002253 }
Willy Tarreau73201222009-08-16 18:27:24 +02002254 }
Willy Tarreauf41ffdc2009-09-20 08:19:25 +02002255 else {
Christopher Faulet62e75742022-03-31 09:16:34 +02002256 s->csb->state = CS_ST_CLO; /* shutw+ini = abort */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002257 channel_shutw_now(req); /* fix buffer flags upon abort */
2258 channel_shutr_now(res);
Willy Tarreauf41ffdc2009-09-20 08:19:25 +02002259 }
Willy Tarreau92795622009-03-06 12:51:23 +01002260 }
2261
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002262
2263 /* we may have a pending connection request, or a connection waiting
2264 * for completion.
2265 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002266 if (cs_state_in(csb->state, CS_SB_REQ|CS_SB_QUE|CS_SB_TAR|CS_SB_ASS)) {
Thierry FOURNIER4834bc72015-06-06 19:29:07 +02002267 /* prune the request variables and swap to the response variables. */
2268 if (s->vars_reqres.scope != SCOPE_RES) {
Jerome Magnin2f44e882019-11-09 18:00:47 +01002269 if (!LIST_ISEMPTY(&s->vars_reqres.head))
Willy Tarreaucda7f3f2018-10-28 13:44:36 +01002270 vars_prune(&s->vars_reqres, s->sess, s);
Willy Tarreaub7bfcb32021-08-31 08:13:25 +02002271 vars_init_head(&s->vars_reqres, SCOPE_RES);
Thierry FOURNIER4834bc72015-06-06 19:29:07 +02002272 }
2273
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002274 do {
2275 /* nb: step 1 might switch from QUE to ASS, but we first want
2276 * to give a chance to step 2 to perform a redirect if needed.
2277 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002278 if (csb->state != CS_ST_REQ)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01002279 back_try_conn_req(s);
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002280 if (csb->state == CS_ST_REQ)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01002281 back_handle_st_req(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002282
Willy Tarreauada4c582020-03-04 16:42:03 +01002283 /* get a chance to complete an immediate connection setup */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002284 if (csb->state == CS_ST_RDY)
2285 goto resync_conn_stream;
Willy Tarreauada4c582020-03-04 16:42:03 +01002286
Willy Tarreau9e5a3aa2013-12-31 23:32:12 +01002287 /* applets directly go to the ESTABLISHED state. Similarly,
2288 * servers experience the same fate when their connection
2289 * is reused.
2290 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002291 if (unlikely(csb->state == CS_ST_EST))
Willy Tarreau3a9312a2020-01-09 18:43:15 +01002292 back_establish(s);
Willy Tarreaufac4bd12013-11-30 09:21:49 +01002293
Willy Tarreau3fdb3662012-11-12 00:42:33 +01002294 srv = objt_server(s->target);
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002295 if (csb->state == CS_ST_ASS && srv && srv->rdr_len && (s->flags & SF_REDIRECTABLE))
2296 http_perform_server_redirect(s, csb);
2297 } while (csb->state == CS_ST_ASS);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002298 }
2299
Willy Tarreau829bd472019-06-06 09:17:23 +02002300 /* Let's see if we can send the pending request now */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002301 cs_conn_sync_send(csb);
Willy Tarreau829bd472019-06-06 09:17:23 +02002302
2303 /*
2304 * Now forward all shutdown requests between both sides of the request buffer
2305 */
2306
2307 /* first, let's check if the request buffer needs to shutdown(write), which may
2308 * happen either because the input is closed or because we want to force a close
2309 * once the server has begun to respond. If a half-closed timeout is set, we adjust
Willy Tarreaua544c662022-04-14 17:39:48 +02002310	 * the other side's timeout as well. However this has no effect during the
2311 * connection setup unless the backend has abortonclose set.
Willy Tarreau829bd472019-06-06 09:17:23 +02002312 */
2313 if (unlikely((req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
Willy Tarreaua544c662022-04-14 17:39:48 +02002314 (CF_AUTO_CLOSE|CF_SHUTR) &&
2315 (csb->state != CS_ST_CON || (s->be->options & PR_O_ABRT_CLOSE)))) {
Willy Tarreau829bd472019-06-06 09:17:23 +02002316 channel_shutw_now(req);
2317 }
2318
2319 /* shutdown(write) pending */
2320 if (unlikely((req->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
2321 channel_is_empty(req))) {
2322 if (req->flags & CF_READ_ERROR)
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002323 csb->flags |= CS_FL_NOLINGER;
2324 cs_shutw(csb);
Willy Tarreau829bd472019-06-06 09:17:23 +02002325 }
2326
2327 /* shutdown(write) done on server side, we must stop the client too */
2328 if (unlikely((req->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW &&
2329 !req->analysers))
2330 channel_shutr_now(req);
2331
2332 /* shutdown(read) pending */
2333 if (unlikely((req->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002334 if (csf->flags & CS_FL_NOHALF)
2335 csf->flags |= CS_FL_NOLINGER;
2336 cs_shutr(csf);
Willy Tarreau829bd472019-06-06 09:17:23 +02002337 }
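	/* Reading aid (assumed semantics of the channel flags used above):
	 *   CF_SHUTR / CF_SHUTW         - shutdown already performed on that side
	 *   CF_SHUTR_NOW / CF_SHUTW_NOW - shutdown requested but not yet performed
	 * The blocks above turn pending requests into effective shutdowns and
	 * mirror the resulting state to the other side of the request channel.
	 */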
2338
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002339 /* Benchmarks have shown that it's optimal to do a full resync now */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002340 if (csf->state == CS_ST_DIS ||
2341 cs_state_in(csb->state, CS_SB_RDY|CS_SB_DIS) ||
2342 (csf->endp->flags & CS_EP_ERROR && csf->state != CS_ST_CLO) ||
2343 (csb->endp->flags & CS_EP_ERROR && csb->state != CS_ST_CLO))
2344 goto resync_conn_stream;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002345
Willy Tarreau815a9b22010-07-27 17:15:12 +02002346 /* otherwise we want to check if we need to resync the req buffer or not */
Willy Tarreau1ec9bb52019-06-06 14:45:26 +02002347 if ((req->flags ^ rqf_last) & (CF_SHUTR|CF_SHUTW))
Willy Tarreau0be0ef92009-03-08 19:20:25 +01002348 goto resync_request;
2349
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002350 /* perform output updates to the response buffer */
Willy Tarreau84455332009-03-15 22:34:05 +01002351
Ilya Shipitsinb8888ab2021-01-06 21:20:16 +05002352 /* If no one is interested in analysing data, it's time to forward
Willy Tarreau31971e52009-09-20 12:07:52 +02002353 * everything. We configure the buffer to forward indefinitely.
Willy Tarreau03cdb7c2012-08-27 23:14:58 +02002354 * Note that we're checking CF_SHUTR_NOW as an indication of a possible
Willy Tarreau8263d2b2012-08-28 00:06:31 +02002355 * recent call to channel_abort().
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002356 */
Christopher Faulet35fe6992017-08-29 16:06:38 +02002357 if (unlikely((!res->analysers || (res->analysers == AN_RES_FLT_END && !(res->flags & CF_FLT_ANALYZE))) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002358 !(res->flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002359 cs_state_in(csb->state, CS_SB_EST|CS_SB_DIS|CS_SB_CLO) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002360 (res->to_forward != CHN_INFINITE_FORWARD))) {
Willy Tarreaub31c9712012-11-11 23:05:39 +01002361 /* This buffer is freewheeling, there's no analyser
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002362		 * attached to it. If any data are left in it, we'll permit them to move.
2363 * move.
2364 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002365 channel_auto_read(res);
2366 channel_auto_close(res);
Willy Tarreauda4d9fe2010-11-07 20:26:56 +01002367
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002368 if (IS_HTX_STRM(s)) {
2369 struct htx *htx = htxbuf(&res->buf);
Willy Tarreauce887fd2012-05-12 12:50:00 +02002370
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002371			/* We'll let data flow from the producer (if still connected)
2372 * to the consumer.
2373 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002374 co_set_data(res, htx->data);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002375 if (!(res->flags & (CF_SHUTR|CF_SHUTW_NOW)))
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002376 channel_htx_forward_forever(res, htx);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002377 }
2378 else {
2379			/* We'll let data flow from the producer (if still connected)
2380 * to the consumer.
2381 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002382 c_adv(res, ci_data(res));
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002383 if (!(res->flags & (CF_SHUTR|CF_SHUTW_NOW)))
2384 channel_forward_forever(res);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002385 }
Willy Tarreau42529c32015-07-09 18:38:57 +02002386
Willy Tarreauce887fd2012-05-12 12:50:00 +02002387 /* if we have no analyser anymore in any direction and have a
Willy Tarreau05cdd962014-05-10 14:30:07 +02002388 * tunnel timeout set, use it now. Note that we must respect
2389 * the half-closed timeouts as well.
Willy Tarreauce887fd2012-05-12 12:50:00 +02002390 */
Amaury Denoyellefb504432020-12-10 13:43:53 +01002391 if (!req->analysers && s->tunnel_timeout) {
Willy Tarreau8f128b42014-11-28 15:07:47 +01002392 req->rto = req->wto = res->rto = res->wto =
Amaury Denoyellefb504432020-12-10 13:43:53 +01002393 s->tunnel_timeout;
Willy Tarreau05cdd962014-05-10 14:30:07 +02002394
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02002395 if ((req->flags & CF_SHUTR) && tick_isset(sess->fe->timeout.clientfin))
2396 res->wto = sess->fe->timeout.clientfin;
Willy Tarreau8f128b42014-11-28 15:07:47 +01002397 if ((req->flags & CF_SHUTW) && tick_isset(s->be->timeout.serverfin))
2398 res->rto = s->be->timeout.serverfin;
2399 if ((res->flags & CF_SHUTR) && tick_isset(s->be->timeout.serverfin))
2400 req->wto = s->be->timeout.serverfin;
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02002401 if ((res->flags & CF_SHUTW) && tick_isset(sess->fe->timeout.clientfin))
2402 req->rto = sess->fe->timeout.clientfin;
Willy Tarreau05cdd962014-05-10 14:30:07 +02002403
Willy Tarreau8f128b42014-11-28 15:07:47 +01002404 req->rex = tick_add(now_ms, req->rto);
2405 req->wex = tick_add(now_ms, req->wto);
2406 res->rex = tick_add(now_ms, res->rto);
2407 res->wex = tick_add(now_ms, res->wto);
Willy Tarreauce887fd2012-05-12 12:50:00 +02002408 }
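		/* Configuration sketch (assumed directives, to be checked against
		 * the configuration manual): the timeouts applied above usually
		 * come from lines such as
		 *
		 *     defaults
		 *         timeout tunnel     1h
		 *         timeout client-fin 30s
		 *         timeout server-fin 30s
		 */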
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002409 }
Willy Tarreauf890dc92008-12-13 21:12:26 +01002410
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002411 /* check if it is wise to enable kernel splicing to forward response data */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002412 if (!(res->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
2413 res->to_forward &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002414 (global.tune.options & GTUNE_USE_SPLICE) &&
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002415 (cs_conn(csf) && __cs_conn(csf)->xprt && __cs_conn(csf)->xprt->snd_pipe &&
2416 __cs_conn(csf)->mux && __cs_conn(csf)->mux->snd_pipe) &&
2417 (cs_conn(csb) && __cs_conn(csb)->xprt && __cs_conn(csb)->xprt->rcv_pipe &&
2418 __cs_conn(csb)->mux && __cs_conn(csb)->mux->rcv_pipe) &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002419 (pipes_used < global.maxpipes) &&
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02002420 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_RTR) ||
2421 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002422 (res->flags & CF_STREAMER_FAST)))) {
2423 res->flags |= CF_KERN_SPLICING;
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002424 }
2425
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002426 /* reflect what the L7 analysers have seen last */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002427 rpf_last = res->flags;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002428
Willy Tarreau829bd472019-06-06 09:17:23 +02002429 /* Let's see if we can send the pending response now */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002430 cs_conn_sync_send(csf);
Willy Tarreau829bd472019-06-06 09:17:23 +02002431
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002432 /*
2433 * Now forward all shutdown requests between both sides of the buffer
2434 */
2435
2436 /*
2437 * FIXME: this is probably where we should produce error responses.
2438 */
2439
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002440 /* first, let's check if the response buffer needs to shutdown(write) */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002441 if (unlikely((res->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
Willy Tarreau05cdd962014-05-10 14:30:07 +02002442 (CF_AUTO_CLOSE|CF_SHUTR))) {
Willy Tarreau8f128b42014-11-28 15:07:47 +01002443 channel_shutw_now(res);
Willy Tarreau05cdd962014-05-10 14:30:07 +02002444 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002445
2446 /* shutdown(write) pending */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002447 if (unlikely((res->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
2448 channel_is_empty(res))) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002449 cs_shutw(csf);
Willy Tarreau05cdd962014-05-10 14:30:07 +02002450 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002451
2452 /* shutdown(write) done on the client side, we must stop the server too */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002453 if (unlikely((res->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW) &&
2454 !res->analysers)
2455 channel_shutr_now(res);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002456
2457 /* shutdown(read) pending */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002458 if (unlikely((res->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002459 if (csb->flags & CS_FL_NOHALF)
2460 csb->flags |= CS_FL_NOLINGER;
2461 cs_shutr(csb);
Willy Tarreau7bb68ab2012-05-13 14:48:59 +02002462 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002463
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002464 if (csf->state == CS_ST_DIS ||
2465 cs_state_in(csb->state, CS_SB_RDY|CS_SB_DIS) ||
2466 (csf->endp->flags & CS_EP_ERROR && csf->state != CS_ST_CLO) ||
2467 (csb->endp->flags & CS_EP_ERROR && csb->state != CS_ST_CLO))
2468 goto resync_conn_stream;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002469
Willy Tarreau3c5c0662019-06-06 14:32:49 +02002470 if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
Willy Tarreau0be0ef92009-03-08 19:20:25 +01002471 goto resync_request;
2472
Willy Tarreau8f128b42014-11-28 15:07:47 +01002473 if ((res->flags ^ rpf_last) & CF_MASK_STATIC)
Willy Tarreau0be0ef92009-03-08 19:20:25 +01002474 goto resync_response;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002475
Willy Tarreau829bd472019-06-06 09:17:23 +02002476 if (((req->flags ^ rqf_last) | (res->flags ^ rpf_last)) & CF_MASK_ANALYSER)
2477 goto resync_request;
2478
Willy Tarreau89f7ef22009-09-05 20:57:35 +02002479 /* we're interested in getting wakeups again */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002480 csf->flags &= ~CS_FL_DONT_WAKE;
2481 csb->flags &= ~CS_FL_DONT_WAKE;
Willy Tarreau89f7ef22009-09-05 20:57:35 +02002482
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002483 if (likely((csf->state != CS_ST_CLO) || !cs_state_in(csb->state, CS_SB_INI|CS_SB_CLO) ||
Christopher Faulet6fcd2d32019-11-13 11:12:32 +01002484 (req->analysers & AN_REQ_FLT_END) || (res->analysers & AN_RES_FLT_END))) {
Olivier Houchard4c18f942019-07-31 18:05:26 +02002485 if ((sess->fe->options & PR_O_CONTSTATS) && (s->flags & SF_BE_ASSIGNED) && !(s->flags & SF_IGNORE))
Willy Tarreau87b09662015-04-03 00:22:06 +02002486 stream_process_counters(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002487
Christopher Fauletef285c12022-04-01 14:48:06 +02002488 stream_update_both_cs(s);
Olivier Houchard53216e72018-10-10 15:46:36 +02002489
Willy Tarreau6f0a7ba2014-06-23 15:22:31 +02002490 /* Trick: if a request is being waiting for the server to respond,
2491 * and if we know the server can timeout, we don't want the timeout
2492 * to expire on the client side first, but we're still interested
2493 * in passing data from the client to the server (eg: POST). Thus,
2494 * we can cancel the client's request timeout if the server's
2495 * request timeout is set and the server has not yet sent a response.
2496 */
2497
Willy Tarreau8f128b42014-11-28 15:07:47 +01002498 if ((res->flags & (CF_AUTO_CLOSE|CF_SHUTR)) == 0 &&
2499 (tick_isset(req->wex) || tick_isset(res->rex))) {
2500 req->flags |= CF_READ_NOEXP;
2501 req->rex = TICK_ETERNITY;
Willy Tarreau6f0a7ba2014-06-23 15:22:31 +02002502 }
2503
Christopher Faulet9d810ca2016-12-08 22:33:52 +01002504 /* Reset pending events now */
2505 s->pending_events = 0;
2506
Willy Tarreau798f4322012-11-08 14:49:17 +01002507 update_exp_and_leave:
Christopher Faulet974da9f2022-03-30 15:30:03 +02002508 /* Note: please ensure that if you branch here you disable CS_FL_DONT_WAKE */
Christopher Fauleta00d8172016-11-10 14:58:05 +01002509 t->expire = tick_first((tick_is_expired(t->expire, now_ms) ? 0 : t->expire),
2510 tick_first(tick_first(req->rex, req->wex),
2511 tick_first(res->rex, res->wex)));
Willy Tarreaudef0d222016-11-08 22:03:00 +01002512 if (!req->analysers)
2513 req->analyse_exp = TICK_ETERNITY;
2514
2515 if ((sess->fe->options & PR_O_CONTSTATS) && (s->flags & SF_BE_ASSIGNED) &&
2516 (!tick_isset(req->analyse_exp) || tick_is_expired(req->analyse_exp, now_ms)))
2517 req->analyse_exp = tick_add(now_ms, 5000);
2518
2519 t->expire = tick_first(t->expire, req->analyse_exp);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002520
Willy Tarreau9a398be2017-11-10 17:14:23 +01002521 t->expire = tick_first(t->expire, res->analyse_exp);
2522
Christopher Fauletae024ce2022-03-29 19:02:31 +02002523 t->expire = tick_first(t->expire, s->conn_exp);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002524
Christopher Faulet9d810ca2016-12-08 22:33:52 +01002525 s->pending_events &= ~(TASK_WOKEN_TIMER | TASK_WOKEN_RES);
Willy Tarreau87b09662015-04-03 00:22:06 +02002526 stream_release_buffers(s);
Christopher Fauleteea8fc72019-11-05 16:18:10 +01002527
2528 DBG_TRACE_DEVEL("queuing", STRM_EV_STRM_PROC, s);
Willy Tarreau26c25062009-03-08 09:38:41 +01002529 return t; /* nothing more to do */
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002530 }
2531
Christopher Fauleteea8fc72019-11-05 16:18:10 +01002532 DBG_TRACE_DEVEL("releasing", STRM_EV_STRM_PROC, s);
2533
Willy Tarreaue7dff022015-04-03 01:14:29 +02002534 if (s->flags & SF_BE_ASSIGNED)
Willy Tarreau4781b152021-04-06 13:53:36 +02002535 _HA_ATOMIC_DEC(&s->be->beconn);
Willy Tarreau6f5e4b92017-09-15 09:07:56 +02002536
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002537 if (unlikely((global.mode & MODE_DEBUG) &&
2538 (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
Willy Tarreau19d14ef2012-10-29 16:51:55 +01002539 chunk_printf(&trash, "%08x:%s.closed[%04x:%04x]\n",
Christopher Faulet0256da12021-12-15 09:50:17 +01002540 s->uniq_id, s->be->id,
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002541 (unsigned short)conn_fd(cs_conn(csf)),
2542 (unsigned short)conn_fd(cs_conn(csb)));
Willy Tarreau2e8ab6b2020-03-14 11:03:20 +01002543 DISGUISE(write(1, trash.area, trash.data));
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002544 }
2545
Christopher Faulet341064e2021-01-21 17:10:44 +01002546 if (!(s->flags & SF_IGNORE)) {
2547 s->logs.t_close = tv_ms_elapsed(&s->logs.tv_accept, &now);
2548
Olivier Houchard4c18f942019-07-31 18:05:26 +02002549 stream_process_counters(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002550
Christopher Faulet341064e2021-01-21 17:10:44 +01002551 if (s->txn && s->txn->status) {
2552 int n;
Krzysztof Piotr Oledzkide71d162009-10-24 15:36:15 +02002553
Christopher Faulet341064e2021-01-21 17:10:44 +01002554 n = s->txn->status / 100;
2555 if (n < 1 || n > 5)
2556 n = 0;
Krzysztof Piotr Oledzkide71d162009-10-24 15:36:15 +02002557
Christopher Faulet341064e2021-01-21 17:10:44 +01002558 if (sess->fe->mode == PR_MODE_HTTP) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002559 _HA_ATOMIC_INC(&sess->fe->fe_counters.p.http.rsp[n]);
Christopher Faulet341064e2021-01-21 17:10:44 +01002560 }
2561 if ((s->flags & SF_BE_ASSIGNED) &&
2562 (s->be->mode == PR_MODE_HTTP)) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002563 _HA_ATOMIC_INC(&s->be->be_counters.p.http.rsp[n]);
2564 _HA_ATOMIC_INC(&s->be->be_counters.p.http.cum_req);
Christopher Faulet341064e2021-01-21 17:10:44 +01002565 }
Willy Tarreau5e16cbc2012-11-24 14:54:13 +01002566 }
Christopher Faulet341064e2021-01-21 17:10:44 +01002567
2568 /* let's do a final log if we need it */
2569 if (!LIST_ISEMPTY(&sess->fe->logformat) && s->logs.logwait &&
2570 !(s->flags & SF_MONITOR) &&
2571 (!(sess->fe->options & PR_O_NULLNOLOG) || req->total)) {
2572 /* we may need to know the position in the queue */
2573 pendconn_free(s);
2574 s->do_log(s);
Willy Tarreau5e16cbc2012-11-24 14:54:13 +01002575 }
Krzysztof Piotr Oledzkide71d162009-10-24 15:36:15 +02002576
Christopher Faulet341064e2021-01-21 17:10:44 +01002577 /* update time stats for this stream */
2578 stream_update_time_stats(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002579 }
2580
2581 /* the task MUST not be in the run queue anymore */
Willy Tarreau87b09662015-04-03 00:22:06 +02002582 stream_free(s);
Olivier Houchard3f795f72019-04-17 22:51:06 +02002583 task_destroy(t);
Willy Tarreau26c25062009-03-08 09:38:41 +01002584 return NULL;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002585}
2586
Willy Tarreau87b09662015-04-03 00:22:06 +02002587/* Update the stream's backend and server time stats */
2588void stream_update_time_stats(struct stream *s)
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002589{
2590 int t_request;
2591 int t_queue;
2592 int t_connect;
2593 int t_data;
2594 int t_close;
2595 struct server *srv;
Marcin Deraneka8dbdf32020-05-15 20:02:40 +02002596 unsigned int samples_window;
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002597
2598 t_request = 0;
2599 t_queue = s->logs.t_queue;
2600 t_connect = s->logs.t_connect;
2601 t_close = s->logs.t_close;
2602 t_data = s->logs.t_data;
2603
2604 if (s->be->mode != PR_MODE_HTTP)
2605 t_data = t_connect;
2606
2607 if (t_connect < 0 || t_data < 0)
2608 return;
2609
2610 if (tv_isge(&s->logs.tv_request, &s->logs.tv_accept))
2611 t_request = tv_ms_elapsed(&s->logs.tv_accept, &s->logs.tv_request);
2612
2613 t_data -= t_connect;
2614 t_connect -= t_queue;
2615 t_queue -= t_request;
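	/* Worked example (made-up figures, in ms since accept):
	 *   t_request=5, t_queue=15, t_connect=40, t_data=90, t_close=300
	 * After the three subtractions above this yields the per-phase times
	 * t_queue=10, t_connect=25 and t_data=50, which are the values fed to
	 * the sliding-window averages below.
	 */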
2616
2617 srv = objt_server(s->target);
2618 if (srv) {
Marcin Deraneka8dbdf32020-05-15 20:02:40 +02002619 samples_window = (((s->be->mode == PR_MODE_HTTP) ?
2620 srv->counters.p.http.cum_req : srv->counters.cum_lbconn) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
2621 swrate_add_dynamic(&srv->counters.q_time, samples_window, t_queue);
2622 swrate_add_dynamic(&srv->counters.c_time, samples_window, t_connect);
2623 swrate_add_dynamic(&srv->counters.d_time, samples_window, t_data);
2624 swrate_add_dynamic(&srv->counters.t_time, samples_window, t_close);
Christopher Fauletefb41f02019-11-08 14:53:15 +01002625 HA_ATOMIC_UPDATE_MAX(&srv->counters.qtime_max, t_queue);
2626 HA_ATOMIC_UPDATE_MAX(&srv->counters.ctime_max, t_connect);
2627 HA_ATOMIC_UPDATE_MAX(&srv->counters.dtime_max, t_data);
2628 HA_ATOMIC_UPDATE_MAX(&srv->counters.ttime_max, t_close);
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002629 }
Marcin Deraneka8dbdf32020-05-15 20:02:40 +02002630 samples_window = (((s->be->mode == PR_MODE_HTTP) ?
2631 s->be->be_counters.p.http.cum_req : s->be->be_counters.cum_lbconn) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
2632 swrate_add_dynamic(&s->be->be_counters.q_time, samples_window, t_queue);
2633 swrate_add_dynamic(&s->be->be_counters.c_time, samples_window, t_connect);
2634 swrate_add_dynamic(&s->be->be_counters.d_time, samples_window, t_data);
2635 swrate_add_dynamic(&s->be->be_counters.t_time, samples_window, t_close);
Christopher Fauletefb41f02019-11-08 14:53:15 +01002636 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.qtime_max, t_queue);
2637 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.ctime_max, t_connect);
2638 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.dtime_max, t_data);
2639 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.ttime_max, t_close);
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002640}
2641
Willy Tarreau7c669d72008-06-20 15:04:11 +02002642/*
2643 * This function adjusts sess->srv_conn and maintains the previous and new
Willy Tarreau87b09662015-04-03 00:22:06 +02002644 * servers' served stream counts. Setting newsrv to NULL is enough to release
Willy Tarreau7c669d72008-06-20 15:04:11 +02002645 * current connection slot. This function also notifies any LB algo which might
Willy Tarreau87b09662015-04-03 00:22:06 +02002646 * expect to be informed about any change in the number of active streams on a
Willy Tarreau7c669d72008-06-20 15:04:11 +02002647 * server.
2648 */
Willy Tarreaue89fae32021-03-09 15:43:32 +01002649void sess_change_server(struct stream *strm, struct server *newsrv)
Willy Tarreau7c669d72008-06-20 15:04:11 +02002650{
Willy Tarreaue89fae32021-03-09 15:43:32 +01002651 struct server *oldsrv = strm->srv_conn;
Willy Tarreau751153e2021-02-17 13:33:24 +01002652
2653 if (oldsrv == newsrv)
Willy Tarreau7c669d72008-06-20 15:04:11 +02002654 return;
2655
Willy Tarreau751153e2021-02-17 13:33:24 +01002656 if (oldsrv) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002657 _HA_ATOMIC_DEC(&oldsrv->served);
2658 _HA_ATOMIC_DEC(&oldsrv->proxy->served);
Olivier Houcharddc6111e2019-03-08 18:54:51 +01002659 __ha_barrier_atomic_store();
Willy Tarreau59b0fec2021-02-17 16:01:37 +01002660 if (oldsrv->proxy->lbprm.server_drop_conn)
Willy Tarreau5941ef02021-06-18 18:29:25 +02002661 oldsrv->proxy->lbprm.server_drop_conn(oldsrv);
Willy Tarreaue89fae32021-03-09 15:43:32 +01002662 stream_del_srv_conn(strm);
Willy Tarreau7c669d72008-06-20 15:04:11 +02002663 }
2664
2665 if (newsrv) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002666 _HA_ATOMIC_INC(&newsrv->served);
2667 _HA_ATOMIC_INC(&newsrv->proxy->served);
Olivier Houcharddc6111e2019-03-08 18:54:51 +01002668 __ha_barrier_atomic_store();
Willy Tarreau59b0fec2021-02-17 16:01:37 +01002669 if (newsrv->proxy->lbprm.server_take_conn)
Willy Tarreau5941ef02021-06-18 18:29:25 +02002670 newsrv->proxy->lbprm.server_take_conn(newsrv);
Willy Tarreaue89fae32021-03-09 15:43:32 +01002671 stream_add_srv_conn(strm, newsrv);
Willy Tarreau7c669d72008-06-20 15:04:11 +02002672 }
2673}
2674
Willy Tarreau84455332009-03-15 22:34:05 +01002675/* Handle server-side errors for default protocols. It is called whenever a
2676 * connection setup is aborted or a request is aborted in queue. It sets the
Willy Tarreau87b09662015-04-03 00:22:06 +02002677 * stream termination flags so that the caller does not have to worry about
Christopher Faulet0eb32c02022-04-04 11:06:31 +02002678 * them. It's installed as ->srv_error for the server-side conn_stream.
Willy Tarreau84455332009-03-15 22:34:05 +01002679 */
Christopher Faulet0eb32c02022-04-04 11:06:31 +02002680void default_srv_error(struct stream *s, struct conn_stream *cs)
Willy Tarreau84455332009-03-15 22:34:05 +01002681{
Christopher Faulet50264b42022-03-30 19:39:30 +02002682 int err_type = s->conn_err_type;
Willy Tarreau84455332009-03-15 22:34:05 +01002683 int err = 0, fin = 0;
2684
Christopher Faulet50264b42022-03-30 19:39:30 +02002685 if (err_type & STRM_ET_QUEUE_ABRT) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002686 err = SF_ERR_CLICL;
2687 fin = SF_FINST_Q;
Willy Tarreau84455332009-03-15 22:34:05 +01002688 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002689 else if (err_type & STRM_ET_CONN_ABRT) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002690 err = SF_ERR_CLICL;
2691 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002692 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002693 else if (err_type & STRM_ET_QUEUE_TO) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002694 err = SF_ERR_SRVTO;
2695 fin = SF_FINST_Q;
Willy Tarreau84455332009-03-15 22:34:05 +01002696 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002697 else if (err_type & STRM_ET_QUEUE_ERR) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002698 err = SF_ERR_SRVCL;
2699 fin = SF_FINST_Q;
Willy Tarreau84455332009-03-15 22:34:05 +01002700 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002701 else if (err_type & STRM_ET_CONN_TO) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002702 err = SF_ERR_SRVTO;
2703 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002704 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002705 else if (err_type & STRM_ET_CONN_ERR) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002706 err = SF_ERR_SRVCL;
2707 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002708 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002709 else if (err_type & STRM_ET_CONN_RES) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002710 err = SF_ERR_RESOURCE;
2711 fin = SF_FINST_C;
Willy Tarreau2d400bb2012-05-14 12:11:47 +02002712 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002713 else /* STRM_ET_CONN_OTHER and others */ {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002714 err = SF_ERR_INTERNAL;
2715 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002716 }
2717
Willy Tarreaue7dff022015-04-03 01:14:29 +02002718 if (!(s->flags & SF_ERR_MASK))
Willy Tarreau84455332009-03-15 22:34:05 +01002719 s->flags |= err;
Willy Tarreaue7dff022015-04-03 01:14:29 +02002720 if (!(s->flags & SF_FINST_MASK))
Willy Tarreau84455332009-03-15 22:34:05 +01002721 s->flags |= fin;
2722}
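/* Quick reference for the mapping implemented above (derived from the code,
 * shown as SF_ERR_* / SF_FINST_*):
 *   QUEUE_ABRT -> CLICL/Q   CONN_ABRT -> CLICL/C   QUEUE_TO -> SRVTO/Q
 *   QUEUE_ERR  -> SRVCL/Q   CONN_TO   -> SRVTO/C   CONN_ERR -> SRVCL/C
 *   CONN_RES   -> RESOURCE/C          other        -> INTERNAL/C
 */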
Willy Tarreau7c669d72008-06-20 15:04:11 +02002723
Willy Tarreaue7dff022015-04-03 01:14:29 +02002724/* kill a stream and set the termination flags to <why> (one of SF_ERR_*) */
Willy Tarreau87b09662015-04-03 00:22:06 +02002725void stream_shutdown(struct stream *stream, int why)
Willy Tarreaua2a64e92011-09-07 23:01:56 +02002726{
Willy Tarreau87b09662015-04-03 00:22:06 +02002727 if (stream->req.flags & (CF_SHUTW|CF_SHUTW_NOW))
Willy Tarreaua2a64e92011-09-07 23:01:56 +02002728 return;
2729
Willy Tarreau87b09662015-04-03 00:22:06 +02002730 channel_shutw_now(&stream->req);
2731 channel_shutr_now(&stream->res);
2732 stream->task->nice = 1024;
Willy Tarreaue7dff022015-04-03 01:14:29 +02002733 if (!(stream->flags & SF_ERR_MASK))
Willy Tarreau87b09662015-04-03 00:22:06 +02002734 stream->flags |= why;
2735 task_wakeup(stream->task, TASK_WOKEN_OTHER);
Willy Tarreaua2a64e92011-09-07 23:01:56 +02002736}
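/* Usage sketch (illustrative caller, not taken from this file): an
 * administrative kill would typically do something like
 *
 *     stream_shutdown(strm, SF_ERR_KILLED);
 *
 * which requests a shutdown on both channels and wakes the stream task up.
 */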
Willy Tarreau9ba2dcc2010-06-14 21:04:55 +02002737
Willy Tarreau5484d582019-05-22 09:33:03 +02002738/* Appends a dump of the state of stream <s> into buffer <buf> which must have
2739 * been prepared beforehand by its caller, with each line prepended by prefix
2740 * <pfx>, and each line terminated by character <eol>.
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002741 */
Willy Tarreau5484d582019-05-22 09:33:03 +02002742void stream_dump(struct buffer *buf, const struct stream *s, const char *pfx, char eol)
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002743{
2744 const struct conn_stream *csf, *csb;
2745 const struct connection *cof, *cob;
2746 const struct appctx *acf, *acb;
2747 const struct server *srv;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002748 const char *src = "unknown";
2749 const char *dst = "unknown";
2750 char pn[INET6_ADDRSTRLEN];
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002751 const struct channel *req, *res;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002752
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002753 if (!s) {
Willy Tarreau5484d582019-05-22 09:33:03 +02002754 chunk_appendf(buf, "%sstrm=%p%c", pfx, s, eol);
2755 return;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002756 }
2757
Willy Tarreau5484d582019-05-22 09:33:03 +02002758 if (s->obj_type != OBJ_TYPE_STREAM) {
2759 chunk_appendf(buf, "%sstrm=%p [invalid type=%d(%s)]%c",
2760 pfx, s, s->obj_type, obj_type_name(&s->obj_type), eol);
2761 return;
2762 }
2763
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002764 req = &s->req;
2765 res = &s->res;
2766
Christopher Faulet10c9c742022-03-01 15:16:57 +01002767 csf = s->csf;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002768 cof = cs_conn(csf);
Christopher Faulet13a35e52021-12-20 15:34:16 +01002769 acf = cs_appctx(csf);
Willy Tarreau71e34c12019-07-17 15:07:06 +02002770 if (cof && cof->src && addr_to_str(cof->src, pn, sizeof(pn)) >= 0)
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002771 src = pn;
2772 else if (acf)
2773 src = acf->applet->name;
2774
Christopher Faulet10c9c742022-03-01 15:16:57 +01002775 csb = s->csb;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002776 cob = cs_conn(csb);
Christopher Faulet13a35e52021-12-20 15:34:16 +01002777 acb = cs_appctx(csb);
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002778 srv = objt_server(s->target);
2779 if (srv)
2780 dst = srv->id;
2781 else if (acb)
2782 dst = acb->applet->name;
2783
Willy Tarreau5484d582019-05-22 09:33:03 +02002784 chunk_appendf(buf,
Christopher Faulete8f35962021-11-02 17:18:15 +01002785 "%sstrm=%p,%x src=%s fe=%s be=%s dst=%s%c"
2786 "%stxn=%p,%x txn.req=%s,%x txn.rsp=%s,%x%c"
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002787 "%srqf=%x rqa=%x rpf=%x rpa=%x%c"
Christopher Faulet62e75742022-03-31 09:16:34 +02002788 "%scsf=%p,%s,%x csb=%p,%s,%x%c"
Christopher Faulet13a35e52021-12-20 15:34:16 +01002789 "%saf=%p,%u sab=%p,%u%c"
Willy Tarreau5484d582019-05-22 09:33:03 +02002790 "%scof=%p,%x:%s(%p)/%s(%p)/%s(%d)%c"
2791 "%scob=%p,%x:%s(%p)/%s(%p)/%s(%d)%c"
2792 "",
Christopher Faulete8f35962021-11-02 17:18:15 +01002793 pfx, s, s->flags, src, s->sess->fe->id, s->be->id, dst, eol,
2794 pfx, s->txn, (s->txn ? s->txn->flags : 0),
2795 (s->txn ? h1_msg_state_str(s->txn->req.msg_state): "-"), (s->txn ? s->txn->req.flags : 0),
2796 (s->txn ? h1_msg_state_str(s->txn->rsp.msg_state): "-"), (s->txn ? s->txn->rsp.flags : 0), eol,
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002797 pfx, req->flags, req->analysers, res->flags, res->analysers, eol,
Christopher Faulet62e75742022-03-31 09:16:34 +02002798 pfx, csf, cs_state_str(csf->state), csf->flags, csb, cs_state_str(csb->state), csb->flags, eol,
Christopher Faulet13a35e52021-12-20 15:34:16 +01002799 pfx, acf, acf ? acf->st0 : 0, acb, acb ? acb->st0 : 0, eol,
Willy Tarreau5484d582019-05-22 09:33:03 +02002800 pfx, cof, cof ? cof->flags : 0, conn_get_mux_name(cof), cof?cof->ctx:0, conn_get_xprt_name(cof),
Willy Tarreaua57f3452022-04-11 17:58:06 +02002801 cof ? cof->xprt_ctx : 0, conn_get_ctrl_name(cof), conn_fd(cof), eol,
Willy Tarreau5484d582019-05-22 09:33:03 +02002802 pfx, cob, cob ? cob->flags : 0, conn_get_mux_name(cob), cob?cob->ctx:0, conn_get_xprt_name(cob),
Willy Tarreaua57f3452022-04-11 17:58:06 +02002803 cob ? cob->xprt_ctx : 0, conn_get_ctrl_name(cob), conn_fd(cob), eol);
Willy Tarreau5484d582019-05-22 09:33:03 +02002804}
2805
2806/* dumps an error message for the stream or appctx designated by <obj>,
Willy Tarreaub106ce12019-05-22 08:57:01 +02002807 * having reached loop rate <rate>, then aborts hoping to retrieve a core.
Willy Tarreau5484d582019-05-22 09:33:03 +02002808 */
2809void stream_dump_and_crash(enum obj_type *obj, int rate)
2810{
2811 const struct stream *s;
Willy Tarreau5484d582019-05-22 09:33:03 +02002812 char *msg = NULL;
2813 const void *ptr;
2814
2815 ptr = s = objt_stream(obj);
2816 if (!s) {
2817 const struct appctx *appctx = objt_appctx(obj);
2818 if (!appctx)
2819 return;
2820 ptr = appctx;
Christopher Faulet908628c2022-03-25 16:43:49 +01002821 s = __cs_strm(appctx->owner);
Willy Tarreau5484d582019-05-22 09:33:03 +02002822 if (!s)
2823 return;
2824 }
2825
Willy Tarreau5484d582019-05-22 09:33:03 +02002826 chunk_reset(&trash);
2827 stream_dump(&trash, s, "", ' ');
Willy Tarreau9753d612020-05-01 16:57:02 +02002828
2829 chunk_appendf(&trash, "filters={");
2830 if (HAS_FILTERS(s)) {
2831 struct filter *filter;
2832
2833 list_for_each_entry(filter, &s->strm_flt.filters, list) {
2834 if (filter->list.p != &s->strm_flt.filters)
2835 chunk_appendf(&trash, ", ");
2836 chunk_appendf(&trash, "%p=\"%s\"", filter, FLT_ID(filter));
2837 }
2838 }
2839 chunk_appendf(&trash, "}");
2840
Willy Tarreaub106ce12019-05-22 08:57:01 +02002841 memprintf(&msg,
2842 "A bogus %s [%p] is spinning at %d calls per second and refuses to die, "
2843 "aborting now! Please report this error to developers "
2844 "[%s]\n",
Willy Tarreau5484d582019-05-22 09:33:03 +02002845 obj_type_name(obj), ptr, rate, trash.area);
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002846
2847 ha_alert("%s", msg);
2848 send_log(NULL, LOG_EMERG, "%s", msg);
Willy Tarreau2f67e542021-03-02 19:19:41 +01002849 ABORT_NOW();
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002850}
2851
Willy Tarreaua698eb62021-02-24 10:37:01 +01002852/* initialize the required structures */
2853static void init_stream()
2854{
2855 int thr;
2856
2857 for (thr = 0; thr < MAX_THREADS; thr++)
Willy Tarreaub4e34762021-09-30 19:02:18 +02002858 LIST_INIT(&ha_thread_ctx[thr].streams);
Willy Tarreaua698eb62021-02-24 10:37:01 +01002859}
2860INITCALL0(STG_INIT, init_stream);
2861
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002862/* Generates a unique ID based on the given <format>, stores it in the given <strm> and
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002863 * returns the unique ID.
2864 *
2865 * If this function fails to allocate memory IST_NULL is returned.
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002866 *
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002867 * If an ID is already stored within the stream, nothing happens and the existing
2868 * unique ID is returned.
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002869 */
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002870struct ist stream_generate_unique_id(struct stream *strm, struct list *format)
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002871{
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002872 if (isttest(strm->unique_id)) {
2873 return strm->unique_id;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002874 }
2875 else {
2876 char *unique_id;
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002877 int length;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002878 if ((unique_id = pool_alloc(pool_head_uniqueid)) == NULL)
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002879 return IST_NULL;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002880
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002881 length = build_logline(strm, unique_id, UNIQUEID_LEN, format);
2882 strm->unique_id = ist2(unique_id, length);
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002883
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002884 return strm->unique_id;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002885 }
2886}
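/* Usage note (assumed directive name and tags, see the configuration manual):
 * the <format> list above is typically built from a line like
 *
 *     unique-id-format %{+X}o\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid
 *
 * and the resulting ID can then be logged or sent via unique-id-header.
 */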
2887
Willy Tarreau8b22a712010-06-18 17:46:06 +02002888/************************************************************************/
2889/* All supported ACL keywords must be declared here. */
2890/************************************************************************/
Christopher Faulet551a6412021-06-25 14:35:29 +02002891static enum act_return stream_action_set_log_level(struct act_rule *rule, struct proxy *px,
2892 struct session *sess, struct stream *s, int flags)
2893{
2894 s->logs.level = (uintptr_t)rule->arg.act.p[0];
2895 return ACT_RET_CONT;
2896}
2897
2898
2899/* Parse a "set-log-level" action. It takes the level value as argument. It
2900 * returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
2901 */
2902static enum act_parse_ret stream_parse_set_log_level(const char **args, int *cur_arg, struct proxy *px,
2903 struct act_rule *rule, char **err)
2904{
2905 int level;
2906
2907 if (!*args[*cur_arg]) {
2908 bad_log_level:
2909 memprintf(err, "expects exactly 1 argument (log level name or 'silent')");
2910 return ACT_RET_PRS_ERR;
2911 }
2912 if (strcmp(args[*cur_arg], "silent") == 0)
2913 level = -1;
2914 else if ((level = get_log_level(args[*cur_arg]) + 1) == 0)
2915 goto bad_log_level;
2916
2917 (*cur_arg)++;
2918
2919 /* Register processing function. */
2920 rule->action_ptr = stream_action_set_log_level;
2921 rule->action = ACT_CUSTOM;
2922 rule->arg.act.p[0] = (void *)(uintptr_t)level;
2923 return ACT_RET_PRS_OK;
2924}
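/* Configuration sketch (for illustration only): the parser above backs lines
 * such as the following, where the level is either "silent" or any syslog
 * level name accepted by get_log_level():
 *
 *	tcp-request content set-log-level silent if { src 10.0.0.0/8 }
 *	http-request set-log-level err
 */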
2925
Christopher Faulet1da374a2021-06-25 14:46:02 +02002926static enum act_return stream_action_set_nice(struct act_rule *rule, struct proxy *px,
2927 struct session *sess, struct stream *s, int flags)
2928{
2929 s->task->nice = (uintptr_t)rule->arg.act.p[0];
2930 return ACT_RET_CONT;
2931}
2932
2933
2934/* Parse a "set-nice" action. It takes the nice value as argument. It returns
2935 * ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
2936 */
2937static enum act_parse_ret stream_parse_set_nice(const char **args, int *cur_arg, struct proxy *px,
2938 struct act_rule *rule, char **err)
2939{
2940 int nice;
2941
2942 if (!*args[*cur_arg]) {
2944 memprintf(err, "expects exactly 1 argument (integer value)");
2945 return ACT_RET_PRS_ERR;
2946 }
2947
2948 nice = atoi(args[*cur_arg]);
2949 if (nice < -1024)
2950 nice = -1024;
2951 else if (nice > 1024)
2952 nice = 1024;
2953
2954 (*cur_arg)++;
2955
2956 /* Register processing function. */
2957 rule->action_ptr = stream_action_set_nice;
2958 rule->action = ACT_CUSTOM;
2959 rule->arg.act.p[0] = (void *)(uintptr_t)nice;
2960 return ACT_RET_PRS_OK;
2961}
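/* Configuration sketch (for illustration only): the nice value is clamped to
 * the range [-1024, 1024] by the parser above, so both lines below are
 * accepted:
 *
 *	http-request set-nice -100 if { path_beg /admin }
 *	tcp-request content set-nice 2000   # silently clamped to 1024
 */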
2962
Christopher Faulet551a6412021-06-25 14:35:29 +02002963
Christopher Fauletae863c62021-03-15 12:03:44 +01002964static enum act_return tcp_action_switch_stream_mode(struct act_rule *rule, struct proxy *px,
2965 struct session *sess, struct stream *s, int flags)
2966{
2967 enum pr_mode mode = (uintptr_t)rule->arg.act.p[0];
2968 const struct mux_proto_list *mux_proto = rule->arg.act.p[1];
2969
2970 if (!IS_HTX_STRM(s) && mode == PR_MODE_HTTP) {
2971 if (!stream_set_http_mode(s, mux_proto)) {
2972 channel_abort(&s->req);
2973 channel_abort(&s->res);
2974 return ACT_RET_ABRT;
2975 }
2976 }
2977 return ACT_RET_STOP;
2978}
2979
2980
2981static int check_tcp_switch_stream_mode(struct act_rule *rule, struct proxy *px, char **err)
2982{
2983 const struct mux_proto_list *mux_ent;
2984 const struct mux_proto_list *mux_proto = rule->arg.act.p[1];
2985 enum pr_mode pr_mode = (uintptr_t)rule->arg.act.p[0];
2986 enum proto_proxy_mode mode = (1 << (pr_mode == PR_MODE_HTTP));
2987
Christopher Faulet3b6446f2021-03-15 15:10:38 +01002988 if (pr_mode == PR_MODE_HTTP)
2989 px->options |= PR_O_HTTP_UPG;
2990
Christopher Fauletae863c62021-03-15 12:03:44 +01002991 if (mux_proto) {
2992 mux_ent = conn_get_best_mux_entry(mux_proto->token, PROTO_SIDE_FE, mode);
2993 if (!mux_ent || !isteq(mux_ent->token, mux_proto->token)) {
2994 memprintf(err, "MUX protocol '%.*s' is not compatible with the selected mode",
2995 (int)mux_proto->token.len, mux_proto->token.ptr);
2996 return 0;
2997 }
2998 }
2999 else {
3000 mux_ent = conn_get_best_mux_entry(IST_NULL, PROTO_SIDE_FE, mode);
3001 if (!mux_ent) {
3002 memprintf(err, "Unable to find compatible MUX protocol with the selected mode");
3003 return 0;
3004 }
3005 }
3006
3007 /* Update the mux */
3008 rule->arg.act.p[1] = (void *)mux_ent;
3009 return 1;
3010
3011}
3012
3013static enum act_parse_ret stream_parse_switch_mode(const char **args, int *cur_arg,
3014 struct proxy *px, struct act_rule *rule,
3015 char **err)
3016{
3017 const struct mux_proto_list *mux_proto = NULL;
3018 struct ist proto;
3019 enum pr_mode mode;
3020
3021 /* must have at least the mode */
3022 if (*(args[*cur_arg]) == 0) {
3023 memprintf(err, "'%s %s' expects a mode as argument.", args[0], args[*cur_arg-1]);
3024 return ACT_RET_PRS_ERR;
3025 }
3026
3027 if (!(px->cap & PR_CAP_FE)) {
3028 memprintf(err, "'%s %s' not allowed because %s '%s' has no frontend capability",
3029 args[0], args[*cur_arg-1], proxy_type_str(px), px->id);
3030 return ACT_RET_PRS_ERR;
3031 }
3032	/* Check the mode. For now "tcp" is disabled because downgrade is not
3033 * supported and PT is the only TCP mux.
3034 */
3035 if (strcmp(args[*cur_arg], "http") == 0)
3036 mode = PR_MODE_HTTP;
3037 else {
3038 memprintf(err, "'%s %s' expects a valid mode (got '%s').", args[0], args[*cur_arg-1], args[*cur_arg]);
3039 return ACT_RET_PRS_ERR;
3040 }
3041
3042 /* check the proto, if specified */
3043 if (*(args[*cur_arg+1]) && strcmp(args[*cur_arg+1], "proto") == 0) {
3044 if (*(args[*cur_arg+2]) == 0) {
3045 memprintf(err, "'%s %s': '%s' expects a protocol as argument.",
3046 args[0], args[*cur_arg-1], args[*cur_arg+1]);
3047 return ACT_RET_PRS_ERR;
3048 }
3049
Tim Duesterhusb113b5c2021-09-15 13:58:44 +02003050 proto = ist(args[*cur_arg + 2]);
Christopher Fauletae863c62021-03-15 12:03:44 +01003051 mux_proto = get_mux_proto(proto);
3052 if (!mux_proto) {
3053 memprintf(err, "'%s %s': '%s' expects a valid MUX protocol, if specified (got '%s')",
3054 args[0], args[*cur_arg-1], args[*cur_arg+1], args[*cur_arg+2]);
3055 return ACT_RET_PRS_ERR;
3056 }
3057 *cur_arg += 2;
3058 }
3059
3060 (*cur_arg)++;
3061
3062 /* Register processing function. */
3063 rule->action_ptr = tcp_action_switch_stream_mode;
3064 rule->check_ptr = check_tcp_switch_stream_mode;
3065 rule->action = ACT_CUSTOM;
3066 rule->arg.act.p[0] = (void *)(uintptr_t)mode;
3067 rule->arg.act.p[1] = (void *)mux_proto;
3068 return ACT_RET_PRS_OK;
3069}
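/* Configuration sketch (for illustration only): this parser handles the TCP
 * to HTTP upgrade action; the "proto" parameter is optional and is validated
 * against the frontend MUXes by check_tcp_switch_stream_mode() above:
 *
 *	tcp-request content switch-mode http if HTTP
 *	tcp-request content switch-mode http proto h2
 *
 * Only the "http" mode is accepted for now, as noted above.
 */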
Willy Tarreau8b22a712010-06-18 17:46:06 +02003070
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003071/* Parse a "use-service" action. It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error. */
3072static enum act_parse_ret stream_parse_use_service(const char **args, int *cur_arg,
3073 struct proxy *px, struct act_rule *rule,
3074 char **err)
3075{
3076 struct action_kw *kw;
3077
3078	/* Check that a service name was provided. */
3079 if (*(args[*cur_arg]) == 0) {
3080 memprintf(err, "'%s' expects a service name.", args[0]);
Thierry FOURNIER337eae12015-11-26 19:48:04 +01003081 return ACT_RET_PRS_ERR;
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003082 }
3083
3084	/* look up the keyword corresponding to this service. */
3085 kw = action_lookup(&service_keywords, args[*cur_arg]);
3086 if (!kw) {
3087		memprintf(err, "unknown service name '%s'.", args[*cur_arg]);
3088 return ACT_RET_PRS_ERR;
3089 }
3090 (*cur_arg)++;
3091
3092 /* executes specific rule parser. */
3093 rule->kw = kw;
3094 if (kw->parse((const char **)args, cur_arg, px, rule, err) == ACT_RET_PRS_ERR)
3095 return ACT_RET_PRS_ERR;
3096
3097 /* Register processing function. */
3098 rule->action_ptr = process_use_service;
3099 rule->action = ACT_CUSTOM;
3100
3101 return ACT_RET_PRS_OK;
3102}
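/* Configuration sketch (for illustration only): "use-service" hands the
 * stream over to a registered applet service. The service names below are
 * examples only and depend on what was registered at startup:
 *
 *	http-request use-service prometheus-exporter if { path /metrics }
 *	tcp-request content use-service lua.my_service
 *
 * The name must match a keyword known to the service_keywords list,
 * otherwise stream_parse_use_service() rejects the rule.
 */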
3103
3104void service_keywords_register(struct action_kw_list *kw_list)
3105{
Willy Tarreau2b718102021-04-21 07:32:39 +02003106 LIST_APPEND(&service_keywords, &kw_list->list);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003107}
3108
Thierry Fournier87e53992020-11-28 19:32:14 +01003109struct action_kw *service_find(const char *kw)
3110{
3111 return action_lookup(&service_keywords, kw);
3112}
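/* Registration sketch (hypothetical service "my-svc", not part of this file):
 * a subsystem exposes a service by appending its own keyword list, either
 * from an INITCALL or at init time, using the same action_kw_list layout as
 * the tables at the end of this file:
 *
 *	static struct action_kw_list my_service_kws = { ILH, {
 *		{ "my-svc", parse_my_svc_rule },
 *		{ NULL }
 *	}};
 *	INITCALL1(STG_REGISTER, service_keywords_register, &my_service_kws);
 *
 * The parse callback is then invoked by stream_parse_use_service() through
 * rule->kw->parse().
 */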
3113
Willy Tarreau5fcc1002022-03-29 15:10:44 +02003114/* Lists the known services on <out>. If <out> is null, emit them on stdout one
3115 * per line.
3116 */
Willy Tarreau679bba12019-03-19 08:08:10 +01003117void list_services(FILE *out)
3118{
Willy Tarreau3f0b2e82022-03-30 12:12:44 +02003119 const struct action_kw *akwp, *akwn;
Willy Tarreau679bba12019-03-19 08:08:10 +01003120 struct action_kw_list *kw_list;
3121 int found = 0;
3122 int i;
3123
Willy Tarreau5fcc1002022-03-29 15:10:44 +02003124 if (out)
3125 fprintf(out, "Available services :");
Willy Tarreau3f0b2e82022-03-30 12:12:44 +02003126
3127 for (akwn = akwp = NULL;; akwp = akwn) {
3128 list_for_each_entry(kw_list, &service_keywords, list) {
3129 for (i = 0; kw_list->kw[i].kw != NULL; i++) {
3130 if (strordered(akwp ? akwp->kw : NULL,
3131 kw_list->kw[i].kw,
3132 akwn != akwp ? akwn->kw : NULL))
3133 akwn = &kw_list->kw[i];
3134 found = 1;
3135 }
Willy Tarreau679bba12019-03-19 08:08:10 +01003136 }
Willy Tarreau3f0b2e82022-03-30 12:12:44 +02003137 if (akwn == akwp)
3138 break;
3139 if (out)
3140 fprintf(out, " %s", akwn->kw);
3141 else
3142 printf("%s\n", akwn->kw);
Willy Tarreau679bba12019-03-19 08:08:10 +01003143 }
Willy Tarreau5fcc1002022-03-29 15:10:44 +02003144 if (!found && out)
Willy Tarreau679bba12019-03-19 08:08:10 +01003145 fprintf(out, " none\n");
3146}
William Lallemand4c5b4d52016-11-21 08:51:11 +01003147
Willy Tarreau39f097d2022-05-03 10:49:00 +02003148/* appctx context used by the "show sess" command */
3149
3150struct show_sess_ctx {
3151 struct bref bref; /* back-reference from the session being dumped */
3152 void *target; /* session we want to dump, or NULL for all */
3153 unsigned int thr; /* the thread number being explored (0..MAX_THREADS-1) */
3154 unsigned int uid; /* if non-null, the uniq_id of the session being dumped */
3155 int section; /* section of the session being dumped */
3156 int pos; /* last position of the current session's buffer */
Willy Tarreau7fb591a2022-05-03 10:57:54 +02003157 enum {
Willy Tarreauf3629f82022-05-03 11:05:39 +02003158 STATE_LIST = 0,
Willy Tarreau7fb591a2022-05-03 10:57:54 +02003159 STATE_FIN,
3160 } state; /* dump state */
Willy Tarreau39f097d2022-05-03 10:49:00 +02003161};
3162
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +02003163/* This function dumps a complete stream state onto the conn-stream's
William Lallemand4c5b4d52016-11-21 08:51:11 +01003164 * read buffer. The stream has to be set in strm. It returns 0 if the output
3165 * buffer is full and it needs to be called again, otherwise non-zero. It is
3166 * designed to be called from cli_io_handler_dump_sess() below.
3167 */
Christopher Faulet908628c2022-03-25 16:43:49 +01003168static int stats_dump_full_strm_to_buffer(struct conn_stream *cs, struct stream *strm)
William Lallemand4c5b4d52016-11-21 08:51:11 +01003169{
Christopher Faulet908628c2022-03-25 16:43:49 +01003170 struct appctx *appctx = __cs_appctx(cs);
Willy Tarreau39f097d2022-05-03 10:49:00 +02003171 struct show_sess_ctx *ctx = appctx->svcctx;
Christopher Faulet908628c2022-03-25 16:43:49 +01003172 struct conn_stream *csf, *csb;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003173 struct tm tm;
3174 extern const char *monthname[12];
3175 char pn[INET6_ADDRSTRLEN];
3176 struct connection *conn;
3177 struct appctx *tmpctx;
3178
3179 chunk_reset(&trash);
3180
Willy Tarreau39f097d2022-05-03 10:49:00 +02003181 if (ctx->section > 0 && ctx->uid != strm->uniq_id) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003182 /* stream changed, no need to go any further */
3183 chunk_appendf(&trash, " *** session terminated while we were watching it ***\n");
Christopher Faulet908628c2022-03-25 16:43:49 +01003184 if (ci_putchk(cs_ic(cs), &trash) == -1)
Willy Tarreaue6e52362019-01-04 17:42:57 +01003185 goto full;
3186 goto done;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003187 }
3188
Willy Tarreau39f097d2022-05-03 10:49:00 +02003189 switch (ctx->section) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003190 case 0: /* main status of the stream */
Willy Tarreau39f097d2022-05-03 10:49:00 +02003191 ctx->uid = strm->uniq_id;
3192 ctx->section = 1;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003193 /* fall through */
3194
3195 case 1:
3196 get_localtime(strm->logs.accept_date.tv_sec, &tm);
3197 chunk_appendf(&trash,
3198 "%p: [%02d/%s/%04d:%02d:%02d:%02d.%06d] id=%u proto=%s",
3199 strm,
3200 tm.tm_mday, monthname[tm.tm_mon], tm.tm_year+1900,
3201 tm.tm_hour, tm.tm_min, tm.tm_sec, (int)(strm->logs.accept_date.tv_usec),
3202 strm->uniq_id,
Willy Tarreaub7436612020-08-28 19:51:44 +02003203 strm_li(strm) ? strm_li(strm)->rx.proto->name : "?");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003204
3205 conn = objt_conn(strm_orig(strm));
Willy Tarreau71e34c12019-07-17 15:07:06 +02003206 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003207 case AF_INET:
3208 case AF_INET6:
3209 chunk_appendf(&trash, " source=%s:%d\n",
Willy Tarreau71e34c12019-07-17 15:07:06 +02003210 pn, get_host_port(conn->src));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003211 break;
3212 case AF_UNIX:
3213 chunk_appendf(&trash, " source=unix:%d\n", strm_li(strm)->luid);
3214 break;
3215 default:
3216 /* no more information to print right now */
3217 chunk_appendf(&trash, "\n");
3218 break;
3219 }
3220
3221 chunk_appendf(&trash,
Christopher Faulet50264b42022-03-30 19:39:30 +02003222 " flags=0x%x, conn_retries=%d, conn_exp=%s conn_et=0x%03x srv_conn=%p, pend_pos=%p waiting=%d epoch=%#x\n",
Christopher Fauletae024ce2022-03-29 19:02:31 +02003223 strm->flags, strm->conn_retries,
3224 strm->conn_exp ?
3225 tick_is_expired(strm->conn_exp, now_ms) ? "<PAST>" :
3226 human_time(TICKS_TO_MS(strm->conn_exp - now_ms),
3227 TICKS_TO_MS(1000)) : "<NEVER>",
Christopher Faulet50264b42022-03-30 19:39:30 +02003228 strm->conn_err_type, strm->srv_conn, strm->pend_pos,
Willy Tarreau2b718102021-04-21 07:32:39 +02003229 LIST_INLIST(&strm->buffer_wait.list), strm->stream_epoch);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003230
3231 chunk_appendf(&trash,
3232 " frontend=%s (id=%u mode=%s), listener=%s (id=%u)",
William Lallemandb0dfd092022-03-08 12:05:31 +01003233 strm_fe(strm)->id, strm_fe(strm)->uuid, proxy_mode_str(strm_fe(strm)->mode),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003234 strm_li(strm) ? strm_li(strm)->name ? strm_li(strm)->name : "?" : "?",
3235 strm_li(strm) ? strm_li(strm)->luid : 0);
3236
Willy Tarreau71e34c12019-07-17 15:07:06 +02003237 switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003238 case AF_INET:
3239 case AF_INET6:
3240 chunk_appendf(&trash, " addr=%s:%d\n",
Willy Tarreau71e34c12019-07-17 15:07:06 +02003241 pn, get_host_port(conn->dst));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003242 break;
3243 case AF_UNIX:
3244 chunk_appendf(&trash, " addr=unix:%d\n", strm_li(strm)->luid);
3245 break;
3246 default:
3247 /* no more information to print right now */
3248 chunk_appendf(&trash, "\n");
3249 break;
3250 }
3251
3252 if (strm->be->cap & PR_CAP_BE)
3253 chunk_appendf(&trash,
3254 " backend=%s (id=%u mode=%s)",
3255 strm->be->id,
William Lallemandb0dfd092022-03-08 12:05:31 +01003256 strm->be->uuid, proxy_mode_str(strm->be->mode));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003257 else
3258 chunk_appendf(&trash, " backend=<NONE> (id=-1 mode=-)");
3259
Christopher Faulet95a61e82021-12-22 14:22:03 +01003260 conn = cs_conn(strm->csb);
Willy Tarreau71e34c12019-07-17 15:07:06 +02003261 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003262 case AF_INET:
3263 case AF_INET6:
3264 chunk_appendf(&trash, " addr=%s:%d\n",
Willy Tarreau71e34c12019-07-17 15:07:06 +02003265 pn, get_host_port(conn->src));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003266 break;
3267 case AF_UNIX:
3268 chunk_appendf(&trash, " addr=unix\n");
3269 break;
3270 default:
3271 /* no more information to print right now */
3272 chunk_appendf(&trash, "\n");
3273 break;
3274 }
3275
3276 if (strm->be->cap & PR_CAP_BE)
3277 chunk_appendf(&trash,
3278 " server=%s (id=%u)",
Willy Tarreau88bc8002021-12-06 07:01:02 +00003279 objt_server(strm->target) ? __objt_server(strm->target)->id : "<none>",
3280 objt_server(strm->target) ? __objt_server(strm->target)->puid : 0);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003281 else
3282 chunk_appendf(&trash, " server=<NONE> (id=-1)");
3283
Willy Tarreau71e34c12019-07-17 15:07:06 +02003284 switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003285 case AF_INET:
3286 case AF_INET6:
3287 chunk_appendf(&trash, " addr=%s:%d\n",
Willy Tarreau71e34c12019-07-17 15:07:06 +02003288 pn, get_host_port(conn->dst));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003289 break;
3290 case AF_UNIX:
3291 chunk_appendf(&trash, " addr=unix\n");
3292 break;
3293 default:
3294 /* no more information to print right now */
3295 chunk_appendf(&trash, "\n");
3296 break;
3297 }
3298
3299 chunk_appendf(&trash,
Willy Tarreau2e9c1d22019-04-24 08:28:31 +02003300 " task=%p (state=0x%02x nice=%d calls=%u rate=%u exp=%s tmask=0x%lx%s",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003301 strm->task,
3302 strm->task->state,
Willy Tarreau2e9c1d22019-04-24 08:28:31 +02003303 strm->task->nice, strm->task->calls, read_freq_ctr(&strm->call_rate),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003304 strm->task->expire ?
3305 tick_is_expired(strm->task->expire, now_ms) ? "<PAST>" :
3306 human_time(TICKS_TO_MS(strm->task->expire - now_ms),
3307 TICKS_TO_MS(1000)) : "<NEVER>",
Christopher Fauletf0205062017-11-15 20:56:43 +01003308 strm->task->thread_mask,
William Lallemand4c5b4d52016-11-21 08:51:11 +01003309 task_in_rq(strm->task) ? ", running" : "");
3310
3311 chunk_appendf(&trash,
3312 " age=%s)\n",
3313 human_time(now.tv_sec - strm->logs.accept_date.tv_sec, 1));
3314
3315 if (strm->txn)
3316 chunk_appendf(&trash,
Christopher Fauletbcac7862019-07-17 10:46:50 +02003317 " txn=%p flags=0x%x meth=%d status=%d req.st=%s rsp.st=%s req.f=0x%02x rsp.f=0x%02x\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003318 strm->txn, strm->txn->flags, strm->txn->meth, strm->txn->status,
Willy Tarreau7778b592019-01-07 10:38:10 +01003319 h1_msg_state_str(strm->txn->req.msg_state), h1_msg_state_str(strm->txn->rsp.msg_state),
Christopher Fauletbcac7862019-07-17 10:46:50 +02003320 strm->txn->req.flags, strm->txn->rsp.flags);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003321
Christopher Faulet908628c2022-03-25 16:43:49 +01003322 csf = strm->csf;
Christopher Faulet02642122022-04-19 10:35:22 +02003323 chunk_appendf(&trash, " csf=%p flags=0x%08x state=%s endp=%s,%p,0x%08x sub=%d\n",
Christopher Fauletc77ceb62022-04-04 11:08:42 +02003324 csf, csf->flags, cs_state_str(csf->state),
Christopher Faulet02642122022-04-19 10:35:22 +02003325 (csf->endp->flags & CS_EP_T_MUX ? "CONN" : (csf->endp->flags & CS_EP_T_APPLET ? "APPCTX" : "NONE")),
Christopher Fauletc77ceb62022-04-04 11:08:42 +02003326 csf->endp->target, csf->endp->flags, csf->wait_event.events);
Olivier Houchard9aaf7782017-09-13 18:30:23 +02003327
Christopher Faulet908628c2022-03-25 16:43:49 +01003328 if ((conn = cs_conn(csf)) != NULL) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003329 chunk_appendf(&trash,
Christopher Faulet13a35e52021-12-20 15:34:16 +01003330 " co0=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003331 conn,
3332 conn_get_ctrl_name(conn),
3333 conn_get_xprt_name(conn),
Willy Tarreau53a47662017-08-28 10:53:00 +02003334 conn_get_mux_name(conn),
Christopher Faulet908628c2022-03-25 16:43:49 +01003335 cs_get_data_name(csf),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003336 obj_type_name(conn->target),
3337 obj_base_ptr(conn->target));
3338
3339 chunk_appendf(&trash,
Willy Tarreau76913d32019-08-30 14:33:11 +02003340 " flags=0x%08x fd=%d fd.state=%02x updt=%d fd.tmask=0x%lx\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003341 conn->flags,
Willy Tarreaua57f3452022-04-11 17:58:06 +02003342 conn_fd(conn),
3343 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].state : 0,
3344 conn_fd(conn) >= 0 ? !!(fdtab[conn->handle.fd].update_mask & tid_bit) : 0,
3345 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].thread_mask: 0);
Willy Tarreaufb3b1b02018-12-18 14:28:24 +01003346
William Lallemand4c5b4d52016-11-21 08:51:11 +01003347 }
Christopher Faulet908628c2022-03-25 16:43:49 +01003348 else if ((tmpctx = cs_appctx(csf)) != NULL) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003349 chunk_appendf(&trash,
Christopher Faulet13a35e52021-12-20 15:34:16 +01003350 " app0=%p st0=%d st1=%d st2=%d applet=%s tmask=0x%lx nice=%d calls=%u rate=%u cpu=%llu lat=%llu\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003351 tmpctx,
3352 tmpctx->st0,
3353 tmpctx->st1,
3354 tmpctx->st2,
Christopher Fauletf0205062017-11-15 20:56:43 +01003355 tmpctx->applet->name,
Willy Tarreau4c6986a2021-07-13 18:01:46 +02003356 tmpctx->t->thread_mask,
Willy Tarreau22d63a22019-04-24 08:41:29 +02003357 tmpctx->t->nice, tmpctx->t->calls, read_freq_ctr(&tmpctx->call_rate),
Willy Tarreau9efd7452018-05-31 14:48:54 +02003358 (unsigned long long)tmpctx->t->cpu_time, (unsigned long long)tmpctx->t->lat_time);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003359 }
3360
Christopher Faulet908628c2022-03-25 16:43:49 +01003361 csb = strm->csb;
Christopher Faulet02642122022-04-19 10:35:22 +02003362 chunk_appendf(&trash, " csb=%p flags=0x%08x state=%s endp=%s,%p,0x%08x sub=%d\n",
Christopher Fauletc77ceb62022-04-04 11:08:42 +02003363 csb, csb->flags, cs_state_str(csb->state),
Christopher Faulet02642122022-04-19 10:35:22 +02003364 (csb->endp->flags & CS_EP_T_MUX ? "CONN" : (csb->endp->flags & CS_EP_T_APPLET ? "APPCTX" : "NONE")),
Christopher Fauletc77ceb62022-04-04 11:08:42 +02003365 csb->endp->target, csb->endp->flags, csb->wait_event.events);
Christopher Faulet908628c2022-03-25 16:43:49 +01003366 if ((conn = cs_conn(csb)) != NULL) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003367 chunk_appendf(&trash,
Christopher Faulet13a35e52021-12-20 15:34:16 +01003368 " co1=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003369 conn,
3370 conn_get_ctrl_name(conn),
3371 conn_get_xprt_name(conn),
Willy Tarreau53a47662017-08-28 10:53:00 +02003372 conn_get_mux_name(conn),
Christopher Faulet908628c2022-03-25 16:43:49 +01003373 cs_get_data_name(csb),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003374 obj_type_name(conn->target),
3375 obj_base_ptr(conn->target));
3376
3377 chunk_appendf(&trash,
Willy Tarreau76913d32019-08-30 14:33:11 +02003378 " flags=0x%08x fd=%d fd.state=%02x updt=%d fd.tmask=0x%lx\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003379 conn->flags,
Willy Tarreaua57f3452022-04-11 17:58:06 +02003380 conn_fd(conn),
3381 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].state : 0,
3382 conn_fd(conn) >= 0 ? !!(fdtab[conn->handle.fd].update_mask & tid_bit) : 0,
3383 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].thread_mask: 0);
Willy Tarreaufb3b1b02018-12-18 14:28:24 +01003384
William Lallemand4c5b4d52016-11-21 08:51:11 +01003385 }
Christopher Faulet908628c2022-03-25 16:43:49 +01003386 else if ((tmpctx = cs_appctx(csb)) != NULL) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003387 chunk_appendf(&trash,
Christopher Faulet13a35e52021-12-20 15:34:16 +01003388 " app1=%p st0=%d st1=%d st2=%d applet=%s tmask=0x%lx nice=%d calls=%u rate=%u cpu=%llu lat=%llu\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003389 tmpctx,
3390 tmpctx->st0,
3391 tmpctx->st1,
3392 tmpctx->st2,
Christopher Fauletf0205062017-11-15 20:56:43 +01003393 tmpctx->applet->name,
Willy Tarreau4c6986a2021-07-13 18:01:46 +02003394 tmpctx->t->thread_mask,
Willy Tarreau22d63a22019-04-24 08:41:29 +02003395 tmpctx->t->nice, tmpctx->t->calls, read_freq_ctr(&tmpctx->call_rate),
Willy Tarreau9efd7452018-05-31 14:48:54 +02003396 (unsigned long long)tmpctx->t->cpu_time, (unsigned long long)tmpctx->t->lat_time);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003397 }
3398
3399 chunk_appendf(&trash,
3400 " req=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
3401 " an_exp=%s",
3402 &strm->req,
3403 strm->req.flags, strm->req.analysers,
3404 strm->req.pipe ? strm->req.pipe->data : 0,
3405 strm->req.to_forward, strm->req.total,
3406 strm->req.analyse_exp ?
3407 human_time(TICKS_TO_MS(strm->req.analyse_exp - now_ms),
3408 TICKS_TO_MS(1000)) : "<NEVER>");
3409
3410 chunk_appendf(&trash,
3411 " rex=%s",
3412 strm->req.rex ?
3413 human_time(TICKS_TO_MS(strm->req.rex - now_ms),
3414 TICKS_TO_MS(1000)) : "<NEVER>");
3415
3416 chunk_appendf(&trash,
3417 " wex=%s\n"
Christopher Fauletbcac7862019-07-17 10:46:50 +02003418 " buf=%p data=%p o=%u p=%u i=%u size=%u\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003419 strm->req.wex ?
3420 human_time(TICKS_TO_MS(strm->req.wex - now_ms),
3421 TICKS_TO_MS(1000)) : "<NEVER>",
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003422 &strm->req.buf,
3423 b_orig(&strm->req.buf), (unsigned int)co_data(&strm->req),
Christopher Fauletbcac7862019-07-17 10:46:50 +02003424 (unsigned int)ci_head_ofs(&strm->req), (unsigned int)ci_data(&strm->req),
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003425 (unsigned int)strm->req.buf.size);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003426
Christopher Fauletb9af8812019-01-04 14:30:44 +01003427 if (IS_HTX_STRM(strm)) {
3428 struct htx *htx = htxbuf(&strm->req.buf);
3429
3430 chunk_appendf(&trash,
Willy Tarreaub84e67f2019-01-07 10:01:34 +01003431 " htx=%p flags=0x%x size=%u data=%u used=%u wrap=%s extra=%llu\n",
Christopher Faulet192c6a22019-06-11 16:32:24 +02003432 htx, htx->flags, htx->size, htx->data, htx_nbblks(htx),
Christopher Faulet28f29c72019-04-30 17:55:45 +02003433 (htx->tail >= htx->head) ? "NO" : "YES",
Christopher Fauletb9af8812019-01-04 14:30:44 +01003434 (unsigned long long)htx->extra);
3435 }
Christopher Fauletd4762b82021-10-12 11:02:48 +02003436 if (HAS_FILTERS(strm) && strm_flt(strm)->current[0]) {
3437 struct filter *flt = strm_flt(strm)->current[0];
3438
3439 chunk_appendf(&trash, " current_filter=%p (id=\"%s\" flags=0x%x pre=0x%x post=0x%x) \n",
3440 flt, flt->config->id, flt->flags, flt->pre_analyzers, flt->post_analyzers);
3441 }
Christopher Fauletb9af8812019-01-04 14:30:44 +01003442
William Lallemand4c5b4d52016-11-21 08:51:11 +01003443 chunk_appendf(&trash,
3444 " res=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
3445 " an_exp=%s",
3446 &strm->res,
3447 strm->res.flags, strm->res.analysers,
3448 strm->res.pipe ? strm->res.pipe->data : 0,
3449 strm->res.to_forward, strm->res.total,
3450 strm->res.analyse_exp ?
3451 human_time(TICKS_TO_MS(strm->res.analyse_exp - now_ms),
3452 TICKS_TO_MS(1000)) : "<NEVER>");
3453
3454 chunk_appendf(&trash,
3455 " rex=%s",
3456 strm->res.rex ?
3457 human_time(TICKS_TO_MS(strm->res.rex - now_ms),
3458 TICKS_TO_MS(1000)) : "<NEVER>");
3459
3460 chunk_appendf(&trash,
3461 " wex=%s\n"
Christopher Fauletbcac7862019-07-17 10:46:50 +02003462 " buf=%p data=%p o=%u p=%u i=%u size=%u\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003463 strm->res.wex ?
3464 human_time(TICKS_TO_MS(strm->res.wex - now_ms),
3465 TICKS_TO_MS(1000)) : "<NEVER>",
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003466 &strm->res.buf,
3467 b_orig(&strm->res.buf), (unsigned int)co_data(&strm->res),
Christopher Fauletbcac7862019-07-17 10:46:50 +02003468 (unsigned int)ci_head_ofs(&strm->res), (unsigned int)ci_data(&strm->res),
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003469 (unsigned int)strm->res.buf.size);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003470
Christopher Fauletb9af8812019-01-04 14:30:44 +01003471 if (IS_HTX_STRM(strm)) {
3472 struct htx *htx = htxbuf(&strm->res.buf);
3473
3474 chunk_appendf(&trash,
3475 " htx=%p flags=0x%x size=%u data=%u used=%u wrap=%s extra=%llu\n",
Christopher Faulet192c6a22019-06-11 16:32:24 +02003476 htx, htx->flags, htx->size, htx->data, htx_nbblks(htx),
Christopher Faulet28f29c72019-04-30 17:55:45 +02003477 (htx->tail >= htx->head) ? "NO" : "YES",
Christopher Fauletb9af8812019-01-04 14:30:44 +01003478 (unsigned long long)htx->extra);
3479 }
Christopher Fauletd4762b82021-10-12 11:02:48 +02003480 if (HAS_FILTERS(strm) && strm_flt(strm)->current[1]) {
3481 struct filter *flt = strm_flt(strm)->current[1];
3482
3483 chunk_appendf(&trash, " current_filter=%p (id=\"%s\" flags=0x%x pre=0x%x post=0x%x) \n",
3484 flt, flt->config->id, flt->flags, flt->pre_analyzers, flt->post_analyzers);
3485 }
Christopher Fauletb9af8812019-01-04 14:30:44 +01003486
Willy Tarreau1274e102021-10-11 09:49:03 +02003487 if (strm->current_rule_list && strm->current_rule) {
3488 const struct act_rule *rule = strm->current_rule;
Christopher Faulet8c67ece2021-10-12 11:10:31 +02003489 chunk_appendf(&trash, " current_rule=\"%s\" [%s:%d]\n", rule->kw->kw, rule->conf.file, rule->conf.line);
Willy Tarreau1274e102021-10-11 09:49:03 +02003490 }
3491
Christopher Faulet908628c2022-03-25 16:43:49 +01003492 if (ci_putchk(cs_ic(cs), &trash) == -1)
Willy Tarreaue6e52362019-01-04 17:42:57 +01003493 goto full;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003494
3495 /* use other states to dump the contents */
3496 }
3497 /* end of dump */
Willy Tarreaue6e52362019-01-04 17:42:57 +01003498 done:
Willy Tarreau39f097d2022-05-03 10:49:00 +02003499 ctx->uid = 0;
3500 ctx->section = 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003501 return 1;
Willy Tarreaue6e52362019-01-04 17:42:57 +01003502 full:
3503 return 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003504}
3505
Aurélien Nephtaliabbf6072018-04-18 13:26:46 +02003506static int cli_parse_show_sess(char **args, char *payload, struct appctx *appctx, void *private)
William Lallemand4c5b4d52016-11-21 08:51:11 +01003507{
Willy Tarreau39f097d2022-05-03 10:49:00 +02003508 struct show_sess_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
3509
William Lallemand4c5b4d52016-11-21 08:51:11 +01003510 if (!cli_has_level(appctx, ACCESS_LVL_OPER))
3511 return 1;
3512
3513 if (*args[2] && strcmp(args[2], "all") == 0)
Willy Tarreau39f097d2022-05-03 10:49:00 +02003514 ctx->target = (void *)-1;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003515 else if (*args[2])
Willy Tarreau39f097d2022-05-03 10:49:00 +02003516 ctx->target = (void *)strtoul(args[2], NULL, 0);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003517 else
Willy Tarreau39f097d2022-05-03 10:49:00 +02003518 ctx->target = NULL;
3519 ctx->section = 0; /* start with stream status */
3520 ctx->pos = 0;
3521 ctx->thr = 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003522
Willy Tarreauf3629f82022-05-03 11:05:39 +02003523	/* The back-ref must be reset; it will be detected and set by
3524 * the dump code upon first invocation.
3525 */
3526 LIST_INIT(&ctx->bref.users);
3527
Willy Tarreaub9813182021-02-24 11:29:51 +01003528 /* let's set our own stream's epoch to the current one and increment
3529 * it so that we know which streams were already there before us.
3530 */
Christopher Faulet908628c2022-03-25 16:43:49 +01003531 __cs_strm(appctx->owner)->stream_epoch = _HA_ATOMIC_FETCH_ADD(&stream_epoch, 1);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003532 return 0;
3533}
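/* CLI usage sketch (illustrative): on the stats socket the command accepts an
 * optional argument which becomes <target> above:
 *
 *	show sess                 -> one summary line per stream
 *	show sess all             -> full dump of every stream (target == -1)
 *	show sess 0x7f2a5c02a200  -> full dump of that single stream pointer
 *
 * The pointer form expects an address previously reported by "show sess".
 */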
3534
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +02003535/* This function dumps all streams' states onto the conn-stream's
William Lallemand4c5b4d52016-11-21 08:51:11 +01003536 * read buffer. It returns 0 if the output buffer is full and it needs
Willy Tarreaue6e52362019-01-04 17:42:57 +01003537 * to be called again, otherwise non-zero. It proceeds in an isolated
3538 * thread so there is no thread safety issue here.
William Lallemand4c5b4d52016-11-21 08:51:11 +01003539 */
3540static int cli_io_handler_dump_sess(struct appctx *appctx)
3541{
Willy Tarreau39f097d2022-05-03 10:49:00 +02003542 struct show_sess_ctx *ctx = appctx->svcctx;
Christopher Faulet908628c2022-03-25 16:43:49 +01003543 struct conn_stream *cs = appctx->owner;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003544 struct connection *conn;
3545
Willy Tarreaue6e52362019-01-04 17:42:57 +01003546 thread_isolate();
3547
Christopher Faulet908628c2022-03-25 16:43:49 +01003548 if (unlikely(cs_ic(cs)->flags & (CF_WRITE_ERROR|CF_SHUTW))) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003549 /* If we're forced to shut down, we might have to remove our
3550 * reference to the last stream being dumped.
3551 */
Willy Tarreauf3629f82022-05-03 11:05:39 +02003552 if (!LIST_ISEMPTY(&ctx->bref.users)) {
3553 LIST_DELETE(&ctx->bref.users);
3554 LIST_INIT(&ctx->bref.users);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003555 }
Willy Tarreaue6e52362019-01-04 17:42:57 +01003556 goto done;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003557 }
3558
3559 chunk_reset(&trash);
3560
Willy Tarreau7fb591a2022-05-03 10:57:54 +02003561 switch (ctx->state) {
Willy Tarreau7fb591a2022-05-03 10:57:54 +02003562 case STATE_LIST:
William Lallemand4c5b4d52016-11-21 08:51:11 +01003563 /* first, let's detach the back-ref from a possible previous stream */
Willy Tarreau39f097d2022-05-03 10:49:00 +02003564 if (!LIST_ISEMPTY(&ctx->bref.users)) {
3565 LIST_DELETE(&ctx->bref.users);
3566 LIST_INIT(&ctx->bref.users);
Willy Tarreauf3629f82022-05-03 11:05:39 +02003567 } else if (!ctx->bref.ref) {
3568 /* first call, start with first stream */
3569 ctx->bref.ref = ha_thread_ctx[ctx->thr].streams.n;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003570 }
3571
Willy Tarreau5d533e22021-02-24 11:53:17 +01003572 /* and start from where we stopped */
Willy Tarreaua698eb62021-02-24 10:37:01 +01003573 while (1) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003574 char pn[INET6_ADDRSTRLEN];
3575 struct stream *curr_strm;
Willy Tarreaua698eb62021-02-24 10:37:01 +01003576			int done = 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003577
Willy Tarreau39f097d2022-05-03 10:49:00 +02003578 if (ctx->bref.ref == &ha_thread_ctx[ctx->thr].streams)
Willy Tarreaua698eb62021-02-24 10:37:01 +01003579 done = 1;
3580 else {
3581 /* check if we've found a stream created after issuing the "show sess" */
Willy Tarreau39f097d2022-05-03 10:49:00 +02003582 curr_strm = LIST_ELEM(ctx->bref.ref, struct stream *, list);
Christopher Faulet908628c2022-03-25 16:43:49 +01003583 if ((int)(curr_strm->stream_epoch - __cs_strm(appctx->owner)->stream_epoch) > 0)
Willy Tarreaua698eb62021-02-24 10:37:01 +01003584 done = 1;
3585 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003586
Willy Tarreaua698eb62021-02-24 10:37:01 +01003587 if (done) {
Willy Tarreau39f097d2022-05-03 10:49:00 +02003588 ctx->thr++;
3589 if (ctx->thr >= global.nbthread)
Willy Tarreaua698eb62021-02-24 10:37:01 +01003590 break;
Willy Tarreau39f097d2022-05-03 10:49:00 +02003591 ctx->bref.ref = ha_thread_ctx[ctx->thr].streams.n;
Willy Tarreaua698eb62021-02-24 10:37:01 +01003592 continue;
3593 }
Willy Tarreau5d533e22021-02-24 11:53:17 +01003594
Willy Tarreau39f097d2022-05-03 10:49:00 +02003595 if (ctx->target) {
3596 if (ctx->target != (void *)-1 && ctx->target != curr_strm)
William Lallemand4c5b4d52016-11-21 08:51:11 +01003597 goto next_sess;
3598
Willy Tarreau39f097d2022-05-03 10:49:00 +02003599 LIST_APPEND(&curr_strm->back_refs, &ctx->bref.users);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003600 /* call the proper dump() function and return if we're missing space */
Christopher Faulet908628c2022-03-25 16:43:49 +01003601 if (!stats_dump_full_strm_to_buffer(cs, curr_strm))
Willy Tarreaue6e52362019-01-04 17:42:57 +01003602 goto full;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003603
3604 /* stream dump complete */
Willy Tarreau39f097d2022-05-03 10:49:00 +02003605 LIST_DELETE(&ctx->bref.users);
3606 LIST_INIT(&ctx->bref.users);
3607 if (ctx->target != (void *)-1) {
3608 ctx->target = NULL;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003609 break;
3610 }
3611 else
3612 goto next_sess;
3613 }
3614
3615 chunk_appendf(&trash,
3616 "%p: proto=%s",
3617 curr_strm,
Willy Tarreaub7436612020-08-28 19:51:44 +02003618 strm_li(curr_strm) ? strm_li(curr_strm)->rx.proto->name : "?");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003619
3620 conn = objt_conn(strm_orig(curr_strm));
Willy Tarreau71e34c12019-07-17 15:07:06 +02003621 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003622 case AF_INET:
3623 case AF_INET6:
3624 chunk_appendf(&trash,
3625 " src=%s:%d fe=%s be=%s srv=%s",
3626 pn,
Willy Tarreau71e34c12019-07-17 15:07:06 +02003627 get_host_port(conn->src),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003628 strm_fe(curr_strm)->id,
3629 (curr_strm->be->cap & PR_CAP_BE) ? curr_strm->be->id : "<NONE>",
Willy Tarreau88bc8002021-12-06 07:01:02 +00003630 objt_server(curr_strm->target) ? __objt_server(curr_strm->target)->id : "<none>"
William Lallemand4c5b4d52016-11-21 08:51:11 +01003631 );
3632 break;
3633 case AF_UNIX:
3634 chunk_appendf(&trash,
3635 " src=unix:%d fe=%s be=%s srv=%s",
3636 strm_li(curr_strm)->luid,
3637 strm_fe(curr_strm)->id,
3638 (curr_strm->be->cap & PR_CAP_BE) ? curr_strm->be->id : "<NONE>",
Willy Tarreau88bc8002021-12-06 07:01:02 +00003639 objt_server(curr_strm->target) ? __objt_server(curr_strm->target)->id : "<none>"
William Lallemand4c5b4d52016-11-21 08:51:11 +01003640 );
3641 break;
3642 }
3643
3644 chunk_appendf(&trash,
Willy Tarreaub9813182021-02-24 11:29:51 +01003645 " ts=%02x epoch=%#x age=%s calls=%u rate=%u cpu=%llu lat=%llu",
3646 curr_strm->task->state, curr_strm->stream_epoch,
William Lallemand4c5b4d52016-11-21 08:51:11 +01003647 human_time(now.tv_sec - curr_strm->logs.tv_accept.tv_sec, 1),
Willy Tarreau2e9c1d22019-04-24 08:28:31 +02003648 curr_strm->task->calls, read_freq_ctr(&curr_strm->call_rate),
Willy Tarreau9efd7452018-05-31 14:48:54 +02003649 (unsigned long long)curr_strm->task->cpu_time, (unsigned long long)curr_strm->task->lat_time);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003650
3651 chunk_appendf(&trash,
Willy Tarreau506a29a2018-07-18 10:07:58 +02003652 " rq[f=%06xh,i=%u,an=%02xh,rx=%s",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003653 curr_strm->req.flags,
Willy Tarreau7e9c30a2018-06-15 19:24:46 +02003654 (unsigned int)ci_data(&curr_strm->req),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003655 curr_strm->req.analysers,
3656 curr_strm->req.rex ?
3657 human_time(TICKS_TO_MS(curr_strm->req.rex - now_ms),
3658 TICKS_TO_MS(1000)) : "");
3659
3660 chunk_appendf(&trash,
3661 ",wx=%s",
3662 curr_strm->req.wex ?
3663 human_time(TICKS_TO_MS(curr_strm->req.wex - now_ms),
3664 TICKS_TO_MS(1000)) : "");
3665
3666 chunk_appendf(&trash,
3667 ",ax=%s]",
3668 curr_strm->req.analyse_exp ?
3669 human_time(TICKS_TO_MS(curr_strm->req.analyse_exp - now_ms),
3670 TICKS_TO_MS(1000)) : "");
3671
3672 chunk_appendf(&trash,
Willy Tarreau506a29a2018-07-18 10:07:58 +02003673 " rp[f=%06xh,i=%u,an=%02xh,rx=%s",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003674 curr_strm->res.flags,
Willy Tarreau7e9c30a2018-06-15 19:24:46 +02003675 (unsigned int)ci_data(&curr_strm->res),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003676 curr_strm->res.analysers,
3677 curr_strm->res.rex ?
3678 human_time(TICKS_TO_MS(curr_strm->res.rex - now_ms),
3679 TICKS_TO_MS(1000)) : "");
3680
3681 chunk_appendf(&trash,
3682 ",wx=%s",
3683 curr_strm->res.wex ?
3684 human_time(TICKS_TO_MS(curr_strm->res.wex - now_ms),
3685 TICKS_TO_MS(1000)) : "");
3686
3687 chunk_appendf(&trash,
3688 ",ax=%s]",
3689 curr_strm->res.analyse_exp ?
3690 human_time(TICKS_TO_MS(curr_strm->res.analyse_exp - now_ms),
3691 TICKS_TO_MS(1000)) : "");
3692
Christopher Faulet95a61e82021-12-22 14:22:03 +01003693 conn = cs_conn(curr_strm->csf);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003694 chunk_appendf(&trash,
Christopher Fauletc77ceb62022-04-04 11:08:42 +02003695 " csf=[%d,%1xh,fd=%d]",
3696 curr_strm->csf->state,
3697 curr_strm->csf->flags,
Christopher Fauletae024ce2022-03-29 19:02:31 +02003698 conn_fd(conn));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003699
Christopher Faulet95a61e82021-12-22 14:22:03 +01003700 conn = cs_conn(curr_strm->csb);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003701 chunk_appendf(&trash,
Christopher Fauletc77ceb62022-04-04 11:08:42 +02003702 " csb=[%d,%1xh,fd=%d]",
3703 curr_strm->csb->state,
3704 curr_strm->csb->flags,
Christopher Fauletae024ce2022-03-29 19:02:31 +02003705 conn_fd(conn));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003706
3707 chunk_appendf(&trash,
Christopher Fauletae024ce2022-03-29 19:02:31 +02003708 " exp=%s rc=%d c_exp=%s",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003709 curr_strm->task->expire ?
3710 human_time(TICKS_TO_MS(curr_strm->task->expire - now_ms),
Christopher Fauletae024ce2022-03-29 19:02:31 +02003711 TICKS_TO_MS(1000)) : "",
3712 curr_strm->conn_retries,
3713 curr_strm->conn_exp ?
3714 human_time(TICKS_TO_MS(curr_strm->conn_exp - now_ms),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003715 TICKS_TO_MS(1000)) : "");
3716 if (task_in_rq(curr_strm->task))
3717 chunk_appendf(&trash, " run(nice=%d)", curr_strm->task->nice);
3718
3719 chunk_appendf(&trash, "\n");
3720
Christopher Faulet908628c2022-03-25 16:43:49 +01003721 if (ci_putchk(cs_ic(cs), &trash) == -1) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003722 /* let's try again later from this stream. We add ourselves into
3723 * this stream's users so that it can remove us upon termination.
3724 */
Willy Tarreau39f097d2022-05-03 10:49:00 +02003725 LIST_APPEND(&curr_strm->back_refs, &ctx->bref.users);
Willy Tarreaue6e52362019-01-04 17:42:57 +01003726 goto full;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003727 }
3728
3729 next_sess:
Willy Tarreau39f097d2022-05-03 10:49:00 +02003730 ctx->bref.ref = curr_strm->list.n;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003731 }
3732
Willy Tarreau39f097d2022-05-03 10:49:00 +02003733 if (ctx->target && ctx->target != (void *)-1) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003734 /* specified stream not found */
Willy Tarreau39f097d2022-05-03 10:49:00 +02003735 if (ctx->section > 0)
William Lallemand4c5b4d52016-11-21 08:51:11 +01003736 chunk_appendf(&trash, " *** session terminated while we were watching it ***\n");
3737 else
3738 chunk_appendf(&trash, "Session not found.\n");
3739
Christopher Faulet908628c2022-03-25 16:43:49 +01003740 if (ci_putchk(cs_ic(cs), &trash) == -1)
Willy Tarreaue6e52362019-01-04 17:42:57 +01003741 goto full;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003742
Willy Tarreau39f097d2022-05-03 10:49:00 +02003743 ctx->target = NULL;
3744 ctx->uid = 0;
Willy Tarreaue6e52362019-01-04 17:42:57 +01003745 goto done;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003746 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003747 /* fall through */
3748
3749 default:
Willy Tarreau7fb591a2022-05-03 10:57:54 +02003750 ctx->state = STATE_FIN;
Willy Tarreaue6e52362019-01-04 17:42:57 +01003751 goto done;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003752 }
Willy Tarreaue6e52362019-01-04 17:42:57 +01003753 done:
3754 thread_release();
3755 return 1;
3756 full:
3757 thread_release();
Christopher Fauleta0bdec32022-04-04 07:51:21 +02003758 cs_rx_room_blk(cs);
Willy Tarreaue6e52362019-01-04 17:42:57 +01003759 return 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003760}
3761
3762static void cli_release_show_sess(struct appctx *appctx)
3763{
Willy Tarreau39f097d2022-05-03 10:49:00 +02003764 struct show_sess_ctx *ctx = appctx->svcctx;
3765
Willy Tarreau7fb591a2022-05-03 10:57:54 +02003766 if (ctx->state == STATE_LIST && ctx->thr < global.nbthread) {
Willy Tarreau49de6852021-02-24 13:46:12 +01003767 /* a dump was aborted, either in error or timeout. We need to
3768 * safely detach from the target stream's list. It's mandatory
3769 * to lock because a stream on the target thread could be moving
3770 * our node.
3771 */
3772 thread_isolate();
Willy Tarreau39f097d2022-05-03 10:49:00 +02003773 if (!LIST_ISEMPTY(&ctx->bref.users))
3774 LIST_DELETE(&ctx->bref.users);
Willy Tarreau49de6852021-02-24 13:46:12 +01003775 thread_release();
William Lallemand4c5b4d52016-11-21 08:51:11 +01003776 }
3777}
3778
Willy Tarreau61b65212016-11-24 11:09:25 +01003779/* Parses the "shutdown session" directive; it always returns 1 */
Aurélien Nephtaliabbf6072018-04-18 13:26:46 +02003780static int cli_parse_shutdown_session(char **args, char *payload, struct appctx *appctx, void *private)
Willy Tarreau61b65212016-11-24 11:09:25 +01003781{
3782 struct stream *strm, *ptr;
Willy Tarreaua698eb62021-02-24 10:37:01 +01003783 int thr;
Willy Tarreau61b65212016-11-24 11:09:25 +01003784
3785 if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
3786 return 1;
3787
Willy Tarreauc40c4072022-03-31 14:49:45 +02003788 ptr = (void *)strtoul(args[2], NULL, 0);
3789 if (!ptr)
Willy Tarreau9d008692019-08-09 11:21:01 +02003790 return cli_err(appctx, "Session pointer expected (use 'show sess').\n");
Willy Tarreau61b65212016-11-24 11:09:25 +01003791
Willy Tarreaua698eb62021-02-24 10:37:01 +01003792 strm = NULL;
Willy Tarreau61b65212016-11-24 11:09:25 +01003793
Willy Tarreau3f5dd292021-02-24 11:11:06 +01003794 thread_isolate();
3795
Willy Tarreau61b65212016-11-24 11:09:25 +01003796 /* first, look for the requested stream in the stream table */
Willy Tarreauc40c4072022-03-31 14:49:45 +02003797 for (thr = 0; strm != ptr && thr < global.nbthread; thr++) {
Willy Tarreaub4e34762021-09-30 19:02:18 +02003798 list_for_each_entry(strm, &ha_thread_ctx[thr].streams, list) {
Willy Tarreaua698eb62021-02-24 10:37:01 +01003799 if (strm == ptr) {
3800 stream_shutdown(strm, SF_ERR_KILLED);
3801 break;
3802 }
Willy Tarreau3f5dd292021-02-24 11:11:06 +01003803 }
Willy Tarreau61b65212016-11-24 11:09:25 +01003804 }
3805
Willy Tarreau3f5dd292021-02-24 11:11:06 +01003806 thread_release();
3807
Willy Tarreau61b65212016-11-24 11:09:25 +01003808 /* do we have the stream ? */
Willy Tarreauc40c4072022-03-31 14:49:45 +02003809 if (strm != ptr)
Willy Tarreau9d008692019-08-09 11:21:01 +02003810 return cli_err(appctx, "No such session (use 'show sess').\n");
Willy Tarreau61b65212016-11-24 11:09:25 +01003811
Willy Tarreau61b65212016-11-24 11:09:25 +01003812 return 1;
3813}
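/* CLI usage sketch (illustrative): the argument is a stream pointer taken
 * from a prior "show sess" listing, e.g.:
 *
 *	shutdown session 0x7f2a5c02a200
 *
 * The stream is only killed if the pointer is still present in one of the
 * per-thread stream lists, which protects against stale addresses.
 */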
3814
Willy Tarreau4e46b622016-11-23 16:50:48 +01003815/* Parses the "shutdown sessions server" directive; it always returns 1 */
Aurélien Nephtaliabbf6072018-04-18 13:26:46 +02003816static int cli_parse_shutdown_sessions_server(char **args, char *payload, struct appctx *appctx, void *private)
Willy Tarreau4e46b622016-11-23 16:50:48 +01003817{
3818 struct server *sv;
Willy Tarreau4e46b622016-11-23 16:50:48 +01003819
3820 if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
3821 return 1;
3822
3823 sv = cli_find_server(appctx, args[3]);
3824 if (!sv)
3825 return 1;
3826
3827 /* kill all the stream that are on this server */
Christopher Faulet2a944ee2017-11-07 10:42:54 +01003828 HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
Willy Tarreaud9e26a72019-11-14 16:37:16 +01003829 srv_shutdown_streams(sv, SF_ERR_KILLED);
Christopher Faulet2a944ee2017-11-07 10:42:54 +01003830 HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
Willy Tarreau4e46b622016-11-23 16:50:48 +01003831 return 1;
3832}
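/* CLI usage sketch (illustrative):
 *
 *	shutdown sessions server be_app/s1
 *
 * cli_find_server() resolves the <backend>/<server> argument and
 * srv_shutdown_streams() then kills every stream attached to that server.
 */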
3833
William Lallemand4c5b4d52016-11-21 08:51:11 +01003834/* register cli keywords */
3835static struct cli_kw_list cli_kws = {{ },{
Willy Tarreaub205bfd2021-05-07 11:38:37 +02003836 { { "show", "sess", NULL }, "show sess [id] : report the list of current sessions or dump this exact session", cli_parse_show_sess, cli_io_handler_dump_sess, cli_release_show_sess },
3837 { { "shutdown", "session", NULL }, "shutdown session [id] : kill a specific session", cli_parse_shutdown_session, NULL, NULL },
3838 { { "shutdown", "sessions", "server" }, "shutdown sessions server <bk>/<srv> : kill sessions on a server", cli_parse_shutdown_sessions_server, NULL, NULL },
William Lallemand4c5b4d52016-11-21 08:51:11 +01003839 {{},}
3840}};
3841
Willy Tarreau0108d902018-11-25 19:14:37 +01003842INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
3843
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003844/* main configuration keyword registration. */
Christopher Faulet551a6412021-06-25 14:35:29 +02003845static struct action_kw_list stream_tcp_req_keywords = { ILH, {
3846 { "set-log-level", stream_parse_set_log_level },
Christopher Faulet1da374a2021-06-25 14:46:02 +02003847 { "set-nice", stream_parse_set_nice },
Christopher Faulet551a6412021-06-25 14:35:29 +02003848 { "switch-mode", stream_parse_switch_mode },
3849 { "use-service", stream_parse_use_service },
3850 { /* END */ }
3851}};
3852
3853INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &stream_tcp_req_keywords);
3854
3855/* main configuration keyword registration. */
3856static struct action_kw_list stream_tcp_res_keywords = { ILH, {
3857 { "set-log-level", stream_parse_set_log_level },
Christopher Faulet1da374a2021-06-25 14:46:02 +02003858 { "set-nice", stream_parse_set_nice },
Christopher Faulet551a6412021-06-25 14:35:29 +02003859 { /* END */ }
3860}};
3861
3862INITCALL1(STG_REGISTER, tcp_res_cont_keywords_register, &stream_tcp_res_keywords);
3863
3864static struct action_kw_list stream_http_req_keywords = { ILH, {
3865 { "set-log-level", stream_parse_set_log_level },
Christopher Faulet1da374a2021-06-25 14:46:02 +02003866 { "set-nice", stream_parse_set_nice },
Christopher Faulet551a6412021-06-25 14:35:29 +02003867 { "use-service", stream_parse_use_service },
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003868 { /* END */ }
3869}};
3870
Christopher Faulet551a6412021-06-25 14:35:29 +02003871INITCALL1(STG_REGISTER, http_req_keywords_register, &stream_http_req_keywords);
Willy Tarreau0108d902018-11-25 19:14:37 +01003872
Christopher Faulet551a6412021-06-25 14:35:29 +02003873static struct action_kw_list stream_http_res_keywords = { ILH, {
3874 { "set-log-level", stream_parse_set_log_level },
Christopher Faulet1da374a2021-06-25 14:46:02 +02003875 { "set-nice", stream_parse_set_nice },
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003876 { /* END */ }
3877}};
3878
Christopher Faulet551a6412021-06-25 14:35:29 +02003879INITCALL1(STG_REGISTER, http_res_keywords_register, &stream_http_res_keywords);
Willy Tarreau8b22a712010-06-18 17:46:06 +02003880
Amaury Denoyellef7719a22020-12-10 13:43:58 +01003881static int smp_fetch_cur_server_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
3882{
3883 smp->flags = SMP_F_VOL_TXN;
3884 smp->data.type = SMP_T_SINT;
3885 if (!smp->strm)
3886 return 0;
3887
3888 smp->data.u.sint = TICKS_TO_MS(smp->strm->res.rto);
3889 return 1;
3890}
3891
3892static int smp_fetch_cur_tunnel_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
3893{
3894 smp->flags = SMP_F_VOL_TXN;
3895 smp->data.type = SMP_T_SINT;
3896 if (!smp->strm)
3897 return 0;
3898
3899 smp->data.u.sint = TICKS_TO_MS(smp->strm->tunnel_timeout);
3900 return 1;
3901}
3902
Willy Tarreau0657b932022-03-09 17:33:05 +01003903static int smp_fetch_last_rule_file(const struct arg *args, struct sample *smp, const char *km, void *private)
3904{
3905 smp->flags = SMP_F_VOL_TXN;
3906 smp->data.type = SMP_T_STR;
3907 if (!smp->strm || !smp->strm->last_rule_file)
3908 return 0;
3909
3910 smp->flags |= SMP_F_CONST;
3911 smp->data.u.str.area = (char *)smp->strm->last_rule_file;
3912 smp->data.u.str.data = strlen(smp->strm->last_rule_file);
3913 return 1;
3914}
3915
3916static int smp_fetch_last_rule_line(const struct arg *args, struct sample *smp, const char *km, void *private)
3917{
3918 smp->flags = SMP_F_VOL_TXN;
3919 smp->data.type = SMP_T_SINT;
3920 if (!smp->strm || !smp->strm->last_rule_line)
3921 return 0;
3922
3923 smp->data.u.sint = smp->strm->last_rule_line;
3924 return 1;
3925}
3926
Amaury Denoyelle12bada52020-12-10 13:43:57 +01003927/* Note: must not be declared <const> as its list will be overwritten.
3928 * Please take care of keeping this list alphabetically sorted.
3929 */
3930static struct sample_fetch_kw_list smp_kws = {ILH, {
Amaury Denoyellef7719a22020-12-10 13:43:58 +01003931 { "cur_server_timeout", smp_fetch_cur_server_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
3932 { "cur_tunnel_timeout", smp_fetch_cur_tunnel_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
Willy Tarreau0657b932022-03-09 17:33:05 +01003933 { "last_rule_file", smp_fetch_last_rule_file, 0, NULL, SMP_T_STR, SMP_USE_INTRN, },
3934 { "last_rule_line", smp_fetch_last_rule_line, 0, NULL, SMP_T_SINT, SMP_USE_INTRN, },
Amaury Denoyelle12bada52020-12-10 13:43:57 +01003935 { NULL, NULL, 0, 0, 0 },
3936}};
3937
3938INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);
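/* Configuration sketch (for illustration only): these fetches can be used in
 * any sample expression, for instance to expose the last evaluated rule in a
 * response header or to store the current server timeout in a variable:
 *
 *	http-response set-header X-Last-Rule %[last_rule_file]:%[last_rule_line]
 *	http-request set-var(txn.srv_to) cur_server_timeout
 */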
3939
Willy Tarreaubaaee002006-06-26 02:48:02 +02003940/*
3941 * Local variables:
3942 * c-indent-level: 8
3943 * c-basic-offset: 8
3944 * End:
3945 */