/*
 * Stream management functions.
 *
 * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <stdlib.h>
#include <unistd.h>

#include <import/ebistree.h>

#include <haproxy/acl.h>
#include <haproxy/action.h>
#include <haproxy/activity.h>
#include <haproxy/api.h>
#include <haproxy/applet.h>
#include <haproxy/arg.h>
#include <haproxy/backend.h>
#include <haproxy/capture.h>
#include <haproxy/cfgparse.h>
#include <haproxy/channel.h>
#include <haproxy/check.h>
#include <haproxy/cli.h>
#include <haproxy/connection.h>
#include <haproxy/conn_stream.h>
#include <haproxy/cs_utils.h>
#include <haproxy/dict.h>
#include <haproxy/dynbuf.h>
#include <haproxy/fd.h>
#include <haproxy/filters.h>
#include <haproxy/freq_ctr.h>
#include <haproxy/frontend.h>
#include <haproxy/global.h>
#include <haproxy/hlua.h>
#include <haproxy/http_ana.h>
#include <haproxy/http_rules.h>
#include <haproxy/htx.h>
#include <haproxy/istbuf.h>
#include <haproxy/log.h>
#include <haproxy/pipe.h>
#include <haproxy/pool.h>
#include <haproxy/proxy.h>
#include <haproxy/queue.h>
#include <haproxy/server.h>
#include <haproxy/resolvers.h>
#include <haproxy/sample.h>
#include <haproxy/session.h>
#include <haproxy/stats-t.h>
#include <haproxy/stick_table.h>
#include <haproxy/stream.h>
#include <haproxy/task.h>
#include <haproxy/tcp_rules.h>
#include <haproxy/thread.h>
#include <haproxy/trace.h>
#include <haproxy/vars.h>

DECLARE_POOL(pool_head_stream, "stream", sizeof(struct stream));
DECLARE_POOL(pool_head_uniqueid, "uniqueid", UNIQUEID_LEN);

/* incremented by each "show sess" to fix a delimiter between streams */
unsigned stream_epoch = 0;

/* List of all use-service keywords. */
static struct list service_keywords = LIST_HEAD_INIT(service_keywords);


/* trace source and events */
static void strm_trace(enum trace_level level, uint64_t mask,
                       const struct trace_source *src,
                       const struct ist where, const struct ist func,
                       const void *a1, const void *a2, const void *a3, const void *a4);

/* The event representation is split like this:
 *   strm  - stream
 *   cs    - conn-stream
 *   http  - http analysis
 *   tcp   - tcp analysis
 *
 * STRM_EV_* macros are defined in <proto/stream.h>
 */
static const struct trace_event strm_trace_events[] = {
	{ .mask = STRM_EV_STRM_NEW,     .name = "strm_new",     .desc = "new stream" },
	{ .mask = STRM_EV_STRM_FREE,    .name = "strm_free",    .desc = "release stream" },
	{ .mask = STRM_EV_STRM_ERR,     .name = "strm_err",     .desc = "error during stream processing" },
	{ .mask = STRM_EV_STRM_ANA,     .name = "strm_ana",     .desc = "stream analyzers" },
	{ .mask = STRM_EV_STRM_PROC,    .name = "strm_proc",    .desc = "stream processing" },

	{ .mask = STRM_EV_CS_ST,        .name = "cs_state",     .desc = "processing conn-stream states" },

	{ .mask = STRM_EV_HTTP_ANA,     .name = "http_ana",     .desc = "HTTP analyzers" },
	{ .mask = STRM_EV_HTTP_ERR,     .name = "http_err",     .desc = "error during HTTP analysis" },

	{ .mask = STRM_EV_TCP_ANA,      .name = "tcp_ana",      .desc = "TCP analyzers" },
	{ .mask = STRM_EV_TCP_ERR,      .name = "tcp_err",      .desc = "error during TCP analysis" },

	{ .mask = STRM_EV_FLT_ANA,      .name = "flt_ana",      .desc = "filter analyzers" },
	{ .mask = STRM_EV_FLT_ERR,      .name = "flt_err",      .desc = "error during filter analysis" },
	{}
};

static const struct name_desc strm_trace_lockon_args[4] = {
	/* arg1 */ { /* already used by the stream */ },
	/* arg2 */ { },
	/* arg3 */ { },
	/* arg4 */ { }
};

static const struct name_desc strm_trace_decoding[] = {
#define STRM_VERB_CLEAN    1
	{ .name="clean",    .desc="only user-friendly stuff, generally suitable for level \"user\"" },
#define STRM_VERB_MINIMAL  2
	{ .name="minimal",  .desc="report info on stream and conn-streams" },
#define STRM_VERB_SIMPLE   3
	{ .name="simple",   .desc="add info on request and response channels" },
#define STRM_VERB_ADVANCED 4
	{ .name="advanced", .desc="add info on channel's buffer for data and developer levels only" },
#define STRM_VERB_COMPLETE 5
	{ .name="complete", .desc="add info on channel's buffer" },
	{ /* end */ }
};

struct trace_source trace_strm = {
	.name = IST("stream"),
	.desc = "Applicative stream",
	.arg_def = TRC_ARG1_STRM,  // TRACE()'s first argument is always a stream
	.default_cb = strm_trace,
	.known_events = strm_trace_events,
	.lockon_args = strm_trace_lockon_args,
	.decoding = strm_trace_decoding,
	.report_events = ~0,  // report everything by default
};

#define TRACE_SOURCE &trace_strm
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);

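/* Note (illustrative, not an exhaustive reference): the "stream" trace source
 * registered above is normally driven at runtime, e.g. from the CLI with
 * commands such as "trace stream sink stderr", "trace stream level developer",
 * "trace stream verbosity advanced" and "trace stream start now". The
 * verbosity names map to the STRM_VERB_* values declared above; the exact
 * command syntax belongs to the generic trace subsystem.
 */
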
/* the stream traces always expect that arg1, if non-null, is a stream (from
 * which we can derive everything), that arg2, if non-null, is an http
 * transaction, and that arg3, if non-null, is an http message.
 */
static void strm_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
                       const struct ist where, const struct ist func,
                       const void *a1, const void *a2, const void *a3, const void *a4)
{
	const struct stream *s = a1;
	const struct http_txn *txn = a2;
	const struct http_msg *msg = a3;
	struct task *task;
	const struct channel *req, *res;
	struct htx *htx;

	if (!s || src->verbosity < STRM_VERB_CLEAN)
		return;

	task = s->task;
	req  = &s->req;
	res  = &s->res;
	htx  = (msg ? htxbuf(&msg->chn->buf) : NULL);

	/* General info about the stream (htx/tcp, id...) */
	chunk_appendf(&trace_buf, " : [%u,%s]",
		      s->uniq_id, ((s->flags & SF_HTX) ? "HTX" : "TCP"));
	if (isttest(s->unique_id)) {
		chunk_appendf(&trace_buf, " id=");
		b_putist(&trace_buf, s->unique_id);
	}

	/* Front and back conn-stream state */
	chunk_appendf(&trace_buf, " CS=(%s,%s)",
		      cs_state_str(s->csf->state), cs_state_str(s->csb->state));

	/* If txn is defined, HTTP req/rep states */
	if (txn)
		chunk_appendf(&trace_buf, " HTTP=(%s,%s)",
			      h1_msg_state_str(txn->req.msg_state), h1_msg_state_str(txn->rsp.msg_state));
	if (msg)
		chunk_appendf(&trace_buf, " %s", ((msg->chn->flags & CF_ISRESP) ? "RESPONSE" : "REQUEST"));

	if (src->verbosity == STRM_VERB_CLEAN)
		return;

	/* If msg is defined, display the status-line if possible (verbosity > MINIMAL) */
	if (src->verbosity > STRM_VERB_MINIMAL && htx && htx_nbblks(htx)) {
		const struct htx_blk *blk = htx_get_head_blk(htx);
		const struct htx_sl *sl = htx_get_blk_ptr(htx, blk);
		enum htx_blk_type type = htx_get_blk_type(blk);

		if (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL)
			chunk_appendf(&trace_buf, " - \"%.*s %.*s %.*s\"",
				      HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
				      HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
				      HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
	}


	/* If txn is defined, show info about the HTTP msgs, otherwise about the conn-streams. */
	if (txn) {
		chunk_appendf(&trace_buf, " - t=%p s=(%p,0x%08x,0x%x) txn.flags=0x%08x, http.flags=(0x%08x,0x%08x) status=%d",
			      task, s, s->flags, s->conn_err_type, txn->flags, txn->req.flags, txn->rsp.flags, txn->status);
	}
	else {
		chunk_appendf(&trace_buf, " - t=%p s=(%p,0x%08x,0x%x) csf=(%p,%d,0x%08x) csb=(%p,%d,0x%08x) retries=%d",
			      task, s, s->flags, s->conn_err_type,
			      s->csf, s->csf->state, s->csf->flags,
			      s->csb, s->csb->state, s->csb->flags,
			      s->conn_retries);
	}

	if (src->verbosity == STRM_VERB_MINIMAL)
		return;


	/* If txn is defined, don't display all channel info */
	if (src->verbosity == STRM_VERB_SIMPLE || txn) {
		chunk_appendf(&trace_buf, " req=(%p .fl=0x%08x .exp(r,w,a)=(%u,%u,%u))",
			      req, req->flags, req->rex, req->wex, req->analyse_exp);
		chunk_appendf(&trace_buf, " res=(%p .fl=0x%08x .exp(r,w,a)=(%u,%u,%u))",
			      res, res->flags, res->rex, res->wex, res->analyse_exp);
	}
	else {
		chunk_appendf(&trace_buf, " req=(%p .fl=0x%08x .ana=0x%08x .exp(r,w,a)=(%u,%u,%u) .o=%lu .tot=%llu .to_fwd=%u)",
			      req, req->flags, req->analysers, req->rex, req->wex, req->analyse_exp,
			      (long)req->output, req->total, req->to_forward);
		chunk_appendf(&trace_buf, " res=(%p .fl=0x%08x .ana=0x%08x .exp(r,w,a)=(%u,%u,%u) .o=%lu .tot=%llu .to_fwd=%u)",
			      res, res->flags, res->analysers, res->rex, res->wex, res->analyse_exp,
			      (long)res->output, res->total, res->to_forward);
	}

	if (src->verbosity == STRM_VERB_SIMPLE ||
	    (src->verbosity == STRM_VERB_ADVANCED && src->level < TRACE_LEVEL_DATA))
		return;

	/* channels' buffer info */
	if (s->flags & SF_HTX) {
		struct htx *rqhtx = htxbuf(&req->buf);
		struct htx *rphtx = htxbuf(&res->buf);

		chunk_appendf(&trace_buf, " htx=(%u/%u#%u, %u/%u#%u)",
			      rqhtx->data, rqhtx->size, htx_nbblks(rqhtx),
			      rphtx->data, rphtx->size, htx_nbblks(rphtx));
	}
	else {
		chunk_appendf(&trace_buf, " buf=(%u@%p+%u/%u, %u@%p+%u/%u)",
			      (unsigned int)b_data(&req->buf), b_orig(&req->buf),
			      (unsigned int)b_head_ofs(&req->buf), (unsigned int)b_size(&req->buf),
			      (unsigned int)b_data(&res->buf), b_orig(&res->buf),
			      (unsigned int)b_head_ofs(&res->buf), (unsigned int)b_size(&res->buf));
	}

	/* If msg is defined, display htx info if defined (level > USER) */
	if (src->level > TRACE_LEVEL_USER && htx && htx_nbblks(htx)) {
		int full = 0;

		/* Full htx info (level > STATE && verbosity > SIMPLE) */
		if (src->level > TRACE_LEVEL_STATE) {
			if (src->verbosity == STRM_VERB_COMPLETE)
				full = 1;
		}

		chunk_memcat(&trace_buf, "\n\t", 2);
		htx_dump(&trace_buf, htx, full);
	}
}

/* Upgrade an existing stream for conn-stream <cs>. Returns < 0 on error. This
 * is only valid right after a TCP to H1 upgrade. The stream should be
 * "reactivated" by removing the SF_IGNORE flag, and the right mode must be
 * set. On success, the <input> buffer is transferred to the stream and thus
 * points to BUF_NULL. On error, it is unchanged and it is the caller's
 * responsibility to release it (this never happens for now).
 */
int stream_upgrade_from_cs(struct conn_stream *cs, struct buffer *input)
{
	struct stream *s = __cs_strm(cs);
	const struct mux_ops *mux = cs_conn_mux(cs);

	if (mux) {
		if (mux->flags & MX_FL_HTX)
			s->flags |= SF_HTX;
	}

	if (!b_is_null(input)) {
		/* Xfer the input buffer to the request channel. <input> will
		 * then point to BUF_NULL. From this point, it is the stream's
		 * responsibility to release it.
		 */
		s->req.buf = *input;
		*input = BUF_NULL;
		s->req.total = (IS_HTX_STRM(s) ? htxbuf(&s->req.buf)->data : b_data(&s->req.buf));
		s->req.flags |= (s->req.total ? CF_READ_PARTIAL : 0);
	}

	s->flags &= ~SF_IGNORE;

	task_wakeup(s->task, TASK_WOKEN_INIT);
	return 0;
}

/* Callback used to wake up a stream when an input buffer is available. The
 * stream <s>'s conn-streams are checked for a failed buffer allocation,
 * as indicated by the presence of the CS_EP_RXBLK_BUFF flag and the lack of a
 * buffer, and an input buffer is assigned there (at most one). The function
 * returns 1 and wakes the stream up if a buffer was taken, otherwise zero.
 * It's designed to be called from __offer_buffer().
 */
int stream_buf_available(void *arg)
{
	struct stream *s = arg;

	if (!s->req.buf.size && !s->req.pipe && (s->csf->endp->flags & CS_EP_RXBLK_BUFF) &&
	    b_alloc(&s->req.buf))
		cs_rx_buff_rdy(s->csf);
	else if (!s->res.buf.size && !s->res.pipe && (s->csb->endp->flags & CS_EP_RXBLK_BUFF) &&
		 b_alloc(&s->res.buf))
		cs_rx_buff_rdy(s->csb);
	else
		return 0;

	task_wakeup(s->task, TASK_WOKEN_RES);
	return 1;

}

/* This function is called from the session handler which detects the end of
 * handshake, in order to complete initialization of a valid stream. It must be
 * called with a completely initialized session. It returns the pointer to
 * the newly created stream, or NULL in case of fatal error. The client-facing
 * conn-stream is passed in <cs>, which must be valid. The stream's task
 * is configured with a nice value inherited from the listener's nice if any.
 * The task's context is set to the new stream, and its function is set to
 * process_stream(). Target and analysers are null. <input> is used as input
 * buffer for the request channel and may contain data. On success, it is
 * transferred to the stream and <input> is set to BUF_NULL. On error, the
 * <input> buffer is unchanged and it is the caller's responsibility to
 * release it.
 */
struct stream *stream_new(struct session *sess, struct conn_stream *cs, struct buffer *input)
{
	struct stream *s;
	struct task *t;

	DBG_TRACE_ENTER(STRM_EV_STRM_NEW);
	if (unlikely((s = pool_alloc(pool_head_stream)) == NULL))
		goto out_fail_alloc;

	/* minimum stream initialization required for an embryonic stream is
	 * fairly low. We need very little to execute L4 ACLs, then we need a
	 * task to make the client-side connection live on its own.
	 *  - flags
	 *  - stick-entry tracking
	 */
	s->flags = 0;
	s->logs.logwait = sess->fe->to_log;
	s->logs.level = 0;
	tv_zero(&s->logs.tv_request);
	s->logs.t_queue = -1;
	s->logs.t_connect = -1;
	s->logs.t_data = -1;
	s->logs.t_close = 0;
	s->logs.bytes_in = s->logs.bytes_out = 0;
	s->logs.prx_queue_pos = 0;  /* we get the number of pending conns before us */
	s->logs.srv_queue_pos = 0;  /* we will get this number soon */
	s->obj_type = OBJ_TYPE_STREAM;

	s->logs.accept_date = sess->accept_date;
	s->logs.tv_accept = sess->tv_accept;
	s->logs.t_handshake = sess->t_handshake;
	s->logs.t_idle = sess->t_idle;

	/* default logging function */
	s->do_log = strm_log;

	/* default error reporting function, may be changed by analysers */
	s->srv_error = default_srv_error;

	/* Initialise the current rule list pointer to NULL. We are sure that
	 * any rulelist match the NULL pointer.
	 */
	s->current_rule_list = NULL;
	s->current_rule = NULL;
	s->rules_exp = TICK_ETERNITY;
	s->last_rule_file = NULL;
	s->last_rule_line = 0;

	/* Copy SC counters for the stream. We don't touch refcounts because
	 * any reference we have is inherited from the session. Since the stream
	 * doesn't exist without the session, the session's existence guarantees
	 * we don't lose the entry. During the store operation, the stream won't
	 * touch these ones.
	 */
	memcpy(s->stkctr, sess->stkctr, sizeof(s->stkctr));

	s->sess = sess;

	s->stream_epoch = _HA_ATOMIC_LOAD(&stream_epoch);
	s->uniq_id = _HA_ATOMIC_FETCH_ADD(&global.req_count, 1);

	/* OK, we're keeping the stream, so let's properly initialize the stream */
	LIST_INIT(&s->back_refs);

	LIST_INIT(&s->buffer_wait.list);
	s->buffer_wait.target = s;
	s->buffer_wait.wakeup_cb = stream_buf_available;

	s->call_rate.curr_tick = s->call_rate.curr_ctr = s->call_rate.prev_ctr = 0;
	s->pcli_next_pid = 0;
	s->pcli_flags = 0;
	s->unique_id = IST_NULL;

	if ((t = task_new_here()) == NULL)
		goto out_fail_alloc;

	s->task = t;
	s->pending_events = 0;
	s->conn_retries = 0;
	s->conn_exp = TICK_ETERNITY;
	s->conn_err_type = STRM_ET_NONE;
	s->prev_conn_state = CS_ST_INI;
	t->process = process_stream;
	t->context = s;
	t->expire = TICK_ETERNITY;
	if (sess->listener)
		t->nice = sess->listener->nice;

	/* Note: initially, the stream's backend points to the frontend.
	 * This changes later when switching rules are executed or
	 * when the default backend is assigned.
	 */
	s->be  = sess->fe;
	s->req_cap = NULL;
	s->res_cap = NULL;

	/* Initialise all the variables contexts even if not used.
	 * This permits to prune these contexts without errors.
	 */
	vars_init_head(&s->vars_txn,    SCOPE_TXN);
	vars_init_head(&s->vars_reqres, SCOPE_REQ);

	/* Set SF_HTX flag for HTTP frontends. */
	if (sess->fe->mode == PR_MODE_HTTP)
		s->flags |= SF_HTX;

	s->csf = cs;
	if (cs_attach_strm(s->csf, s) < 0)
		goto out_fail_attach_csf;

	s->csb = cs_new_from_strm(s, CS_FL_ISBACK);
	if (!s->csb)
		goto out_fail_alloc_csb;

	cs_set_state(s->csf, CS_ST_EST);
	s->csf->hcto = sess->fe->timeout.clientfin;

	if (likely(sess->fe->options2 & PR_O2_INDEPSTR))
		s->csf->flags |= CS_FL_INDEP_STR;

	s->csb->hcto = TICK_ETERNITY;
	if (likely(sess->fe->options2 & PR_O2_INDEPSTR))
		s->csb->flags |= CS_FL_INDEP_STR;

	if (cs->endp->flags & CS_EP_WEBSOCKET)
		s->flags |= SF_WEBSOCKET;
	if (cs_conn(cs)) {
		const struct mux_ops *mux = cs_conn_mux(cs);

		if (mux && mux->flags & MX_FL_HTX)
			s->flags |= SF_HTX;
	}

	stream_init_srv_conn(s);
	s->target = sess->listener ? sess->listener->default_target : NULL;

	s->pend_pos = NULL;
	s->priority_class = 0;
	s->priority_offset = 0;

	/* init store persistence */
	s->store_count = 0;

	channel_init(&s->req);
	s->req.flags |= CF_READ_ATTACHED; /* the producer is already connected */
	s->req.analysers = sess->listener ? sess->listener->analysers : sess->fe->fe_req_ana;

	if (IS_HTX_STRM(s)) {
		/* Be sure to have HTTP analysers because in case of a
		 * "destructive" stream upgrade, they may be missing (e.g.
		 * TCP>H2)
		 */
		s->req.analysers |= AN_REQ_WAIT_HTTP|AN_REQ_HTTP_PROCESS_FE;
	}

	if (!sess->fe->fe_req_ana) {
		channel_auto_connect(&s->req);  /* don't wait to establish connection */
		channel_auto_close(&s->req);    /* let the producer forward close requests */
	}

	s->req.rto = sess->fe->timeout.client;
	s->req.wto = TICK_ETERNITY;
	s->req.rex = TICK_ETERNITY;
	s->req.wex = TICK_ETERNITY;
	s->req.analyse_exp = TICK_ETERNITY;

	channel_init(&s->res);
	s->res.flags |= CF_ISRESP;
	s->res.analysers = 0;

	if (sess->fe->options2 & PR_O2_NODELAY) {
		s->req.flags |= CF_NEVER_WAIT;
		s->res.flags |= CF_NEVER_WAIT;
	}

	s->res.wto = sess->fe->timeout.client;
	s->res.rto = TICK_ETERNITY;
	s->res.rex = TICK_ETERNITY;
	s->res.wex = TICK_ETERNITY;
	s->res.analyse_exp = TICK_ETERNITY;

	s->txn = NULL;
	s->hlua = NULL;

	s->resolv_ctx.requester = NULL;
	s->resolv_ctx.hostname_dn = NULL;
	s->resolv_ctx.hostname_dn_len = 0;
	s->resolv_ctx.parent = NULL;

	s->tunnel_timeout = TICK_ETERNITY;

	LIST_APPEND(&th_ctx->streams, &s->list);

	if (flt_stream_init(s) < 0 || flt_stream_start(s) < 0)
		goto out_fail_accept;

	/* finish initialization of the accepted file descriptor */
	if (cs_appctx(cs))
		cs_want_get(s->csf);

	if (sess->fe->accept && sess->fe->accept(s) < 0)
		goto out_fail_accept;

	if (!b_is_null(input)) {
		/* Xfer the input buffer to the request channel. <input> will
		 * then point to BUF_NULL. From this point, it is the stream's
		 * responsibility to release it.
		 */
		s->req.buf = *input;
		*input = BUF_NULL;
		s->req.total = (IS_HTX_STRM(s) ? htxbuf(&s->req.buf)->data : b_data(&s->req.buf));
		s->req.flags |= (s->req.total ? CF_READ_PARTIAL : 0);
	}

	/* it is important not to call the wakeup function directly but to
	 * pass through task_wakeup(), because this one knows how to apply
	 * priorities to tasks. With multiple threads we must be sure that
	 * the stream is fully initialized before calling task_wakeup, so
	 * the caller must handle the task_wakeup.
	 */
	DBG_TRACE_LEAVE(STRM_EV_STRM_NEW, s);
	task_wakeup(s->task, TASK_WOKEN_INIT);
	return s;

	/* Error unrolling */
 out_fail_accept:
	flt_stream_release(s, 0);
	LIST_DELETE(&s->list);
 out_fail_attach_csf:
	cs_free(s->csb);
 out_fail_alloc_csb:
	task_destroy(t);
 out_fail_alloc:
	pool_free(pool_head_stream, s);
	DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_NEW|STRM_EV_STRM_ERR);
	return NULL;
}

/*
 * frees the context associated to a stream. It must have been removed first.
 */
void stream_free(struct stream *s)
{
	struct session *sess = strm_sess(s);
	struct proxy *fe = sess->fe;
	struct bref *bref, *back;
	int i;

	DBG_TRACE_POINT(STRM_EV_STRM_FREE, s);

	/* detach the stream from its own task before even releasing it so
	 * that walking over a task list never exhibits a dying stream.
	 */
	s->task->context = NULL;
	__ha_barrier_store();

	pendconn_free(s);

	if (objt_server(s->target)) { /* there may be requests left pending in queue */
		if (s->flags & SF_CURR_SESS) {
			s->flags &= ~SF_CURR_SESS;
			_HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
		}
		if (may_dequeue_tasks(__objt_server(s->target), s->be))
			process_srv_queue(__objt_server(s->target));
	}

	if (unlikely(s->srv_conn)) {
		/* the stream still has a reserved slot on a server, but
		 * it should normally be only the same as the one above,
		 * so this should not happen in fact.
		 */
		sess_change_server(s, NULL);
	}

	if (s->req.pipe)
		put_pipe(s->req.pipe);

	if (s->res.pipe)
		put_pipe(s->res.pipe);

	/* We may still be present in the buffer wait queue */
	if (LIST_INLIST(&s->buffer_wait.list))
		LIST_DEL_INIT(&s->buffer_wait.list);

	if (s->req.buf.size || s->res.buf.size) {
		int count = !!s->req.buf.size + !!s->res.buf.size;

		b_free(&s->req.buf);
		b_free(&s->res.buf);
		offer_buffers(NULL, count);
	}

	pool_free(pool_head_uniqueid, s->unique_id.ptr);
	s->unique_id = IST_NULL;

	flt_stream_stop(s);
	flt_stream_release(s, 0);

	hlua_ctx_destroy(s->hlua);
	s->hlua = NULL;
	if (s->txn)
		http_destroy_txn(s);

	/* ensure the client-side transport layer is destroyed */
	/* Be sure it is useless !! */
	/* if (cli_cs) */
	/*	cs_close(cli_cs); */

	for (i = 0; i < s->store_count; i++) {
		if (!s->store[i].ts)
			continue;
		stksess_free(s->store[i].table, s->store[i].ts);
		s->store[i].ts = NULL;
	}

	if (s->resolv_ctx.requester) {
		__decl_thread(struct resolvers *resolvers = s->resolv_ctx.parent->arg.resolv.resolvers);

		HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
		ha_free(&s->resolv_ctx.hostname_dn);
		s->resolv_ctx.hostname_dn_len = 0;
		resolv_unlink_resolution(s->resolv_ctx.requester);
		HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);

		pool_free(resolv_requester_pool, s->resolv_ctx.requester);
		s->resolv_ctx.requester = NULL;
	}

	if (fe) {
		if (s->req_cap) {
			struct cap_hdr *h;
			for (h = fe->req_cap; h; h = h->next)
				pool_free(h->pool, s->req_cap[h->index]);
		}

		if (s->res_cap) {
			struct cap_hdr *h;
			for (h = fe->rsp_cap; h; h = h->next)
				pool_free(h->pool, s->res_cap[h->index]);
		}

		pool_free(fe->rsp_cap_pool, s->res_cap);
		pool_free(fe->req_cap_pool, s->req_cap);
	}

	/* Cleanup all variable contexts. */
	if (!LIST_ISEMPTY(&s->vars_txn.head))
		vars_prune(&s->vars_txn, s->sess, s);
	if (!LIST_ISEMPTY(&s->vars_reqres.head))
		vars_prune(&s->vars_reqres, s->sess, s);

	stream_store_counters(s);

	list_for_each_entry_safe(bref, back, &s->back_refs, users) {
		/* we have to unlink all watchers. We must not relink them if
		 * this stream was the last one in the list. This is safe to do
		 * here because we're touching our thread's list so we know
		 * that other streams are not active, and the watchers will
		 * only touch their node under thread isolation.
		 */
		LIST_DEL_INIT(&bref->users);
		if (s->list.n != &th_ctx->streams)
			LIST_APPEND(&LIST_ELEM(s->list.n, struct stream *, list)->back_refs, &bref->users);
		bref->ref = s->list.n;
		__ha_barrier_store();
	}
	LIST_DELETE(&s->list);

	cs_destroy(s->csb);
	cs_destroy(s->csf);

	pool_free(pool_head_stream, s);

	/* We may want to free the maximum amount of pools if the proxy is stopping */
	if (fe && unlikely(fe->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
		pool_flush(pool_head_buffer);
		pool_flush(pool_head_http_txn);
		pool_flush(pool_head_requri);
		pool_flush(pool_head_capture);
		pool_flush(pool_head_stream);
		pool_flush(pool_head_session);
		pool_flush(pool_head_connection);
		pool_flush(pool_head_pendconn);
		pool_flush(fe->req_cap_pool);
		pool_flush(fe->rsp_cap_pool);
	}
}


/* Allocates a work buffer for stream <s>. It is meant to be called inside
 * process_stream(). It will only allocate the side needed for the function
 * to work fine, which is the response buffer so that an error message may be
 * built and returned. Response buffers may be allocated from the reserve, this
 * is critical to ensure that a response may always flow and will never block a
 * server from releasing a connection. Returns 0 in case of failure, non-zero
 * otherwise.
 */
static int stream_alloc_work_buffer(struct stream *s)
{
	if (b_alloc(&s->res.buf))
		return 1;
	return 0;
}

/* releases unused buffers after processing. Typically used at the end of the
 * update() functions. It will try to wake up as many tasks/applets as the
 * number of buffers that it releases. In practice, most often streams are
 * blocked on a single buffer, so it makes sense to try to wake two up when two
 * buffers are released at once.
 */
void stream_release_buffers(struct stream *s)
{
	int offer = 0;

	if (c_size(&s->req) && c_empty(&s->req)) {
		offer++;
		b_free(&s->req.buf);
	}
	if (c_size(&s->res) && c_empty(&s->res)) {
		offer++;
		b_free(&s->res.buf);
	}

	/* if we're certain to have at least 1 buffer available, and there is
	 * someone waiting, we can wake up a waiter and offer them.
	 */
	if (offer)
		offer_buffers(s, offer);
}

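/* Note: stream_release_buffers() above and stream_buf_available() earlier in
 * this file form the two halves of the buffer recycling cycle: stream_new()
 * registers stream_buf_available() as s->buffer_wait.wakeup_cb, and when a
 * stream gives its buffers back through offer_buffers(), waiting streams are
 * woken up through that callback so they can retry their allocation.
 */
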
void stream_process_counters(struct stream *s)
{
	struct session *sess = s->sess;
	unsigned long long bytes;
	int i;

	bytes = s->req.total - s->logs.bytes_in;
	s->logs.bytes_in = s->req.total;
	if (bytes) {
		_HA_ATOMIC_ADD(&sess->fe->fe_counters.bytes_in, bytes);
		_HA_ATOMIC_ADD(&s->be->be_counters.bytes_in, bytes);

		if (objt_server(s->target))
			_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.bytes_in, bytes);

		if (sess->listener && sess->listener->counters)
			_HA_ATOMIC_ADD(&sess->listener->counters->bytes_in, bytes);

		for (i = 0; i < MAX_SESS_STKCTR; i++) {
			if (!stkctr_inc_bytes_in_ctr(&s->stkctr[i], bytes))
				stkctr_inc_bytes_in_ctr(&sess->stkctr[i], bytes);
		}
	}

	bytes = s->res.total - s->logs.bytes_out;
	s->logs.bytes_out = s->res.total;
	if (bytes) {
		_HA_ATOMIC_ADD(&sess->fe->fe_counters.bytes_out, bytes);
		_HA_ATOMIC_ADD(&s->be->be_counters.bytes_out, bytes);

		if (objt_server(s->target))
			_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.bytes_out, bytes);

		if (sess->listener && sess->listener->counters)
			_HA_ATOMIC_ADD(&sess->listener->counters->bytes_out, bytes);

		for (i = 0; i < MAX_SESS_STKCTR; i++) {
			if (!stkctr_inc_bytes_out_ctr(&s->stkctr[i], bytes))
				stkctr_inc_bytes_out_ctr(&sess->stkctr[i], bytes);
		}
	}
}

/*
 * Returns a message to the client; the connection is shut down for read,
 * and the request is cleared so that no server connection can be initiated.
 * The buffer is marked for read shutdown on the other side to protect the
 * message, and the buffer write is enabled. The message is contained in a
 * "chunk". If it is null, then an empty message is used. The reply buffer does
 * not need to be empty before this, and its contents will not be overwritten.
 * The primary goal of this function is to return error messages to a client.
 */
void stream_retnclose(struct stream *s, const struct buffer *msg)
{
	struct channel *ic = &s->req;
	struct channel *oc = &s->res;

	channel_auto_read(ic);
	channel_abort(ic);
	channel_auto_close(ic);
	channel_erase(ic);
	channel_truncate(oc);

	if (likely(msg && msg->data))
		co_inject(oc, msg->area, msg->data);

	oc->wex = tick_add_ifset(now_ms, oc->wto);
	channel_auto_read(oc);
	channel_auto_close(oc);
	channel_shutr_now(oc);
}

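/* Illustrative sketch (hypothetical caller, not taken from this file): reply
 * to the client with a fixed message and close, without ever opening a server
 * connection:
 *
 *	struct buffer msg;
 *
 *	chunk_initstr(&msg, "access denied\r\n");
 *	stream_retnclose(s, &msg);
 *
 * Passing a NULL or empty message performs the same shutdown sequence without
 * emitting anything.
 */
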
int stream_set_timeout(struct stream *s, enum act_timeout_name name, int timeout)
{
	switch (name) {
	case ACT_TIMEOUT_SERVER:
		s->req.wto = timeout;
		s->res.rto = timeout;
		return 1;

	case ACT_TIMEOUT_TUNNEL:
		s->tunnel_timeout = timeout;
		return 1;

	default:
		return 0;
	}
}

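/* Illustrative sketch (hypothetical caller): an action handler applying a
 * "set-timeout" rule would pass its value expressed in ticks, for instance:
 *
 *	if (!stream_set_timeout(s, ACT_TIMEOUT_TUNNEL, MS_TO_TICKS(30000)))
 *		goto fail; // unknown timeout name, nothing was changed
 *
 * ACT_TIMEOUT_SERVER adjusts the request write and response read timeouts of
 * the stream's channels, while ACT_TIMEOUT_TUNNEL only updates the stream's
 * tunnel timeout.
 */
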
/*
 * This function handles the transition between the CS_ST_CON state and the
 * CS_ST_EST state. It must only be called after switching from CS_ST_CON (or
 * CS_ST_INI or CS_ST_RDY) to CS_ST_EST, but only when a ->proto is defined.
 * Note that it will switch the conn-stream to CS_ST_DIS if we already have
 * the CF_SHUTR flag: it means we were able to forward the request and
 * receive the response before process_stream() had the opportunity to
 * make the switch from CS_ST_CON to CS_ST_EST. When that happens, we want
 * to go through back_establish() anyway, to make sure the analysers run.
 * Timeouts are cleared. Errors are reported on the channel so that analysers
 * can handle them.
 */
Willy Tarreau3a9312a2020-01-09 18:43:15 +0100874static void back_establish(struct stream *s)
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100875{
Christopher Faulet95a61e82021-12-22 14:22:03 +0100876 struct connection *conn = cs_conn(s->csb);
Willy Tarreau7b8c4f92014-11-28 15:15:44 +0100877 struct channel *req = &s->req;
878 struct channel *rep = &s->res;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100879
Christopher Faulet62e75742022-03-31 09:16:34 +0200880 DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
Willy Tarreaud66ed882019-06-05 18:02:04 +0200881 /* First, centralize the timers information, and clear any irrelevant
882 * timeout.
883 */
Willy Tarreau0e37f1c2013-12-31 23:06:46 +0100884 s->logs.t_connect = tv_ms_elapsed(&s->logs.tv_accept, &now);
Christopher Fauletae024ce2022-03-29 19:02:31 +0200885 s->conn_exp = TICK_ETERNITY;
886 s->flags &= ~SF_CONN_EXP;
Willy Tarreaud66ed882019-06-05 18:02:04 +0200887
888 /* errors faced after sending data need to be reported */
Christopher Faulet6cd56d52022-03-30 10:47:32 +0200889 if (s->csb->endp->flags & CS_EP_ERROR && req->flags & CF_WROTE_DATA) {
Willy Tarreaud66ed882019-06-05 18:02:04 +0200890 /* Don't add CF_WRITE_ERROR if we're here because
891 * early data were rejected by the server, or
892 * http_wait_for_response() will never be called
893 * to send a 425.
894 */
895 if (conn && conn->err_code != CO_ER_SSL_EARLY_FAILED)
896 req->flags |= CF_WRITE_ERROR;
897 rep->flags |= CF_READ_ERROR;
Christopher Faulet50264b42022-03-30 19:39:30 +0200898 s->conn_err_type = STRM_ET_DATA_ERR;
Christopher Faulet62e75742022-03-31 09:16:34 +0200899 DBG_TRACE_STATE("read/write error", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
Willy Tarreaud66ed882019-06-05 18:02:04 +0200900 }
901
Willy Tarreau3fdb3662012-11-12 00:42:33 +0100902 if (objt_server(s->target))
Willy Tarreau88bc8002021-12-06 07:01:02 +0000903 health_adjust(__objt_server(s->target), HANA_STATUS_L4_OK);
Krzysztof Piotr Oledzki97f07b82009-12-15 22:31:24 +0100904
Christopher Faulet1bb6afa2021-03-08 17:57:53 +0100905 if (!IS_HTX_STRM(s)) { /* let's allow immediate data connection in this case */
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100906 /* if the user wants to log as soon as possible, without counting
907 * bytes from the server, then this is the right moment. */
Willy Tarreaud0d8da92015-04-04 02:10:38 +0200908 if (!LIST_ISEMPTY(&strm_fe(s)->logformat) && !(s->logs.logwait & LW_BYTES)) {
Willy Tarreau66425e32018-07-25 06:55:12 +0200909 /* note: no pend_pos here, session is established */
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100910 s->logs.t_close = s->logs.t_connect; /* to get a valid end date */
Willy Tarreaua5555ec2008-11-30 19:02:32 +0100911 s->do_log(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100912 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100913 }
914 else {
Willy Tarreaud81ca042013-12-31 22:33:13 +0100915 rep->flags |= CF_READ_DONTWAIT; /* a single read is enough to get response headers */
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100916 }
917
Willy Tarreau0007d0a2018-12-11 18:01:38 +0100918 rep->analysers |= strm_fe(s)->fe_rsp_ana | s->be->be_rsp_ana;
Christopher Faulet309c6412015-12-02 09:57:32 +0100919
Christopher Fauleta0bdec32022-04-04 07:51:21 +0200920 cs_rx_endp_more(s->csb);
Willy Tarreau03cdb7c2012-08-27 23:14:58 +0200921 rep->flags |= CF_READ_ATTACHED; /* producer is now attached */
Christopher Faulet0256da12021-12-15 09:50:17 +0100922 if (conn) {
Amaury Denoyelle90d3d882020-12-10 13:43:52 +0100923 /* real connections have timeouts
924 * if already defined, it means that a set-timeout rule has
925 * been executed so do not overwrite them
926 */
927 if (!tick_isset(req->wto))
928 req->wto = s->be->timeout.server;
929 if (!tick_isset(rep->rto))
930 rep->rto = s->be->timeout.server;
Amaury Denoyellefb504432020-12-10 13:43:53 +0100931 if (!tick_isset(s->tunnel_timeout))
932 s->tunnel_timeout = s->be->timeout.tunnel;
Amaury Denoyelle90d3d882020-12-10 13:43:52 +0100933
Olivier Houchard47e9a1a2018-11-07 17:55:19 +0100934 /* The connection is now established, try to read data from the
935 * underlying layer, and subscribe to recv events. We use a
936 * delayed recv here to give a chance to the data to flow back
937 * by the time we process other tasks.
938 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +0200939 cs_chk_rcv(s->csb);
Willy Tarreaud04e8582010-05-31 12:31:35 +0200940 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100941 req->wex = TICK_ETERNITY;
Olivier Houchard78595262019-07-26 14:54:34 +0200942 /* If we managed to get the whole response, and we don't have anything
Christopher Faulet62e75742022-03-31 09:16:34 +0200943 * left to send, or can't, switch to CS_ST_DIS now. */
Christopher Fauleteea8fc72019-11-05 16:18:10 +0100944 if (rep->flags & (CF_SHUTR | CF_SHUTW)) {
Christopher Faulet62e75742022-03-31 09:16:34 +0200945 s->csb->state = CS_ST_DIS;
946 DBG_TRACE_STATE("response channel shutdwn for read/write", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
Christopher Fauleteea8fc72019-11-05 16:18:10 +0100947 }
948
Christopher Faulet62e75742022-03-31 09:16:34 +0200949 DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100950}
951
Willy Tarreau87b09662015-04-03 00:22:06 +0200952/* Set correct stream termination flags in case no analyser has done it. It
Simon Hormandec5be42011-06-08 09:19:07 +0900953 * also counts a failed request if the server state has not reached the request
954 * stage.
955 */
Willy Tarreau87b09662015-04-03 00:22:06 +0200956static void sess_set_term_flags(struct stream *s)
Simon Hormandec5be42011-06-08 09:19:07 +0900957{
Willy Tarreaue7dff022015-04-03 01:14:29 +0200958 if (!(s->flags & SF_FINST_MASK)) {
Christopher Faulet62e75742022-03-31 09:16:34 +0200959 if (s->csb->state == CS_ST_INI) {
Willy Tarreau7ab22adb2019-06-05 14:53:22 +0200960 /* anything before REQ in fact */
Willy Tarreau4781b152021-04-06 13:53:36 +0200961 _HA_ATOMIC_INC(&strm_fe(s)->fe_counters.failed_req);
Willy Tarreau2c1068c2015-09-23 12:21:21 +0200962 if (strm_li(s) && strm_li(s)->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +0200963 _HA_ATOMIC_INC(&strm_li(s)->counters->failed_req);
Simon Hormandec5be42011-06-08 09:19:07 +0900964
Willy Tarreaue7dff022015-04-03 01:14:29 +0200965 s->flags |= SF_FINST_R;
Simon Hormandec5be42011-06-08 09:19:07 +0900966 }
Christopher Faulet62e75742022-03-31 09:16:34 +0200967 else if (s->csb->state == CS_ST_QUE)
Willy Tarreaue7dff022015-04-03 01:14:29 +0200968 s->flags |= SF_FINST_Q;
Christopher Faulet62e75742022-03-31 09:16:34 +0200969 else if (cs_state_in(s->csb->state, CS_SB_REQ|CS_SB_TAR|CS_SB_ASS|CS_SB_CON|CS_SB_CER|CS_SB_RDY))
Willy Tarreaue7dff022015-04-03 01:14:29 +0200970 s->flags |= SF_FINST_C;
Christopher Faulet62e75742022-03-31 09:16:34 +0200971 else if (s->csb->state == CS_ST_EST || s->prev_conn_state == CS_ST_EST)
Willy Tarreaue7dff022015-04-03 01:14:29 +0200972 s->flags |= SF_FINST_D;
Simon Hormandec5be42011-06-08 09:19:07 +0900973 else
Willy Tarreaue7dff022015-04-03 01:14:29 +0200974 s->flags |= SF_FINST_L;
Simon Hormandec5be42011-06-08 09:19:07 +0900975 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +0100976}
977
Thierry FOURNIER5a363e72015-09-27 19:29:33 +0200978/* This function processes the use-service action ruleset. It executes
979 * the associated ACL and sets an applet as the stream's or txn's final node.
980 * It returns ACT_RET_ERR if an error occurs, leaving the proxy in a
Ilya Shipitsinc02a23f2020-05-06 00:53:22 +0500981 * consistent state. It returns ACT_RET_STOP on success because
Thierry FOURNIER5a363e72015-09-27 19:29:33 +0200982 * use-service must be a terminal action. Returns ACT_RET_YIELD
983 * if the initialisation function requires more data.
984 */
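/* Example (illustrative only, the service name below is just a placeholder
 * for any registered applet-based service): such a rule typically comes from
 * a configuration line like
 *     http-request use-service prometheus-exporter if { path /metrics }
 */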
985enum act_return process_use_service(struct act_rule *rule, struct proxy *px,
986 struct session *sess, struct stream *s, int flags)
987
988{
989 struct appctx *appctx;
990
991 /* Initialises the applet if it is required. */
Christopher Faulet105ba6c2019-12-18 14:41:51 +0100992 if (flags & ACT_OPT_FIRST) {
Thierry FOURNIER5a363e72015-09-27 19:29:33 +0200993		/* Register the applet. This function schedules the applet. */
994 s->target = &rule->applet.obj_type;
Christopher Faulet1336ccf2022-04-12 18:15:16 +0200995 appctx = cs_applet_create(s->csb, objt_applet(s->target));
Christopher Faulet2da02ae2022-02-24 13:45:27 +0100996 if (unlikely(!appctx))
Thierry FOURNIER5a363e72015-09-27 19:29:33 +0200997 return ACT_RET_ERR;
998
Christopher Faulet93882042022-01-19 14:56:50 +0100999 /* Finish initialisation of the context. */
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001000 memset(&appctx->ctx, 0, sizeof(appctx->ctx));
1001 appctx->rule = rule;
Christopher Faulet4aa1d282022-01-13 16:01:35 +01001002 if (appctx->applet->init && !appctx->applet->init(appctx))
1003 return ACT_RET_ERR;
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001004 }
1005 else
Christopher Faulet693b23b2022-02-28 09:09:05 +01001006 appctx = __cs_appctx(s->csb);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001007
Christopher Faulet2571bc62019-03-01 11:44:26 +01001008 if (rule->from != ACT_F_HTTP_REQ) {
1009 if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
Willy Tarreau4781b152021-04-06 13:53:36 +02001010 _HA_ATOMIC_INC(&sess->fe->fe_counters.intercepted_req);
Christopher Faulet2571bc62019-03-01 11:44:26 +01001011
1012		/* The flag SF_ASSIGNED prevents server assignment. */
1013 s->flags |= SF_ASSIGNED;
1014 }
1015
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001016 /* Now we can schedule the applet. */
Christopher Fauleta0bdec32022-04-04 07:51:21 +02001017 cs_cant_get(s->csb);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001018 appctx_wakeup(appctx);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02001019 return ACT_RET_STOP;
1020}
1021
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001022/* This stream analyser checks the switching rules and changes the backend
Willy Tarreau4de91492010-01-22 19:10:05 +01001023 * if appropriate. The default_backend rule is also considered, and the
1024 * target backend's forced persistence rules are evaluated last, if any.
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001025 * It returns 1 if the processing can continue on next analysers, or zero if it
1026 * either needs more data or wants to immediately abort the request.
1027 */
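/* Example (illustrative only, backend names are placeholders):
 *     use_backend be_static if { path_beg /static }
 *     use_backend %[req.hdr(host),lower]
 *     default_backend be_default
 * The second form is a dynamic (log-format) rule which is resolved at run
 * time by build_logline() below.
 */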
Willy Tarreau87b09662015-04-03 00:22:06 +02001028static int process_switching_rules(struct stream *s, struct channel *req, int an_bit)
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001029{
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001030 struct persist_rule *prst_rule;
Willy Tarreau192252e2015-04-04 01:47:55 +02001031 struct session *sess = s->sess;
1032 struct proxy *fe = sess->fe;
Willy Tarreau4de91492010-01-22 19:10:05 +01001033
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001034 req->analysers &= ~an_bit;
1035 req->analyse_exp = TICK_ETERNITY;
1036
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001037 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001038
1039 /* now check whether we have some switching rules for this request */
Willy Tarreaue7dff022015-04-03 01:14:29 +02001040 if (!(s->flags & SF_BE_ASSIGNED)) {
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001041 struct switching_rule *rule;
1042
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02001043 list_for_each_entry(rule, &fe->switching_rules, list) {
Willy Tarreauf51658d2014-04-23 01:21:56 +02001044 int ret = 1;
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001045
Willy Tarreauf51658d2014-04-23 01:21:56 +02001046 if (rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001047 ret = acl_exec_cond(rule->cond, fe, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Willy Tarreauf51658d2014-04-23 01:21:56 +02001048 ret = acl_pass(ret);
1049 if (rule->cond->pol == ACL_COND_UNLESS)
1050 ret = !ret;
1051 }
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001052
1053 if (ret) {
Bertrand Jacquin702d44f2013-11-19 11:43:06 +01001054 /* If the backend name is dynamic, try to resolve the name.
1055 * If we can't resolve the name, or if any error occurs, break
1056 * the loop and fall back to the default backend.
1057 */
Dragan Dosen2ae327e2017-10-26 11:25:10 +02001058 struct proxy *backend = NULL;
Bertrand Jacquin702d44f2013-11-19 11:43:06 +01001059
1060 if (rule->dynamic) {
Willy Tarreau83061a82018-07-13 11:56:34 +02001061 struct buffer *tmp;
Dragan Dosen2ae327e2017-10-26 11:25:10 +02001062
1063 tmp = alloc_trash_chunk();
1064 if (!tmp)
1065 goto sw_failed;
1066
Willy Tarreau843b7cb2018-07-13 10:54:26 +02001067 if (build_logline(s, tmp->area, tmp->size, &rule->be.expr))
1068 backend = proxy_be_by_name(tmp->area);
Dragan Dosen2ae327e2017-10-26 11:25:10 +02001069
1070 free_trash_chunk(tmp);
1071 tmp = NULL;
1072
Bertrand Jacquin702d44f2013-11-19 11:43:06 +01001073 if (!backend)
1074 break;
1075 }
1076 else
1077 backend = rule->be.backend;
1078
Willy Tarreau87b09662015-04-03 00:22:06 +02001079 if (!stream_set_backend(s, backend))
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001080 goto sw_failed;
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001081 break;
1082 }
1083 }
1084
1085 /* To ensure correct connection accounting on the backend, we
1086 * have to assign one if it was not set (eg: a listen). This
1087 * measure also takes care of correctly setting the default
Christopher Fauletf0d7eb22021-03-22 15:07:51 +01001088 * backend if any. Don't do anything if an upgrade is already in
1089 * progress.
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001090 */
Christopher Fauletf0d7eb22021-03-22 15:07:51 +01001091 if (!(s->flags & (SF_BE_ASSIGNED|SF_IGNORE)))
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02001092 if (!stream_set_backend(s, fe->defbe.be ? fe->defbe.be : s->be))
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001093 goto sw_failed;
Christopher Fauletf0d7eb22021-03-22 15:07:51 +01001094
1095 /* No backend assigned but no error reported. It happens when a
1096 * TCP stream is upgraded to HTTP/2.
1097 */
1098 if ((s->flags & (SF_BE_ASSIGNED|SF_IGNORE)) == SF_IGNORE) {
1099 DBG_TRACE_DEVEL("leaving with no backend because of a destructive upgrade", STRM_EV_STRM_ANA, s);
1100 return 0;
1101 }
1102
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001103 }
1104
Willy Tarreaufb356202010-08-03 14:02:05 +02001105 /* we don't want to run the TCP or HTTP filters again if the backend has not changed */
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02001106 if (fe == s->be) {
Willy Tarreau22ec1ea2014-11-27 20:45:39 +01001107 s->req.analysers &= ~AN_REQ_INSPECT_BE;
1108 s->req.analysers &= ~AN_REQ_HTTP_PROCESS_BE;
Christopher Faulet0184ea72017-01-05 14:06:34 +01001109 s->req.analysers &= ~AN_REQ_FLT_START_BE;
Willy Tarreaufb356202010-08-03 14:02:05 +02001110 }
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001111
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001112 /* as soon as we know the backend, we must check if we have a matching forced or ignored
Willy Tarreau87b09662015-04-03 00:22:06 +02001113 * persistence rule, and report that in the stream.
Willy Tarreau4de91492010-01-22 19:10:05 +01001114 */
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001115 list_for_each_entry(prst_rule, &s->be->persist_rules, list) {
Willy Tarreau4de91492010-01-22 19:10:05 +01001116 int ret = 1;
1117
1118 if (prst_rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001119 ret = acl_exec_cond(prst_rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Willy Tarreau4de91492010-01-22 19:10:05 +01001120 ret = acl_pass(ret);
1121 if (prst_rule->cond->pol == ACL_COND_UNLESS)
1122 ret = !ret;
1123 }
1124
1125 if (ret) {
1126 /* no rule, or the rule matches */
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001127 if (prst_rule->type == PERSIST_TYPE_FORCE) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02001128 s->flags |= SF_FORCE_PRST;
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001129 } else {
Willy Tarreaue7dff022015-04-03 01:14:29 +02001130 s->flags |= SF_IGNORE_PRST;
Cyril Bonté47fdd8e2010-04-25 00:00:51 +02001131 }
Willy Tarreau4de91492010-01-22 19:10:05 +01001132 break;
1133 }
1134 }
1135
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001136 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001137 return 1;
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001138
1139 sw_failed:
1140 /* immediately abort this request in case of allocation failure */
Willy Tarreau22ec1ea2014-11-27 20:45:39 +01001141 channel_abort(&s->req);
1142 channel_abort(&s->res);
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001143
Willy Tarreaue7dff022015-04-03 01:14:29 +02001144 if (!(s->flags & SF_ERR_MASK))
1145 s->flags |= SF_ERR_RESOURCE;
1146 if (!(s->flags & SF_FINST_MASK))
1147 s->flags |= SF_FINST_R;
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001148
Willy Tarreaueee5b512015-04-03 23:46:31 +02001149 if (s->txn)
1150 s->txn->status = 500;
Christopher Faulet0184ea72017-01-05 14:06:34 +01001151 s->req.analysers &= AN_REQ_FLT_END;
Willy Tarreau22ec1ea2014-11-27 20:45:39 +01001152 s->req.analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001153 DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_ANA|STRM_EV_STRM_ERR, s);
Willy Tarreaubedb9ba2009-07-12 08:27:39 +02001154 return 0;
Willy Tarreau1d0dfb12009-07-07 15:10:31 +02001155}
1156
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001157/* This stream analyser works on a request. It applies all use-server rules to
1158 * it and always returns 1. The data must already be present in the buffer,
1159 * otherwise the rules won't match.
1160 */
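/* Example (illustrative only, server names are placeholders):
 *     use-server srv_images if { path_beg /img/ }
 *     use-server %[req.cook(srvid)] if { req.cook(srvid) -m found }
 * The second form is a dynamic (log-format) rule resolved at run time, which
 * is what the rule->dynamic branch below handles.
 */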
Willy Tarreau87b09662015-04-03 00:22:06 +02001161static int process_server_rules(struct stream *s, struct channel *req, int an_bit)
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001162{
1163 struct proxy *px = s->be;
Willy Tarreau192252e2015-04-04 01:47:55 +02001164 struct session *sess = s->sess;
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001165 struct server_rule *rule;
1166
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001167 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001168
Willy Tarreaue7dff022015-04-03 01:14:29 +02001169 if (!(s->flags & SF_ASSIGNED)) {
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001170 list_for_each_entry(rule, &px->server_rules, list) {
1171 int ret;
1172
Willy Tarreau192252e2015-04-04 01:47:55 +02001173 ret = acl_exec_cond(rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001174 ret = acl_pass(ret);
1175 if (rule->cond->pol == ACL_COND_UNLESS)
1176 ret = !ret;
1177
1178 if (ret) {
Jerome Magnin824186b2020-03-29 09:37:12 +02001179 struct server *srv;
1180
1181 if (rule->dynamic) {
1182 struct buffer *tmp = get_trash_chunk();
1183
1184 if (!build_logline(s, tmp->area, tmp->size, &rule->expr))
1185 break;
1186
1187 srv = findserver(s->be, tmp->area);
1188 if (!srv)
1189 break;
1190 }
1191 else
1192 srv = rule->srv.ptr;
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001193
Emeric Brun52a91d32017-08-31 14:41:55 +02001194 if ((srv->cur_state != SRV_ST_STOPPED) ||
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001195 (px->options & PR_O_PERSIST) ||
Willy Tarreaue7dff022015-04-03 01:14:29 +02001196 (s->flags & SF_FORCE_PRST)) {
1197 s->flags |= SF_DIRECT | SF_ASSIGNED;
Willy Tarreau3fdb3662012-11-12 00:42:33 +01001198 s->target = &srv->obj_type;
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001199 break;
1200 }
1201 /* if the server is not UP, let's go on with next rules
1202 * just in case another one is suited.
1203 */
1204 }
1205 }
1206 }
1207
1208 req->analysers &= ~an_bit;
1209 req->analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001210 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Willy Tarreau4a5cade2012-04-05 21:09:48 +02001211 return 1;
1212}
1213
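/* Looks up the server referenced by stick-table entry <ts> of table <t> and,
 * if that server is not stopped (or persistence is forced), assigns it as the
 * stream's target. The lookup first tries the stored server key (name or
 * address depending on the table's configuration), then falls back to the
 * stored server ID.
 */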
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001214static inline void sticking_rule_find_target(struct stream *s,
1215 struct stktable *t, struct stksess *ts)
1216{
1217 struct proxy *px = s->be;
1218 struct eb32_node *node;
1219 struct dict_entry *de;
1220 void *ptr;
1221 struct server *srv;
1222
1223 /* Look for the server name previously stored in <t> stick-table */
1224 HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
Thayne McCombs92149f92020-11-20 01:28:26 -07001225 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_KEY);
Emeric Brun0e3457b2021-06-30 17:18:28 +02001226 de = stktable_data_cast(ptr, std_t_dict);
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001227 HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
1228
1229 if (de) {
Thayne McCombs92149f92020-11-20 01:28:26 -07001230 struct ebpt_node *node;
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001231
Thayne McCombs92149f92020-11-20 01:28:26 -07001232 if (t->server_key_type == STKTABLE_SRV_NAME) {
1233 node = ebis_lookup(&px->conf.used_server_name, de->value.key);
1234 if (node) {
1235 srv = container_of(node, struct server, conf.name);
1236 goto found;
1237 }
1238 } else if (t->server_key_type == STKTABLE_SRV_ADDR) {
1239 HA_RWLOCK_RDLOCK(PROXY_LOCK, &px->lock);
1240 node = ebis_lookup(&px->used_server_addr, de->value.key);
1241 HA_RWLOCK_RDUNLOCK(PROXY_LOCK, &px->lock);
1242 if (node) {
1243 srv = container_of(node, struct server, addr_node);
1244 goto found;
1245 }
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001246 }
1247 }
1248
1249 /* Look for the server ID */
1250 HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
1251 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_ID);
Emeric Brun0e3457b2021-06-30 17:18:28 +02001252 node = eb32_lookup(&px->conf.used_server_id, stktable_data_cast(ptr, std_t_sint));
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001253 HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
1254
1255 if (!node)
1256 return;
1257
1258 srv = container_of(node, struct server, conf.id);
1259 found:
1260 if ((srv->cur_state != SRV_ST_STOPPED) ||
1261 (px->options & PR_O_PERSIST) || (s->flags & SF_FORCE_PRST)) {
1262 s->flags |= SF_DIRECT | SF_ASSIGNED;
1263 s->target = &srv->obj_type;
1264 }
1265}
1266
Emeric Brun1d33b292010-01-04 15:47:17 +01001267/* This stream analyser works on a request. It applies all sticking rules to
1268 * it and always returns 1. The data must already be present in the buffer,
1269 * otherwise the rules won't match.
1270 */
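/* Example (illustrative only):
 *     stick-table type ip size 200k expire 30m
 *     stick match src
 *     stick store-request src
 * or simply "stick on src", which expands to the match and store-request
 * rules above.
 */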
Willy Tarreau87b09662015-04-03 00:22:06 +02001271static int process_sticking_rules(struct stream *s, struct channel *req, int an_bit)
Emeric Brun1d33b292010-01-04 15:47:17 +01001272{
1273 struct proxy *px = s->be;
Willy Tarreau192252e2015-04-04 01:47:55 +02001274 struct session *sess = s->sess;
Emeric Brun1d33b292010-01-04 15:47:17 +01001275 struct sticking_rule *rule;
1276
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001277 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001278
1279 list_for_each_entry(rule, &px->sticking_rules, list) {
1280 int ret = 1 ;
1281 int i;
1282
Willy Tarreau9667a802013-12-09 12:52:13 +01001283 /* Only the first stick store-request of each table is applied
1284 * and other ones are ignored. The purpose is to allow complex
1285 * configurations which look for multiple entries by decreasing
1286 * order of precision and to stop at the first which matches.
1287 * An example could be a store of the IP address from an HTTP
1288 * header first, then from the source if not found.
1289 */
Jerome Magninbee00ad2020-01-16 17:37:21 +01001290 if (rule->flags & STK_IS_STORE) {
1291 for (i = 0; i < s->store_count; i++) {
1292 if (rule->table.t == s->store[i].table)
1293 break;
1294 }
Emeric Brun1d33b292010-01-04 15:47:17 +01001295
Jerome Magninbee00ad2020-01-16 17:37:21 +01001296 if (i != s->store_count)
1297 continue;
1298 }
Emeric Brun1d33b292010-01-04 15:47:17 +01001299
1300 if (rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001301 ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001302 ret = acl_pass(ret);
1303 if (rule->cond->pol == ACL_COND_UNLESS)
1304 ret = !ret;
1305 }
1306
1307 if (ret) {
1308 struct stktable_key *key;
1309
Willy Tarreau192252e2015-04-04 01:47:55 +02001310 key = stktable_fetch_key(rule->table.t, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->expr, NULL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001311 if (!key)
1312 continue;
1313
1314 if (rule->flags & STK_IS_MATCH) {
1315 struct stksess *ts;
1316
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001317 if ((ts = stktable_lookup_key(rule->table.t, key)) != NULL) {
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001318 if (!(s->flags & SF_ASSIGNED))
1319 sticking_rule_find_target(s, rule->table.t, ts);
Emeric Brun819fc6f2017-06-13 19:37:32 +02001320 stktable_touch_local(rule->table.t, ts, 1);
Emeric Brun1d33b292010-01-04 15:47:17 +01001321 }
1322 }
1323 if (rule->flags & STK_IS_STORE) {
1324 if (s->store_count < (sizeof(s->store) / sizeof(s->store[0]))) {
1325 struct stksess *ts;
1326
1327 ts = stksess_new(rule->table.t, key);
1328 if (ts) {
1329 s->store[s->store_count].table = rule->table.t;
1330 s->store[s->store_count++].ts = ts;
1331 }
1332 }
1333 }
1334 }
1335 }
1336
1337 req->analysers &= ~an_bit;
1338 req->analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001339 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001340 return 1;
1341}
1342
1343/* This stream analyser works on a response. It applies all store rules to it
1344 * and always returns 1. The data must already be present in the buffer,
1345 * otherwise the rules won't match.
1346 */
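/* Example (illustrative only, names are placeholders):
 *     stick store-response res.cook(SRVID) table be_app
 * stores a server key learned from the response (here a Set-Cookie value) so
 * that later requests presenting it can be matched back to the same server.
 */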
Willy Tarreau87b09662015-04-03 00:22:06 +02001347static int process_store_rules(struct stream *s, struct channel *rep, int an_bit)
Emeric Brun1d33b292010-01-04 15:47:17 +01001348{
1349 struct proxy *px = s->be;
Willy Tarreau192252e2015-04-04 01:47:55 +02001350 struct session *sess = s->sess;
Emeric Brun1d33b292010-01-04 15:47:17 +01001351 struct sticking_rule *rule;
1352 int i;
Willy Tarreau9667a802013-12-09 12:52:13 +01001353 int nbreq = s->store_count;
Emeric Brun1d33b292010-01-04 15:47:17 +01001354
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001355 DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001356
1357 list_for_each_entry(rule, &px->storersp_rules, list) {
1358 int ret = 1 ;
Emeric Brun1d33b292010-01-04 15:47:17 +01001359
Willy Tarreau9667a802013-12-09 12:52:13 +01001360 /* Only the first stick store-response of each table is applied
1361 * and other ones are ignored. The purpose is to allow complex
1362 * configurations which look for multiple entries by decreasing
1363 * order of precision and to stop at the first which matches.
1364 * An example could be a store of a set-cookie value, with a
1365 * fallback to a parameter found in a 302 redirect.
1366 *
1367 * The store-response rules are not allowed to override the
1368 * store-request rules for the same table, but they may coexist.
1369 * Thus we can have up to one store-request entry and one store-
1370 * response entry for the same table at any time.
1371 */
1372 for (i = nbreq; i < s->store_count; i++) {
1373 if (rule->table.t == s->store[i].table)
1374 break;
1375 }
1376
1377 /* skip existing entries for this table */
1378 if (i < s->store_count)
1379 continue;
1380
Emeric Brun1d33b292010-01-04 15:47:17 +01001381 if (rule->cond) {
Willy Tarreau192252e2015-04-04 01:47:55 +02001382 ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001383 ret = acl_pass(ret);
1384 if (rule->cond->pol == ACL_COND_UNLESS)
1385 ret = !ret;
1386 }
1387
1388 if (ret) {
1389 struct stktable_key *key;
1390
Willy Tarreau192252e2015-04-04 01:47:55 +02001391 key = stktable_fetch_key(rule->table.t, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL, rule->expr, NULL);
Emeric Brun1d33b292010-01-04 15:47:17 +01001392 if (!key)
1393 continue;
1394
Willy Tarreau37e340c2013-12-06 23:05:21 +01001395 if (s->store_count < (sizeof(s->store) / sizeof(s->store[0]))) {
Emeric Brun1d33b292010-01-04 15:47:17 +01001396 struct stksess *ts;
1397
1398 ts = stksess_new(rule->table.t, key);
1399 if (ts) {
1400 s->store[s->store_count].table = rule->table.t;
Emeric Brun1d33b292010-01-04 15:47:17 +01001401 s->store[s->store_count++].ts = ts;
1402 }
1403 }
1404 }
1405 }
1406
1407 /* process store request and store response */
1408 for (i = 0; i < s->store_count; i++) {
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001409 struct stksess *ts;
Willy Tarreau13c29de2010-06-06 16:40:39 +02001410 void *ptr;
Thayne McCombs92149f92020-11-20 01:28:26 -07001411 char *key;
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001412 struct dict_entry *de;
Thayne McCombs92149f92020-11-20 01:28:26 -07001413 struct stktable *t = s->store[i].table;
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001414
Willy Tarreau88bc8002021-12-06 07:01:02 +00001415 if (objt_server(s->target) && __objt_server(s->target)->flags & SRV_F_NON_STICK) {
Simon Hormanfa461682011-06-25 09:39:49 +09001416 stksess_free(s->store[i].table, s->store[i].ts);
1417 s->store[i].ts = NULL;
1418 continue;
1419 }
1420
Thayne McCombs92149f92020-11-20 01:28:26 -07001421 ts = stktable_set_entry(t, s->store[i].ts);
Emeric Brun819fc6f2017-06-13 19:37:32 +02001422 if (ts != s->store[i].ts) {
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001423 /* the entry already existed, we can free ours */
Thayne McCombs92149f92020-11-20 01:28:26 -07001424 stksess_free(t, s->store[i].ts);
Emeric Brun1d33b292010-01-04 15:47:17 +01001425 }
Willy Tarreauf16d2b82010-06-06 15:38:59 +02001426 s->store[i].ts = NULL;
Emeric Brun819fc6f2017-06-13 19:37:32 +02001427
Christopher Faulet2a944ee2017-11-07 10:42:54 +01001428 HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
Thayne McCombs92149f92020-11-20 01:28:26 -07001429 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_ID);
Emeric Brun0e3457b2021-06-30 17:18:28 +02001430 stktable_data_cast(ptr, std_t_sint) = __objt_server(s->target)->puid;
Christopher Faulet2a944ee2017-11-07 10:42:54 +01001431 HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001432
Thayne McCombs92149f92020-11-20 01:28:26 -07001433 if (t->server_key_type == STKTABLE_SRV_NAME)
1434 key = __objt_server(s->target)->id;
1435 else if (t->server_key_type == STKTABLE_SRV_ADDR)
1436 key = __objt_server(s->target)->addr_node.key;
1437 else
1438 continue;
1439
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001440 HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
Thayne McCombs92149f92020-11-20 01:28:26 -07001441 de = dict_insert(&server_key_dict, key);
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001442 if (de) {
Thayne McCombs92149f92020-11-20 01:28:26 -07001443 ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_KEY);
Emeric Brun0e3457b2021-06-30 17:18:28 +02001444 stktable_data_cast(ptr, std_t_dict) = de;
Frédéric Lécaille03cdf552019-05-20 10:08:27 +02001445 }
1446 HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
1447
Thayne McCombs92149f92020-11-20 01:28:26 -07001448 stktable_touch_local(t, ts, 1);
Emeric Brun1d33b292010-01-04 15:47:17 +01001449 }
Willy Tarreau2a164ee2010-06-18 09:57:45 +02001450 s->store_count = 0; /* everything is stored */
Emeric Brun1d33b292010-01-04 15:47:17 +01001451
1452 rep->analysers &= ~an_bit;
1453 rep->analyse_exp = TICK_ETERNITY;
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001454
1455 DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
Emeric Brun1d33b292010-01-04 15:47:17 +01001456 return 1;
1457}
1458
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001459/* Set the stream to HTTP mode, if necessary. The minimal request HTTP analysers
1460 * are set and the client mux is upgraded. It returns 1 if the stream processing
1461 * may continue or 0 if it should be stopped. It happens on error or if the
Christopher Fauletae863c62021-03-15 12:03:44 +01001462 * upgrade required a new stream. The mux protocol may be specified.
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001463 */
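/* Example (illustrative only): this upgrade path is typically reached from a
 * TCP frontend through a rule such as
 *     tcp-request content switch-mode http proto h2
 * where the optional "proto" argument maps to <mux_proto> here.
 */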
Christopher Fauletae863c62021-03-15 12:03:44 +01001464int stream_set_http_mode(struct stream *s, const struct mux_proto_list *mux_proto)
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001465{
Christopher Faulet95a61e82021-12-22 14:22:03 +01001466 struct conn_stream *cs = s->csf;
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001467 struct connection *conn;
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001468
1469 /* Already an HTTP stream */
1470 if (IS_HTX_STRM(s))
1471 return 1;
1472
1473 s->req.analysers |= AN_REQ_WAIT_HTTP|AN_REQ_HTTP_PROCESS_FE;
1474
1475 if (unlikely(!s->txn && !http_create_txn(s)))
1476 return 0;
1477
Christopher Faulet13a35e52021-12-20 15:34:16 +01001478 conn = cs_conn(cs);
1479 if (conn) {
Christopher Fauleta0bdec32022-04-04 07:51:21 +02001480 cs_rx_endp_more(s->csf);
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001481		/* Make sure we're unsubscribed, as the new
1482 * mux will probably want to subscribe to
1483 * the underlying XPRT
1484 */
Christopher Faulet2f35e7b2022-03-31 11:09:28 +02001485 if (s->csf->wait_event.events)
1486 conn->mux->unsubscribe(cs, s->csf->wait_event.events, &(s->csf->wait_event));
Christopher Fauletae863c62021-03-15 12:03:44 +01001487
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001488 if (conn->mux->flags & MX_FL_NO_UPG)
1489 return 0;
Christopher Fauletae863c62021-03-15 12:03:44 +01001490 if (conn_upgrade_mux_fe(conn, cs, &s->req.buf,
1491 (mux_proto ? mux_proto->token : ist("")),
1492 PROTO_MODE_HTTP) == -1)
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001493 return 0;
1494
1495 s->req.flags &= ~(CF_READ_PARTIAL|CF_AUTO_CONNECT);
1496 s->req.total = 0;
1497 s->flags |= SF_IGNORE;
1498 if (strcmp(conn->mux->name, "H2") == 0) {
1499 /* For HTTP/2, destroy the conn_stream, disable logging,
1500 * and abort the stream process. Thus it will be
1501 * silently destroyed. The new mux will create new
1502 * streams.
1503 */
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001504 s->logs.logwait = 0;
1505 s->logs.level = 0;
1506 channel_abort(&s->req);
1507 channel_abort(&s->res);
1508 s->req.analysers &= AN_REQ_FLT_END;
1509 s->req.analyse_exp = TICK_ETERNITY;
1510 }
1511 }
1512
1513 return 1;
1514}
1515
1516
Christopher Fauletef285c12022-04-01 14:48:06 +02001517/* Updates at once the channel flags and timers of both conn-streams of the
1518 * same stream, to complete the work after the analysers, then updates the data
1519 * layer below. This will ensure that any synchronous update performed at the
1520 * data layer will be reflected in the channel flags and/or conn-stream.
1521 * Note that this does not change the conn-stream's current state, though
1522 * it updates the previous state to the current one.
1523 */
1524static void stream_update_both_cs(struct stream *s)
1525{
1526 struct conn_stream *csf = s->csf;
1527 struct conn_stream *csb = s->csb;
1528 struct channel *req = &s->req;
1529 struct channel *res = &s->res;
1530
1531 req->flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_READ_ATTACHED|CF_WRITE_NULL|CF_WRITE_PARTIAL);
1532 res->flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_READ_ATTACHED|CF_WRITE_NULL|CF_WRITE_PARTIAL);
1533
1534 s->prev_conn_state = csb->state;
1535
1536 /* let's recompute both sides states */
1537 if (cs_state_in(csf->state, CS_SB_RDY|CS_SB_EST))
1538 cs_update(csf);
1539
1540 if (cs_state_in(csb->state, CS_SB_RDY|CS_SB_EST))
1541 cs_update(csb);
1542
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +02001543 /* conn-streams are processed outside of process_stream() and must be
Christopher Fauletef285c12022-04-01 14:48:06 +02001544 * handled at the latest moment.
1545 */
1546 if (cs_appctx(csf)) {
Christopher Fauleta0bdec32022-04-04 07:51:21 +02001547 if ((cs_rx_endp_ready(csf) && !cs_rx_blocked(csf)) ||
1548 (cs_tx_endp_ready(csf) && !cs_tx_blocked(csf)))
Christopher Fauletef285c12022-04-01 14:48:06 +02001549 appctx_wakeup(__cs_appctx(csf));
1550 }
1551 if (cs_appctx(csb)) {
Christopher Fauleta0bdec32022-04-04 07:51:21 +02001552 if ((cs_rx_endp_ready(csb) && !cs_rx_blocked(csb)) ||
1553 (cs_tx_endp_ready(csb) && !cs_tx_blocked(csb)))
Christopher Fauletef285c12022-04-01 14:48:06 +02001554 appctx_wakeup(__cs_appctx(csb));
1555 }
1556}
1557
Christopher Faulet6c1fd982021-03-15 10:42:02 +01001558
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001559/* This macro is very specific to the function below. See the comments in
Willy Tarreau87b09662015-04-03 00:22:06 +02001560 * process_stream() below to understand the logic and the tests.
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001561 */
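/* Descriptive note on the bit manipulation below:
 *   - <list> first drops <flag> (the analyser that just ran), then picks up
 *     any analysers that were not in the previous snapshot <back>, keeping
 *     only bits still enabled in <real> (the channel's current analyser mask);
 *   - <back> is refreshed to the current mask;
 *   - the loop stops when nothing remains to run;
 *   - "(list) ^ ((list) & ((list) - 1))" isolates the lowest set bit; if that
 *     bit is lower than <flag>, an earlier analyser was (re-)enabled, so the
 *     evaluation restarts from the beginning of the chain.
 */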
1562#define UPDATE_ANALYSERS(real, list, back, flag) { \
1563 list = (((list) & ~(flag)) | ~(back)) & (real); \
1564 back = real; \
1565 if (!(list)) \
1566 break; \
1567 if (((list) ^ ((list) & ((list) - 1))) < (flag)) \
1568 continue; \
1569}
1570
Christopher Fauleta9215b72016-05-11 17:06:28 +02001571/* The two following macros call an analyzer for the specified channel if the
1572 * right flag is set. The first one is used for "filterable" analyzers. If a
Christopher Faulet3a394fa2016-05-11 17:13:39 +02001573 * stream has some registered filters, pre and post analyze callbacks are
Christopher Faulet0184ea72017-01-05 14:06:34 +01001574 * called. The second one is used for other analyzers (AN_REQ/RES_FLT_* and
Christopher Fauleta9215b72016-05-11 17:06:28 +02001575 * AN_REQ/RES_HTTP_XFER_BODY). */
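/* For reference (descriptive note): the analysers passed as <fun> follow the
 * prototype used throughout this file, e.g.
 *     static int process_switching_rules(struct stream *s, struct channel *req, int an_bit);
 * possibly with extra trailing arguments forwarded through the macros'
 * variadic parameters. A non-zero return lets the loop continue, zero stops
 * it (missing data, or the stream is to be killed).
 */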
1576#define FLT_ANALYZE(strm, chn, fun, list, back, flag, ...) \
1577 { \
1578 if ((list) & (flag)) { \
1579 if (HAS_FILTERS(strm)) { \
Christopher Faulet3a394fa2016-05-11 17:13:39 +02001580 if (!flt_pre_analyze((strm), (chn), (flag))) \
Christopher Fauleta9215b72016-05-11 17:06:28 +02001581 break; \
1582 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1583 break; \
Christopher Faulet3a394fa2016-05-11 17:13:39 +02001584 if (!flt_post_analyze((strm), (chn), (flag))) \
1585 break; \
Christopher Fauleta9215b72016-05-11 17:06:28 +02001586 } \
1587 else { \
1588 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1589 break; \
1590 } \
1591 UPDATE_ANALYSERS((chn)->analysers, (list), \
1592 (back), (flag)); \
1593 } \
1594 }
1595
1596#define ANALYZE(strm, chn, fun, list, back, flag, ...) \
1597 { \
1598 if ((list) & (flag)) { \
1599 if (!fun((strm), (chn), (flag), ##__VA_ARGS__)) \
1600 break; \
1601 UPDATE_ANALYSERS((chn)->analysers, (list), \
1602 (back), (flag)); \
1603 } \
1604 }
1605
Willy Tarreau87b09662015-04-03 00:22:06 +02001606/* Processes the client, server, request and response jobs of a stream task,
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001607 * then puts it back to the wait queue in a clean state, or cleans up its
1608 * resources if it must be deleted. Returns in <next> the date the task wants
1609 * to be woken up, or TICK_ETERNITY. In order not to call all functions for
1610 * nothing too many times, the request and response buffers flags are monitored
1611 * and each function is called only if at least another function has changed at
1612 * least one flag it is interested in.
1613 */
Willy Tarreau144f84a2021-03-02 16:09:26 +01001614struct task *process_stream(struct task *t, void *context, unsigned int state)
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001615{
Willy Tarreau827aee92011-03-10 16:55:02 +01001616 struct server *srv;
Olivier Houchard9f6af332018-05-25 14:04:04 +02001617 struct stream *s = context;
Willy Tarreaufb0afa72015-04-03 14:46:27 +02001618 struct session *sess = s->sess;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001619 unsigned int rqf_last, rpf_last;
Willy Tarreau815a9b22010-07-27 17:15:12 +02001620 unsigned int rq_prod_last, rq_cons_last;
1621 unsigned int rp_cons_last, rp_prod_last;
Willy Tarreau576507f2010-01-07 00:09:04 +01001622 unsigned int req_ana_back;
Willy Tarreau8f128b42014-11-28 15:07:47 +01001623 struct channel *req, *res;
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001624 struct conn_stream *csf, *csb;
Willy Tarreau3d07a162019-04-25 19:15:20 +02001625 unsigned int rate;
Willy Tarreau8f128b42014-11-28 15:07:47 +01001626
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001627 DBG_TRACE_ENTER(STRM_EV_STRM_PROC, s);
1628
Willy Tarreau7af4fa92020-06-17 20:49:49 +02001629 activity[tid].stream_calls++;
Willy Tarreaud80cb4e2018-01-20 19:30:13 +01001630
Willy Tarreau8f128b42014-11-28 15:07:47 +01001631 req = &s->req;
1632 res = &s->res;
1633
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001634 csf = s->csf;
1635 csb = s->csb;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001636
Willy Tarreau0f8d3ab2018-10-25 10:42:39 +02001637 /* First, attempt to receive pending data from I/O layers */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001638 cs_conn_sync_recv(csf);
1639 cs_conn_sync_recv(csb);
Willy Tarreau0f8d3ab2018-10-25 10:42:39 +02001640
Willy Tarreau6c539c42022-01-20 18:42:16 +01001641 /* Let's check if we're looping without making any progress, e.g. due
1642 * to a bogus analyser or the fact that we're ignoring a read0. The
1643 * call_rate counter only counts calls with no progress made.
1644 */
1645 if (!((req->flags | res->flags) & (CF_READ_PARTIAL|CF_WRITE_PARTIAL))) {
1646 rate = update_freq_ctr(&s->call_rate, 1);
1647 if (rate >= 100000 && s->call_rate.prev_ctr) // make sure to wait at least a full second
1648 stream_dump_and_crash(&s->obj_type, read_freq_ctr(&s->call_rate));
Willy Tarreau3d07a162019-04-25 19:15:20 +02001649 }
Olivier Houchardc2aa7112018-09-11 18:27:21 +02001650
Krzysztof Piotr Oledzkif9423ae2010-01-29 19:26:18 +01001651 /* this data may be no longer valid, clear it */
Willy Tarreaueee5b512015-04-03 23:46:31 +02001652 if (s->txn)
1653 memset(&s->txn->auth, 0, sizeof(s->txn->auth));
Krzysztof Piotr Oledzkif9423ae2010-01-29 19:26:18 +01001654
Willy Tarreau6f0a7ba2014-06-23 15:22:31 +02001655 /* This flag must explicitly be set every time */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001656 req->flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
1657 res->flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001658
1659 /* Keep a copy of req/rep flags so that we can detect shutdowns */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001660 rqf_last = req->flags & ~CF_MASK_ANALYSER;
1661 rpf_last = res->flags & ~CF_MASK_ANALYSER;
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001662
Christopher Faulet974da9f2022-03-30 15:30:03 +02001663 /* we don't want the conn-stream functions to recursively wake us up */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001664 csf->flags |= CS_FL_DONT_WAKE;
1665 csb->flags |= CS_FL_DONT_WAKE;
Willy Tarreau89f7ef22009-09-05 20:57:35 +02001666
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001667 /* update pending events */
Olivier Houchard9f6af332018-05-25 14:04:04 +02001668 s->pending_events |= (state & TASK_WOKEN_ANY);
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001669
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001670 /* 1a: Check for low level timeouts if needed. We just set a flag on
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +02001671 * conn-streams when their timeouts have expired.
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001672 */
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001673 if (unlikely(s->pending_events & TASK_WOKEN_TIMER)) {
Christopher Fauletae024ce2022-03-29 19:02:31 +02001674 stream_check_conn_timeout(s);
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001675
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +02001676 /* check channel timeouts, and close the corresponding conn-streams
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001677 * for future reads or writes. Note: this will also concern upper layers
1678 * but we do not touch any other flag. We must be careful and correctly
1679 * detect state changes when calling them.
1680 */
1681
Willy Tarreau8f128b42014-11-28 15:07:47 +01001682 channel_check_timeouts(req);
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001683
Willy Tarreau8f128b42014-11-28 15:07:47 +01001684 if (unlikely((req->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001685 csb->flags |= CS_FL_NOLINGER;
1686 cs_shutw(csb);
Willy Tarreau14641402009-12-29 14:49:56 +01001687 }
1688
Willy Tarreau8f128b42014-11-28 15:07:47 +01001689 if (unlikely((req->flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001690 if (csf->flags & CS_FL_NOHALF)
1691 csf->flags |= CS_FL_NOLINGER;
1692 cs_shutr(csf);
Willy Tarreau7bb68ab2012-05-13 14:48:59 +02001693 }
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001694
Willy Tarreau8f128b42014-11-28 15:07:47 +01001695 channel_check_timeouts(res);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001696
Willy Tarreau8f128b42014-11-28 15:07:47 +01001697 if (unlikely((res->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001698 csf->flags |= CS_FL_NOLINGER;
1699 cs_shutw(csf);
Willy Tarreau14641402009-12-29 14:49:56 +01001700 }
1701
Willy Tarreau8f128b42014-11-28 15:07:47 +01001702 if (unlikely((res->flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001703 if (csb->flags & CS_FL_NOHALF)
1704 csb->flags |= CS_FL_NOLINGER;
1705 cs_shutr(csb);
Willy Tarreau7bb68ab2012-05-13 14:48:59 +02001706 }
Willy Tarreau798f4322012-11-08 14:49:17 +01001707
Christopher Fauleta00d8172016-11-10 14:58:05 +01001708 if (HAS_FILTERS(s))
1709 flt_stream_check_timeouts(s);
1710
Willy Tarreau798f4322012-11-08 14:49:17 +01001711 /* Once in a while we're woken up because the task expires. But
1712 * this does not necessarily mean that a timeout has been reached.
Willy Tarreau87b09662015-04-03 00:22:06 +02001713 * So let's not run a whole stream processing if only an expiration
Willy Tarreau798f4322012-11-08 14:49:17 +01001714 * timeout needs to be refreshed.
1715 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001716 if (!((req->flags | res->flags) &
Willy Tarreau798f4322012-11-08 14:49:17 +01001717 (CF_SHUTR|CF_READ_ACTIVITY|CF_READ_TIMEOUT|CF_SHUTW|
Willy Tarreauede3d882018-10-24 17:17:56 +02001718 CF_WRITE_ACTIVITY|CF_WRITE_TIMEOUT|CF_ANA_TIMEOUT)) &&
Christopher Fauletae024ce2022-03-29 19:02:31 +02001719 !(s->flags & SF_CONN_EXP) &&
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001720 !((csf->endp->flags | csb->flags) & CS_EP_ERROR) &&
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001721 ((s->pending_events & TASK_WOKEN_ANY) == TASK_WOKEN_TIMER)) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001722 csf->flags &= ~CS_FL_DONT_WAKE;
1723 csb->flags &= ~CS_FL_DONT_WAKE;
Willy Tarreau798f4322012-11-08 14:49:17 +01001724 goto update_exp_and_leave;
Willy Tarreau5fb04712016-05-04 10:18:37 +02001725 }
Willy Tarreaub67a9b82009-06-21 22:03:51 +02001726 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001727
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001728 resync_conn_stream:
Willy Tarreau10fc09e2014-11-25 19:46:36 +01001729 /* below we may emit error messages so we have to ensure that we have
Christopher Faulet686501c2022-02-01 18:53:53 +01001730 * our buffers properly allocated. If the allocation failed, an error is
1731 * triggered.
1732 *
1733 * NOTE: An error is returned because the mechanism to queue entities
1734 * waiting for a buffer is totally broken for now. However, this
1735 * part must be refactored. When it is handled, this part
1736 * must be reviewed too.
Willy Tarreau10fc09e2014-11-25 19:46:36 +01001737 */
Willy Tarreau87b09662015-04-03 00:22:06 +02001738 if (!stream_alloc_work_buffer(s)) {
Christopher Faulet6cd56d52022-03-30 10:47:32 +02001739 s->csf->endp->flags |= CS_EP_ERROR;
Christopher Faulet50264b42022-03-30 19:39:30 +02001740 s->conn_err_type = STRM_ET_CONN_RES;
Christopher Faulet686501c2022-02-01 18:53:53 +01001741
Christopher Faulet6cd56d52022-03-30 10:47:32 +02001742 s->csb->endp->flags |= CS_EP_ERROR;
Christopher Faulet50264b42022-03-30 19:39:30 +02001743 s->conn_err_type = STRM_ET_CONN_RES;
Christopher Faulet686501c2022-02-01 18:53:53 +01001744
1745 if (!(s->flags & SF_ERR_MASK))
1746 s->flags |= SF_ERR_RESOURCE;
1747 sess_set_term_flags(s);
Willy Tarreau10fc09e2014-11-25 19:46:36 +01001748 }
1749
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +02001750 /* 1b: check for low-level errors reported at the conn-stream.
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001751 * First we check if it's a retryable error (in which case we don't
1752 * want to tell the buffer). Otherwise we report the error one level
1753 * upper by setting flags into the buffers. Note that the side towards
1754 * the client cannot have connect (hence retryable) errors. Also, the
1755 * connection setup code must be able to deal with any type of abort.
1756 */
Willy Tarreau3fdb3662012-11-12 00:42:33 +01001757 srv = objt_server(s->target);
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001758 if (unlikely(csf->endp->flags & CS_EP_ERROR)) {
1759 if (cs_state_in(csf->state, CS_SB_EST|CS_SB_DIS)) {
1760 cs_shutr(csf);
1761 cs_shutw(csf);
1762 cs_report_error(csf);
Willy Tarreau8f128b42014-11-28 15:07:47 +01001763 if (!(req->analysers) && !(res->analysers)) {
Willy Tarreau4781b152021-04-06 13:53:36 +02001764 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
1765 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01001766 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02001767 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01001768 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02001769 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02001770 if (!(s->flags & SF_ERR_MASK))
1771 s->flags |= SF_ERR_CLICL;
1772 if (!(s->flags & SF_FINST_MASK))
1773 s->flags |= SF_FINST_D;
Willy Tarreau05cb29b2008-12-14 11:44:04 +01001774 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001775 }
1776 }
1777
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001778 if (unlikely(csb->endp->flags & CS_EP_ERROR)) {
1779 if (cs_state_in(csb->state, CS_SB_EST|CS_SB_DIS)) {
1780 cs_shutr(csb);
1781 cs_shutw(csb);
1782 cs_report_error(csb);
Willy Tarreau4781b152021-04-06 13:53:36 +02001783 _HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
Willy Tarreau827aee92011-03-10 16:55:02 +01001784 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02001785 _HA_ATOMIC_INC(&srv->counters.failed_resp);
Willy Tarreau8f128b42014-11-28 15:07:47 +01001786 if (!(req->analysers) && !(res->analysers)) {
Willy Tarreau4781b152021-04-06 13:53:36 +02001787 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
1788 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01001789 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02001790 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01001791 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02001792 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02001793 if (!(s->flags & SF_ERR_MASK))
1794 s->flags |= SF_ERR_SRVCL;
1795 if (!(s->flags & SF_FINST_MASK))
1796 s->flags |= SF_FINST_D;
Willy Tarreau05cb29b2008-12-14 11:44:04 +01001797 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001798 }
1799 /* note: maybe we should process connection errors here ? */
1800 }
1801
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001802 if (cs_state_in(csb->state, CS_SB_CON|CS_SB_RDY)) {
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001803 /* we were trying to establish a connection on the server side,
1804 * maybe it succeeded, maybe it failed, maybe we timed out, ...
1805 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001806 if (csb->state == CS_ST_RDY)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001807 back_handle_st_rdy(s);
Christopher Faulet62e75742022-03-31 09:16:34 +02001808 else if (s->csb->state == CS_ST_CON)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001809 back_handle_st_con(s);
Willy Tarreaud66ed882019-06-05 18:02:04 +02001810
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001811 if (csb->state == CS_ST_CER)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001812 back_handle_st_cer(s);
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001813 else if (csb->state == CS_ST_EST)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01001814 back_establish(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001815
Christopher Faulet62e75742022-03-31 09:16:34 +02001816 /* state is now one of CS_ST_CON (still in progress), CS_ST_EST
1817 * (established), CS_ST_DIS (abort), CS_ST_CLO (last error),
1818 * CS_ST_ASS/CS_ST_TAR/CS_ST_REQ for retryable errors.
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001819 */
1820 }
1821
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001822 rq_prod_last = csf->state;
1823 rq_cons_last = csb->state;
1824 rp_cons_last = csf->state;
1825 rp_prod_last = csb->state;
Willy Tarreau815a9b22010-07-27 17:15:12 +02001826
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001827 /* Check for connection closure */
Christopher Fauleteea8fc72019-11-05 16:18:10 +01001828 DBG_TRACE_POINT(STRM_EV_STRM_PROC, s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001829
1830 /* nothing special to be done on client side */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001831 if (unlikely(csf->state == CS_ST_DIS)) {
1832 csf->state = CS_ST_CLO;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001833
Christopher Fauleta70a3542022-03-30 17:13:02 +02001834 /* This is needed only when debugging is enabled, to indicate
1835 * client-side close.
1836 */
1837 if (unlikely((global.mode & MODE_DEBUG) &&
1838 (!(global.mode & MODE_QUIET) ||
1839 (global.mode & MODE_VERBOSE)))) {
1840 chunk_printf(&trash, "%08x:%s.clicls[%04x:%04x]\n",
1841 s->uniq_id, s->be->id,
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001842 (unsigned short)conn_fd(cs_conn(csf)),
1843 (unsigned short)conn_fd(cs_conn(csb)));
Christopher Fauleta70a3542022-03-30 17:13:02 +02001844 DISGUISE(write(1, trash.area, trash.data));
1845 }
1846 }
1847
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001848 /* When a server-side connection is released, we have to count it and
1849 * check for pending connections on this server.
1850 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001851 if (unlikely(csb->state == CS_ST_DIS)) {
1852 csb->state = CS_ST_CLO;
Willy Tarreau3fdb3662012-11-12 00:42:33 +01001853 srv = objt_server(s->target);
Willy Tarreau827aee92011-03-10 16:55:02 +01001854 if (srv) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02001855 if (s->flags & SF_CURR_SESS) {
1856 s->flags &= ~SF_CURR_SESS;
Willy Tarreau4781b152021-04-06 13:53:36 +02001857 _HA_ATOMIC_DEC(&srv->cur_sess);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001858 }
1859 sess_change_server(s, NULL);
Willy Tarreau827aee92011-03-10 16:55:02 +01001860 if (may_dequeue_tasks(srv, s->be))
Willy Tarreau9ab78292021-06-22 18:47:51 +02001861 process_srv_queue(srv);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001862 }
Christopher Fauleta70a3542022-03-30 17:13:02 +02001863
1864 /* This is needed only when debugging is enabled, to indicate
1865 * server-side close.
1866 */
1867 if (unlikely((global.mode & MODE_DEBUG) &&
1868 (!(global.mode & MODE_QUIET) ||
1869 (global.mode & MODE_VERBOSE)))) {
Christopher Faulet62e75742022-03-31 09:16:34 +02001870 if (s->prev_conn_state == CS_ST_EST) {
Christopher Fauleta70a3542022-03-30 17:13:02 +02001871 chunk_printf(&trash, "%08x:%s.srvcls[%04x:%04x]\n",
1872 s->uniq_id, s->be->id,
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001873 (unsigned short)conn_fd(cs_conn(csf)),
1874 (unsigned short)conn_fd(cs_conn(csb)));
Christopher Fauleta70a3542022-03-30 17:13:02 +02001875 DISGUISE(write(1, trash.area, trash.data));
1876 }
1877 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001878 }
1879
1880 /*
1881 * Note: of the transient states (REQ, CER, DIS), only REQ may remain
1882 * at this point.
1883 */
1884
Willy Tarreau0be0ef92009-03-08 19:20:25 +01001885 resync_request:
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001886 /* Analyse request */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001887 if (((req->flags & ~rqf_last) & CF_MASK_ANALYSER) ||
1888 ((req->flags ^ rqf_last) & CF_MASK_STATIC) ||
Willy Tarreau33982cb2017-11-20 15:37:13 +01001889 (req->analysers && (req->flags & CF_SHUTW)) ||
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001890 csf->state != rq_prod_last ||
1891 csb->state != rq_cons_last ||
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001892 s->pending_events & TASK_WOKEN_MSG) {
Willy Tarreau8f128b42014-11-28 15:07:47 +01001893 unsigned int flags = req->flags;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001894
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001895 if (cs_state_in(csf->state, CS_SB_EST|CS_SB_DIS|CS_SB_CLO)) {
Willy Tarreaue34070e2010-01-08 00:32:27 +01001896 int max_loops = global.tune.maxpollevents;
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001897 unsigned int ana_list;
1898 unsigned int ana_back;
Willy Tarreau1a52dbd2009-06-28 19:37:53 +02001899
Willy Tarreau90deb182010-01-07 00:20:41 +01001900 /* it's up to the analysers to stop new connections,
1901 * disable reading or closing. Note: if an analyser
1902 * disables any of these bits, it is responsible for
1903 * enabling them again when it disables itself, so
1904 * that other analysers are called in similar conditions.
1905 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001906 channel_auto_read(req);
1907 channel_auto_connect(req);
1908 channel_auto_close(req);
Willy Tarreauedcf6682008-11-30 23:15:34 +01001909
1910 /* We will call all analysers for which a bit is set in
Willy Tarreau8f128b42014-11-28 15:07:47 +01001911 * req->analysers, following the bit order from LSB
Willy Tarreauedcf6682008-11-30 23:15:34 +01001912 * to MSB. The analysers must remove themselves from
Willy Tarreau1a52dbd2009-06-28 19:37:53 +02001913 * the list when not needed. Any analyser may return 0
1914 * to break out of the loop, either because of missing
1915 * data to take a decision, or because it decides to
Willy Tarreau87b09662015-04-03 00:22:06 +02001916 * kill the stream. We loop at least once through each
Willy Tarreau1a52dbd2009-06-28 19:37:53 +02001917 * analyser, and we may loop again if other analysers
1918 * are added in the middle.
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001919 *
1920 * We build a list of analysers to run. We evaluate all
1921 * of these analysers in the order of the lower bit to
1922 * the higher bit. This ordering is very important.
1923 * An analyser will often add/remove other analysers,
1924 * including itself. Any changes to itself have no effect
1925 * on the loop. If it removes any other analysers, we
1926 * want those analysers not to be called anymore during
1927 * this loop. If it adds an analyser that is located
1928 * after itself, we want it to be scheduled for being
1929 * processed during the loop. If it adds an analyser
1930 * which is located before it, we want it to switch to
1931 * it immediately, even if it has already been called
1932 * once but removed since.
1933 *
1934 * In order to achieve this, we compare the analyser
1935 * list after the call with a copy of it before the
1936 * call. The work list is fed with analyser bits that
1937 * appeared during the call. Then we compare previous
1938 * work list with the new one, and check the bits that
1939 * appeared. If the lowest of these bits is lower than
1940 * the current bit, it means we have enabled a previous
1941 * analyser and must immediately loop again.
Willy Tarreauedcf6682008-11-30 23:15:34 +01001942 */
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001943
Willy Tarreau8f128b42014-11-28 15:07:47 +01001944 ana_list = ana_back = req->analysers;
Willy Tarreaue34070e2010-01-08 00:32:27 +01001945 while (ana_list && max_loops--) {
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001946 /* Warning! ensure that analysers are always placed in ascending order! */
Christopher Faulet0184ea72017-01-05 14:06:34 +01001947 ANALYZE (s, req, flt_start_analyze, ana_list, ana_back, AN_REQ_FLT_START_FE);
Christopher Fauleta9215b72016-05-11 17:06:28 +02001948 FLT_ANALYZE(s, req, tcp_inspect_request, ana_list, ana_back, AN_REQ_INSPECT_FE);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02001949 FLT_ANALYZE(s, req, http_wait_for_request, ana_list, ana_back, AN_REQ_WAIT_HTTP);
1950 FLT_ANALYZE(s, req, http_wait_for_request_body, ana_list, ana_back, AN_REQ_HTTP_BODY);
1951 FLT_ANALYZE(s, req, http_process_req_common, ana_list, ana_back, AN_REQ_HTTP_PROCESS_FE, sess->fe);
Christopher Fauleta9215b72016-05-11 17:06:28 +02001952 FLT_ANALYZE(s, req, process_switching_rules, ana_list, ana_back, AN_REQ_SWITCHING_RULES);
Christopher Faulet0184ea72017-01-05 14:06:34 +01001953 ANALYZE (s, req, flt_start_analyze, ana_list, ana_back, AN_REQ_FLT_START_BE);
Christopher Fauleta9215b72016-05-11 17:06:28 +02001954 FLT_ANALYZE(s, req, tcp_inspect_request, ana_list, ana_back, AN_REQ_INSPECT_BE);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02001955 FLT_ANALYZE(s, req, http_process_req_common, ana_list, ana_back, AN_REQ_HTTP_PROCESS_BE, s->be);
1956 FLT_ANALYZE(s, req, http_process_tarpit, ana_list, ana_back, AN_REQ_HTTP_TARPIT);
Christopher Fauleta9215b72016-05-11 17:06:28 +02001957 FLT_ANALYZE(s, req, process_server_rules, ana_list, ana_back, AN_REQ_SRV_RULES);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02001958 FLT_ANALYZE(s, req, http_process_request, ana_list, ana_back, AN_REQ_HTTP_INNER);
Christopher Fauleta9215b72016-05-11 17:06:28 +02001959 FLT_ANALYZE(s, req, tcp_persist_rdp_cookie, ana_list, ana_back, AN_REQ_PRST_RDP_COOKIE);
1960 FLT_ANALYZE(s, req, process_sticking_rules, ana_list, ana_back, AN_REQ_STICKING_RULES);
Christopher Faulet0184ea72017-01-05 14:06:34 +01001961 ANALYZE (s, req, flt_analyze_http_headers, ana_list, ana_back, AN_REQ_FLT_HTTP_HDRS);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02001962 ANALYZE (s, req, http_request_forward_body, ana_list, ana_back, AN_REQ_HTTP_XFER_BODY);
William Lallemandcf62f7e2018-10-26 14:47:40 +02001963 ANALYZE (s, req, pcli_wait_for_request, ana_list, ana_back, AN_REQ_WAIT_CLI);
Christopher Faulet0184ea72017-01-05 14:06:34 +01001964 ANALYZE (s, req, flt_xfer_data, ana_list, ana_back, AN_REQ_FLT_XFER_DATA);
1965 ANALYZE (s, req, flt_end_analyze, ana_list, ana_back, AN_REQ_FLT_END);
Willy Tarreaue34070e2010-01-08 00:32:27 +01001966 break;
1967 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01001968 }
Willy Tarreau84455332009-03-15 22:34:05 +01001969
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001970 rq_prod_last = csf->state;
1971 rq_cons_last = csb->state;
Willy Tarreau8f128b42014-11-28 15:07:47 +01001972 req->flags &= ~CF_WAKE_ONCE;
1973 rqf_last = req->flags;
Willy Tarreau815a9b22010-07-27 17:15:12 +02001974
Willy Tarreau1ec9bb52019-06-06 14:45:26 +02001975 if ((req->flags ^ flags) & (CF_SHUTR|CF_SHUTW))
Willy Tarreau3deb3d02009-06-21 22:43:05 +02001976 goto resync_request;
Willy Tarreau3deb3d02009-06-21 22:43:05 +02001977 }
1978
Willy Tarreau576507f2010-01-07 00:09:04 +01001979 /* we'll monitor the request analysers while parsing the response,
1980 * because some response analysers may indirectly enable new request
1981 * analysers (eg: HTTP keep-alive).
1982 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01001983 req_ana_back = req->analysers;
Willy Tarreau576507f2010-01-07 00:09:04 +01001984
Willy Tarreau3deb3d02009-06-21 22:43:05 +02001985 resync_response:
1986 /* Analyse response */
1987
Willy Tarreau8f128b42014-11-28 15:07:47 +01001988 if (((res->flags & ~rpf_last) & CF_MASK_ANALYSER) ||
1989 (res->flags ^ rpf_last) & CF_MASK_STATIC ||
Willy Tarreau33982cb2017-11-20 15:37:13 +01001990 (res->analysers && (res->flags & CF_SHUTW)) ||
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001991 csf->state != rp_cons_last ||
1992 csb->state != rp_prod_last ||
Christopher Faulet9d810ca2016-12-08 22:33:52 +01001993 s->pending_events & TASK_WOKEN_MSG) {
Willy Tarreau8f128b42014-11-28 15:07:47 +01001994 unsigned int flags = res->flags;
Willy Tarreau3deb3d02009-06-21 22:43:05 +02001995
Christopher Fauletc77ceb62022-04-04 11:08:42 +02001996 if (cs_state_in(csb->state, CS_SB_EST|CS_SB_DIS|CS_SB_CLO)) {
Willy Tarreaue34070e2010-01-08 00:32:27 +01001997 int max_loops = global.tune.maxpollevents;
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01001998 unsigned int ana_list;
1999 unsigned int ana_back;
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002000
Willy Tarreau90deb182010-01-07 00:20:41 +01002001			/* it's up to the analysers to disable reading or
2002 * closing. Note: if an analyser disables any of these
2003 * bits, it is responsible for enabling them again when
2004 * it disables itself, so that other analysers are called
2005 * in similar conditions.
2006 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002007 channel_auto_read(res);
2008 channel_auto_close(res);
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002009
2010 /* We will call all analysers for which a bit is set in
Willy Tarreau8f128b42014-11-28 15:07:47 +01002011 * res->analysers, following the bit order from LSB
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002012 * to MSB. The analysers must remove themselves from
2013 * the list when not needed. Any analyser may return 0
2014 * to break out of the loop, either because of missing
2015 * data to take a decision, or because it decides to
Willy Tarreau87b09662015-04-03 00:22:06 +02002016 * kill the stream. We loop at least once through each
Willy Tarreaub37c27e2009-10-18 22:53:08 +02002017 * analyser, and we may loop again if other analysers
2018 * are added in the middle.
2019 */
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002020
Willy Tarreau8f128b42014-11-28 15:07:47 +01002021 ana_list = ana_back = res->analysers;
Willy Tarreaue34070e2010-01-08 00:32:27 +01002022 while (ana_list && max_loops--) {
Willy Tarreau1e0bbaf2010-01-06 23:53:24 +01002023 /* Warning! ensure that analysers are always placed in ascending order! */
Christopher Faulet0184ea72017-01-05 14:06:34 +01002024 ANALYZE (s, res, flt_start_analyze, ana_list, ana_back, AN_RES_FLT_START_FE);
2025 ANALYZE (s, res, flt_start_analyze, ana_list, ana_back, AN_RES_FLT_START_BE);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002026 FLT_ANALYZE(s, res, tcp_inspect_response, ana_list, ana_back, AN_RES_INSPECT);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002027 FLT_ANALYZE(s, res, http_wait_for_response, ana_list, ana_back, AN_RES_WAIT_HTTP);
Christopher Fauleta9215b72016-05-11 17:06:28 +02002028 FLT_ANALYZE(s, res, process_store_rules, ana_list, ana_back, AN_RES_STORE_RULES);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002029 FLT_ANALYZE(s, res, http_process_res_common, ana_list, ana_back, AN_RES_HTTP_PROCESS_BE, s->be);
Christopher Faulet0184ea72017-01-05 14:06:34 +01002030 ANALYZE (s, res, flt_analyze_http_headers, ana_list, ana_back, AN_RES_FLT_HTTP_HDRS);
Christopher Fauletfc9cfe42019-07-16 14:54:53 +02002031 ANALYZE (s, res, http_response_forward_body, ana_list, ana_back, AN_RES_HTTP_XFER_BODY);
William Lallemandcf62f7e2018-10-26 14:47:40 +02002032 ANALYZE (s, res, pcli_wait_for_response, ana_list, ana_back, AN_RES_WAIT_CLI);
Christopher Faulet0184ea72017-01-05 14:06:34 +01002033 ANALYZE (s, res, flt_xfer_data, ana_list, ana_back, AN_RES_FLT_XFER_DATA);
2034 ANALYZE (s, res, flt_end_analyze, ana_list, ana_back, AN_RES_FLT_END);
Willy Tarreaue34070e2010-01-08 00:32:27 +01002035 break;
2036 }
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002037 }
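	/* Illustrative note (added, not part of the original sources): each
	 * ANALYZE()/FLT_ANALYZE() entry above conceptually expands to a guarded
	 * call like the sketch below, where UPDATE_ANALYSERS() resynchronizes
	 * <ana_list>/<ana_back> when an analyser unregisters itself. The real
	 * macros are defined earlier in this file and may differ in detail;
	 * AN_RES_SOMETHING is a placeholder bit:
	 *
	 *     if (ana_list & AN_RES_SOMETHING) {
	 *             if (!analyser(s, res, AN_RES_SOMETHING))
	 *                     break; // missing data or stream aborted
	 *             UPDATE_ANALYSERS(res->analysers, ana_list, ana_back, AN_RES_SOMETHING);
	 *     }
	 */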
2038
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002039 rp_cons_last = csf->state;
2040 rp_prod_last = csb->state;
Christopher Fauletcdaea892017-07-06 15:49:30 +02002041 res->flags &= ~CF_WAKE_ONCE;
Willy Tarreau8f128b42014-11-28 15:07:47 +01002042 rpf_last = res->flags;
Willy Tarreau815a9b22010-07-27 17:15:12 +02002043
Willy Tarreau1ec9bb52019-06-06 14:45:26 +02002044 if ((res->flags ^ flags) & (CF_SHUTR|CF_SHUTW))
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002045 goto resync_response;
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002046 }
2047
Willy Tarreau576507f2010-01-07 00:09:04 +01002048 /* maybe someone has added some request analysers, so we must check and loop */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002049 if (req->analysers & ~req_ana_back)
Willy Tarreau576507f2010-01-07 00:09:04 +01002050 goto resync_request;
2051
Willy Tarreau8f128b42014-11-28 15:07:47 +01002052 if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
Willy Tarreau0499e352010-12-17 07:13:42 +01002053 goto resync_request;
2054
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002055 /* FIXME: here we should call protocol handlers which rely on
2056 * both buffers.
2057 */
2058
2059
2060 /*
Willy Tarreau87b09662015-04-03 00:22:06 +02002061 * Now we propagate unhandled errors to the stream. Normally
Willy Tarreauae526782010-03-04 20:34:23 +01002062 * we're just in a data phase here since it means we have not
2063 * seen any analyser who could set an error status.
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002064 */
Willy Tarreau3fdb3662012-11-12 00:42:33 +01002065 srv = objt_server(s->target);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002066 if (unlikely(!(s->flags & SF_ERR_MASK))) {
Willy Tarreau8f128b42014-11-28 15:07:47 +01002067 if (req->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002068 /* Report it if the client got an error or a read timeout expired */
Christopher Faulet813f9132021-10-18 15:06:20 +02002069 req->analysers &= AN_REQ_FLT_END;
Willy Tarreau8f128b42014-11-28 15:07:47 +01002070 if (req->flags & CF_READ_ERROR) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002071 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
2072 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002073 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002074 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002075 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002076 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002077 s->flags |= SF_ERR_CLICL;
Willy Tarreauae526782010-03-04 20:34:23 +01002078 }
Willy Tarreau8f128b42014-11-28 15:07:47 +01002079 else if (req->flags & CF_READ_TIMEOUT) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002080 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
2081 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002082 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002083 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002084 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002085 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002086 s->flags |= SF_ERR_CLITO;
Willy Tarreauae526782010-03-04 20:34:23 +01002087 }
Willy Tarreau8f128b42014-11-28 15:07:47 +01002088 else if (req->flags & CF_WRITE_ERROR) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002089 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
2090 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002091 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002092 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002093 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002094 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002095 s->flags |= SF_ERR_SRVCL;
Willy Tarreauae526782010-03-04 20:34:23 +01002096 }
2097 else {
Willy Tarreau4781b152021-04-06 13:53:36 +02002098 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
2099 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002100 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002101 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002102 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002103 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002104 s->flags |= SF_ERR_SRVTO;
Willy Tarreauae526782010-03-04 20:34:23 +01002105 }
Willy Tarreau84455332009-03-15 22:34:05 +01002106 sess_set_term_flags(s);
Christopher Faulet5e1a9d72019-04-23 17:34:22 +02002107
2108 /* Abort the request if a client error occurred while
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +02002109 * the backend conn-stream is in the CS_ST_INI
Christopher Faulet62e75742022-03-31 09:16:34 +02002110 * state. It is switched into the CS_ST_CLO state and
Christopher Faulet5e1a9d72019-04-23 17:34:22 +02002111 * the request channel is erased. */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002112 if (csb->state == CS_ST_INI) {
Christopher Faulet62e75742022-03-31 09:16:34 +02002113 s->csb->state = CS_ST_CLO;
Christopher Faulet5e1a9d72019-04-23 17:34:22 +02002114 channel_abort(req);
2115 if (IS_HTX_STRM(s))
2116 channel_htx_erase(req, htxbuf(&req->buf));
2117 else
2118 channel_erase(req);
2119 }
Willy Tarreau84455332009-03-15 22:34:05 +01002120 }
Willy Tarreau8f128b42014-11-28 15:07:47 +01002121 else if (res->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002122 /* Report it if the server got an error or a read timeout expired */
Christopher Faulet813f9132021-10-18 15:06:20 +02002123 res->analysers &= AN_RES_FLT_END;
Willy Tarreau8f128b42014-11-28 15:07:47 +01002124 if (res->flags & CF_READ_ERROR) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002125 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
2126 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002127 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002128 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002129 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002130 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002131 s->flags |= SF_ERR_SRVCL;
Willy Tarreauae526782010-03-04 20:34:23 +01002132 }
Willy Tarreau8f128b42014-11-28 15:07:47 +01002133 else if (res->flags & CF_READ_TIMEOUT) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002134 _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
2135 _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002136 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002137 _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002138 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002139 _HA_ATOMIC_INC(&srv->counters.srv_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002140 s->flags |= SF_ERR_SRVTO;
Willy Tarreauae526782010-03-04 20:34:23 +01002141 }
Willy Tarreau8f128b42014-11-28 15:07:47 +01002142 else if (res->flags & CF_WRITE_ERROR) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002143 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
2144 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002145 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002146 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002147 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002148 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002149 s->flags |= SF_ERR_CLICL;
Willy Tarreauae526782010-03-04 20:34:23 +01002150 }
2151 else {
Willy Tarreau4781b152021-04-06 13:53:36 +02002152 _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
2153 _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
Christopher Faulet99ac8a12020-01-24 11:45:05 +01002154 if (sess->listener && sess->listener->counters)
Willy Tarreau4781b152021-04-06 13:53:36 +02002155 _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
Willy Tarreau827aee92011-03-10 16:55:02 +01002156 if (srv)
Willy Tarreau4781b152021-04-06 13:53:36 +02002157 _HA_ATOMIC_INC(&srv->counters.cli_aborts);
Willy Tarreaue7dff022015-04-03 01:14:29 +02002158 s->flags |= SF_ERR_CLITO;
Willy Tarreauae526782010-03-04 20:34:23 +01002159 }
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002160 sess_set_term_flags(s);
2161 }
Willy Tarreau84455332009-03-15 22:34:05 +01002162 }
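	/* Readability note (added): in the error attribution above, the request
	 * channel maps read errors/timeouts to the client (SF_ERR_CLICL,
	 * SF_ERR_CLITO) and write errors/timeouts to the server (SF_ERR_SRVCL,
	 * SF_ERR_SRVTO), while the response channel uses the opposite mapping,
	 * since there the server feeds the reads and the client consumes the
	 * writes.
	 */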
2163
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002164 /*
2165 * Here we take care of forwarding unhandled data. This also includes
2166 * connection establishments and shutdown requests.
2167 */
2168
2169
Ilya Shipitsinb8888ab2021-01-06 21:20:16 +05002170 /* If no one is interested in analysing data, it's time to forward
Willy Tarreau31971e52009-09-20 12:07:52 +02002171 * everything. We configure the buffer to forward indefinitely.
Willy Tarreau03cdb7c2012-08-27 23:14:58 +02002172 * Note that we're checking CF_SHUTR_NOW as an indication of a possible
Willy Tarreau8263d2b2012-08-28 00:06:31 +02002173 * recent call to channel_abort().
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002174 */
Christopher Faulet35fe6992017-08-29 16:06:38 +02002175 if (unlikely((!req->analysers || (req->analysers == AN_REQ_FLT_END && !(req->flags & CF_FLT_ANALYZE))) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002176 !(req->flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002177 (cs_state_in(csf->state, CS_SB_EST|CS_SB_DIS|CS_SB_CLO)) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002178 (req->to_forward != CHN_INFINITE_FORWARD))) {
Willy Tarreaub31c9712012-11-11 23:05:39 +01002179 /* This buffer is freewheeling, there's no analyser
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002180 * attached to it. If any data are left in, we'll permit them to
2181 * move.
2182 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002183 channel_auto_read(req);
2184 channel_auto_connect(req);
2185 channel_auto_close(req);
Willy Tarreau5bd8c372009-01-19 00:32:22 +01002186
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002187 if (IS_HTX_STRM(s)) {
2188 struct htx *htx = htxbuf(&req->buf);
2189
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002190 /* We'll let data flow between the producer (if still connected)
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002191 * to the consumer.
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002192 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002193 co_set_data(req, htx->data);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002194 if (!(req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002195 channel_htx_forward_forever(req, htx);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002196 }
2197 else {
2198 /* We'll let data flow between the producer (if still connected)
2199 * to the consumer (which might possibly not be connected yet).
2200 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002201 c_adv(req, ci_data(req));
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002202 if (!(req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
2203 channel_forward_forever(req);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002204 }
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002205 }
Willy Tarreauf890dc92008-12-13 21:12:26 +01002206
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002207 /* check if it is wise to enable kernel splicing to forward request data */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002208 if (!(req->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
2209 req->to_forward &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002210 (global.tune.options & GTUNE_USE_SPLICE) &&
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002211 (cs_conn(csf) && __cs_conn(csf)->xprt && __cs_conn(csf)->xprt->rcv_pipe &&
2212 __cs_conn(csf)->mux && __cs_conn(csf)->mux->rcv_pipe) &&
2213 (cs_conn(csb) && __cs_conn(csb)->xprt && __cs_conn(csb)->xprt->snd_pipe &&
2214 __cs_conn(csb)->mux && __cs_conn(csb)->mux->snd_pipe) &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002215 (pipes_used < global.maxpipes) &&
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02002216 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_REQ) ||
2217 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002218 (req->flags & CF_STREAMER_FAST)))) {
2219 req->flags |= CF_KERN_SPLICING;
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002220 }
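	/* Configuration sketch (illustrative, directive names assumed): the
	 * condition above only sets CF_KERN_SPLICING when splicing is globally
	 * allowed (GTUNE_USE_SPLICE, i.e. "nosplice" not set), pipes remain
	 * available below the global "maxpipes" limit, and the frontend or
	 * backend opted in, e.g. via "option splice-request", or via
	 * "option splice-auto" for streams already flagged CF_STREAMER_FAST.
	 */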
2221
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002222 /* reflect what the L7 analysers have seen last */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002223 rqf_last = req->flags;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002224
Willy Tarreau520d95e2009-09-19 21:04:57 +02002225 /* it's possible that an upper layer has requested a connection setup or abort.
2226 * There are 2 situations where we decide to establish a new connection :
2227 * - there are data scheduled for emission in the buffer
Willy Tarreau03cdb7c2012-08-27 23:14:58 +02002228 * - the CF_AUTO_CONNECT flag is set (active connection)
Willy Tarreau520d95e2009-09-19 21:04:57 +02002229 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002230 if (csb->state == CS_ST_INI) {
Christopher Faulet5e1a9d72019-04-23 17:34:22 +02002231 if (!(req->flags & CF_SHUTW)) {
Willy Tarreau8f128b42014-11-28 15:07:47 +01002232 if ((req->flags & CF_AUTO_CONNECT) || !channel_is_empty(req)) {
Willy Tarreaucf644ed2013-09-29 17:19:56 +02002233 /* If we have an appctx, there is no connect method, so we
2234 * immediately switch to the connected state, otherwise we
2235 * perform a connection request.
Willy Tarreau520d95e2009-09-19 21:04:57 +02002236 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002237 csb->state = CS_ST_REQ; /* new connection requested */
Christopher Faulet731c8e62022-03-29 16:08:44 +02002238 s->conn_retries = 0;
Christopher Faulet9f5382e2021-05-21 13:46:14 +02002239 if ((s->be->retry_type &~ PR_RE_CONN_FAILED) &&
2240 (s->be->mode == PR_MODE_HTTP) &&
Christopher Faulete05bf9e2022-03-29 15:23:40 +02002241 !(s->txn->flags & TX_D_L7_RETRY))
2242 s->txn->flags |= TX_L7_RETRY;
Willy Tarreau520d95e2009-09-19 21:04:57 +02002243 }
Willy Tarreau73201222009-08-16 18:27:24 +02002244 }
Willy Tarreauf41ffdc2009-09-20 08:19:25 +02002245 else {
Christopher Faulet62e75742022-03-31 09:16:34 +02002246 s->csb->state = CS_ST_CLO; /* shutw+ini = abort */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002247 channel_shutw_now(req); /* fix buffer flags upon abort */
2248 channel_shutr_now(res);
Willy Tarreauf41ffdc2009-09-20 08:19:25 +02002249 }
Willy Tarreau92795622009-03-06 12:51:23 +01002250 }
2251
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002252
2253 /* we may have a pending connection request, or a connection waiting
2254 * for completion.
2255 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002256 if (cs_state_in(csb->state, CS_SB_REQ|CS_SB_QUE|CS_SB_TAR|CS_SB_ASS)) {
Thierry FOURNIER4834bc72015-06-06 19:29:07 +02002257 /* prune the request variables and swap to the response variables. */
2258 if (s->vars_reqres.scope != SCOPE_RES) {
Jerome Magnin2f44e882019-11-09 18:00:47 +01002259 if (!LIST_ISEMPTY(&s->vars_reqres.head))
Willy Tarreaucda7f3f2018-10-28 13:44:36 +01002260 vars_prune(&s->vars_reqres, s->sess, s);
Willy Tarreaub7bfcb32021-08-31 08:13:25 +02002261 vars_init_head(&s->vars_reqres, SCOPE_RES);
Thierry FOURNIER4834bc72015-06-06 19:29:07 +02002262 }
2263
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002264 do {
2265 /* nb: step 1 might switch from QUE to ASS, but we first want
2266 * to give a chance to step 2 to perform a redirect if needed.
2267 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002268 if (csb->state != CS_ST_REQ)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01002269 back_try_conn_req(s);
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002270 if (csb->state == CS_ST_REQ)
Willy Tarreau3a9312a2020-01-09 18:43:15 +01002271 back_handle_st_req(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002272
Willy Tarreauada4c582020-03-04 16:42:03 +01002273 /* get a chance to complete an immediate connection setup */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002274 if (csb->state == CS_ST_RDY)
2275 goto resync_conn_stream;
Willy Tarreauada4c582020-03-04 16:42:03 +01002276
Willy Tarreau9e5a3aa2013-12-31 23:32:12 +01002277 /* applets directly go to the ESTABLISHED state. Similarly,
2278 * servers experience the same fate when their connection
2279 * is reused.
2280 */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002281 if (unlikely(csb->state == CS_ST_EST))
Willy Tarreau3a9312a2020-01-09 18:43:15 +01002282 back_establish(s);
Willy Tarreaufac4bd12013-11-30 09:21:49 +01002283
Willy Tarreau3fdb3662012-11-12 00:42:33 +01002284 srv = objt_server(s->target);
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002285 if (csb->state == CS_ST_ASS && srv && srv->rdr_len && (s->flags & SF_REDIRECTABLE))
2286 http_perform_server_redirect(s, csb);
2287 } while (csb->state == CS_ST_ASS);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002288 }
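	/* Note (added): the loop above keeps stepping the backend conn-stream
	 * through the connection setup states until it leaves CS_ST_ASS, and when
	 * the elected server carries a redirection prefix (srv->rdr_len, i.e. the
	 * "redir" server keyword) and the stream is still redirectable, the
	 * request is answered with a redirect instead of being forwarded.
	 */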
2289
Willy Tarreau829bd472019-06-06 09:17:23 +02002290 /* Let's see if we can send the pending request now */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002291 cs_conn_sync_send(csb);
Willy Tarreau829bd472019-06-06 09:17:23 +02002292
2293 /*
2294 * Now forward all shutdown requests between both sides of the request buffer
2295 */
2296
2297 /* first, let's check if the request buffer needs to shutdown(write), which may
2298 * happen either because the input is closed or because we want to force a close
2299 * once the server has begun to respond. If a half-closed timeout is set, we adjust
Willy Tarreaua544c662022-04-14 17:39:48 +02002300 * the other side's timeout as well. However this has no effect during the
2301 * connection setup unless the backend has abortonclose set.
Willy Tarreau829bd472019-06-06 09:17:23 +02002302 */
2303 if (unlikely((req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
Willy Tarreaua544c662022-04-14 17:39:48 +02002304 (CF_AUTO_CLOSE|CF_SHUTR) &&
2305 (csb->state != CS_ST_CON || (s->be->options & PR_O_ABRT_CLOSE)))) {
Willy Tarreau829bd472019-06-06 09:17:23 +02002306 channel_shutw_now(req);
2307 }
2308
2309 /* shutdown(write) pending */
2310 if (unlikely((req->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
2311 channel_is_empty(req))) {
2312 if (req->flags & CF_READ_ERROR)
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002313 csb->flags |= CS_FL_NOLINGER;
2314 cs_shutw(csb);
Willy Tarreau829bd472019-06-06 09:17:23 +02002315 }
2316
2317 /* shutdown(write) done on server side, we must stop the client too */
2318 if (unlikely((req->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW &&
2319 !req->analysers))
2320 channel_shutr_now(req);
2321
2322 /* shutdown(read) pending */
2323 if (unlikely((req->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002324 if (csf->flags & CS_FL_NOHALF)
2325 csf->flags |= CS_FL_NOLINGER;
2326 cs_shutr(csf);
Willy Tarreau829bd472019-06-06 09:17:23 +02002327 }
2328
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002329 /* Benchmarks have shown that it's optimal to do a full resync now */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002330 if (csf->state == CS_ST_DIS ||
2331 cs_state_in(csb->state, CS_SB_RDY|CS_SB_DIS) ||
2332 (csf->endp->flags & CS_EP_ERROR && csf->state != CS_ST_CLO) ||
2333 (csb->endp->flags & CS_EP_ERROR && csb->state != CS_ST_CLO))
2334 goto resync_conn_stream;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002335
Willy Tarreau815a9b22010-07-27 17:15:12 +02002336 /* otherwise we want to check if we need to resync the req buffer or not */
Willy Tarreau1ec9bb52019-06-06 14:45:26 +02002337 if ((req->flags ^ rqf_last) & (CF_SHUTR|CF_SHUTW))
Willy Tarreau0be0ef92009-03-08 19:20:25 +01002338 goto resync_request;
2339
Willy Tarreau3deb3d02009-06-21 22:43:05 +02002340 /* perform output updates to the response buffer */
Willy Tarreau84455332009-03-15 22:34:05 +01002341
Ilya Shipitsinb8888ab2021-01-06 21:20:16 +05002342 /* If no one is interested in analysing data, it's time to forward
Willy Tarreau31971e52009-09-20 12:07:52 +02002343 * everything. We configure the buffer to forward indefinitely.
Willy Tarreau03cdb7c2012-08-27 23:14:58 +02002344 * Note that we're checking CF_SHUTR_NOW as an indication of a possible
Willy Tarreau8263d2b2012-08-28 00:06:31 +02002345 * recent call to channel_abort().
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002346 */
Christopher Faulet35fe6992017-08-29 16:06:38 +02002347 if (unlikely((!res->analysers || (res->analysers == AN_RES_FLT_END && !(res->flags & CF_FLT_ANALYZE))) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002348 !(res->flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002349 cs_state_in(csb->state, CS_SB_EST|CS_SB_DIS|CS_SB_CLO) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002350 (res->to_forward != CHN_INFINITE_FORWARD))) {
Willy Tarreaub31c9712012-11-11 23:05:39 +01002351 /* This buffer is freewheeling, there's no analyser
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002352 * attached to it. If any data are left in, we'll permit them to
2353 * move.
2354 */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002355 channel_auto_read(res);
2356 channel_auto_close(res);
Willy Tarreauda4d9fe2010-11-07 20:26:56 +01002357
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002358 if (IS_HTX_STRM(s)) {
2359 struct htx *htx = htxbuf(&res->buf);
Willy Tarreauce887fd2012-05-12 12:50:00 +02002360
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002361 /* We'll let data flow between the producer (if still connected)
2362 * to the consumer.
2363 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002364 co_set_data(res, htx->data);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002365 if (!(res->flags & (CF_SHUTR|CF_SHUTW_NOW)))
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002366 channel_htx_forward_forever(res, htx);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002367 }
2368 else {
2369 /* We'll let data flow between the producer (if still connected)
2370 * to the consumer.
2371 */
Christopher Faulet2dba1a52018-12-18 21:57:24 +01002372 c_adv(res, ci_data(res));
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002373 if (!(res->flags & (CF_SHUTR|CF_SHUTW_NOW)))
2374 channel_forward_forever(res);
Christopher Fauletb2aedea2018-12-05 11:56:15 +01002375 }
Willy Tarreau42529c32015-07-09 18:38:57 +02002376
Willy Tarreauce887fd2012-05-12 12:50:00 +02002377 /* if we have no analyser anymore in any direction and have a
Willy Tarreau05cdd962014-05-10 14:30:07 +02002378 * tunnel timeout set, use it now. Note that we must respect
2379 * the half-closed timeouts as well.
Willy Tarreauce887fd2012-05-12 12:50:00 +02002380 */
Amaury Denoyellefb504432020-12-10 13:43:53 +01002381 if (!req->analysers && s->tunnel_timeout) {
Willy Tarreau8f128b42014-11-28 15:07:47 +01002382 req->rto = req->wto = res->rto = res->wto =
Amaury Denoyellefb504432020-12-10 13:43:53 +01002383 s->tunnel_timeout;
Willy Tarreau05cdd962014-05-10 14:30:07 +02002384
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02002385 if ((req->flags & CF_SHUTR) && tick_isset(sess->fe->timeout.clientfin))
2386 res->wto = sess->fe->timeout.clientfin;
Willy Tarreau8f128b42014-11-28 15:07:47 +01002387 if ((req->flags & CF_SHUTW) && tick_isset(s->be->timeout.serverfin))
2388 res->rto = s->be->timeout.serverfin;
2389 if ((res->flags & CF_SHUTR) && tick_isset(s->be->timeout.serverfin))
2390 req->wto = s->be->timeout.serverfin;
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02002391 if ((res->flags & CF_SHUTW) && tick_isset(sess->fe->timeout.clientfin))
2392 req->rto = sess->fe->timeout.clientfin;
Willy Tarreau05cdd962014-05-10 14:30:07 +02002393
Willy Tarreau8f128b42014-11-28 15:07:47 +01002394 req->rex = tick_add(now_ms, req->rto);
2395 req->wex = tick_add(now_ms, req->wto);
2396 res->rex = tick_add(now_ms, res->rto);
2397 res->wex = tick_add(now_ms, res->wto);
Willy Tarreauce887fd2012-05-12 12:50:00 +02002398 }
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002399 }
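	/* Worked example (assumed timeout values): with "timeout tunnel 1h" and
	 * "timeout client-fin 30s", once no analyser remains in either direction
	 * the four timeouts above are rearmed to 1h; if the client then
	 * half-closes (CF_SHUTR on req), res->wto drops to 30s so the response
	 * side does not linger for the whole tunnel timeout.
	 */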
Willy Tarreauf890dc92008-12-13 21:12:26 +01002400
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002401 /* check if it is wise to enable kernel splicing to forward response data */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002402 if (!(res->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
2403 res->to_forward &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002404 (global.tune.options & GTUNE_USE_SPLICE) &&
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002405 (cs_conn(csf) && __cs_conn(csf)->xprt && __cs_conn(csf)->xprt->snd_pipe &&
2406 __cs_conn(csf)->mux && __cs_conn(csf)->mux->snd_pipe) &&
2407 (cs_conn(csb) && __cs_conn(csb)->xprt && __cs_conn(csb)->xprt->rcv_pipe &&
2408 __cs_conn(csb)->mux && __cs_conn(csb)->mux->rcv_pipe) &&
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002409 (pipes_used < global.maxpipes) &&
Willy Tarreaue36cbcb2015-04-03 15:40:56 +02002410 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_RTR) ||
2411 (((sess->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
Willy Tarreau8f128b42014-11-28 15:07:47 +01002412 (res->flags & CF_STREAMER_FAST)))) {
2413 res->flags |= CF_KERN_SPLICING;
Willy Tarreau7c84bab2009-03-08 21:38:23 +01002414 }
2415
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002416 /* reflect what the L7 analysers have seen last */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002417 rpf_last = res->flags;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002418
Willy Tarreau829bd472019-06-06 09:17:23 +02002419 /* Let's see if we can send the pending response now */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002420 cs_conn_sync_send(csf);
Willy Tarreau829bd472019-06-06 09:17:23 +02002421
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002422 /*
2423 * Now forward all shutdown requests between both sides of the buffer
2424 */
2425
2426 /*
2427 * FIXME: this is probably where we should produce error responses.
2428 */
2429
Willy Tarreau6b66f3e2008-12-14 17:31:54 +01002430 /* first, let's check if the response buffer needs to shutdown(write) */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002431 if (unlikely((res->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
Willy Tarreau05cdd962014-05-10 14:30:07 +02002432 (CF_AUTO_CLOSE|CF_SHUTR))) {
Willy Tarreau8f128b42014-11-28 15:07:47 +01002433 channel_shutw_now(res);
Willy Tarreau05cdd962014-05-10 14:30:07 +02002434 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002435
2436 /* shutdown(write) pending */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002437 if (unlikely((res->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
2438 channel_is_empty(res))) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002439 cs_shutw(csf);
Willy Tarreau05cdd962014-05-10 14:30:07 +02002440 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002441
2442 /* shutdown(write) done on the client side, we must stop the server too */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002443 if (unlikely((res->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW) &&
2444 !res->analysers)
2445 channel_shutr_now(res);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002446
2447 /* shutdown(read) pending */
Willy Tarreau8f128b42014-11-28 15:07:47 +01002448 if (unlikely((res->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002449 if (csb->flags & CS_FL_NOHALF)
2450 csb->flags |= CS_FL_NOLINGER;
2451 cs_shutr(csb);
Willy Tarreau7bb68ab2012-05-13 14:48:59 +02002452 }
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002453
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002454 if (csf->state == CS_ST_DIS ||
2455 cs_state_in(csb->state, CS_SB_RDY|CS_SB_DIS) ||
2456 (csf->endp->flags & CS_EP_ERROR && csf->state != CS_ST_CLO) ||
2457 (csb->endp->flags & CS_EP_ERROR && csb->state != CS_ST_CLO))
2458 goto resync_conn_stream;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002459
Willy Tarreau3c5c0662019-06-06 14:32:49 +02002460 if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
Willy Tarreau0be0ef92009-03-08 19:20:25 +01002461 goto resync_request;
2462
Willy Tarreau8f128b42014-11-28 15:07:47 +01002463 if ((res->flags ^ rpf_last) & CF_MASK_STATIC)
Willy Tarreau0be0ef92009-03-08 19:20:25 +01002464 goto resync_response;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002465
Willy Tarreau829bd472019-06-06 09:17:23 +02002466 if (((req->flags ^ rqf_last) | (res->flags ^ rpf_last)) & CF_MASK_ANALYSER)
2467 goto resync_request;
2468
Willy Tarreau89f7ef22009-09-05 20:57:35 +02002469 /* we're interested in getting wakeups again */
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002470 csf->flags &= ~CS_FL_DONT_WAKE;
2471 csb->flags &= ~CS_FL_DONT_WAKE;
Willy Tarreau89f7ef22009-09-05 20:57:35 +02002472
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002473 if (likely((csf->state != CS_ST_CLO) || !cs_state_in(csb->state, CS_SB_INI|CS_SB_CLO) ||
Christopher Faulet6fcd2d32019-11-13 11:12:32 +01002474 (req->analysers & AN_REQ_FLT_END) || (res->analysers & AN_RES_FLT_END))) {
Olivier Houchard4c18f942019-07-31 18:05:26 +02002475 if ((sess->fe->options & PR_O_CONTSTATS) && (s->flags & SF_BE_ASSIGNED) && !(s->flags & SF_IGNORE))
Willy Tarreau87b09662015-04-03 00:22:06 +02002476 stream_process_counters(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002477
Christopher Fauletef285c12022-04-01 14:48:06 +02002478 stream_update_both_cs(s);
Olivier Houchard53216e72018-10-10 15:46:36 +02002479
Willy Tarreau6f0a7ba2014-06-23 15:22:31 +02002480 /* Trick: if a request is waiting for the server to respond,
2481 * and if we know the server can timeout, we don't want the timeout
2482 * to expire on the client side first, but we're still interested
2483 * in passing data from the client to the server (eg: POST). Thus,
2484 * we can cancel the client's request timeout if the server's
2485 * request timeout is set and the server has not yet sent a response.
2486 */
2487
Willy Tarreau8f128b42014-11-28 15:07:47 +01002488 if ((res->flags & (CF_AUTO_CLOSE|CF_SHUTR)) == 0 &&
2489 (tick_isset(req->wex) || tick_isset(res->rex))) {
2490 req->flags |= CF_READ_NOEXP;
2491 req->rex = TICK_ETERNITY;
Willy Tarreau6f0a7ba2014-06-23 15:22:31 +02002492 }
2493
Christopher Faulet9d810ca2016-12-08 22:33:52 +01002494 /* Reset pending events now */
2495 s->pending_events = 0;
2496
Willy Tarreau798f4322012-11-08 14:49:17 +01002497 update_exp_and_leave:
Christopher Faulet974da9f2022-03-30 15:30:03 +02002498 /* Note: please ensure that if you branch here you disable CS_FL_DONT_WAKE */
Christopher Fauleta00d8172016-11-10 14:58:05 +01002499 t->expire = tick_first((tick_is_expired(t->expire, now_ms) ? 0 : t->expire),
2500 tick_first(tick_first(req->rex, req->wex),
2501 tick_first(res->rex, res->wex)));
Willy Tarreaudef0d222016-11-08 22:03:00 +01002502 if (!req->analysers)
2503 req->analyse_exp = TICK_ETERNITY;
2504
2505 if ((sess->fe->options & PR_O_CONTSTATS) && (s->flags & SF_BE_ASSIGNED) &&
2506 (!tick_isset(req->analyse_exp) || tick_is_expired(req->analyse_exp, now_ms)))
2507 req->analyse_exp = tick_add(now_ms, 5000);
2508
2509 t->expire = tick_first(t->expire, req->analyse_exp);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002510
Willy Tarreau9a398be2017-11-10 17:14:23 +01002511 t->expire = tick_first(t->expire, res->analyse_exp);
2512
Christopher Fauletae024ce2022-03-29 19:02:31 +02002513 t->expire = tick_first(t->expire, s->conn_exp);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002514
Christopher Faulet9d810ca2016-12-08 22:33:52 +01002515 s->pending_events &= ~(TASK_WOKEN_TIMER | TASK_WOKEN_RES);
Willy Tarreau87b09662015-04-03 00:22:06 +02002516 stream_release_buffers(s);
Christopher Fauleteea8fc72019-11-05 16:18:10 +01002517
2518 DBG_TRACE_DEVEL("queuing", STRM_EV_STRM_PROC, s);
Willy Tarreau26c25062009-03-08 09:38:41 +01002519 return t; /* nothing more to do */
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002520 }
2521
Christopher Fauleteea8fc72019-11-05 16:18:10 +01002522 DBG_TRACE_DEVEL("releasing", STRM_EV_STRM_PROC, s);
2523
Willy Tarreaue7dff022015-04-03 01:14:29 +02002524 if (s->flags & SF_BE_ASSIGNED)
Willy Tarreau4781b152021-04-06 13:53:36 +02002525 _HA_ATOMIC_DEC(&s->be->beconn);
Willy Tarreau6f5e4b92017-09-15 09:07:56 +02002526
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002527 if (unlikely((global.mode & MODE_DEBUG) &&
2528 (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
Willy Tarreau19d14ef2012-10-29 16:51:55 +01002529 chunk_printf(&trash, "%08x:%s.closed[%04x:%04x]\n",
Christopher Faulet0256da12021-12-15 09:50:17 +01002530 s->uniq_id, s->be->id,
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002531 (unsigned short)conn_fd(cs_conn(csf)),
2532 (unsigned short)conn_fd(cs_conn(csb)));
Willy Tarreau2e8ab6b2020-03-14 11:03:20 +01002533 DISGUISE(write(1, trash.area, trash.data));
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002534 }
2535
Christopher Faulet341064e2021-01-21 17:10:44 +01002536 if (!(s->flags & SF_IGNORE)) {
2537 s->logs.t_close = tv_ms_elapsed(&s->logs.tv_accept, &now);
2538
Olivier Houchard4c18f942019-07-31 18:05:26 +02002539 stream_process_counters(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002540
Christopher Faulet341064e2021-01-21 17:10:44 +01002541 if (s->txn && s->txn->status) {
2542 int n;
Krzysztof Piotr Oledzkide71d162009-10-24 15:36:15 +02002543
Christopher Faulet341064e2021-01-21 17:10:44 +01002544 n = s->txn->status / 100;
2545 if (n < 1 || n > 5)
2546 n = 0;
Krzysztof Piotr Oledzkide71d162009-10-24 15:36:15 +02002547
Christopher Faulet341064e2021-01-21 17:10:44 +01002548 if (sess->fe->mode == PR_MODE_HTTP) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002549 _HA_ATOMIC_INC(&sess->fe->fe_counters.p.http.rsp[n]);
Christopher Faulet341064e2021-01-21 17:10:44 +01002550 }
2551 if ((s->flags & SF_BE_ASSIGNED) &&
2552 (s->be->mode == PR_MODE_HTTP)) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002553 _HA_ATOMIC_INC(&s->be->be_counters.p.http.rsp[n]);
2554 _HA_ATOMIC_INC(&s->be->be_counters.p.http.cum_req);
Christopher Faulet341064e2021-01-21 17:10:44 +01002555 }
Willy Tarreau5e16cbc2012-11-24 14:54:13 +01002556 }
Christopher Faulet341064e2021-01-21 17:10:44 +01002557
2558 /* let's do a final log if we need it */
2559 if (!LIST_ISEMPTY(&sess->fe->logformat) && s->logs.logwait &&
2560 !(s->flags & SF_MONITOR) &&
2561 (!(sess->fe->options & PR_O_NULLNOLOG) || req->total)) {
2562 /* we may need to know the position in the queue */
2563 pendconn_free(s);
2564 s->do_log(s);
Willy Tarreau5e16cbc2012-11-24 14:54:13 +01002565 }
Krzysztof Piotr Oledzkide71d162009-10-24 15:36:15 +02002566
Christopher Faulet341064e2021-01-21 17:10:44 +01002567 /* update time stats for this stream */
2568 stream_update_time_stats(s);
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002569 }
2570
2571 /* the task MUST not be in the run queue anymore */
Willy Tarreau87b09662015-04-03 00:22:06 +02002572 stream_free(s);
Olivier Houchard3f795f72019-04-17 22:51:06 +02002573 task_destroy(t);
Willy Tarreau26c25062009-03-08 09:38:41 +01002574 return NULL;
Willy Tarreau55a8d0e2008-11-30 18:47:21 +01002575}
2576
Willy Tarreau87b09662015-04-03 00:22:06 +02002577/* Update the stream's backend and server time stats */
2578void stream_update_time_stats(struct stream *s)
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002579{
2580 int t_request;
2581 int t_queue;
2582 int t_connect;
2583 int t_data;
2584 int t_close;
2585 struct server *srv;
Marcin Deraneka8dbdf32020-05-15 20:02:40 +02002586 unsigned int samples_window;
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002587
2588 t_request = 0;
2589 t_queue = s->logs.t_queue;
2590 t_connect = s->logs.t_connect;
2591 t_close = s->logs.t_close;
2592 t_data = s->logs.t_data;
2593
2594 if (s->be->mode != PR_MODE_HTTP)
2595 t_data = t_connect;
2596
2597 if (t_connect < 0 || t_data < 0)
2598 return;
2599
2600 if (tv_isge(&s->logs.tv_request, &s->logs.tv_accept))
2601 t_request = tv_ms_elapsed(&s->logs.tv_accept, &s->logs.tv_request);
2602
2603 t_data -= t_connect;
2604 t_connect -= t_queue;
2605 t_queue -= t_request;
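	/* Worked example (hypothetical timestamps): accept at t=0, request fully
	 * received at 10ms, queue left at 25ms, connection established at 40ms,
	 * first response data at 90ms, close at 200ms. The subtractions above
	 * then yield t_request=10, t_queue=15, t_connect=15 and t_data=50, while
	 * t_close keeps the total stream time (200ms).
	 */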
2606
2607 srv = objt_server(s->target);
2608 if (srv) {
Marcin Deraneka8dbdf32020-05-15 20:02:40 +02002609 samples_window = (((s->be->mode == PR_MODE_HTTP) ?
2610 srv->counters.p.http.cum_req : srv->counters.cum_lbconn) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
2611 swrate_add_dynamic(&srv->counters.q_time, samples_window, t_queue);
2612 swrate_add_dynamic(&srv->counters.c_time, samples_window, t_connect);
2613 swrate_add_dynamic(&srv->counters.d_time, samples_window, t_data);
2614 swrate_add_dynamic(&srv->counters.t_time, samples_window, t_close);
Christopher Fauletefb41f02019-11-08 14:53:15 +01002615 HA_ATOMIC_UPDATE_MAX(&srv->counters.qtime_max, t_queue);
2616 HA_ATOMIC_UPDATE_MAX(&srv->counters.ctime_max, t_connect);
2617 HA_ATOMIC_UPDATE_MAX(&srv->counters.dtime_max, t_data);
2618 HA_ATOMIC_UPDATE_MAX(&srv->counters.ttime_max, t_close);
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002619 }
Marcin Deraneka8dbdf32020-05-15 20:02:40 +02002620 samples_window = (((s->be->mode == PR_MODE_HTTP) ?
2621 s->be->be_counters.p.http.cum_req : s->be->be_counters.cum_lbconn) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
2622 swrate_add_dynamic(&s->be->be_counters.q_time, samples_window, t_queue);
2623 swrate_add_dynamic(&s->be->be_counters.c_time, samples_window, t_connect);
2624 swrate_add_dynamic(&s->be->be_counters.d_time, samples_window, t_data);
2625 swrate_add_dynamic(&s->be->be_counters.t_time, samples_window, t_close);
Christopher Fauletefb41f02019-11-08 14:53:15 +01002626 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.qtime_max, t_queue);
2627 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.ctime_max, t_connect);
2628 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.dtime_max, t_data);
2629 HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.ttime_max, t_close);
Willy Tarreau4bfc5802014-06-17 12:19:18 +02002630}
2631
Willy Tarreau7c669d72008-06-20 15:04:11 +02002632/*
2633 * This function adjusts strm->srv_conn and maintains the previous and new
Willy Tarreau87b09662015-04-03 00:22:06 +02002634 * server's served stream counts. Setting newsrv to NULL is enough to release
Willy Tarreau7c669d72008-06-20 15:04:11 +02002635 * current connection slot. This function also notifies any LB algo which might
Willy Tarreau87b09662015-04-03 00:22:06 +02002636 * expect to be informed about any change in the number of active streams on a
Willy Tarreau7c669d72008-06-20 15:04:11 +02002637 * server.
2638 */
Willy Tarreaue89fae32021-03-09 15:43:32 +01002639void sess_change_server(struct stream *strm, struct server *newsrv)
Willy Tarreau7c669d72008-06-20 15:04:11 +02002640{
Willy Tarreaue89fae32021-03-09 15:43:32 +01002641 struct server *oldsrv = strm->srv_conn;
Willy Tarreau751153e2021-02-17 13:33:24 +01002642
2643 if (oldsrv == newsrv)
Willy Tarreau7c669d72008-06-20 15:04:11 +02002644 return;
2645
Willy Tarreau751153e2021-02-17 13:33:24 +01002646 if (oldsrv) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002647 _HA_ATOMIC_DEC(&oldsrv->served);
2648 _HA_ATOMIC_DEC(&oldsrv->proxy->served);
Olivier Houcharddc6111e2019-03-08 18:54:51 +01002649 __ha_barrier_atomic_store();
Willy Tarreau59b0fec2021-02-17 16:01:37 +01002650 if (oldsrv->proxy->lbprm.server_drop_conn)
Willy Tarreau5941ef02021-06-18 18:29:25 +02002651 oldsrv->proxy->lbprm.server_drop_conn(oldsrv);
Willy Tarreaue89fae32021-03-09 15:43:32 +01002652 stream_del_srv_conn(strm);
Willy Tarreau7c669d72008-06-20 15:04:11 +02002653 }
2654
2655 if (newsrv) {
Willy Tarreau4781b152021-04-06 13:53:36 +02002656 _HA_ATOMIC_INC(&newsrv->served);
2657 _HA_ATOMIC_INC(&newsrv->proxy->served);
Olivier Houcharddc6111e2019-03-08 18:54:51 +01002658 __ha_barrier_atomic_store();
Willy Tarreau59b0fec2021-02-17 16:01:37 +01002659 if (newsrv->proxy->lbprm.server_take_conn)
Willy Tarreau5941ef02021-06-18 18:29:25 +02002660 newsrv->proxy->lbprm.server_take_conn(newsrv);
Willy Tarreaue89fae32021-03-09 15:43:32 +01002661 stream_add_srv_conn(strm, newsrv);
Willy Tarreau7c669d72008-06-20 15:04:11 +02002662 }
2663}
2664
Willy Tarreau84455332009-03-15 22:34:05 +01002665/* Handle server-side errors for default protocols. It is called whenever a
2666 * connection setup is aborted or a request is aborted in queue. It sets the
Willy Tarreau87b09662015-04-03 00:22:06 +02002667 * stream termination flags so that the caller does not have to worry about
Christopher Faulet0eb32c02022-04-04 11:06:31 +02002668 * them. It's installed as ->srv_error for the server-side conn_stream.
Willy Tarreau84455332009-03-15 22:34:05 +01002669 */
Christopher Faulet0eb32c02022-04-04 11:06:31 +02002670void default_srv_error(struct stream *s, struct conn_stream *cs)
Willy Tarreau84455332009-03-15 22:34:05 +01002671{
Christopher Faulet50264b42022-03-30 19:39:30 +02002672 int err_type = s->conn_err_type;
Willy Tarreau84455332009-03-15 22:34:05 +01002673 int err = 0, fin = 0;
2674
Christopher Faulet50264b42022-03-30 19:39:30 +02002675 if (err_type & STRM_ET_QUEUE_ABRT) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002676 err = SF_ERR_CLICL;
2677 fin = SF_FINST_Q;
Willy Tarreau84455332009-03-15 22:34:05 +01002678 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002679 else if (err_type & STRM_ET_CONN_ABRT) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002680 err = SF_ERR_CLICL;
2681 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002682 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002683 else if (err_type & STRM_ET_QUEUE_TO) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002684 err = SF_ERR_SRVTO;
2685 fin = SF_FINST_Q;
Willy Tarreau84455332009-03-15 22:34:05 +01002686 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002687 else if (err_type & STRM_ET_QUEUE_ERR) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002688 err = SF_ERR_SRVCL;
2689 fin = SF_FINST_Q;
Willy Tarreau84455332009-03-15 22:34:05 +01002690 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002691 else if (err_type & STRM_ET_CONN_TO) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002692 err = SF_ERR_SRVTO;
2693 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002694 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002695 else if (err_type & STRM_ET_CONN_ERR) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002696 err = SF_ERR_SRVCL;
2697 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002698 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002699 else if (err_type & STRM_ET_CONN_RES) {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002700 err = SF_ERR_RESOURCE;
2701 fin = SF_FINST_C;
Willy Tarreau2d400bb2012-05-14 12:11:47 +02002702 }
Christopher Faulet50264b42022-03-30 19:39:30 +02002703 else /* STRM_ET_CONN_OTHER and others */ {
Willy Tarreaue7dff022015-04-03 01:14:29 +02002704 err = SF_ERR_INTERNAL;
2705 fin = SF_FINST_C;
Willy Tarreau84455332009-03-15 22:34:05 +01002706 }
2707
Willy Tarreaue7dff022015-04-03 01:14:29 +02002708 if (!(s->flags & SF_ERR_MASK))
Willy Tarreau84455332009-03-15 22:34:05 +01002709 s->flags |= err;
Willy Tarreaue7dff022015-04-03 01:14:29 +02002710 if (!(s->flags & SF_FINST_MASK))
Willy Tarreau84455332009-03-15 22:34:05 +01002711 s->flags |= fin;
2712}
Willy Tarreau7c669d72008-06-20 15:04:11 +02002713
Willy Tarreaue7dff022015-04-03 01:14:29 +02002714/* kill a stream and set the termination flags to <why> (one of SF_ERR_*) */
Willy Tarreau87b09662015-04-03 00:22:06 +02002715void stream_shutdown(struct stream *stream, int why)
Willy Tarreaua2a64e92011-09-07 23:01:56 +02002716{
Willy Tarreau87b09662015-04-03 00:22:06 +02002717 if (stream->req.flags & (CF_SHUTW|CF_SHUTW_NOW))
Willy Tarreaua2a64e92011-09-07 23:01:56 +02002718 return;
2719
Willy Tarreau87b09662015-04-03 00:22:06 +02002720 channel_shutw_now(&stream->req);
2721 channel_shutr_now(&stream->res);
2722 stream->task->nice = 1024;
Willy Tarreaue7dff022015-04-03 01:14:29 +02002723 if (!(stream->flags & SF_ERR_MASK))
Willy Tarreau87b09662015-04-03 00:22:06 +02002724 stream->flags |= why;
2725 task_wakeup(stream->task, TASK_WOKEN_OTHER);
Willy Tarreaua2a64e92011-09-07 23:01:56 +02002726}
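/* Usage sketch (illustrative): a caller kills an active stream by passing a
 * termination cause, for instance something like
 *
 *     stream_shutdown(strm, SF_ERR_DOWN);
 *
 * as is done when a server is marked down with "on-marked-down
 * shutdown-sessions"; the exact SF_ERR_* flag depends on the reason.
 */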
Willy Tarreau9ba2dcc2010-06-14 21:04:55 +02002727
Willy Tarreau5484d582019-05-22 09:33:03 +02002728/* Appends a dump of the state of stream <s> into buffer <buf> which must have
2729 * been prepared beforehand by its caller, with each line prepended by prefix
2730 * <pfx>, and each line terminated by character <eol>.
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002731 */
Willy Tarreau5484d582019-05-22 09:33:03 +02002732void stream_dump(struct buffer *buf, const struct stream *s, const char *pfx, char eol)
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002733{
2734 const struct conn_stream *csf, *csb;
2735 const struct connection *cof, *cob;
2736 const struct appctx *acf, *acb;
2737 const struct server *srv;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002738 const char *src = "unknown";
2739 const char *dst = "unknown";
2740 char pn[INET6_ADDRSTRLEN];
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002741 const struct channel *req, *res;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002742
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002743 if (!s) {
Willy Tarreau5484d582019-05-22 09:33:03 +02002744 chunk_appendf(buf, "%sstrm=%p%c", pfx, s, eol);
2745 return;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002746 }
2747
Willy Tarreau5484d582019-05-22 09:33:03 +02002748 if (s->obj_type != OBJ_TYPE_STREAM) {
2749 chunk_appendf(buf, "%sstrm=%p [invalid type=%d(%s)]%c",
2750 pfx, s, s->obj_type, obj_type_name(&s->obj_type), eol);
2751 return;
2752 }
2753
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002754 req = &s->req;
2755 res = &s->res;
2756
Christopher Faulet10c9c742022-03-01 15:16:57 +01002757 csf = s->csf;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002758 cof = cs_conn(csf);
Christopher Faulet13a35e52021-12-20 15:34:16 +01002759 acf = cs_appctx(csf);
Willy Tarreau71e34c12019-07-17 15:07:06 +02002760 if (cof && cof->src && addr_to_str(cof->src, pn, sizeof(pn)) >= 0)
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002761 src = pn;
2762 else if (acf)
2763 src = acf->applet->name;
2764
Christopher Faulet10c9c742022-03-01 15:16:57 +01002765 csb = s->csb;
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002766 cob = cs_conn(csb);
Christopher Faulet13a35e52021-12-20 15:34:16 +01002767 acb = cs_appctx(csb);
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002768 srv = objt_server(s->target);
2769 if (srv)
2770 dst = srv->id;
2771 else if (acb)
2772 dst = acb->applet->name;
2773
Willy Tarreau5484d582019-05-22 09:33:03 +02002774 chunk_appendf(buf,
Christopher Faulete8f35962021-11-02 17:18:15 +01002775 "%sstrm=%p,%x src=%s fe=%s be=%s dst=%s%c"
2776 "%stxn=%p,%x txn.req=%s,%x txn.rsp=%s,%x%c"
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002777 "%srqf=%x rqa=%x rpf=%x rpa=%x%c"
Christopher Faulet62e75742022-03-31 09:16:34 +02002778 "%scsf=%p,%s,%x csb=%p,%s,%x%c"
Christopher Faulet13a35e52021-12-20 15:34:16 +01002779 "%saf=%p,%u sab=%p,%u%c"
Willy Tarreau5484d582019-05-22 09:33:03 +02002780 "%scof=%p,%x:%s(%p)/%s(%p)/%s(%d)%c"
2781 "%scob=%p,%x:%s(%p)/%s(%p)/%s(%d)%c"
2782 "",
Christopher Faulete8f35962021-11-02 17:18:15 +01002783 pfx, s, s->flags, src, s->sess->fe->id, s->be->id, dst, eol,
2784 pfx, s->txn, (s->txn ? s->txn->flags : 0),
2785 (s->txn ? h1_msg_state_str(s->txn->req.msg_state): "-"), (s->txn ? s->txn->req.flags : 0),
2786 (s->txn ? h1_msg_state_str(s->txn->rsp.msg_state): "-"), (s->txn ? s->txn->rsp.flags : 0), eol,
Christopher Fauletc77ceb62022-04-04 11:08:42 +02002787 pfx, req->flags, req->analysers, res->flags, res->analysers, eol,
Christopher Faulet62e75742022-03-31 09:16:34 +02002788 pfx, csf, cs_state_str(csf->state), csf->flags, csb, cs_state_str(csb->state), csb->flags, eol,
Christopher Faulet13a35e52021-12-20 15:34:16 +01002789 pfx, acf, acf ? acf->st0 : 0, acb, acb ? acb->st0 : 0, eol,
Willy Tarreau5484d582019-05-22 09:33:03 +02002790 pfx, cof, cof ? cof->flags : 0, conn_get_mux_name(cof), cof?cof->ctx:0, conn_get_xprt_name(cof),
Willy Tarreaua57f3452022-04-11 17:58:06 +02002791 cof ? cof->xprt_ctx : 0, conn_get_ctrl_name(cof), conn_fd(cof), eol,
Willy Tarreau5484d582019-05-22 09:33:03 +02002792 pfx, cob, cob ? cob->flags : 0, conn_get_mux_name(cob), cob?cob->ctx:0, conn_get_xprt_name(cob),
Willy Tarreaua57f3452022-04-11 17:58:06 +02002793 cob ? cob->xprt_ctx : 0, conn_get_ctrl_name(cob), conn_fd(cob), eol);
Willy Tarreau5484d582019-05-22 09:33:03 +02002794}
2795
2796/* dumps an error message for type <type> at ptr <ptr> related to stream <s>,
Willy Tarreaub106ce12019-05-22 08:57:01 +02002797 * having reached loop rate <rate>, then aborts hoping to retrieve a core.
Willy Tarreau5484d582019-05-22 09:33:03 +02002798 */
2799void stream_dump_and_crash(enum obj_type *obj, int rate)
2800{
2801 const struct stream *s;
Willy Tarreau5484d582019-05-22 09:33:03 +02002802 char *msg = NULL;
2803 const void *ptr;
2804
2805 ptr = s = objt_stream(obj);
2806 if (!s) {
2807 const struct appctx *appctx = objt_appctx(obj);
2808 if (!appctx)
2809 return;
2810 ptr = appctx;
Willy Tarreau0698c802022-05-11 14:09:57 +02002811 s = appctx_strm(appctx);
Willy Tarreau5484d582019-05-22 09:33:03 +02002812 if (!s)
2813 return;
2814 }
2815
Willy Tarreau5484d582019-05-22 09:33:03 +02002816 chunk_reset(&trash);
2817 stream_dump(&trash, s, "", ' ');
Willy Tarreau9753d612020-05-01 16:57:02 +02002818
2819 chunk_appendf(&trash, "filters={");
2820 if (HAS_FILTERS(s)) {
2821 struct filter *filter;
2822
2823 list_for_each_entry(filter, &s->strm_flt.filters, list) {
2824 if (filter->list.p != &s->strm_flt.filters)
2825 chunk_appendf(&trash, ", ");
2826 chunk_appendf(&trash, "%p=\"%s\"", filter, FLT_ID(filter));
2827 }
2828 }
2829 chunk_appendf(&trash, "}");
2830
Willy Tarreaub106ce12019-05-22 08:57:01 +02002831 memprintf(&msg,
2832 "A bogus %s [%p] is spinning at %d calls per second and refuses to die, "
2833 "aborting now! Please report this error to developers "
2834 "[%s]\n",
Willy Tarreau5484d582019-05-22 09:33:03 +02002835 obj_type_name(obj), ptr, rate, trash.area);
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002836
2837 ha_alert("%s", msg);
2838 send_log(NULL, LOG_EMERG, "%s", msg);
Willy Tarreau2f67e542021-03-02 19:19:41 +01002839 ABORT_NOW();
Willy Tarreau71c07ac2019-04-25 19:08:48 +02002840}
2841
Willy Tarreaua698eb62021-02-24 10:37:01 +01002842/* initialize the required structures */
2843static void init_stream()
2844{
2845 int thr;
2846
2847 for (thr = 0; thr < MAX_THREADS; thr++)
Willy Tarreaub4e34762021-09-30 19:02:18 +02002848 LIST_INIT(&ha_thread_ctx[thr].streams);
Willy Tarreaua698eb62021-02-24 10:37:01 +01002849}
2850INITCALL0(STG_INIT, init_stream);
2851
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002852/* Generates a unique ID based on the given <format>, stores it in the given <strm> and
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002853 * returns the unique ID.
2854 *
2855 * If this function fails to allocate memory, IST_NULL is returned.
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002856 *
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002857 * If an ID is already stored within the stream, nothing happens and the existing
2858 * unique ID is returned.
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002859 */
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002860struct ist stream_generate_unique_id(struct stream *strm, struct list *format)
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002861{
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002862 if (isttest(strm->unique_id)) {
2863 return strm->unique_id;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002864 }
2865 else {
2866 char *unique_id;
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002867 int length;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002868 if ((unique_id = pool_alloc(pool_head_uniqueid)) == NULL)
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002869 return IST_NULL;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002870
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002871 length = build_logline(strm, unique_id, UNIQUEID_LEN, format);
2872 strm->unique_id = ist2(unique_id, length);
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002873
Tim Duesterhusa17e6622020-03-05 20:19:02 +01002874 return strm->unique_id;
Tim Duesterhus127a74d2020-02-28 15:13:33 +01002875 }
2876}
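/* Usage sketch (hypothetical call site): the ID is generated lazily from the
 * frontend's "unique-id-format" logformat list, e.g.
 *
 *     struct ist uid = stream_generate_unique_id(s, &sess->fe->format_unique_id);
 *     if (isttest(uid)) {
 *             // uid.ptr/uid.len remain valid until the stream is released
 *     }
 *
 * The buffer comes from pool_head_uniqueid and is expected to be released with
 * the stream, so callers must not free it themselves.
 */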
2877
Willy Tarreau8b22a712010-06-18 17:46:06 +02002878/************************************************************************/
2879/* All supported ACL keywords must be declared here. */
2880/************************************************************************/
Christopher Faulet551a6412021-06-25 14:35:29 +02002881static enum act_return stream_action_set_log_level(struct act_rule *rule, struct proxy *px,
2882 struct session *sess, struct stream *s, int flags)
2883{
2884 s->logs.level = (uintptr_t)rule->arg.act.p[0];
2885 return ACT_RET_CONT;
2886}
2887
2888
2889/* Parse a "set-log-level" action. It takes the level value as argument. It
2890 * returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
2891 */
2892static enum act_parse_ret stream_parse_set_log_level(const char **args, int *cur_arg, struct proxy *px,
2893 struct act_rule *rule, char **err)
2894{
2895 int level;
2896
2897 if (!*args[*cur_arg]) {
2898 bad_log_level:
2899 memprintf(err, "expects exactly 1 argument (log level name or 'silent')");
2900 return ACT_RET_PRS_ERR;
2901 }
2902 if (strcmp(args[*cur_arg], "silent") == 0)
2903 level = -1;
2904 else if ((level = get_log_level(args[*cur_arg]) + 1) == 0)
2905 goto bad_log_level;
2906
2907 (*cur_arg)++;
2908
2909 /* Register processing function. */
2910 rule->action_ptr = stream_action_set_log_level;
2911 rule->action = ACT_CUSTOM;
2912 rule->arg.act.p[0] = (void *)(uintptr_t)level;
2913 return ACT_RET_PRS_OK;
2914}
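/* Configuration sketch (illustrative): this parser backs actions such as
 * "http-request set-log-level silent" or "tcp-request content set-log-level err".
 * Named levels are stored shifted by one (get_log_level()+1) so that 0 can keep
 * meaning "leave the level unchanged", while -1 encodes "silent".
 */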
2915
Christopher Faulet1da374a2021-06-25 14:46:02 +02002916static enum act_return stream_action_set_nice(struct act_rule *rule, struct proxy *px,
2917 struct session *sess, struct stream *s, int flags)
2918{
2919 s->task->nice = (uintptr_t)rule->arg.act.p[0];
2920 return ACT_RET_CONT;
2921}
2922
2923
2924/* Parse a "set-nice" action. It takes the nice value as argument. It returns
2925 * ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
2926 */
2927static enum act_parse_ret stream_parse_set_nice(const char **args, int *cur_arg, struct proxy *px,
2928 struct act_rule *rule, char **err)
2929{
2930 int nice;
2931
2932 if (!*args[*cur_arg]) {
2934 memprintf(err, "expects exactly 1 argument (integer value)");
2935 return ACT_RET_PRS_ERR;
2936 }
2937
2938 nice = atoi(args[*cur_arg]);
2939 if (nice < -1024)
2940 nice = -1024;
2941 else if (nice > 1024)
2942 nice = 1024;
2943
2944 (*cur_arg)++;
2945
2946 /* Register processing function. */
2947 rule->action_ptr = stream_action_set_nice;
2948 rule->action = ACT_CUSTOM;
2949 rule->arg.act.p[0] = (void *)(uintptr_t)nice;
2950 return ACT_RET_PRS_OK;
2951}
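
/* Configuration sketch for the "set-nice" action parsed above (illustrative
 * only; the value is clamped to [-1024, 1024] as implemented above):
 *
 *     http-request set-nice 512 if { path_beg /bulk }
 *     http-request set-nice -64 if { path /login }
 */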
2952
Christopher Faulet551a6412021-06-25 14:35:29 +02002953
Christopher Fauletae863c62021-03-15 12:03:44 +01002954static enum act_return tcp_action_switch_stream_mode(struct act_rule *rule, struct proxy *px,
2955 struct session *sess, struct stream *s, int flags)
2956{
2957 enum pr_mode mode = (uintptr_t)rule->arg.act.p[0];
2958 const struct mux_proto_list *mux_proto = rule->arg.act.p[1];
2959
2960 if (!IS_HTX_STRM(s) && mode == PR_MODE_HTTP) {
2961 if (!stream_set_http_mode(s, mux_proto)) {
2962 channel_abort(&s->req);
2963 channel_abort(&s->res);
2964 return ACT_RET_ABRT;
2965 }
2966 }
2967 return ACT_RET_STOP;
2968}
2969
2970
2971static int check_tcp_switch_stream_mode(struct act_rule *rule, struct proxy *px, char **err)
2972{
2973 const struct mux_proto_list *mux_ent;
2974 const struct mux_proto_list *mux_proto = rule->arg.act.p[1];
2975 enum pr_mode pr_mode = (uintptr_t)rule->arg.act.p[0];
2976 enum proto_proxy_mode mode = (1 << (pr_mode == PR_MODE_HTTP));
2977
Christopher Faulet3b6446f2021-03-15 15:10:38 +01002978 if (pr_mode == PR_MODE_HTTP)
2979 px->options |= PR_O_HTTP_UPG;
2980
Christopher Fauletae863c62021-03-15 12:03:44 +01002981 if (mux_proto) {
2982 mux_ent = conn_get_best_mux_entry(mux_proto->token, PROTO_SIDE_FE, mode);
2983 if (!mux_ent || !isteq(mux_ent->token, mux_proto->token)) {
2984 memprintf(err, "MUX protocol '%.*s' is not compatible with the selected mode",
2985 (int)mux_proto->token.len, mux_proto->token.ptr);
2986 return 0;
2987 }
2988 }
2989 else {
2990 mux_ent = conn_get_best_mux_entry(IST_NULL, PROTO_SIDE_FE, mode);
2991 if (!mux_ent) {
2992 memprintf(err, "Unable to find compatible MUX protocol with the selected mode");
2993 return 0;
2994 }
2995 }
2996
2997 /* Update the mux */
2998 rule->arg.act.p[1] = (void *)mux_ent;
2999 return 1;
3000
3001}
3002
3003static enum act_parse_ret stream_parse_switch_mode(const char **args, int *cur_arg,
3004 struct proxy *px, struct act_rule *rule,
3005 char **err)
3006{
3007 const struct mux_proto_list *mux_proto = NULL;
3008 struct ist proto;
3009 enum pr_mode mode;
3010
3011 /* must have at least the mode */
3012 if (*(args[*cur_arg]) == 0) {
3013 memprintf(err, "'%s %s' expects a mode as argument.", args[0], args[*cur_arg-1]);
3014 return ACT_RET_PRS_ERR;
3015 }
3016
3017 if (!(px->cap & PR_CAP_FE)) {
3018 memprintf(err, "'%s %s' not allowed because %s '%s' has no frontend capability",
3019 args[0], args[*cur_arg-1], proxy_type_str(px), px->id);
3020 return ACT_RET_PRS_ERR;
3021 }
 3022 /* Check the mode. For now "tcp" is disabled because downgrade is not
3023 * supported and PT is the only TCP mux.
3024 */
3025 if (strcmp(args[*cur_arg], "http") == 0)
3026 mode = PR_MODE_HTTP;
3027 else {
3028 memprintf(err, "'%s %s' expects a valid mode (got '%s').", args[0], args[*cur_arg-1], args[*cur_arg]);
3029 return ACT_RET_PRS_ERR;
3030 }
3031
3032 /* check the proto, if specified */
3033 if (*(args[*cur_arg+1]) && strcmp(args[*cur_arg+1], "proto") == 0) {
3034 if (*(args[*cur_arg+2]) == 0) {
3035 memprintf(err, "'%s %s': '%s' expects a protocol as argument.",
3036 args[0], args[*cur_arg-1], args[*cur_arg+1]);
3037 return ACT_RET_PRS_ERR;
3038 }
3039
Tim Duesterhusb113b5c2021-09-15 13:58:44 +02003040 proto = ist(args[*cur_arg + 2]);
Christopher Fauletae863c62021-03-15 12:03:44 +01003041 mux_proto = get_mux_proto(proto);
3042 if (!mux_proto) {
3043 memprintf(err, "'%s %s': '%s' expects a valid MUX protocol, if specified (got '%s')",
3044 args[0], args[*cur_arg-1], args[*cur_arg+1], args[*cur_arg+2]);
3045 return ACT_RET_PRS_ERR;
3046 }
3047 *cur_arg += 2;
3048 }
3049
3050 (*cur_arg)++;
3051
3052 /* Register processing function. */
3053 rule->action_ptr = tcp_action_switch_stream_mode;
3054 rule->check_ptr = check_tcp_switch_stream_mode;
3055 rule->action = ACT_CUSTOM;
3056 rule->arg.act.p[0] = (void *)(uintptr_t)mode;
3057 rule->arg.act.p[1] = (void *)mux_proto;
3058 return ACT_RET_PRS_OK;
3059}
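
/* Configuration sketch for the "switch-mode" action parsed above
 * (illustrative only; the frontend name is made up and the optional "proto"
 * argument may be omitted):
 *
 *     frontend fe_mixed
 *         mode tcp
 *         tcp-request inspect-delay 5s
 *         tcp-request content switch-mode http proto h1 if HTTP
 */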
Willy Tarreau8b22a712010-06-18 17:46:06 +02003060
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003061/* Parse a "use-service" action. It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error. */
3062static enum act_parse_ret stream_parse_use_service(const char **args, int *cur_arg,
3063 struct proxy *px, struct act_rule *rule,
3064 char **err)
3065{
3066 struct action_kw *kw;
3067
3068 /* Check if the service name exists. */
3069 if (*(args[*cur_arg]) == 0) {
3070 memprintf(err, "'%s' expects a service name.", args[0]);
Thierry FOURNIER337eae12015-11-26 19:48:04 +01003071 return ACT_RET_PRS_ERR;
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003072 }
3073
3074 /* lookup for keyword corresponding to a service. */
3075 kw = action_lookup(&service_keywords, args[*cur_arg]);
3076 if (!kw) {
3077 memprintf(err, "'%s' unknown service name.", args[1]);
3078 return ACT_RET_PRS_ERR;
3079 }
3080 (*cur_arg)++;
3081
3082 /* executes specific rule parser. */
3083 rule->kw = kw;
3084 if (kw->parse((const char **)args, cur_arg, px, rule, err) == ACT_RET_PRS_ERR)
3085 return ACT_RET_PRS_ERR;
3086
3087 /* Register processing function. */
3088 rule->action_ptr = process_use_service;
3089 rule->action = ACT_CUSTOM;
3090
3091 return ACT_RET_PRS_OK;
3092}
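
/* Configuration sketch for the "use-service" action parsed above
 * (illustrative only; "prometheus-exporter" requires the exporter to be
 * compiled in):
 *
 *     http-request use-service prometheus-exporter if { path /metrics }
 */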
3093
3094void service_keywords_register(struct action_kw_list *kw_list)
3095{
Willy Tarreau2b718102021-04-21 07:32:39 +02003096 LIST_APPEND(&service_keywords, &kw_list->list);
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003097}
3098
Thierry Fournier87e53992020-11-28 19:32:14 +01003099struct action_kw *service_find(const char *kw)
3100{
3101 return action_lookup(&service_keywords, kw);
3102}
3103
Willy Tarreau5fcc1002022-03-29 15:10:44 +02003104/* Lists the known services on <out>. If <out> is NULL, they are emitted on
 3105 * stdout, one per line.
 3106 */
Willy Tarreau679bba12019-03-19 08:08:10 +01003107void list_services(FILE *out)
3108{
Willy Tarreau3f0b2e82022-03-30 12:12:44 +02003109 const struct action_kw *akwp, *akwn;
Willy Tarreau679bba12019-03-19 08:08:10 +01003110 struct action_kw_list *kw_list;
3111 int found = 0;
3112 int i;
3113
Willy Tarreau5fcc1002022-03-29 15:10:44 +02003114 if (out)
3115 fprintf(out, "Available services :");
Willy Tarreau3f0b2e82022-03-30 12:12:44 +02003116
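	/* Repeatedly pick the alphabetically smallest keyword that is greater
	 * than the previously emitted one, so the output comes out sorted
	 * without modifying the registered lists (selection-style scan).
	 */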
3117 for (akwn = akwp = NULL;; akwp = akwn) {
3118 list_for_each_entry(kw_list, &service_keywords, list) {
3119 for (i = 0; kw_list->kw[i].kw != NULL; i++) {
3120 if (strordered(akwp ? akwp->kw : NULL,
3121 kw_list->kw[i].kw,
3122 akwn != akwp ? akwn->kw : NULL))
3123 akwn = &kw_list->kw[i];
3124 found = 1;
3125 }
Willy Tarreau679bba12019-03-19 08:08:10 +01003126 }
Willy Tarreau3f0b2e82022-03-30 12:12:44 +02003127 if (akwn == akwp)
3128 break;
3129 if (out)
3130 fprintf(out, " %s", akwn->kw);
3131 else
3132 printf("%s\n", akwn->kw);
Willy Tarreau679bba12019-03-19 08:08:10 +01003133 }
Willy Tarreau5fcc1002022-03-29 15:10:44 +02003134 if (!found && out)
Willy Tarreau679bba12019-03-19 08:08:10 +01003135 fprintf(out, " none\n");
3136}
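
/* The sorted list built above typically shows up in the "haproxy -vv"
 * output, e.g. (illustrative, depends on the build options):
 *
 *     Available services : prometheus-exporter
 */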
William Lallemand4c5b4d52016-11-21 08:51:11 +01003137
Willy Tarreau39f097d2022-05-03 10:49:00 +02003138/* appctx context used by the "show sess" command */
3139
3140struct show_sess_ctx {
3141 struct bref bref; /* back-reference from the session being dumped */
3142 void *target; /* session we want to dump, or NULL for all */
3143 unsigned int thr; /* the thread number being explored (0..MAX_THREADS-1) */
3144 unsigned int uid; /* if non-null, the uniq_id of the session being dumped */
3145 int section; /* section of the session being dumped */
3146 int pos; /* last position of the current session's buffer */
3147};
3148
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +02003149/* This function dumps a complete stream state onto the conn-stream's
William Lallemand4c5b4d52016-11-21 08:51:11 +01003150 * read buffer. The stream has to be set in strm. It returns 0 if the output
3151 * buffer is full and it needs to be called again, otherwise non-zero. It is
3152 * designed to be called from stats_dump_strm_to_buffer() below.
3153 */
Christopher Faulet908628c2022-03-25 16:43:49 +01003154static int stats_dump_full_strm_to_buffer(struct conn_stream *cs, struct stream *strm)
William Lallemand4c5b4d52016-11-21 08:51:11 +01003155{
Christopher Faulet908628c2022-03-25 16:43:49 +01003156 struct appctx *appctx = __cs_appctx(cs);
Willy Tarreau39f097d2022-05-03 10:49:00 +02003157 struct show_sess_ctx *ctx = appctx->svcctx;
Christopher Faulet908628c2022-03-25 16:43:49 +01003158 struct conn_stream *csf, *csb;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003159 struct tm tm;
3160 extern const char *monthname[12];
3161 char pn[INET6_ADDRSTRLEN];
3162 struct connection *conn;
3163 struct appctx *tmpctx;
3164
3165 chunk_reset(&trash);
3166
Willy Tarreau39f097d2022-05-03 10:49:00 +02003167 if (ctx->section > 0 && ctx->uid != strm->uniq_id) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003168 /* stream changed, no need to go any further */
3169 chunk_appendf(&trash, " *** session terminated while we were watching it ***\n");
Christopher Faulet908628c2022-03-25 16:43:49 +01003170 if (ci_putchk(cs_ic(cs), &trash) == -1)
Willy Tarreaue6e52362019-01-04 17:42:57 +01003171 goto full;
3172 goto done;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003173 }
3174
Willy Tarreau39f097d2022-05-03 10:49:00 +02003175 switch (ctx->section) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003176 case 0: /* main status of the stream */
Willy Tarreau39f097d2022-05-03 10:49:00 +02003177 ctx->uid = strm->uniq_id;
3178 ctx->section = 1;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003179 /* fall through */
3180
3181 case 1:
3182 get_localtime(strm->logs.accept_date.tv_sec, &tm);
3183 chunk_appendf(&trash,
3184 "%p: [%02d/%s/%04d:%02d:%02d:%02d.%06d] id=%u proto=%s",
3185 strm,
3186 tm.tm_mday, monthname[tm.tm_mon], tm.tm_year+1900,
3187 tm.tm_hour, tm.tm_min, tm.tm_sec, (int)(strm->logs.accept_date.tv_usec),
3188 strm->uniq_id,
Willy Tarreaub7436612020-08-28 19:51:44 +02003189 strm_li(strm) ? strm_li(strm)->rx.proto->name : "?");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003190
3191 conn = objt_conn(strm_orig(strm));
Willy Tarreau71e34c12019-07-17 15:07:06 +02003192 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003193 case AF_INET:
3194 case AF_INET6:
3195 chunk_appendf(&trash, " source=%s:%d\n",
Willy Tarreau71e34c12019-07-17 15:07:06 +02003196 pn, get_host_port(conn->src));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003197 break;
3198 case AF_UNIX:
3199 chunk_appendf(&trash, " source=unix:%d\n", strm_li(strm)->luid);
3200 break;
3201 default:
3202 /* no more information to print right now */
3203 chunk_appendf(&trash, "\n");
3204 break;
3205 }
3206
3207 chunk_appendf(&trash,
Christopher Faulet50264b42022-03-30 19:39:30 +02003208 " flags=0x%x, conn_retries=%d, conn_exp=%s conn_et=0x%03x srv_conn=%p, pend_pos=%p waiting=%d epoch=%#x\n",
Christopher Fauletae024ce2022-03-29 19:02:31 +02003209 strm->flags, strm->conn_retries,
3210 strm->conn_exp ?
3211 tick_is_expired(strm->conn_exp, now_ms) ? "<PAST>" :
3212 human_time(TICKS_TO_MS(strm->conn_exp - now_ms),
3213 TICKS_TO_MS(1000)) : "<NEVER>",
Christopher Faulet50264b42022-03-30 19:39:30 +02003214 strm->conn_err_type, strm->srv_conn, strm->pend_pos,
Willy Tarreau2b718102021-04-21 07:32:39 +02003215 LIST_INLIST(&strm->buffer_wait.list), strm->stream_epoch);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003216
3217 chunk_appendf(&trash,
3218 " frontend=%s (id=%u mode=%s), listener=%s (id=%u)",
William Lallemandb0dfd092022-03-08 12:05:31 +01003219 strm_fe(strm)->id, strm_fe(strm)->uuid, proxy_mode_str(strm_fe(strm)->mode),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003220 strm_li(strm) ? strm_li(strm)->name ? strm_li(strm)->name : "?" : "?",
3221 strm_li(strm) ? strm_li(strm)->luid : 0);
3222
Willy Tarreau71e34c12019-07-17 15:07:06 +02003223 switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003224 case AF_INET:
3225 case AF_INET6:
3226 chunk_appendf(&trash, " addr=%s:%d\n",
Willy Tarreau71e34c12019-07-17 15:07:06 +02003227 pn, get_host_port(conn->dst));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003228 break;
3229 case AF_UNIX:
3230 chunk_appendf(&trash, " addr=unix:%d\n", strm_li(strm)->luid);
3231 break;
3232 default:
3233 /* no more information to print right now */
3234 chunk_appendf(&trash, "\n");
3235 break;
3236 }
3237
3238 if (strm->be->cap & PR_CAP_BE)
3239 chunk_appendf(&trash,
3240 " backend=%s (id=%u mode=%s)",
3241 strm->be->id,
William Lallemandb0dfd092022-03-08 12:05:31 +01003242 strm->be->uuid, proxy_mode_str(strm->be->mode));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003243 else
3244 chunk_appendf(&trash, " backend=<NONE> (id=-1 mode=-)");
3245
Christopher Faulet95a61e82021-12-22 14:22:03 +01003246 conn = cs_conn(strm->csb);
Willy Tarreau71e34c12019-07-17 15:07:06 +02003247 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003248 case AF_INET:
3249 case AF_INET6:
3250 chunk_appendf(&trash, " addr=%s:%d\n",
Willy Tarreau71e34c12019-07-17 15:07:06 +02003251 pn, get_host_port(conn->src));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003252 break;
3253 case AF_UNIX:
3254 chunk_appendf(&trash, " addr=unix\n");
3255 break;
3256 default:
3257 /* no more information to print right now */
3258 chunk_appendf(&trash, "\n");
3259 break;
3260 }
3261
3262 if (strm->be->cap & PR_CAP_BE)
3263 chunk_appendf(&trash,
3264 " server=%s (id=%u)",
Willy Tarreau88bc8002021-12-06 07:01:02 +00003265 objt_server(strm->target) ? __objt_server(strm->target)->id : "<none>",
3266 objt_server(strm->target) ? __objt_server(strm->target)->puid : 0);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003267 else
3268 chunk_appendf(&trash, " server=<NONE> (id=-1)");
3269
Willy Tarreau71e34c12019-07-17 15:07:06 +02003270 switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003271 case AF_INET:
3272 case AF_INET6:
3273 chunk_appendf(&trash, " addr=%s:%d\n",
Willy Tarreau71e34c12019-07-17 15:07:06 +02003274 pn, get_host_port(conn->dst));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003275 break;
3276 case AF_UNIX:
3277 chunk_appendf(&trash, " addr=unix\n");
3278 break;
3279 default:
3280 /* no more information to print right now */
3281 chunk_appendf(&trash, "\n");
3282 break;
3283 }
3284
3285 chunk_appendf(&trash,
Willy Tarreau2e9c1d22019-04-24 08:28:31 +02003286 " task=%p (state=0x%02x nice=%d calls=%u rate=%u exp=%s tmask=0x%lx%s",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003287 strm->task,
3288 strm->task->state,
Willy Tarreau2e9c1d22019-04-24 08:28:31 +02003289 strm->task->nice, strm->task->calls, read_freq_ctr(&strm->call_rate),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003290 strm->task->expire ?
3291 tick_is_expired(strm->task->expire, now_ms) ? "<PAST>" :
3292 human_time(TICKS_TO_MS(strm->task->expire - now_ms),
3293 TICKS_TO_MS(1000)) : "<NEVER>",
Christopher Fauletf0205062017-11-15 20:56:43 +01003294 strm->task->thread_mask,
William Lallemand4c5b4d52016-11-21 08:51:11 +01003295 task_in_rq(strm->task) ? ", running" : "");
3296
3297 chunk_appendf(&trash,
3298 " age=%s)\n",
3299 human_time(now.tv_sec - strm->logs.accept_date.tv_sec, 1));
3300
3301 if (strm->txn)
3302 chunk_appendf(&trash,
Christopher Fauletbcac7862019-07-17 10:46:50 +02003303 " txn=%p flags=0x%x meth=%d status=%d req.st=%s rsp.st=%s req.f=0x%02x rsp.f=0x%02x\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003304 strm->txn, strm->txn->flags, strm->txn->meth, strm->txn->status,
Willy Tarreau7778b592019-01-07 10:38:10 +01003305 h1_msg_state_str(strm->txn->req.msg_state), h1_msg_state_str(strm->txn->rsp.msg_state),
Christopher Fauletbcac7862019-07-17 10:46:50 +02003306 strm->txn->req.flags, strm->txn->rsp.flags);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003307
Christopher Faulet908628c2022-03-25 16:43:49 +01003308 csf = strm->csf;
Christopher Faulet02642122022-04-19 10:35:22 +02003309 chunk_appendf(&trash, " csf=%p flags=0x%08x state=%s endp=%s,%p,0x%08x sub=%d\n",
Christopher Fauletc77ceb62022-04-04 11:08:42 +02003310 csf, csf->flags, cs_state_str(csf->state),
Christopher Faulet02642122022-04-19 10:35:22 +02003311 (csf->endp->flags & CS_EP_T_MUX ? "CONN" : (csf->endp->flags & CS_EP_T_APPLET ? "APPCTX" : "NONE")),
Christopher Fauletc77ceb62022-04-04 11:08:42 +02003312 csf->endp->target, csf->endp->flags, csf->wait_event.events);
Olivier Houchard9aaf7782017-09-13 18:30:23 +02003313
Christopher Faulet908628c2022-03-25 16:43:49 +01003314 if ((conn = cs_conn(csf)) != NULL) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003315 chunk_appendf(&trash,
Christopher Faulet13a35e52021-12-20 15:34:16 +01003316 " co0=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003317 conn,
3318 conn_get_ctrl_name(conn),
3319 conn_get_xprt_name(conn),
Willy Tarreau53a47662017-08-28 10:53:00 +02003320 conn_get_mux_name(conn),
Christopher Faulet908628c2022-03-25 16:43:49 +01003321 cs_get_data_name(csf),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003322 obj_type_name(conn->target),
3323 obj_base_ptr(conn->target));
3324
3325 chunk_appendf(&trash,
Willy Tarreau76913d32019-08-30 14:33:11 +02003326 " flags=0x%08x fd=%d fd.state=%02x updt=%d fd.tmask=0x%lx\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003327 conn->flags,
Willy Tarreaua57f3452022-04-11 17:58:06 +02003328 conn_fd(conn),
3329 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].state : 0,
3330 conn_fd(conn) >= 0 ? !!(fdtab[conn->handle.fd].update_mask & tid_bit) : 0,
3331 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].thread_mask: 0);
Willy Tarreaufb3b1b02018-12-18 14:28:24 +01003332
William Lallemand4c5b4d52016-11-21 08:51:11 +01003333 }
Christopher Faulet908628c2022-03-25 16:43:49 +01003334 else if ((tmpctx = cs_appctx(csf)) != NULL) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003335 chunk_appendf(&trash,
Christopher Faulet13a35e52021-12-20 15:34:16 +01003336 " app0=%p st0=%d st1=%d st2=%d applet=%s tmask=0x%lx nice=%d calls=%u rate=%u cpu=%llu lat=%llu\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003337 tmpctx,
3338 tmpctx->st0,
3339 tmpctx->st1,
Willy Tarreauc7afedc2022-05-05 20:01:54 +02003340 tmpctx->_st2,
Christopher Fauletf0205062017-11-15 20:56:43 +01003341 tmpctx->applet->name,
Willy Tarreau4c6986a2021-07-13 18:01:46 +02003342 tmpctx->t->thread_mask,
Willy Tarreau22d63a22019-04-24 08:41:29 +02003343 tmpctx->t->nice, tmpctx->t->calls, read_freq_ctr(&tmpctx->call_rate),
Willy Tarreau9efd7452018-05-31 14:48:54 +02003344 (unsigned long long)tmpctx->t->cpu_time, (unsigned long long)tmpctx->t->lat_time);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003345 }
3346
Christopher Faulet908628c2022-03-25 16:43:49 +01003347 csb = strm->csb;
Christopher Faulet02642122022-04-19 10:35:22 +02003348 chunk_appendf(&trash, " csb=%p flags=0x%08x state=%s endp=%s,%p,0x%08x sub=%d\n",
Christopher Fauletc77ceb62022-04-04 11:08:42 +02003349 csb, csb->flags, cs_state_str(csb->state),
Christopher Faulet02642122022-04-19 10:35:22 +02003350 (csb->endp->flags & CS_EP_T_MUX ? "CONN" : (csb->endp->flags & CS_EP_T_APPLET ? "APPCTX" : "NONE")),
Christopher Fauletc77ceb62022-04-04 11:08:42 +02003351 csb->endp->target, csb->endp->flags, csb->wait_event.events);
Christopher Faulet908628c2022-03-25 16:43:49 +01003352 if ((conn = cs_conn(csb)) != NULL) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003353 chunk_appendf(&trash,
Christopher Faulet13a35e52021-12-20 15:34:16 +01003354 " co1=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003355 conn,
3356 conn_get_ctrl_name(conn),
3357 conn_get_xprt_name(conn),
Willy Tarreau53a47662017-08-28 10:53:00 +02003358 conn_get_mux_name(conn),
Christopher Faulet908628c2022-03-25 16:43:49 +01003359 cs_get_data_name(csb),
William Lallemand4c5b4d52016-11-21 08:51:11 +01003360 obj_type_name(conn->target),
3361 obj_base_ptr(conn->target));
3362
3363 chunk_appendf(&trash,
Willy Tarreau76913d32019-08-30 14:33:11 +02003364 " flags=0x%08x fd=%d fd.state=%02x updt=%d fd.tmask=0x%lx\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003365 conn->flags,
Willy Tarreaua57f3452022-04-11 17:58:06 +02003366 conn_fd(conn),
3367 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].state : 0,
3368 conn_fd(conn) >= 0 ? !!(fdtab[conn->handle.fd].update_mask & tid_bit) : 0,
3369 conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].thread_mask: 0);
Willy Tarreaufb3b1b02018-12-18 14:28:24 +01003370
William Lallemand4c5b4d52016-11-21 08:51:11 +01003371 }
Christopher Faulet908628c2022-03-25 16:43:49 +01003372 else if ((tmpctx = cs_appctx(csb)) != NULL) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003373 chunk_appendf(&trash,
Christopher Faulet13a35e52021-12-20 15:34:16 +01003374 " app1=%p st0=%d st1=%d st2=%d applet=%s tmask=0x%lx nice=%d calls=%u rate=%u cpu=%llu lat=%llu\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003375 tmpctx,
3376 tmpctx->st0,
3377 tmpctx->st1,
Willy Tarreauc7afedc2022-05-05 20:01:54 +02003378 tmpctx->_st2,
Christopher Fauletf0205062017-11-15 20:56:43 +01003379 tmpctx->applet->name,
Willy Tarreau4c6986a2021-07-13 18:01:46 +02003380 tmpctx->t->thread_mask,
Willy Tarreau22d63a22019-04-24 08:41:29 +02003381 tmpctx->t->nice, tmpctx->t->calls, read_freq_ctr(&tmpctx->call_rate),
Willy Tarreau9efd7452018-05-31 14:48:54 +02003382 (unsigned long long)tmpctx->t->cpu_time, (unsigned long long)tmpctx->t->lat_time);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003383 }
3384
3385 chunk_appendf(&trash,
3386 " req=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
3387 " an_exp=%s",
3388 &strm->req,
3389 strm->req.flags, strm->req.analysers,
3390 strm->req.pipe ? strm->req.pipe->data : 0,
3391 strm->req.to_forward, strm->req.total,
3392 strm->req.analyse_exp ?
3393 human_time(TICKS_TO_MS(strm->req.analyse_exp - now_ms),
3394 TICKS_TO_MS(1000)) : "<NEVER>");
3395
3396 chunk_appendf(&trash,
3397 " rex=%s",
3398 strm->req.rex ?
3399 human_time(TICKS_TO_MS(strm->req.rex - now_ms),
3400 TICKS_TO_MS(1000)) : "<NEVER>");
3401
3402 chunk_appendf(&trash,
3403 " wex=%s\n"
Christopher Fauletbcac7862019-07-17 10:46:50 +02003404 " buf=%p data=%p o=%u p=%u i=%u size=%u\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003405 strm->req.wex ?
3406 human_time(TICKS_TO_MS(strm->req.wex - now_ms),
3407 TICKS_TO_MS(1000)) : "<NEVER>",
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003408 &strm->req.buf,
3409 b_orig(&strm->req.buf), (unsigned int)co_data(&strm->req),
Christopher Fauletbcac7862019-07-17 10:46:50 +02003410 (unsigned int)ci_head_ofs(&strm->req), (unsigned int)ci_data(&strm->req),
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003411 (unsigned int)strm->req.buf.size);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003412
Christopher Fauletb9af8812019-01-04 14:30:44 +01003413 if (IS_HTX_STRM(strm)) {
3414 struct htx *htx = htxbuf(&strm->req.buf);
3415
3416 chunk_appendf(&trash,
Willy Tarreaub84e67f2019-01-07 10:01:34 +01003417 " htx=%p flags=0x%x size=%u data=%u used=%u wrap=%s extra=%llu\n",
Christopher Faulet192c6a22019-06-11 16:32:24 +02003418 htx, htx->flags, htx->size, htx->data, htx_nbblks(htx),
Christopher Faulet28f29c72019-04-30 17:55:45 +02003419 (htx->tail >= htx->head) ? "NO" : "YES",
Christopher Fauletb9af8812019-01-04 14:30:44 +01003420 (unsigned long long)htx->extra);
3421 }
Christopher Fauletd4762b82021-10-12 11:02:48 +02003422 if (HAS_FILTERS(strm) && strm_flt(strm)->current[0]) {
3423 struct filter *flt = strm_flt(strm)->current[0];
3424
3425 chunk_appendf(&trash, " current_filter=%p (id=\"%s\" flags=0x%x pre=0x%x post=0x%x) \n",
3426 flt, flt->config->id, flt->flags, flt->pre_analyzers, flt->post_analyzers);
3427 }
Christopher Fauletb9af8812019-01-04 14:30:44 +01003428
William Lallemand4c5b4d52016-11-21 08:51:11 +01003429 chunk_appendf(&trash,
3430 " res=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
3431 " an_exp=%s",
3432 &strm->res,
3433 strm->res.flags, strm->res.analysers,
3434 strm->res.pipe ? strm->res.pipe->data : 0,
3435 strm->res.to_forward, strm->res.total,
3436 strm->res.analyse_exp ?
3437 human_time(TICKS_TO_MS(strm->res.analyse_exp - now_ms),
3438 TICKS_TO_MS(1000)) : "<NEVER>");
3439
3440 chunk_appendf(&trash,
3441 " rex=%s",
3442 strm->res.rex ?
3443 human_time(TICKS_TO_MS(strm->res.rex - now_ms),
3444 TICKS_TO_MS(1000)) : "<NEVER>");
3445
3446 chunk_appendf(&trash,
3447 " wex=%s\n"
Christopher Fauletbcac7862019-07-17 10:46:50 +02003448 " buf=%p data=%p o=%u p=%u i=%u size=%u\n",
William Lallemand4c5b4d52016-11-21 08:51:11 +01003449 strm->res.wex ?
3450 human_time(TICKS_TO_MS(strm->res.wex - now_ms),
3451 TICKS_TO_MS(1000)) : "<NEVER>",
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003452 &strm->res.buf,
3453 b_orig(&strm->res.buf), (unsigned int)co_data(&strm->res),
Christopher Fauletbcac7862019-07-17 10:46:50 +02003454 (unsigned int)ci_head_ofs(&strm->res), (unsigned int)ci_data(&strm->res),
Willy Tarreauc9fa0482018-07-10 17:43:27 +02003455 (unsigned int)strm->res.buf.size);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003456
Christopher Fauletb9af8812019-01-04 14:30:44 +01003457 if (IS_HTX_STRM(strm)) {
3458 struct htx *htx = htxbuf(&strm->res.buf);
3459
3460 chunk_appendf(&trash,
3461 " htx=%p flags=0x%x size=%u data=%u used=%u wrap=%s extra=%llu\n",
Christopher Faulet192c6a22019-06-11 16:32:24 +02003462 htx, htx->flags, htx->size, htx->data, htx_nbblks(htx),
Christopher Faulet28f29c72019-04-30 17:55:45 +02003463 (htx->tail >= htx->head) ? "NO" : "YES",
Christopher Fauletb9af8812019-01-04 14:30:44 +01003464 (unsigned long long)htx->extra);
3465 }
Christopher Fauletd4762b82021-10-12 11:02:48 +02003466 if (HAS_FILTERS(strm) && strm_flt(strm)->current[1]) {
3467 struct filter *flt = strm_flt(strm)->current[1];
3468
3469 chunk_appendf(&trash, " current_filter=%p (id=\"%s\" flags=0x%x pre=0x%x post=0x%x) \n",
3470 flt, flt->config->id, flt->flags, flt->pre_analyzers, flt->post_analyzers);
3471 }
Christopher Fauletb9af8812019-01-04 14:30:44 +01003472
Willy Tarreau1274e102021-10-11 09:49:03 +02003473 if (strm->current_rule_list && strm->current_rule) {
3474 const struct act_rule *rule = strm->current_rule;
Christopher Faulet8c67ece2021-10-12 11:10:31 +02003475 chunk_appendf(&trash, " current_rule=\"%s\" [%s:%d]\n", rule->kw->kw, rule->conf.file, rule->conf.line);
Willy Tarreau1274e102021-10-11 09:49:03 +02003476 }
3477
Christopher Faulet908628c2022-03-25 16:43:49 +01003478 if (ci_putchk(cs_ic(cs), &trash) == -1)
Willy Tarreaue6e52362019-01-04 17:42:57 +01003479 goto full;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003480
3481 /* use other states to dump the contents */
3482 }
3483 /* end of dump */
Willy Tarreaue6e52362019-01-04 17:42:57 +01003484 done:
Willy Tarreau39f097d2022-05-03 10:49:00 +02003485 ctx->uid = 0;
3486 ctx->section = 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003487 return 1;
Willy Tarreaue6e52362019-01-04 17:42:57 +01003488 full:
3489 return 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003490}
3491
Aurélien Nephtaliabbf6072018-04-18 13:26:46 +02003492static int cli_parse_show_sess(char **args, char *payload, struct appctx *appctx, void *private)
William Lallemand4c5b4d52016-11-21 08:51:11 +01003493{
Willy Tarreau39f097d2022-05-03 10:49:00 +02003494 struct show_sess_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
3495
William Lallemand4c5b4d52016-11-21 08:51:11 +01003496 if (!cli_has_level(appctx, ACCESS_LVL_OPER))
3497 return 1;
3498
3499 if (*args[2] && strcmp(args[2], "all") == 0)
Willy Tarreau39f097d2022-05-03 10:49:00 +02003500 ctx->target = (void *)-1;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003501 else if (*args[2])
Willy Tarreau39f097d2022-05-03 10:49:00 +02003502 ctx->target = (void *)strtoul(args[2], NULL, 0);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003503 else
Willy Tarreau39f097d2022-05-03 10:49:00 +02003504 ctx->target = NULL;
3505 ctx->section = 0; /* start with stream status */
3506 ctx->pos = 0;
3507 ctx->thr = 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003508
Willy Tarreauf3629f82022-05-03 11:05:39 +02003509 /* The back-ref must be reset, it will be detected and set by
3510 * the dump code upon first invocation.
3511 */
3512 LIST_INIT(&ctx->bref.users);
3513
Willy Tarreaub9813182021-02-24 11:29:51 +01003514 /* let's set our own stream's epoch to the current one and increment
3515 * it so that we know which streams were already there before us.
3516 */
Willy Tarreau0698c802022-05-11 14:09:57 +02003517 appctx_strm(appctx)->stream_epoch = _HA_ATOMIC_FETCH_ADD(&stream_epoch, 1);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003518 return 0;
3519}
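
/* CLI usage sketch for "show sess" (illustrative; the socket path and the
 * stream pointer are examples):
 *
 *     $ echo "show sess" | socat stdio /var/run/haproxy.sock
 *     $ echo "show sess all" | socat stdio /var/run/haproxy.sock
 *     $ echo "show sess 0x7fd3c02a0b10" | socat stdio /var/run/haproxy.sock
 */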
3520
Christopher Faulet6b0a0fb2022-04-04 11:29:28 +02003521/* This function dumps all streams' states onto the conn-stream's
William Lallemand4c5b4d52016-11-21 08:51:11 +01003522 * read buffer. It returns 0 if the output buffer is full and it needs
Willy Tarreaue6e52362019-01-04 17:42:57 +01003523 * to be called again, otherwise non-zero. It proceeds under thread
 3524 * isolation so there is no thread safety issue here.
William Lallemand4c5b4d52016-11-21 08:51:11 +01003525 */
3526static int cli_io_handler_dump_sess(struct appctx *appctx)
3527{
Willy Tarreau39f097d2022-05-03 10:49:00 +02003528 struct show_sess_ctx *ctx = appctx->svcctx;
Willy Tarreau0698c802022-05-11 14:09:57 +02003529 struct conn_stream *cs = appctx_cs(appctx);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003530 struct connection *conn;
3531
Willy Tarreaue6e52362019-01-04 17:42:57 +01003532 thread_isolate();
3533
Willy Tarreaubb4e2892022-05-03 11:10:19 +02003534 if (ctx->thr >= global.nbthread) {
3535 /* already terminated */
3536 goto done;
3537 }
3538
Christopher Faulet908628c2022-03-25 16:43:49 +01003539 if (unlikely(cs_ic(cs)->flags & (CF_WRITE_ERROR|CF_SHUTW))) {
William Lallemand4c5b4d52016-11-21 08:51:11 +01003540 /* If we're forced to shut down, we might have to remove our
3541 * reference to the last stream being dumped.
3542 */
Willy Tarreauf3629f82022-05-03 11:05:39 +02003543 if (!LIST_ISEMPTY(&ctx->bref.users)) {
3544 LIST_DELETE(&ctx->bref.users);
3545 LIST_INIT(&ctx->bref.users);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003546 }
Willy Tarreaue6e52362019-01-04 17:42:57 +01003547 goto done;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003548 }
3549
3550 chunk_reset(&trash);
3551
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003552 /* first, let's detach the back-ref from a possible previous stream */
3553 if (!LIST_ISEMPTY(&ctx->bref.users)) {
3554 LIST_DELETE(&ctx->bref.users);
3555 LIST_INIT(&ctx->bref.users);
3556 } else if (!ctx->bref.ref) {
3557 /* first call, start with first stream */
3558 ctx->bref.ref = ha_thread_ctx[ctx->thr].streams.n;
3559 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003560
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003561 /* and start from where we stopped */
3562 while (1) {
3563 char pn[INET6_ADDRSTRLEN];
3564 struct stream *curr_strm;
 3565 int done = 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003566
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003567 if (ctx->bref.ref == &ha_thread_ctx[ctx->thr].streams)
3568 done = 1;
3569 else {
3570 /* check if we've found a stream created after issuing the "show sess" */
3571 curr_strm = LIST_ELEM(ctx->bref.ref, struct stream *, list);
Willy Tarreau0698c802022-05-11 14:09:57 +02003572 if ((int)(curr_strm->stream_epoch - appctx_strm(appctx)->stream_epoch) > 0)
Willy Tarreaua698eb62021-02-24 10:37:01 +01003573 done = 1;
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003574 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003575
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003576 if (done) {
3577 ctx->thr++;
3578 if (ctx->thr >= global.nbthread)
3579 break;
3580 ctx->bref.ref = ha_thread_ctx[ctx->thr].streams.n;
3581 continue;
3582 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003583
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003584 if (ctx->target) {
3585 if (ctx->target != (void *)-1 && ctx->target != curr_strm)
3586 goto next_sess;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003587
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003588 LIST_APPEND(&curr_strm->back_refs, &ctx->bref.users);
3589 /* call the proper dump() function and return if we're missing space */
3590 if (!stats_dump_full_strm_to_buffer(cs, curr_strm))
3591 goto full;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003592
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003593 /* stream dump complete */
3594 LIST_DELETE(&ctx->bref.users);
3595 LIST_INIT(&ctx->bref.users);
3596 if (ctx->target != (void *)-1) {
3597 ctx->target = NULL;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003598 break;
3599 }
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003600 else
3601 goto next_sess;
3602 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003603
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003604 chunk_appendf(&trash,
3605 "%p: proto=%s",
3606 curr_strm,
3607 strm_li(curr_strm) ? strm_li(curr_strm)->rx.proto->name : "?");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003608
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003609 conn = objt_conn(strm_orig(curr_strm));
3610 switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
3611 case AF_INET:
3612 case AF_INET6:
William Lallemand4c5b4d52016-11-21 08:51:11 +01003613 chunk_appendf(&trash,
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003614 " src=%s:%d fe=%s be=%s srv=%s",
3615 pn,
3616 get_host_port(conn->src),
3617 strm_fe(curr_strm)->id,
3618 (curr_strm->be->cap & PR_CAP_BE) ? curr_strm->be->id : "<NONE>",
3619 objt_server(curr_strm->target) ? __objt_server(curr_strm->target)->id : "<none>"
3620 );
3621 break;
3622 case AF_UNIX:
William Lallemand4c5b4d52016-11-21 08:51:11 +01003623 chunk_appendf(&trash,
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003624 " src=unix:%d fe=%s be=%s srv=%s",
3625 strm_li(curr_strm)->luid,
3626 strm_fe(curr_strm)->id,
3627 (curr_strm->be->cap & PR_CAP_BE) ? curr_strm->be->id : "<NONE>",
3628 objt_server(curr_strm->target) ? __objt_server(curr_strm->target)->id : "<none>"
3629 );
3630 break;
3631 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003632
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003633 chunk_appendf(&trash,
3634 " ts=%02x epoch=%#x age=%s calls=%u rate=%u cpu=%llu lat=%llu",
3635 curr_strm->task->state, curr_strm->stream_epoch,
3636 human_time(now.tv_sec - curr_strm->logs.tv_accept.tv_sec, 1),
3637 curr_strm->task->calls, read_freq_ctr(&curr_strm->call_rate),
3638 (unsigned long long)curr_strm->task->cpu_time, (unsigned long long)curr_strm->task->lat_time);
William Lallemand4c5b4d52016-11-21 08:51:11 +01003639
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003640 chunk_appendf(&trash,
3641 " rq[f=%06xh,i=%u,an=%02xh,rx=%s",
3642 curr_strm->req.flags,
3643 (unsigned int)ci_data(&curr_strm->req),
3644 curr_strm->req.analysers,
3645 curr_strm->req.rex ?
3646 human_time(TICKS_TO_MS(curr_strm->req.rex - now_ms),
3647 TICKS_TO_MS(1000)) : "");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003648
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003649 chunk_appendf(&trash,
3650 ",wx=%s",
3651 curr_strm->req.wex ?
3652 human_time(TICKS_TO_MS(curr_strm->req.wex - now_ms),
3653 TICKS_TO_MS(1000)) : "");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003654
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003655 chunk_appendf(&trash,
3656 ",ax=%s]",
3657 curr_strm->req.analyse_exp ?
3658 human_time(TICKS_TO_MS(curr_strm->req.analyse_exp - now_ms),
3659 TICKS_TO_MS(1000)) : "");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003660
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003661 chunk_appendf(&trash,
3662 " rp[f=%06xh,i=%u,an=%02xh,rx=%s",
3663 curr_strm->res.flags,
3664 (unsigned int)ci_data(&curr_strm->res),
3665 curr_strm->res.analysers,
3666 curr_strm->res.rex ?
3667 human_time(TICKS_TO_MS(curr_strm->res.rex - now_ms),
3668 TICKS_TO_MS(1000)) : "");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003669
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003670 chunk_appendf(&trash,
3671 ",wx=%s",
3672 curr_strm->res.wex ?
3673 human_time(TICKS_TO_MS(curr_strm->res.wex - now_ms),
3674 TICKS_TO_MS(1000)) : "");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003675
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003676 chunk_appendf(&trash,
3677 ",ax=%s]",
3678 curr_strm->res.analyse_exp ?
3679 human_time(TICKS_TO_MS(curr_strm->res.analyse_exp - now_ms),
3680 TICKS_TO_MS(1000)) : "");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003681
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003682 conn = cs_conn(curr_strm->csf);
3683 chunk_appendf(&trash,
3684 " csf=[%d,%1xh,fd=%d]",
3685 curr_strm->csf->state,
3686 curr_strm->csf->flags,
3687 conn_fd(conn));
William Lallemand4c5b4d52016-11-21 08:51:11 +01003688
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003689 conn = cs_conn(curr_strm->csb);
3690 chunk_appendf(&trash,
3691 " csb=[%d,%1xh,fd=%d]",
3692 curr_strm->csb->state,
3693 curr_strm->csb->flags,
3694 conn_fd(conn));
3695
3696 chunk_appendf(&trash,
3697 " exp=%s rc=%d c_exp=%s",
3698 curr_strm->task->expire ?
3699 human_time(TICKS_TO_MS(curr_strm->task->expire - now_ms),
3700 TICKS_TO_MS(1000)) : "",
3701 curr_strm->conn_retries,
3702 curr_strm->conn_exp ?
3703 human_time(TICKS_TO_MS(curr_strm->conn_exp - now_ms),
3704 TICKS_TO_MS(1000)) : "");
3705 if (task_in_rq(curr_strm->task))
3706 chunk_appendf(&trash, " run(nice=%d)", curr_strm->task->nice);
3707
3708 chunk_appendf(&trash, "\n");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003709
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003710 if (ci_putchk(cs_ic(cs), &trash) == -1) {
3711 /* let's try again later from this stream. We add ourselves into
3712 * this stream's users so that it can remove us upon termination.
3713 */
3714 LIST_APPEND(&curr_strm->back_refs, &ctx->bref.users);
3715 goto full;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003716 }
3717
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003718 next_sess:
3719 ctx->bref.ref = curr_strm->list.n;
3720 }
William Lallemand4c5b4d52016-11-21 08:51:11 +01003721
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003722 if (ctx->target && ctx->target != (void *)-1) {
3723 /* specified stream not found */
3724 if (ctx->section > 0)
3725 chunk_appendf(&trash, " *** session terminated while we were watching it ***\n");
3726 else
3727 chunk_appendf(&trash, "Session not found.\n");
William Lallemand4c5b4d52016-11-21 08:51:11 +01003728
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003729 if (ci_putchk(cs_ic(cs), &trash) == -1)
3730 goto full;
3731
3732 ctx->target = NULL;
3733 ctx->uid = 0;
3734 goto done;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003735 }
Willy Tarreau6177cfc2022-05-03 11:17:35 +02003736
Willy Tarreaue6e52362019-01-04 17:42:57 +01003737 done:
3738 thread_release();
3739 return 1;
3740 full:
3741 thread_release();
Christopher Fauleta0bdec32022-04-04 07:51:21 +02003742 cs_rx_room_blk(cs);
Willy Tarreaue6e52362019-01-04 17:42:57 +01003743 return 0;
William Lallemand4c5b4d52016-11-21 08:51:11 +01003744}
3745
3746static void cli_release_show_sess(struct appctx *appctx)
3747{
Willy Tarreau39f097d2022-05-03 10:49:00 +02003748 struct show_sess_ctx *ctx = appctx->svcctx;
3749
Willy Tarreaubb4e2892022-05-03 11:10:19 +02003750 if (ctx->thr < global.nbthread) {
Willy Tarreau49de6852021-02-24 13:46:12 +01003751 /* a dump was aborted, either in error or timeout. We need to
3752 * safely detach from the target stream's list. It's mandatory
3753 * to lock because a stream on the target thread could be moving
3754 * our node.
3755 */
3756 thread_isolate();
Willy Tarreau39f097d2022-05-03 10:49:00 +02003757 if (!LIST_ISEMPTY(&ctx->bref.users))
3758 LIST_DELETE(&ctx->bref.users);
Willy Tarreau49de6852021-02-24 13:46:12 +01003759 thread_release();
William Lallemand4c5b4d52016-11-21 08:51:11 +01003760 }
3761}
3762
Willy Tarreau61b65212016-11-24 11:09:25 +01003763/* Parses the "shutdown session" directive, it always returns 1 */
Aurélien Nephtaliabbf6072018-04-18 13:26:46 +02003764static int cli_parse_shutdown_session(char **args, char *payload, struct appctx *appctx, void *private)
Willy Tarreau61b65212016-11-24 11:09:25 +01003765{
3766 struct stream *strm, *ptr;
Willy Tarreaua698eb62021-02-24 10:37:01 +01003767 int thr;
Willy Tarreau61b65212016-11-24 11:09:25 +01003768
3769 if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
3770 return 1;
3771
Willy Tarreauc40c4072022-03-31 14:49:45 +02003772 ptr = (void *)strtoul(args[2], NULL, 0);
3773 if (!ptr)
Willy Tarreau9d008692019-08-09 11:21:01 +02003774 return cli_err(appctx, "Session pointer expected (use 'show sess').\n");
Willy Tarreau61b65212016-11-24 11:09:25 +01003775
Willy Tarreaua698eb62021-02-24 10:37:01 +01003776 strm = NULL;
Willy Tarreau61b65212016-11-24 11:09:25 +01003777
Willy Tarreau3f5dd292021-02-24 11:11:06 +01003778 thread_isolate();
3779
Willy Tarreau61b65212016-11-24 11:09:25 +01003780 /* first, look for the requested stream in the stream table */
Willy Tarreauc40c4072022-03-31 14:49:45 +02003781 for (thr = 0; strm != ptr && thr < global.nbthread; thr++) {
Willy Tarreaub4e34762021-09-30 19:02:18 +02003782 list_for_each_entry(strm, &ha_thread_ctx[thr].streams, list) {
Willy Tarreaua698eb62021-02-24 10:37:01 +01003783 if (strm == ptr) {
3784 stream_shutdown(strm, SF_ERR_KILLED);
3785 break;
3786 }
Willy Tarreau3f5dd292021-02-24 11:11:06 +01003787 }
Willy Tarreau61b65212016-11-24 11:09:25 +01003788 }
3789
Willy Tarreau3f5dd292021-02-24 11:11:06 +01003790 thread_release();
3791
Willy Tarreau61b65212016-11-24 11:09:25 +01003792 /* do we have the stream ? */
Willy Tarreauc40c4072022-03-31 14:49:45 +02003793 if (strm != ptr)
Willy Tarreau9d008692019-08-09 11:21:01 +02003794 return cli_err(appctx, "No such session (use 'show sess').\n");
Willy Tarreau61b65212016-11-24 11:09:25 +01003795
Willy Tarreau61b65212016-11-24 11:09:25 +01003796 return 1;
3797}
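
/* CLI usage sketch (illustrative; the pointer must come from a prior
 * "show sess" and the socket path is an example):
 *
 *     $ echo "shutdown session 0x7fd3c02a0b10" | socat stdio /var/run/haproxy.sock
 */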
3798
Willy Tarreau4e46b622016-11-23 16:50:48 +01003799/* Parses the "shutdown sessions server" directive, it always returns 1 */
Aurélien Nephtaliabbf6072018-04-18 13:26:46 +02003800static int cli_parse_shutdown_sessions_server(char **args, char *payload, struct appctx *appctx, void *private)
Willy Tarreau4e46b622016-11-23 16:50:48 +01003801{
3802 struct server *sv;
Willy Tarreau4e46b622016-11-23 16:50:48 +01003803
3804 if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
3805 return 1;
3806
3807 sv = cli_find_server(appctx, args[3]);
3808 if (!sv)
3809 return 1;
3810
 3811 /* kill all the streams that are on this server */
Christopher Faulet2a944ee2017-11-07 10:42:54 +01003812 HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
Willy Tarreaud9e26a72019-11-14 16:37:16 +01003813 srv_shutdown_streams(sv, SF_ERR_KILLED);
Christopher Faulet2a944ee2017-11-07 10:42:54 +01003814 HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
Willy Tarreau4e46b622016-11-23 16:50:48 +01003815 return 1;
3816}
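
/* CLI usage sketch (illustrative backend/server names and socket path):
 *
 *     $ echo "shutdown sessions server bk_web/srv1" | socat stdio /var/run/haproxy.sock
 */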
3817
William Lallemand4c5b4d52016-11-21 08:51:11 +01003818/* register cli keywords */
3819static struct cli_kw_list cli_kws = {{ },{
Willy Tarreaub205bfd2021-05-07 11:38:37 +02003820 { { "show", "sess", NULL }, "show sess [id] : report the list of current sessions or dump this exact session", cli_parse_show_sess, cli_io_handler_dump_sess, cli_release_show_sess },
3821 { { "shutdown", "session", NULL }, "shutdown session [id] : kill a specific session", cli_parse_shutdown_session, NULL, NULL },
3822 { { "shutdown", "sessions", "server" }, "shutdown sessions server <bk>/<srv> : kill sessions on a server", cli_parse_shutdown_sessions_server, NULL, NULL },
William Lallemand4c5b4d52016-11-21 08:51:11 +01003823 {{},}
3824}};
3825
Willy Tarreau0108d902018-11-25 19:14:37 +01003826INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
3827
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003828/* main configuration keyword registration. */
Christopher Faulet551a6412021-06-25 14:35:29 +02003829static struct action_kw_list stream_tcp_req_keywords = { ILH, {
3830 { "set-log-level", stream_parse_set_log_level },
Christopher Faulet1da374a2021-06-25 14:46:02 +02003831 { "set-nice", stream_parse_set_nice },
Christopher Faulet551a6412021-06-25 14:35:29 +02003832 { "switch-mode", stream_parse_switch_mode },
3833 { "use-service", stream_parse_use_service },
3834 { /* END */ }
3835}};
3836
3837INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &stream_tcp_req_keywords);
3838
3839/* main configuration keyword registration. */
3840static struct action_kw_list stream_tcp_res_keywords = { ILH, {
3841 { "set-log-level", stream_parse_set_log_level },
Christopher Faulet1da374a2021-06-25 14:46:02 +02003842 { "set-nice", stream_parse_set_nice },
Christopher Faulet551a6412021-06-25 14:35:29 +02003843 { /* END */ }
3844}};
3845
3846INITCALL1(STG_REGISTER, tcp_res_cont_keywords_register, &stream_tcp_res_keywords);
3847
3848static struct action_kw_list stream_http_req_keywords = { ILH, {
3849 { "set-log-level", stream_parse_set_log_level },
Christopher Faulet1da374a2021-06-25 14:46:02 +02003850 { "set-nice", stream_parse_set_nice },
Christopher Faulet551a6412021-06-25 14:35:29 +02003851 { "use-service", stream_parse_use_service },
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003852 { /* END */ }
3853}};
3854
Christopher Faulet551a6412021-06-25 14:35:29 +02003855INITCALL1(STG_REGISTER, http_req_keywords_register, &stream_http_req_keywords);
Willy Tarreau0108d902018-11-25 19:14:37 +01003856
Christopher Faulet551a6412021-06-25 14:35:29 +02003857static struct action_kw_list stream_http_res_keywords = { ILH, {
3858 { "set-log-level", stream_parse_set_log_level },
Christopher Faulet1da374a2021-06-25 14:46:02 +02003859 { "set-nice", stream_parse_set_nice },
Thierry FOURNIER5a363e72015-09-27 19:29:33 +02003860 { /* END */ }
3861}};
3862
Christopher Faulet551a6412021-06-25 14:35:29 +02003863INITCALL1(STG_REGISTER, http_res_keywords_register, &stream_http_res_keywords);
Willy Tarreau8b22a712010-06-18 17:46:06 +02003864
Amaury Denoyellef7719a22020-12-10 13:43:58 +01003865static int smp_fetch_cur_server_timeout(const struct arg *args, struct sample *smp, const char *kw, void *private)
3866{
3867 smp->flags = SMP_F_VOL_TXN;
3868 smp->data.type = SMP_T_SINT;
3869 if (!smp->strm)
3870 return 0;
3871
3872 smp->data.u.sint = TICKS_TO_MS(smp->strm->res.rto);
3873 return 1;
3874}
3875
 3876static int smp_fetch_cur_tunnel_timeout(const struct arg *args, struct sample *smp, const char *kw, void *private)
3877{
3878 smp->flags = SMP_F_VOL_TXN;
3879 smp->data.type = SMP_T_SINT;
3880 if (!smp->strm)
3881 return 0;
3882
3883 smp->data.u.sint = TICKS_TO_MS(smp->strm->tunnel_timeout);
3884 return 1;
3885}
3886
Willy Tarreau0657b932022-03-09 17:33:05 +01003887static int smp_fetch_last_rule_file(const struct arg *args, struct sample *smp, const char *kw, void *private)
3888{
3889 smp->flags = SMP_F_VOL_TXN;
3890 smp->data.type = SMP_T_STR;
3891 if (!smp->strm || !smp->strm->last_rule_file)
3892 return 0;
3893
3894 smp->flags |= SMP_F_CONST;
3895 smp->data.u.str.area = (char *)smp->strm->last_rule_file;
3896 smp->data.u.str.data = strlen(smp->strm->last_rule_file);
3897 return 1;
3898}
3899
 3900static int smp_fetch_last_rule_line(const struct arg *args, struct sample *smp, const char *kw, void *private)
3901{
3902 smp->flags = SMP_F_VOL_TXN;
3903 smp->data.type = SMP_T_SINT;
3904 if (!smp->strm || !smp->strm->last_rule_line)
3905 return 0;
3906
3907 smp->data.u.sint = smp->strm->last_rule_line;
3908 return 1;
3909}
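
/* Configuration sketch using the sample fetches above (illustrative only;
 * header name and log format are made up):
 *
 *     http-response set-header X-Last-Rule "%[last_rule_file]:%[last_rule_line]"
 *     log-format "%ci srv_to=%[cur_server_timeout] tun_to=%[cur_tunnel_timeout]"
 */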
3910
Amaury Denoyelle12bada52020-12-10 13:43:57 +01003911/* Note: must not be declared <const> as its list will be overwritten.
3912 * Please take care of keeping this list alphabetically sorted.
3913 */
3914static struct sample_fetch_kw_list smp_kws = {ILH, {
Amaury Denoyellef7719a22020-12-10 13:43:58 +01003915 { "cur_server_timeout", smp_fetch_cur_server_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
3916 { "cur_tunnel_timeout", smp_fetch_cur_tunnel_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
Willy Tarreau0657b932022-03-09 17:33:05 +01003917 { "last_rule_file", smp_fetch_last_rule_file, 0, NULL, SMP_T_STR, SMP_USE_INTRN, },
3918 { "last_rule_line", smp_fetch_last_rule_line, 0, NULL, SMP_T_SINT, SMP_USE_INTRN, },
Amaury Denoyelle12bada52020-12-10 13:43:57 +01003919 { NULL, NULL, 0, 0, 0 },
3920}};
3921
3922INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);
3923
Willy Tarreaubaaee002006-06-26 02:48:02 +02003924/*
3925 * Local variables:
3926 * c-indent-level: 8
3927 * c-basic-offset: 8
3928 * End:
3929 */