Christopher Faulet1329f2a2021-12-16 17:32:56 +01001/*
Willy Tarreau4596fe22022-05-17 19:07:51 +02002 * stream connector management functions
Christopher Faulet1329f2a2021-12-16 17:32:56 +01003 *
4 * Copyright 2021 Christopher Faulet <cfaulet@haproxy.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <haproxy/api.h>
Christopher Faulet37046632022-04-01 11:36:58 +020014#include <haproxy/applet.h>
Christopher Faulet1329f2a2021-12-16 17:32:56 +010015#include <haproxy/connection.h>
Christopher Faulet5e29b762022-04-04 08:58:34 +020016#include <haproxy/check.h>
17#include <haproxy/http_ana.h>
18#include <haproxy/pipe.h>
Christopher Faulet1329f2a2021-12-16 17:32:56 +010019#include <haproxy/pool.h>
Willy Tarreau5edca2f2022-05-27 09:25:10 +020020#include <haproxy/sc_strm.h>
Willy Tarreaucb086c62022-05-27 09:47:12 +020021#include <haproxy/stconn.h>
Christopher Faulet1329f2a2021-12-16 17:32:56 +010022
Willy Tarreau4596fe22022-05-17 19:07:51 +020023DECLARE_POOL(pool_head_connstream, "stconn", sizeof(struct stconn));
Willy Tarreauea59b022022-05-17 17:53:22 +020024DECLARE_POOL(pool_head_sedesc, "sedesc", sizeof(struct sedesc));
Christopher Faulet1329f2a2021-12-16 17:32:56 +010025
Willy Tarreau3a3f4802022-05-17 18:28:19 +020026/* functions used by default on a detached stream connector */
Willy Tarreau0adb2812022-05-27 10:02:48 +020027static void sc_app_shutr(struct stconn *sc);
28static void sc_app_shutw(struct stconn *sc);
29static void sc_app_chk_rcv(struct stconn *sc);
30static void sc_app_chk_snd(struct stconn *sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +020031
Willy Tarreau3a3f4802022-05-17 18:28:19 +020032/* functions used on a mux-based stream connector */
Willy Tarreau0adb2812022-05-27 10:02:48 +020033static void sc_app_shutr_conn(struct stconn *sc);
34static void sc_app_shutw_conn(struct stconn *sc);
35static void sc_app_chk_rcv_conn(struct stconn *sc);
36static void sc_app_chk_snd_conn(struct stconn *sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +020037
Willy Tarreau3a3f4802022-05-17 18:28:19 +020038/* functions used on an applet-based stream connector */
Willy Tarreau0adb2812022-05-27 10:02:48 +020039static void sc_app_shutr_applet(struct stconn *sc);
40static void sc_app_shutw_applet(struct stconn *sc);
41static void sc_app_chk_rcv_applet(struct stconn *sc);
42static void sc_app_chk_snd_applet(struct stconn *sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +020043
Willy Tarreau0adb2812022-05-27 10:02:48 +020044static int sc_conn_process(struct stconn *sc);
45static int sc_conn_recv(struct stconn *sc);
46static int sc_conn_send(struct stconn *sc);
47static int sc_applet_process(struct stconn *sc);
Willy Tarreau2f2318d2022-05-18 10:17:16 +020048
Willy Tarreau3a3f4802022-05-17 18:28:19 +020049/* stream connector operations for connections */
50struct sc_app_ops sc_app_conn_ops = {
51 .chk_rcv = sc_app_chk_rcv_conn,
52 .chk_snd = sc_app_chk_snd_conn,
53 .shutr = sc_app_shutr_conn,
54 .shutw = sc_app_shutw_conn,
Willy Tarreau462b9892022-05-18 18:06:53 +020055 .wake = sc_conn_process,
Willy Tarreau2f2318d2022-05-18 10:17:16 +020056 .name = "STRM",
Christopher Faulet9ffddd52022-04-01 14:04:29 +020057};
58
Willy Tarreau3a3f4802022-05-17 18:28:19 +020059/* stream connector operations for embedded tasks */
60struct sc_app_ops sc_app_embedded_ops = {
61 .chk_rcv = sc_app_chk_rcv,
62 .chk_snd = sc_app_chk_snd,
63 .shutr = sc_app_shutr,
64 .shutw = sc_app_shutw,
Willy Tarreau2f2318d2022-05-18 10:17:16 +020065 .wake = NULL, /* may never be used */
66 .name = "NONE", /* may never be used */
Christopher Faulet9ffddd52022-04-01 14:04:29 +020067};
68
Willy Tarreau2f2318d2022-05-18 10:17:16 +020069/* stream connector operations for applets */
Willy Tarreau3a3f4802022-05-17 18:28:19 +020070struct sc_app_ops sc_app_applet_ops = {
71 .chk_rcv = sc_app_chk_rcv_applet,
72 .chk_snd = sc_app_chk_snd_applet,
73 .shutr = sc_app_shutr_applet,
74 .shutw = sc_app_shutw_applet,
Willy Tarreau19c65a92022-05-27 08:49:24 +020075 .wake = sc_applet_process,
Christopher Faulet5e29b762022-04-04 08:58:34 +020076 .name = "STRM",
77};
78
Willy Tarreau2f2318d2022-05-18 10:17:16 +020079/* stream connector operations for health checks on connections */
80struct sc_app_ops sc_app_check_ops = {
81 .chk_rcv = NULL,
82 .chk_snd = NULL,
83 .shutr = NULL,
84 .shutw = NULL,
85 .wake = wake_srv_chk,
86 .name = "CHCK",
87};
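
/* Illustrative sketch (not part of the original file): the stream layer drives
 * a connector through whichever of the tables above is installed in
 * sc->app_ops, so an application-level shutdown roughly boils down to an
 * indirect call such as the one below. The helper name do_shutw() is
 * hypothetical and only shows the dispatch pattern.
 *
 *	static inline void do_shutw(struct stconn *sc)
 *	{
 *		if (sc->app_ops->shutw)
 *			sc->app_ops->shutw(sc);  // sc_app_shutw{,_conn,_applet}()
 *	}
 */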
Christopher Faulet5e29b762022-04-04 08:58:34 +020088
Christopher Faulet9ed77422022-04-12 08:51:15 +020089/* Initializes an endpoint */
Willy Tarreauea59b022022-05-17 17:53:22 +020090void sedesc_init(struct sedesc *sedesc)
Christopher Fauletdb90f2a2022-03-22 16:06:25 +010091{
Willy Tarreauea59b022022-05-17 17:53:22 +020092 sedesc->se = NULL;
93 sedesc->conn = NULL;
Willy Tarreauc1054922022-05-18 07:43:52 +020094 sedesc->sc = NULL;
Christopher Faulet4c135682023-02-16 11:09:31 +010095 sedesc->lra = TICK_ETERNITY;
96 sedesc->fsb = TICK_ETERNITY;
Willy Tarreauea59b022022-05-17 17:53:22 +020097 se_fl_setall(sedesc, SE_FL_NONE);
Christopher Fauletdb90f2a2022-03-22 16:06:25 +010098}
99
Christopher Faulet9ed77422022-04-12 08:51:15 +0200100/* Tries to alloc an endpoint and initialize it. Returns NULL on failure. */
Willy Tarreauea59b022022-05-17 17:53:22 +0200101struct sedesc *sedesc_new()
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100102{
Willy Tarreauea59b022022-05-17 17:53:22 +0200103 struct sedesc *sedesc;
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100104
Willy Tarreauea59b022022-05-17 17:53:22 +0200105 sedesc = pool_alloc(pool_head_sedesc);
106 if (unlikely(!sedesc))
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100107 return NULL;
108
Willy Tarreauea59b022022-05-17 17:53:22 +0200109 sedesc_init(sedesc);
110 return sedesc;
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100111}
112
Christopher Faulet9ed77422022-04-12 08:51:15 +0200113/* Releases an endpoint. It is the caller's responsibility to make sure it is
 114 * safe to do so and that it is not shared with another entity.
115 */
Willy Tarreauea59b022022-05-17 17:53:22 +0200116void sedesc_free(struct sedesc *sedesc)
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100117{
Willy Tarreauea59b022022-05-17 17:53:22 +0200118 pool_free(pool_head_sedesc, sedesc);
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100119}
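
/* Illustrative sketch (not part of the original file): typical life cycle of a
 * standalone endpoint descriptor using the helpers above. In practice sc_new()
 * below allocates one itself when no descriptor is passed in.
 *
 *	struct sedesc *sd = sedesc_new();   // allocates and runs sedesc_init()
 *	if (!sd)
 *		return -1;                  // pool exhausted
 *	...                                 // hand it to a mux or an applet
 *	sedesc_free(sd);                    // only once nothing references it
 */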
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100120
Willy Tarreau4596fe22022-05-17 19:07:51 +0200121/* Tries to allocate a new stconn and initialize its main fields. On
Christopher Faulet9ed77422022-04-12 08:51:15 +0200122 * failure, nothing is allocated and NULL is returned. It is an internal
Willy Tarreaub605c422022-05-17 17:04:55 +0200123 * function. The caller must, at least, set the SE_FL_ORPHAN or SE_FL_DETACHED
Christopher Faulet9ed77422022-04-12 08:51:15 +0200124 * flag.
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100125 */
Willy Tarreaua0b58b52022-05-27 08:33:53 +0200126static struct stconn *sc_new(struct sedesc *sedesc)
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100127{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200128 struct stconn *sc;
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100129
Willy Tarreau0adb2812022-05-27 10:02:48 +0200130 sc = pool_alloc(pool_head_connstream);
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100131
Willy Tarreau0adb2812022-05-27 10:02:48 +0200132 if (unlikely(!sc))
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100133 goto alloc_error;
Christopher Fauletbb772d02022-03-22 15:28:36 +0100134
Willy Tarreau1d2c79a2022-05-27 11:15:19 +0200135 sc->obj_type = OBJ_TYPE_SC;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200136 sc->flags = SC_FL_NONE;
137 sc->state = SC_ST_INI;
Christopher Fauletbe5cc762023-02-20 08:41:55 +0100138 sc->ioto = TICK_ETERNITY;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200139 sc->app = NULL;
140 sc->app_ops = NULL;
141 sc->src = NULL;
142 sc->dst = NULL;
143 sc->wait_event.tasklet = NULL;
144 sc->wait_event.events = 0;
Christopher Faulet2f35e7b2022-03-31 11:09:28 +0200145
Christopher Faulet9ed77422022-04-12 08:51:15 +0200146 /* If there is no endpoint, allocate a new one now */
Willy Tarreauea59b022022-05-17 17:53:22 +0200147 if (!sedesc) {
148 sedesc = sedesc_new();
149 if (unlikely(!sedesc))
Christopher Fauletb669d682022-03-22 18:37:19 +0100150 goto alloc_error;
151 }
Willy Tarreau0adb2812022-05-27 10:02:48 +0200152 sc->sedesc = sedesc;
153 sedesc->sc = sc;
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100154
Willy Tarreau0adb2812022-05-27 10:02:48 +0200155 return sc;
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100156
157 alloc_error:
Willy Tarreau0adb2812022-05-27 10:02:48 +0200158 pool_free(pool_head_connstream, sc);
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100159 return NULL;
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100160}
161
Willy Tarreau31219282022-05-27 16:21:33 +0200162/* Creates a new stream connector and its associated stream from a mux. <sd> must
163 * be defined. It returns NULL on error. On success, the new stream connector is
Willy Tarreaub605c422022-05-17 17:04:55 +0200164 * returned. In this case, the SE_FL_ORPHAN flag is removed.
Christopher Faulet9ed77422022-04-12 08:51:15 +0200165 */
Willy Tarreau31219282022-05-27 16:21:33 +0200166struct stconn *sc_new_from_endp(struct sedesc *sd, struct session *sess, struct buffer *input)
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100167{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200168 struct stconn *sc;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100169
Willy Tarreau31219282022-05-27 16:21:33 +0200170 sc = sc_new(sd);
Willy Tarreau0adb2812022-05-27 10:02:48 +0200171 if (unlikely(!sc))
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100172 return NULL;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200173 if (unlikely(!stream_new(sess, sc, input))) {
Christopher Faulet3ab72c62022-09-27 09:18:20 +0200174 sd->sc = NULL;
Willy Tarreau7a8ca0a2023-03-20 19:53:14 +0100175 if (sc->sedesc != sd) {
176 /* none was provided so sc_new() allocated one */
177 sedesc_free(sc->sedesc);
178 }
179 pool_free(pool_head_connstream, sc);
Christopher Faulet3ab72c62022-09-27 09:18:20 +0200180 se_fl_set(sd, SE_FL_ORPHAN);
181 return NULL;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100182 }
Willy Tarreau31219282022-05-27 16:21:33 +0200183 se_fl_clr(sd, SE_FL_ORPHAN);
Willy Tarreau0adb2812022-05-27 10:02:48 +0200184 return sc;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100185}
186
Willy Tarreau4596fe22022-05-17 19:07:51 +0200187/* Creates a new stream connector from a stream. There is no endpoint here, thus it
Willy Tarreaua0b58b52022-05-27 08:33:53 +0200188 * will be created by sc_new(). So the SE_FL_DETACHED flag is set. It returns
Willy Tarreau4596fe22022-05-17 19:07:51 +0200189 * NULL on error. On success, the new stream connector is returned.
Christopher Faulet9ed77422022-04-12 08:51:15 +0200190 */
Willy Tarreaua0b58b52022-05-27 08:33:53 +0200191struct stconn *sc_new_from_strm(struct stream *strm, unsigned int flags)
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100192{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200193 struct stconn *sc;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100194
Willy Tarreau0adb2812022-05-27 10:02:48 +0200195 sc = sc_new(NULL);
196 if (unlikely(!sc))
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100197 return NULL;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200198 sc->flags |= flags;
199 sc_ep_set(sc, SE_FL_DETACHED);
200 sc->app = &strm->obj_type;
201 sc->app_ops = &sc_app_embedded_ops;
202 return sc;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100203}
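
/* Illustrative sketch (not part of the original file): roughly how a stream
 * would create its detached back-side connector; <strm> is assumed to be a
 * valid struct stream owned by the caller.
 *
 *	struct stconn *back_sc = sc_new_from_strm(strm, SC_FL_ISBACK);
 *	if (!back_sc)
 *		return 0;   // allocation failed, nothing to release
 *	// back_sc starts in SC_ST_INI with SE_FL_DETACHED set on its endpoint
 */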
204
Willy Tarreau4596fe22022-05-17 19:07:51 +0200205/* Creates a new stream connector from a health-check. There is no endpoint here,
Willy Tarreaua0b58b52022-05-27 08:33:53 +0200206 * thus it will be created by sc_new(). So the SE_FL_DETACHED flag is set. It
Willy Tarreau4596fe22022-05-17 19:07:51 +0200207 * returns NULL on error. On success, the new stream connector is returned.
Christopher Faulet9ed77422022-04-12 08:51:15 +0200208 */
Willy Tarreaua0b58b52022-05-27 08:33:53 +0200209struct stconn *sc_new_from_check(struct check *check, unsigned int flags)
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100210{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200211 struct stconn *sc;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100212
Willy Tarreau0adb2812022-05-27 10:02:48 +0200213 sc = sc_new(NULL);
214 if (unlikely(!sc))
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100215 return NULL;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200216 sc->flags |= flags;
217 sc_ep_set(sc, SE_FL_DETACHED);
218 sc->app = &check->obj_type;
219 sc->app_ops = &sc_app_check_ops;
220 return sc;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100221}
222
Willy Tarreaua0b58b52022-05-27 08:33:53 +0200223/* Releases a stconn previously allocated by sc_new(), as well as its
Christopher Faulet9ed77422022-04-12 08:51:15 +0200224 * endpoint, if it exists. This function is called internally or on the error path.
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100225 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200226void sc_free(struct stconn *sc)
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100227{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200228 sockaddr_free(&sc->src);
229 sockaddr_free(&sc->dst);
230 if (sc->sedesc) {
231 BUG_ON(!sc_ep_test(sc, SE_FL_DETACHED));
232 sedesc_free(sc->sedesc);
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100233 }
Willy Tarreau0adb2812022-05-27 10:02:48 +0200234 if (sc->wait_event.tasklet)
235 tasklet_free(sc->wait_event.tasklet);
236 pool_free(pool_head_connstream, sc);
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100237}
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100238
Willy Tarreau4596fe22022-05-17 19:07:51 +0200239/* Conditionally removes a stream connector if it is detached and if there is no app
Christopher Fauleteb50c012022-04-21 14:22:53 +0200240 * layer defined. Except on the error path, this one must be used. If released,
Willy Tarreaue68bc612022-05-27 11:23:05 +0200241 * the pointer to the SC is set to NULL.
Christopher Fauletaa69d8f2022-04-12 18:09:48 +0200242 */
Willy Tarreaue68bc612022-05-27 11:23:05 +0200243static void sc_free_cond(struct stconn **scp)
Christopher Fauletaa69d8f2022-04-12 18:09:48 +0200244{
Willy Tarreaue68bc612022-05-27 11:23:05 +0200245 struct stconn *sc = *scp;
Christopher Fauleteb50c012022-04-21 14:22:53 +0200246
Willy Tarreau0adb2812022-05-27 10:02:48 +0200247 if (!sc->app && (!sc->sedesc || sc_ep_test(sc, SE_FL_DETACHED))) {
248 sc_free(sc);
Willy Tarreaue68bc612022-05-27 11:23:05 +0200249 *scp = NULL;
Christopher Fauleteb50c012022-04-21 14:22:53 +0200250 }
Christopher Fauletaa69d8f2022-04-12 18:09:48 +0200251}
252
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100253
Willy Tarreau4596fe22022-05-17 19:07:51 +0200254/* Attaches a stconn to a mux endpoint and sets the endpoint ctx. Returns
Ilya Shipitsin3b64a282022-07-29 22:26:53 +0500255 * -1 on error and 0 on success. The SE_FL_DETACHED flag is removed. This function is
Christopher Faulet9ed77422022-04-12 08:51:15 +0200256 * called from a mux when it is attached to a stream or a health-check.
257 */
Willy Tarreau31219282022-05-27 16:21:33 +0200258int sc_attach_mux(struct stconn *sc, void *sd, void *ctx)
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100259{
Christopher Faulet93882042022-01-19 14:56:50 +0100260 struct connection *conn = ctx;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200261 struct sedesc *sedesc = sc->sedesc;
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100262
Willy Tarreau0adb2812022-05-27 10:02:48 +0200263 if (sc_strm(sc)) {
264 if (!sc->wait_event.tasklet) {
265 sc->wait_event.tasklet = tasklet_new();
266 if (!sc->wait_event.tasklet)
Christopher Faulet2f35e7b2022-03-31 11:09:28 +0200267 return -1;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200268 sc->wait_event.tasklet->process = sc_conn_io_cb;
269 sc->wait_event.tasklet->context = sc;
270 sc->wait_event.events = 0;
Christopher Faulet2f35e7b2022-03-31 11:09:28 +0200271 }
272
Willy Tarreau0adb2812022-05-27 10:02:48 +0200273 sc->app_ops = &sc_app_conn_ops;
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100274 }
Willy Tarreau0adb2812022-05-27 10:02:48 +0200275 else if (sc_check(sc)) {
276 if (!sc->wait_event.tasklet) {
277 sc->wait_event.tasklet = tasklet_new();
278 if (!sc->wait_event.tasklet)
Christopher Fauletc95eaef2022-05-18 15:57:15 +0200279 return -1;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200280 sc->wait_event.tasklet->process = srv_chk_io_cb;
281 sc->wait_event.tasklet->context = sc;
282 sc->wait_event.events = 0;
Christopher Fauletc95eaef2022-05-18 15:57:15 +0200283 }
284
Willy Tarreau0adb2812022-05-27 10:02:48 +0200285 sc->app_ops = &sc_app_check_ops;
Christopher Fauletc95eaef2022-05-18 15:57:15 +0200286 }
Willy Tarreaue2f79462023-03-20 19:45:41 +0100287
288 sedesc->se = sd;
289 sedesc->conn = ctx;
290 se_fl_set(sedesc, SE_FL_T_MUX);
291 se_fl_clr(sedesc, SE_FL_DETACHED);
292 if (!conn->ctx)
293 conn->ctx = sc;
Christopher Faulet070b91b2022-03-31 19:27:18 +0200294 return 0;
Christopher Faulet93882042022-01-19 14:56:50 +0100295}
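
/* Illustrative sketch (not part of the original file): a mux that has just
 * created its stream endpoint <sd> would typically bind it to an existing
 * connector like this; <conn> is the struct connection owned by that mux.
 *
 *	if (sc_attach_mux(sc, sd, conn) < 0)
 *		return -1;  // tasklet allocation failed, nothing was attached
 *	// SE_FL_T_MUX is now set and conn->ctx points to the connector
 *	// (unless conn->ctx was already set by the caller)
 */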
296
Willy Tarreau4596fe22022-05-17 19:07:51 +0200297/* Attaches a stconn to an applet endpoint and sets the endpoint
Ilya Shipitsin3b64a282022-07-29 22:26:53 +0500298 * ctx. The SE_FL_DETACHED flag is
Christopher Faulet9ed77422022-04-12 08:51:15 +0200299 * removed. This function is called by a stream when a backend applet is
 300 * registered.
301 */
Willy Tarreau31219282022-05-27 16:21:33 +0200302static void sc_attach_applet(struct stconn *sc, void *sd)
Christopher Faulet93882042022-01-19 14:56:50 +0100303{
Willy Tarreau31219282022-05-27 16:21:33 +0200304 sc->sedesc->se = sd;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200305 sc_ep_set(sc, SE_FL_T_APPLET);
306 sc_ep_clr(sc, SE_FL_DETACHED);
307 if (sc_strm(sc))
308 sc->app_ops = &sc_app_applet_ops;
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100309}
310
Willy Tarreau4596fe22022-05-17 19:07:51 +0200311/* Attaches a stconn to an app layer and sets the relevant
Willy Tarreaub605c422022-05-17 17:04:55 +0200312 * callbacks. Returns -1 on error and 0 on success. The SE_FL_ORPHAN flag is
Christopher Faulet9ed77422022-04-12 08:51:15 +0200313 * removed. This function is called by a stream when it is created, to attach it
Willy Tarreau4596fe22022-05-17 19:07:51 +0200314 * to the stream connector on the client side.
Christopher Faulet9ed77422022-04-12 08:51:15 +0200315 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200316int sc_attach_strm(struct stconn *sc, struct stream *strm)
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100317{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200318 sc->app = &strm->obj_type;
319 sc_ep_clr(sc, SE_FL_ORPHAN);
320 if (sc_ep_test(sc, SE_FL_T_MUX)) {
321 sc->wait_event.tasklet = tasklet_new();
322 if (!sc->wait_event.tasklet)
Christopher Faulet2f35e7b2022-03-31 11:09:28 +0200323 return -1;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200324 sc->wait_event.tasklet->process = sc_conn_io_cb;
325 sc->wait_event.tasklet->context = sc;
326 sc->wait_event.events = 0;
Christopher Faulet2f35e7b2022-03-31 11:09:28 +0200327
Willy Tarreau0adb2812022-05-27 10:02:48 +0200328 sc->app_ops = &sc_app_conn_ops;
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100329 }
Willy Tarreau0adb2812022-05-27 10:02:48 +0200330 else if (sc_ep_test(sc, SE_FL_T_APPLET)) {
331 sc->app_ops = &sc_app_applet_ops;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100332 }
333 else {
Willy Tarreau0adb2812022-05-27 10:02:48 +0200334 sc->app_ops = &sc_app_embedded_ops;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100335 }
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100336 return 0;
337}
338
Willy Tarreau4596fe22022-05-17 19:07:51 +0200339/* Detaches the stconn from the endpoint, if any. For a connection, if a
Christopher Faulet9ed77422022-04-12 08:51:15 +0200340 * mux owns the connection, its ->detach() callback is called. Otherwise, it means
Willy Tarreau4596fe22022-05-17 19:07:51 +0200341 * the stream connector owns the connection. In this case the connection is closed
Christopher Faulet9ed77422022-04-12 08:51:15 +0200342 * and released. For an applet, the appctx is released. If still allocated, the
 343 * endpoint is reset and flagged as detached. If the app layer is also detached,
Willy Tarreau4596fe22022-05-17 19:07:51 +0200344 * the stream connector is released.
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100345 */
Willy Tarreaue68bc612022-05-27 11:23:05 +0200346static void sc_detach_endp(struct stconn **scp)
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100347{
Willy Tarreaue68bc612022-05-27 11:23:05 +0200348 struct stconn *sc = *scp;
Christopher Fauleteb50c012022-04-21 14:22:53 +0200349
Willy Tarreau0adb2812022-05-27 10:02:48 +0200350 if (!sc)
Christopher Fauleteb50c012022-04-21 14:22:53 +0200351 return;
352
Willy Tarreau0adb2812022-05-27 10:02:48 +0200353 if (sc_ep_test(sc, SE_FL_T_MUX)) {
354 struct connection *conn = __sc_conn(sc);
355 struct sedesc *sedesc = sc->sedesc;
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100356
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100357 if (conn->mux) {
Willy Tarreau0adb2812022-05-27 10:02:48 +0200358 if (sc->wait_event.events != 0)
359 conn->mux->unsubscribe(sc, sc->wait_event.events, &sc->wait_event);
Willy Tarreau798465b2022-05-17 18:20:02 +0200360 se_fl_set(sedesc, SE_FL_ORPHAN);
Willy Tarreauc1054922022-05-18 07:43:52 +0200361 sedesc->sc = NULL;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200362 sc->sedesc = NULL;
Willy Tarreau798465b2022-05-17 18:20:02 +0200363 conn->mux->detach(sedesc);
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100364 }
365 else {
366 /* It's too early to have a mux, let's just destroy
367 * the connection
368 */
369 conn_stop_tracking(conn);
370 conn_full_close(conn);
371 if (conn->destroy_cb)
372 conn->destroy_cb(conn);
373 conn_free(conn);
374 }
375 }
Willy Tarreau0adb2812022-05-27 10:02:48 +0200376 else if (sc_ep_test(sc, SE_FL_T_APPLET)) {
377 struct appctx *appctx = __sc_appctx(sc);
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100378
Willy Tarreau0adb2812022-05-27 10:02:48 +0200379 sc_ep_set(sc, SE_FL_ORPHAN);
380 sc->sedesc->sc = NULL;
381 sc->sedesc = NULL;
Willy Tarreau1c3ead42022-05-10 19:42:22 +0200382 appctx_shut(appctx);
383 appctx_free(appctx);
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100384 }
385
Willy Tarreau0adb2812022-05-27 10:02:48 +0200386 if (sc->sedesc) {
Willy Tarreauda59c892022-05-27 17:03:34 +0200387 /* the SD wasn't used and can be recycled */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200388 sc->sedesc->se = NULL;
389 sc->sedesc->conn = NULL;
Willy Tarreauda59c892022-05-27 17:03:34 +0200390 sc->sedesc->flags = 0;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200391 sc_ep_set(sc, SE_FL_DETACHED);
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100392 }
393
Willy Tarreaue68bc612022-05-27 11:23:05 +0200394 /* FIXME: Reset the SC for now but this must be reviewed. SC flags are only
Christopher Fauletc36de9d2022-01-06 08:44:58 +0100395 * connection related for now but this will evolve
396 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200397 sc->flags &= SC_FL_ISBACK;
398 if (sc_strm(sc))
399 sc->app_ops = &sc_app_embedded_ops;
Willy Tarreau2f2318d2022-05-18 10:17:16 +0200400 else
Willy Tarreau0adb2812022-05-27 10:02:48 +0200401 sc->app_ops = NULL;
Willy Tarreaue68bc612022-05-27 11:23:05 +0200402 sc_free_cond(scp);
Christopher Fauletc36de9d2022-01-06 08:44:58 +0100403}
404
Willy Tarreau4596fe22022-05-17 19:07:51 +0200405/* Detaches the stconn from the app layer. If there is no endpoint attached
 406 * to the stconn, it is also released.
Christopher Faulet9ed77422022-04-12 08:51:15 +0200407 */
Willy Tarreaue68bc612022-05-27 11:23:05 +0200408static void sc_detach_app(struct stconn **scp)
Christopher Fauletc36de9d2022-01-06 08:44:58 +0100409{
Willy Tarreaue68bc612022-05-27 11:23:05 +0200410 struct stconn *sc = *scp;
Christopher Fauleteb50c012022-04-21 14:22:53 +0200411
Willy Tarreau0adb2812022-05-27 10:02:48 +0200412 if (!sc)
Christopher Fauleteb50c012022-04-21 14:22:53 +0200413 return;
414
Willy Tarreau0adb2812022-05-27 10:02:48 +0200415 sc->app = NULL;
416 sc->app_ops = NULL;
417 sockaddr_free(&sc->src);
418 sockaddr_free(&sc->dst);
Christopher Faulet2f35e7b2022-03-31 11:09:28 +0200419
Willy Tarreau0adb2812022-05-27 10:02:48 +0200420 if (sc->wait_event.tasklet)
421 tasklet_free(sc->wait_event.tasklet);
422 sc->wait_event.tasklet = NULL;
423 sc->wait_event.events = 0;
Willy Tarreaue68bc612022-05-27 11:23:05 +0200424 sc_free_cond(scp);
Christopher Fauleteb50c012022-04-21 14:22:53 +0200425}
426
Willy Tarreau4596fe22022-05-17 19:07:51 +0200427/* Destroys the stconn. It is detached from its endpoint and its
428 * application. After this call, the stconn must be considered as released.
Christopher Fauleteb50c012022-04-21 14:22:53 +0200429 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200430void sc_destroy(struct stconn *sc)
Christopher Fauleteb50c012022-04-21 14:22:53 +0200431{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200432 sc_detach_endp(&sc);
433 sc_detach_app(&sc);
434 BUG_ON_HOT(sc);
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100435}
Christopher Faulet9ec2f4d2022-03-23 15:15:29 +0100436
Willy Tarreau4596fe22022-05-17 19:07:51 +0200437/* Resets the stream connector endpoint. It happens when the app layer wants to
Christopher Faulet9ed77422022-04-12 08:51:15 +0200438 * renew its endpoint, for a connection retry for instance. If a mux or an applet
Ilya Shipitsin3b64a282022-07-29 22:26:53 +0500439 * is attached, a new endpoint is created. Returns -1 on error and 0 on success.
Christopher Fauleta6c4a482022-04-28 18:25:24 +0200440 *
Willy Tarreaub605c422022-05-17 17:04:55 +0200441 * Only the SE_FL_ERROR flag is removed on the endpoint. Other flags are preserved.
Christopher Fauleta6c4a482022-04-28 18:25:24 +0200442 * It is the caller's responsibility to remove other flags if needed.
Christopher Faulet9ed77422022-04-12 08:51:15 +0200443 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200444int sc_reset_endp(struct stconn *sc)
Christopher Faulet9ec2f4d2022-03-23 15:15:29 +0100445{
Willy Tarreau31219282022-05-27 16:21:33 +0200446 struct sedesc *new_sd;
Christopher Fauletb041b232022-03-24 10:27:02 +0100447
Willy Tarreau0adb2812022-05-27 10:02:48 +0200448 BUG_ON(!sc->app);
Christopher Fauleta6c4a482022-04-28 18:25:24 +0200449
Willy Tarreau0adb2812022-05-27 10:02:48 +0200450 sc_ep_clr(sc, SE_FL_ERROR);
451 if (!__sc_endp(sc)) {
Christopher Fauletb041b232022-03-24 10:27:02 +0100452 /* endpoint not attached or attached to a mux with no
 453 * target. Thus the endpoint will not be released but just
Willy Tarreau0adb2812022-05-27 10:02:48 +0200454 * reset. The app is still attached, the sc will not be
Christopher Fauleteb50c012022-04-21 14:22:53 +0200455 * released.
Christopher Fauletb041b232022-03-24 10:27:02 +0100456 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200457 sc_detach_endp(&sc);
Christopher Fauletb041b232022-03-24 10:27:02 +0100458 return 0;
Christopher Faulet9ec2f4d2022-03-23 15:15:29 +0100459 }
Christopher Fauletb041b232022-03-24 10:27:02 +0100460
461 /* allocate the new endpoint first to be able to set error if it
462 * fails */
Willy Tarreau31219282022-05-27 16:21:33 +0200463 new_sd = sedesc_new();
 464 if (unlikely(!new_sd)) {
Willy Tarreau0adb2812022-05-27 10:02:48 +0200465 sc_ep_set(sc, SE_FL_ERROR);
Christopher Fauletb041b232022-03-24 10:27:02 +0100466 return -1;
467 }
468
Willy Tarreau0adb2812022-05-27 10:02:48 +0200469 /* The app is still attached, the sc will not be released */
470 sc_detach_endp(&sc);
Willy Tarreau6a378d12022-08-11 13:56:42 +0200471 BUG_ON(!sc);
Willy Tarreau0adb2812022-05-27 10:02:48 +0200472 BUG_ON(sc->sedesc);
Willy Tarreau31219282022-05-27 16:21:33 +0200473 sc->sedesc = new_sd;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200474 sc->sedesc->sc = sc;
475 sc_ep_set(sc, SE_FL_DETACHED);
Christopher Faulet9ec2f4d2022-03-23 15:15:29 +0100476 return 0;
477}
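
/* Illustrative sketch (not part of the original file): on a connection retry
 * the stream code would typically renew the server-side endpoint this way,
 * where <scb> is assumed to be the back stream connector.
 *
 *	if (sc_reset_endp(scb) < 0)
 *		return -1;  // allocation failed, SE_FL_ERROR was set
 *	// scb now carries a fresh, detached endpoint descriptor
 */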
Christopher Faulet37046632022-04-01 11:36:58 +0200478
479
Willy Tarreaue68bc612022-05-27 11:23:05 +0200480/* Create an applet to handle a stream connector as a new appctx. The SC will
Christopher Faulet37046632022-04-01 11:36:58 +0200481 * wake it up every time it is solicited. The appctx must be deleted by the task
Willy Tarreau19c65a92022-05-27 08:49:24 +0200482 * handler using sc_detach_endp(), possibly from within the function itself.
Christopher Faulet37046632022-04-01 11:36:58 +0200483 * It also pre-initializes the applet's context and returns it (or NULL in case
484 * it could not be allocated).
485 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200486struct appctx *sc_applet_create(struct stconn *sc, struct applet *app)
Christopher Faulet37046632022-04-01 11:36:58 +0200487{
488 struct appctx *appctx;
489
Willy Tarreau0adb2812022-05-27 10:02:48 +0200490 appctx = appctx_new_here(app, sc->sedesc);
Christopher Faulet37046632022-04-01 11:36:58 +0200491 if (!appctx)
492 return NULL;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200493 sc_attach_applet(sc, appctx);
494 appctx->t->nice = __sc_strm(sc)->task->nice;
Willy Tarreau90e8b452022-05-25 18:21:43 +0200495 applet_need_more_data(appctx);
Christopher Faulet37046632022-04-01 11:36:58 +0200496 appctx_wakeup(appctx);
Christopher Fauleta33ff7a2022-04-21 11:52:07 +0200497
Willy Tarreau0adb2812022-05-27 10:02:48 +0200498 sc->state = SC_ST_RDY;
Christopher Faulet37046632022-04-01 11:36:58 +0200499 return appctx;
500}
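
/* Illustrative sketch (not part of the original file): registering a backend
 * applet on an existing stream connector; <app> is assumed to point to a
 * registered struct applet.
 *
 *	struct appctx *appctx = sc_applet_create(sc, app);
 *	if (!appctx)
 *		return -1;  // appctx allocation failed, sc is left unchanged
 *	// the connector is now in SC_ST_RDY and the applet will be woken up
 */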
501
Ilya Shipitsin07be66d2023-04-01 12:26:42 +0200502/* Conditionally forward the close to the write side. It returns 1 if it can be
Christopher Fauleteb3f26d2023-02-08 16:18:48 +0100503 * forwarded; it is then the caller's responsibility to forward the close to the
Christopher Faulete38534c2023-04-13 15:45:24 +0200504 * write side. Otherwise, 0 is returned. In this case, the SC_FL_SHUT_WANTED flag may
Christopher Faulet87633c32023-04-03 18:32:50 +0200505 * be set on the consumer SC if we are only waiting for the outgoing data to be flushed.
Christopher Fauleteb3f26d2023-02-08 16:18:48 +0100506 */
507static inline int sc_cond_forward_shutw(struct stconn *sc)
508{
509 /* The close must not be forwarded */
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200510 if (!(sc->flags & SC_FL_SHUTR) || !(sc->flags & SC_FL_NOHALF))
Christopher Fauleteb3f26d2023-02-08 16:18:48 +0100511 return 0;
512
513 if (!channel_is_empty(sc_ic(sc))) {
Christopher Fauletdf7cd712023-04-13 15:56:26 +0200514 /* the shutdown cannot be forwarded now because
Christopher Fauleteb3f26d2023-02-08 16:18:48 +0100515 * we should flush outgoing data first. But instruct the output
516 * channel it should be done ASAP.
517 */
Christopher Fauletdf7cd712023-04-13 15:56:26 +0200518 sc_schedule_shutdown(sc);
Christopher Fauleteb3f26d2023-02-08 16:18:48 +0100519 return 0;
520 }
521
522 /* the close can be immediately forwarded to the write side */
523 return 1;
524}
525
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200526/*
Willy Tarreau4596fe22022-05-17 19:07:51 +0200527 * This function performs a shutdown-read on a detached stream connector in a
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200528 * connected or init state (it does nothing for other states). It either shuts
529 * the read side or marks itself as closed. The buffer flags are updated to
Willy Tarreaucb041662022-05-17 19:44:42 +0200530 * reflect the new state. If the stream connector has SC_FL_NOHALF, we also
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200531 * forward the close to the write side. The owner task is woken up if it exists.
532 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200533static void sc_app_shutr(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200534{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200535 struct channel *ic = sc_ic(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200536
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200537 if (sc->flags & SC_FL_SHUTR)
Christopher Fauletc665bb52023-04-04 10:06:57 +0200538 return;
Christopher Faulet87633c32023-04-03 18:32:50 +0200539
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200540 sc->flags |= SC_FL_SHUTR;
Christopher Faulet87633c32023-04-03 18:32:50 +0200541 ic->flags |= CF_READ_EVENT;
Christopher Faulet4c135682023-02-16 11:09:31 +0100542 sc_ep_report_read_activity(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200543
Willy Tarreau0adb2812022-05-27 10:02:48 +0200544 if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200545 return;
546
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200547 if (sc->flags & SC_FL_SHUTW) {
Willy Tarreau0adb2812022-05-27 10:02:48 +0200548 sc->state = SC_ST_DIS;
Christopher Fauletca679922022-07-20 13:24:04 +0200549 if (sc->flags & SC_FL_ISBACK)
550 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200551 }
Christopher Fauleteb3f26d2023-02-08 16:18:48 +0100552 else if (sc_cond_forward_shutw(sc))
Willy Tarreau0adb2812022-05-27 10:02:48 +0200553 return sc_app_shutw(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200554
555 /* note that if the task exists, it must unregister itself once it runs */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200556 if (!(sc->flags & SC_FL_DONT_WAKE))
557 task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200558}
559
560/*
Willy Tarreau4596fe22022-05-17 19:07:51 +0200561 * This function performs a shutdown-write on a detached stream connector in a
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200562 * connected or init state (it does nothing for other states). It either shuts
563 * the write side or marks itself as closed. The buffer flags are updated to
Willy Tarreaue68bc612022-05-27 11:23:05 +0200564 * reflect the new state. It does also close everything if the SC was marked as
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200565 * being in error state. The owner task is woken up if it exists.
566 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200567static void sc_app_shutw(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200568{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200569 struct channel *ic = sc_ic(sc);
570 struct channel *oc = sc_oc(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200571
Christopher Faulete38534c2023-04-13 15:45:24 +0200572 sc->flags &= ~SC_FL_SHUT_WANTED;
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200573 if (sc->flags & SC_FL_SHUTW)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200574 return;
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200575 sc->flags |= SC_FL_SHUTW;
Christopher Faulet87633c32023-04-03 18:32:50 +0200576 oc->flags |= CF_WRITE_EVENT;
Christopher Fauletbcdcfad2023-02-20 08:36:53 +0100577 sc_set_hcto(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200578
Willy Tarreau0adb2812022-05-27 10:02:48 +0200579 switch (sc->state) {
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200580 case SC_ST_RDY:
581 case SC_ST_EST:
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200582 /* we have to shut before closing, otherwise some short messages
583 * may never leave the system, especially when there are remaining
584 * unread data in the socket input buffer, or when nolinger is set.
Willy Tarreaucb041662022-05-17 19:44:42 +0200585 * However, if SC_FL_NOLINGER is explicitly set, we know there is
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200586 * no risk so we close both sides immediately.
587 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200588 if (!sc_ep_test(sc, SE_FL_ERROR) && !(sc->flags & SC_FL_NOLINGER) &&
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200589 !(sc->flags & SC_FL_SHUTR) && !(ic->flags & CF_DONT_READ))
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200590 return;
591
Willy Tarreau476c2802022-11-14 07:36:42 +0100592 __fallthrough;
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200593 case SC_ST_CON:
594 case SC_ST_CER:
595 case SC_ST_QUE:
596 case SC_ST_TAR:
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200597 /* Note that none of these states may happen with applets */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200598 sc->state = SC_ST_DIS;
Willy Tarreau476c2802022-11-14 07:36:42 +0100599 __fallthrough;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200600 default:
Willy Tarreau0adb2812022-05-27 10:02:48 +0200601 sc->flags &= ~SC_FL_NOLINGER;
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200602 sc->flags |= SC_FL_SHUTR;
Christopher Fauletca679922022-07-20 13:24:04 +0200603 if (sc->flags & SC_FL_ISBACK)
604 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200605 }
606
607 /* note that if the task exists, it must unregister itself once it runs */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200608 if (!(sc->flags & SC_FL_DONT_WAKE))
609 task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200610}
611
612/* default chk_rcv function for scheduled tasks */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200613static void sc_app_chk_rcv(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200614{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200615 struct channel *ic = sc_ic(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200616
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200617 if (ic->pipe) {
618 /* stop reading */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200619 sc_need_room(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200620 }
621 else {
622 /* (re)start reading */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200623 if (!(sc->flags & SC_FL_DONT_WAKE))
624 task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200625 }
626}
627
628/* default chk_snd function for scheduled tasks */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200629static void sc_app_chk_snd(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200630{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200631 struct channel *oc = sc_oc(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200632
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200633 if (unlikely(sc->state != SC_ST_EST || (sc->flags & SC_FL_SHUTW)))
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200634 return;
635
Willy Tarreau0adb2812022-05-27 10:02:48 +0200636 if (!sc_ep_test(sc, SE_FL_WAIT_DATA) || /* not waiting for data */
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200637 channel_is_empty(oc)) /* called with nothing to send ! */
638 return;
639
640 /* Otherwise there are remaining data to be sent in the buffer,
641 * so we tell the handler.
642 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200643 sc_ep_clr(sc, SE_FL_WAIT_DATA);
Willy Tarreau0adb2812022-05-27 10:02:48 +0200644 if (!(sc->flags & SC_FL_DONT_WAKE))
645 task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200646}
647
648/*
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200649 * This function performs a shutdown-read on a stream connector attached to
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200650 * a connection in a connected or init state (it does nothing for other
651 * states). It either shuts the read side or marks itself as closed. The buffer
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200652 * flags are updated to reflect the new state. If the stream connector has
Willy Tarreaucb041662022-05-17 19:44:42 +0200653 * SC_FL_NOHALF, we also forward the close to the write side. If a control
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200654 * layer is defined, then it is supposed to be a socket layer and file
655 * descriptors are then shutdown or closed accordingly. The function
656 * automatically disables polling if needed.
657 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200658static void sc_app_shutr_conn(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200659{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200660 struct channel *ic = sc_ic(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200661
Willy Tarreau0adb2812022-05-27 10:02:48 +0200662 BUG_ON(!sc_conn(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200663
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200664 if (sc->flags & SC_FL_SHUTR)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200665 return;
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200666 sc->flags |= SC_FL_SHUTR;
Christopher Faulet87633c32023-04-03 18:32:50 +0200667 ic->flags |= CF_READ_EVENT;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200668
Willy Tarreau0adb2812022-05-27 10:02:48 +0200669 if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200670 return;
671
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200672 if (sc->flags & SC_FL_SHUTW) {
Willy Tarreau0adb2812022-05-27 10:02:48 +0200673 sc_conn_shut(sc);
674 sc->state = SC_ST_DIS;
Christopher Fauletca679922022-07-20 13:24:04 +0200675 if (sc->flags & SC_FL_ISBACK)
676 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200677 }
Christopher Fauleteb3f26d2023-02-08 16:18:48 +0100678 else if (sc_cond_forward_shutw(sc))
Willy Tarreau0adb2812022-05-27 10:02:48 +0200679 return sc_app_shutw_conn(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200680}
681
682/*
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200683 * This function performs a shutdown-write on a stream connector attached to
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200684 * a connection in a connected or init state (it does nothing for other
685 * states). It either shuts the write side or marks itself as closed. The
686 * buffer flags are updated to reflect the new state. It does also close
Willy Tarreaue68bc612022-05-27 11:23:05 +0200687 * everything if the SC was marked as being in error state. If there is a
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200688 * data-layer shutdown, it is called.
689 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200690static void sc_app_shutw_conn(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200691{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200692 struct channel *ic = sc_ic(sc);
693 struct channel *oc = sc_oc(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200694
Willy Tarreau0adb2812022-05-27 10:02:48 +0200695 BUG_ON(!sc_conn(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200696
Christopher Faulete38534c2023-04-13 15:45:24 +0200697 sc->flags &= ~SC_FL_SHUT_WANTED;
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200698 if (sc->flags & SC_FL_SHUTW)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200699 return;
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200700 sc->flags |= SC_FL_SHUTW;
Christopher Faulet87633c32023-04-03 18:32:50 +0200701 oc->flags |= CF_WRITE_EVENT;
Christopher Fauletbcdcfad2023-02-20 08:36:53 +0100702 sc_set_hcto(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200703
Willy Tarreau0adb2812022-05-27 10:02:48 +0200704 switch (sc->state) {
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200705 case SC_ST_RDY:
706 case SC_ST_EST:
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200707 /* we have to shut before closing, otherwise some short messages
708 * may never leave the system, especially when there are remaining
709 * unread data in the socket input buffer, or when nolinger is set.
Willy Tarreaucb041662022-05-17 19:44:42 +0200710 * However, if SC_FL_NOLINGER is explicitly set, we know there is
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200711 * no risk so we close both sides immediately.
712 */
713
Willy Tarreau0adb2812022-05-27 10:02:48 +0200714 if (sc_ep_test(sc, SE_FL_ERROR)) {
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200715 /* quick close, the socket is already shut anyway */
716 }
Willy Tarreau0adb2812022-05-27 10:02:48 +0200717 else if (sc->flags & SC_FL_NOLINGER) {
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200718 /* unclean data-layer shutdown, typically an aborted request
719 * or a forwarded shutdown from a client to a server due to
720 * option abortonclose. No need for the TLS layer to try to
721 * emit a shutdown message.
722 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200723 sc_conn_shutw(sc, CO_SHW_SILENT);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200724 }
725 else {
726 /* clean data-layer shutdown. This only happens on the
727 * frontend side, or on the backend side when forwarding
728 * a client close in TCP mode or in HTTP TUNNEL mode
729 * while option abortonclose is set. We want the TLS
730 * layer to try to signal it to the peer before we close.
731 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200732 sc_conn_shutw(sc, CO_SHW_NORMAL);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200733
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200734 if (!(sc->flags & SC_FL_SHUTR) && !(ic->flags & CF_DONT_READ))
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200735 return;
736 }
737
Willy Tarreau476c2802022-11-14 07:36:42 +0100738 __fallthrough;
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200739 case SC_ST_CON:
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200740 /* we may have to close a pending connection, and mark the
741 * response buffer as shutr
742 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200743 sc_conn_shut(sc);
Willy Tarreau476c2802022-11-14 07:36:42 +0100744 __fallthrough;
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200745 case SC_ST_CER:
746 case SC_ST_QUE:
747 case SC_ST_TAR:
Willy Tarreau0adb2812022-05-27 10:02:48 +0200748 sc->state = SC_ST_DIS;
Willy Tarreau476c2802022-11-14 07:36:42 +0100749 __fallthrough;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200750 default:
Willy Tarreau0adb2812022-05-27 10:02:48 +0200751 sc->flags &= ~SC_FL_NOLINGER;
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200752 sc->flags |= SC_FL_SHUTR;
Christopher Fauletca679922022-07-20 13:24:04 +0200753 if (sc->flags & SC_FL_ISBACK)
754 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200755 }
756}
757
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200758/* This function is used for inter-stream connector calls. It is called by the
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200759 * consumer to inform the producer side that it may be interested in checking
760 * for free space in the buffer. Note that it intentionally does not update
761 * timeouts, so that we can still check them later at wake-up. This function is
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200762 * dedicated to connection-based stream connectors.
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200763 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200764static void sc_app_chk_rcv_conn(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200765{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200766 BUG_ON(!sc_conn(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200767
768 /* (re)start reading */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200769 if (sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
770 tasklet_wakeup(sc->wait_event.tasklet);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200771}
772
773
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200774/* This function is used for inter-stream connector calls. It is called by the
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200775 * producer to inform the consumer side that it may be interested in checking
776 * for data in the buffer. Note that it intentionally does not update timeouts,
777 * so that we can still check them later at wake-up.
778 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200779static void sc_app_chk_snd_conn(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200780{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200781 struct channel *oc = sc_oc(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200782
Willy Tarreau0adb2812022-05-27 10:02:48 +0200783 BUG_ON(!sc_conn(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200784
Willy Tarreau0adb2812022-05-27 10:02:48 +0200785 if (unlikely(!sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST) ||
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200786 (sc->flags & SC_FL_SHUTW)))
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200787 return;
788
789 if (unlikely(channel_is_empty(oc))) /* called with nothing to send ! */
790 return;
791
792 if (!oc->pipe && /* spliced data wants to be forwarded ASAP */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200793 !sc_ep_test(sc, SE_FL_WAIT_DATA)) /* not waiting for data */
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200794 return;
795
Willy Tarreau0adb2812022-05-27 10:02:48 +0200796 if (!(sc->wait_event.events & SUB_RETRY_SEND) && !channel_is_empty(sc_oc(sc)))
797 sc_conn_send(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200798
Willy Tarreau0adb2812022-05-27 10:02:48 +0200799 if (sc_ep_test(sc, SE_FL_ERROR | SE_FL_ERR_PENDING) || sc_is_conn_error(sc)) {
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200800 /* Write error on the file descriptor */
Christopher Faulet7f6aa562022-10-17 10:21:19 +0200801 if (sc->state >= SC_ST_CON && sc_ep_test(sc, SE_FL_EOS))
Willy Tarreau0adb2812022-05-27 10:02:48 +0200802 sc_ep_set(sc, SE_FL_ERROR);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200803 goto out_wakeup;
804 }
805
806 /* OK, so now we know that some data might have been sent, and that we may
807 * have to poll first. We have to do that too if the buffer is not empty.
808 */
809 if (channel_is_empty(oc)) {
810 /* the connection is established but we can't write. Either the
811 * buffer is empty, or we just refrain from sending because the
812 * ->o limit was reached. Maybe we just wrote the last
813 * chunk and need to close.
814 */
Christopher Faulet87633c32023-04-03 18:32:50 +0200815 if ((oc->flags & CF_AUTO_CLOSE) &&
Christopher Faulete38534c2023-04-13 15:45:24 +0200816 ((sc->flags & (SC_FL_SHUTW|SC_FL_SHUT_WANTED)) == SC_FL_SHUT_WANTED) &&
Willy Tarreau0adb2812022-05-27 10:02:48 +0200817 sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST)) {
818 sc_shutw(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200819 goto out_wakeup;
820 }
821
Christopher Faulete38534c2023-04-13 15:45:24 +0200822 if ((sc->flags & (SC_FL_SHUTW|SC_FL_SHUT_WANTED)) == 0)
Willy Tarreau0adb2812022-05-27 10:02:48 +0200823 sc_ep_set(sc, SE_FL_WAIT_DATA);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200824 }
825 else {
826 /* Otherwise there are remaining data to be sent in the buffer,
827 * which means we have to poll before doing so.
828 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200829 sc_ep_clr(sc, SE_FL_WAIT_DATA);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200830 }
831
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200832 /* in case of special condition (error, shutdown, end of write...), we
833 * have to notify the task.
834 */
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200835 if (likely((sc->flags & SC_FL_SHUTW) ||
Christopher Faulet71c486b2023-02-09 14:14:38 +0100836 ((oc->flags & CF_WRITE_EVENT) && sc->state < SC_ST_EST) ||
837 ((oc->flags & CF_WAKE_WRITE) &&
838 ((channel_is_empty(oc) && !oc->to_forward) ||
839 !sc_state_in(sc->state, SC_SB_EST))))) {
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200840 out_wakeup:
Willy Tarreau0adb2812022-05-27 10:02:48 +0200841 if (!(sc->flags & SC_FL_DONT_WAKE))
842 task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200843 }
844}
845
846/*
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200847 * This function performs a shutdown-read on a stream connector attached to an
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200848 * applet in a connected or init state (it does nothing for other states). It
849 * either shuts the read side or marks itself as closed. The buffer flags are
Willy Tarreaucb041662022-05-17 19:44:42 +0200850 * updated to reflect the new state. If the stream connector has SC_FL_NOHALF,
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200851 * we also forward the close to the write side. The owner task is woken up if
852 * it exists.
853 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200854static void sc_app_shutr_applet(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200855{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200856 struct channel *ic = sc_ic(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200857
Willy Tarreau0adb2812022-05-27 10:02:48 +0200858 BUG_ON(!sc_appctx(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200859
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200860 if (sc->flags & SC_FL_SHUTR)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200861 return;
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200862 sc->flags |= SC_FL_SHUTR;
Christopher Faulet87633c32023-04-03 18:32:50 +0200863 ic->flags |= CF_READ_EVENT;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200864
865 /* Note: on shutr, we don't call the applet */
866
Willy Tarreau0adb2812022-05-27 10:02:48 +0200867 if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200868 return;
869
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200870 if (sc->flags & SC_FL_SHUTW) {
Willy Tarreau0adb2812022-05-27 10:02:48 +0200871 appctx_shut(__sc_appctx(sc));
872 sc->state = SC_ST_DIS;
Christopher Fauletca679922022-07-20 13:24:04 +0200873 if (sc->flags & SC_FL_ISBACK)
874 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200875 }
Christopher Fauleteb3f26d2023-02-08 16:18:48 +0100876 else if (sc_cond_forward_shutw(sc))
Willy Tarreau0adb2812022-05-27 10:02:48 +0200877 return sc_app_shutw_applet(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200878}
879
880/*
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200881 * This function performs a shutdown-write on a stream connector attached to an
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200882 * applet in a connected or init state (it does nothing for other states). It
883 * either shuts the write side or marks itself as closed. The buffer flags are
 884 * updated to reflect the new state. It also closes everything if the SC
885 * was marked as being in error state. The owner task is woken up if it exists.
886 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200887static void sc_app_shutw_applet(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200888{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200889 struct channel *ic = sc_ic(sc);
890 struct channel *oc = sc_oc(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200891
Willy Tarreau0adb2812022-05-27 10:02:48 +0200892 BUG_ON(!sc_appctx(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200893
Christopher Faulete38534c2023-04-13 15:45:24 +0200894 sc->flags &= ~SC_FL_SHUT_WANTED;
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200895 if (sc->flags & SC_FL_SHUTW)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200896 return;
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200897 sc->flags |= SC_FL_SHUTW;
Christopher Faulet87633c32023-04-03 18:32:50 +0200898 oc->flags |= CF_WRITE_EVENT;
Christopher Fauletbcdcfad2023-02-20 08:36:53 +0100899 sc_set_hcto(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200900
901 /* on shutw we always wake the applet up */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200902 appctx_wakeup(__sc_appctx(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200903
Willy Tarreau0adb2812022-05-27 10:02:48 +0200904 switch (sc->state) {
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200905 case SC_ST_RDY:
906 case SC_ST_EST:
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200907 /* we have to shut before closing, otherwise some short messages
908 * may never leave the system, especially when there are remaining
909 * unread data in the socket input buffer, or when nolinger is set.
Willy Tarreaucb041662022-05-17 19:44:42 +0200910 * However, if SC_FL_NOLINGER is explicitly set, we know there is
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200911 * no risk so we close both sides immediately.
912 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200913 if (!sc_ep_test(sc, SE_FL_ERROR) && !(sc->flags & SC_FL_NOLINGER) &&
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200914 !(sc->flags & SC_FL_SHUTR) &&
Christopher Faulet87633c32023-04-03 18:32:50 +0200915 !(ic->flags & CF_DONT_READ))
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200916 return;
917
Willy Tarreau476c2802022-11-14 07:36:42 +0100918 __fallthrough;
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200919 case SC_ST_CON:
920 case SC_ST_CER:
921 case SC_ST_QUE:
922 case SC_ST_TAR:
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200923 /* Note that none of these states may happen with applets */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200924 appctx_shut(__sc_appctx(sc));
925 sc->state = SC_ST_DIS;
Willy Tarreau476c2802022-11-14 07:36:42 +0100926 __fallthrough;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200927 default:
Willy Tarreau0adb2812022-05-27 10:02:48 +0200928 sc->flags &= ~SC_FL_NOLINGER;
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200929 sc->flags |= SC_FL_SHUTR;
Christopher Fauletca679922022-07-20 13:24:04 +0200930 if (sc->flags & SC_FL_ISBACK)
931 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200932 }
933}
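/* Illustrative sketch (not taken from the original sources): callers do not
 * normally invoke sc_app_shutw_applet() directly but go through the generic
 * shutdown helper, which dispatches via the stream connector's app_ops
 * (assumed to map .shutw to this function for applets, just like
 * sc_app_conn_ops does for connections). With <sc> attached to an applet:
 *
 *     sc->flags |= SC_FL_NOLINGER;   // optional: close both sides at once
 *     sc_shutw(sc);                  // ends up in sc_app_shutw_applet()
 */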
934
935/* chk_rcv function for applets */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200936static void sc_app_chk_rcv_applet(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200937{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200938 struct channel *ic = sc_ic(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200939
Willy Tarreau0adb2812022-05-27 10:02:48 +0200940 BUG_ON(!sc_appctx(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200941
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200942 if (!ic->pipe) {
943 /* (re)start reading */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200944 appctx_wakeup(__sc_appctx(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200945 }
946}
947
948/* chk_snd function for applets */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200949static void sc_app_chk_snd_applet(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200950{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200951 struct channel *oc = sc_oc(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200952
Willy Tarreau0adb2812022-05-27 10:02:48 +0200953 BUG_ON(!sc_appctx(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200954
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200955 if (unlikely(sc->state != SC_ST_EST || (sc->flags & SC_FL_SHUTW)))
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200956 return;
957
Christopher Faulet04f03e12022-06-01 17:35:34 +0200958 /* we only wake the applet up if it was waiting for some data and is ready to consume it */
959 if (!sc_ep_test(sc, SE_FL_WAIT_DATA) || sc_ep_test(sc, SE_FL_WONT_CONSUME))
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200960 return;
961
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200962 if (!channel_is_empty(oc)) {
963 /* (re)start sending */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200964 appctx_wakeup(__sc_appctx(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200965 }
966}
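/* Illustrative sketch (hypothetical, not from the original sources): once some
 * data have been committed to the output channel of an applet-attached SC, the
 * producing side typically ends up doing:
 *
 *     if (!channel_is_empty(sc_oc(sc)))
 *         sc_chk_snd(sc);   // -> sc_app_chk_snd_applet(): appctx_wakeup()
 *                           //    only if SE_FL_WAIT_DATA is set and
 *                           //    SE_FL_WONT_CONSUME is not
 */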
Christopher Faulet13045f02022-04-01 14:23:38 +0200967
968
969/* This function is designed to be called from within the stream handler to
Willy Tarreau4596fe22022-05-17 19:07:51 +0200970 * update the input channel's expiration timer and the stream connector's
Christopher Faulet13045f02022-04-01 14:23:38 +0200971 * Rx flags based on the channel's flags. It needs to be called only once
972 * after the channel's flags have settled down, and before they are cleared,
973 * though it does no harm to call it as often as desired (it just slightly
974 * hurts performance). It must not be called from outside of the stream
975 * handler, as what it does will be used to compute the stream task's
976 * expiration.
977 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200978void sc_update_rx(struct stconn *sc)
Christopher Faulet13045f02022-04-01 14:23:38 +0200979{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200980 struct channel *ic = sc_ic(sc);
Christopher Faulet13045f02022-04-01 14:23:38 +0200981
Christopher Faulet7faac7c2023-04-04 10:05:27 +0200982 if (sc->flags & SC_FL_SHUTR)
Christopher Faulet13045f02022-04-01 14:23:38 +0200983 return;
Christopher Faulet13045f02022-04-01 14:23:38 +0200984
985 /* Read not closed, update FD status and timeout for reads */
986 if (ic->flags & CF_DONT_READ)
Willy Tarreau0adb2812022-05-27 10:02:48 +0200987 sc_wont_read(sc);
Christopher Faulet13045f02022-04-01 14:23:38 +0200988 else
Willy Tarreau0adb2812022-05-27 10:02:48 +0200989 sc_will_read(sc);
Christopher Faulet13045f02022-04-01 14:23:38 +0200990
Willy Tarreau0adb2812022-05-27 10:02:48 +0200991 sc_chk_rcv(sc);
Christopher Faulet13045f02022-04-01 14:23:38 +0200992}
993
994/* This function is designed to be called from within the stream handler to
Willy Tarreau4596fe22022-05-17 19:07:51 +0200995 * update the output channel's expiration timer and the stream connector's
Christopher Faulet13045f02022-04-01 14:23:38 +0200996 * Tx flags based on the channel's flags. It needs to be called only once
997 * after the channel's flags have settled down, and before they are cleared,
998 * though it does no harm to call it as often as desired (it just slightly
999 * hurts performance). It must not be called from outside of the stream
1000 * handler, as what it does will be used to compute the stream task's
1001 * expiration.
1002 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001003void sc_update_tx(struct stconn *sc)
Christopher Faulet13045f02022-04-01 14:23:38 +02001004{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001005 struct channel *oc = sc_oc(sc);
Christopher Faulet13045f02022-04-01 14:23:38 +02001006
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001007 if (sc->flags & SC_FL_SHUTW)
Christopher Faulet13045f02022-04-01 14:23:38 +02001008 return;
1009
1010 /* Write not closed, update FD status and timeout for writes */
1011 if (channel_is_empty(oc)) {
1012 /* stop writing */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001013 if (!sc_ep_test(sc, SE_FL_WAIT_DATA)) {
Christopher Faulete38534c2023-04-13 15:45:24 +02001014 if ((sc->flags & SC_FL_SHUT_WANTED) == 0)
Willy Tarreau0adb2812022-05-27 10:02:48 +02001015 sc_ep_set(sc, SE_FL_WAIT_DATA);
Christopher Faulet13045f02022-04-01 14:23:38 +02001016 }
1017 return;
1018 }
1019
Christopher Faulet15315d62023-02-20 08:23:51 +01001020 /* (re)start writing */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001021 sc_ep_clr(sc, SE_FL_WAIT_DATA);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001022}
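/* Illustrative sketch (hypothetical, not from the original sources): from the
 * stream handler, once the channel flags have settled, both helpers above are
 * typically run for each side (e.g. via sc_update()):
 *
 *     sc_update_rx(sc);   // maps CF_DONT_READ to sc_wont_read()/sc_will_read(),
 *                         // then re-runs sc_chk_rcv()
 *     sc_update_tx(sc);   // sets or clears SE_FL_WAIT_DATA depending on
 *                         // whether output data remain to be sent
 */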
1023
Willy Tarreau19c65a92022-05-27 08:49:24 +02001024/* This function is the equivalent of sc_update() except that it's
Christopher Faulet5e29b762022-04-04 08:58:34 +02001025 * designed to be called from outside the stream handlers, typically the lower
1026 * layers (applets, connections) after I/O completion. After updating the stream
1027 * connector and timeouts, it will try to forward what can be forwarded, then to
1028 * wake the associated task up if an important event requires special handling.
Willy Tarreau15252cd2022-05-25 16:36:21 +02001029 * It may update SE_FL_WAIT_DATA and/or SC_FL_NEED_ROOM, which the callers are
Christopher Faulet5e29b762022-04-04 08:58:34 +02001030 * encouraged to watch to take appropriate action.
Willy Tarreau19c65a92022-05-27 08:49:24 +02001031 * It should not be called from within the stream itself, sc_update()
Christopher Faulet5e29b762022-04-04 08:58:34 +02001032 * is designed for this.
1033 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001034static void sc_notify(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001035{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001036 struct channel *ic = sc_ic(sc);
1037 struct channel *oc = sc_oc(sc);
Willy Tarreaue68bc612022-05-27 11:23:05 +02001038 struct stconn *sco = sc_opposite(sc);
Willy Tarreau0adb2812022-05-27 10:02:48 +02001039 struct task *task = sc_strm_task(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001040
1041 /* process consumer side */
1042 if (channel_is_empty(oc)) {
Willy Tarreau0adb2812022-05-27 10:02:48 +02001043 struct connection *conn = sc_conn(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001044
Christopher Faulete38534c2023-04-13 15:45:24 +02001045 if (((sc->flags & (SC_FL_SHUTW|SC_FL_SHUT_WANTED)) == SC_FL_SHUT_WANTED) &&
Willy Tarreau0adb2812022-05-27 10:02:48 +02001046 (sc->state == SC_ST_EST) && (!conn || !(conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS))))
1047 sc_shutw(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001048 }
1049
1050 /* indicate that we may be waiting for data from the output channel or
Christopher Faulete38534c2023-04-13 15:45:24 +02001051 * we're about to close and can't expect more data if SC_FL_SHUT_WANTED is there.
Christopher Faulet5e29b762022-04-04 08:58:34 +02001052 */
Christopher Faulete38534c2023-04-13 15:45:24 +02001053 if (!(sc->flags & (SC_FL_SHUTW|SC_FL_SHUT_WANTED)))
Willy Tarreau0adb2812022-05-27 10:02:48 +02001054 sc_ep_set(sc, SE_FL_WAIT_DATA);
Christopher Faulete38534c2023-04-13 15:45:24 +02001055 else if ((sc->flags & (SC_FL_SHUTW|SC_FL_SHUT_WANTED)) == SC_FL_SHUT_WANTED)
Willy Tarreau0adb2812022-05-27 10:02:48 +02001056 sc_ep_clr(sc, SE_FL_WAIT_DATA);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001057
Christopher Faulet5e29b762022-04-04 08:58:34 +02001058 if (oc->flags & CF_DONT_READ)
Willy Tarreaue68bc612022-05-27 11:23:05 +02001059 sc_wont_read(sco);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001060 else
Willy Tarreaue68bc612022-05-27 11:23:05 +02001061 sc_will_read(sco);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001062
1063 /* Notify the other side when we've injected data into the IC that
1064 * needs to be forwarded. We can do fast-forwarding as soon as there
1065 * are output data, but we avoid doing this if some of the data are
1066 * not yet scheduled for being forwarded, because it is very likely
1067 * that it will be done again immediately afterwards once the following
Willy Tarreau15252cd2022-05-25 16:36:21 +02001068 * data are parsed (eg: HTTP chunking). We only clear SC_FL_NEED_ROOM
1069 * once we've emptied *some* of the output buffer, and not just when
1070 * there is available room, because applets are often forced to stop
1071 * before the buffer is full. We must not stop based on input data
1072 * alone because an HTTP parser might need more data to complete the
1073 * parsing.
Christopher Faulet5e29b762022-04-04 08:58:34 +02001074 */
1075 if (!channel_is_empty(ic) &&
Willy Tarreaue68bc612022-05-27 11:23:05 +02001076 sc_ep_test(sco, SE_FL_WAIT_DATA) &&
Christopher Faulet84d3ef92023-03-17 15:45:58 +01001077 (!(sc->flags & SC_FL_SND_EXP_MORE) || c_full(ic) || ci_data(ic) == 0 || ic->pipe)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001078 int new_len, last_len;
1079
1080 last_len = co_data(ic);
1081 if (ic->pipe)
1082 last_len += ic->pipe->data;
1083
Willy Tarreaue68bc612022-05-27 11:23:05 +02001084 sc_chk_snd(sco);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001085
1086 new_len = co_data(ic);
1087 if (ic->pipe)
1088 new_len += ic->pipe->data;
1089
1090 /* check if the consumer has freed some space either in the
1091 * buffer or in the pipe.
1092 */
1093 if (new_len < last_len)
Willy Tarreau0adb2812022-05-27 10:02:48 +02001094 sc_have_room(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001095 }
1096
1097 if (!(ic->flags & CF_DONT_READ))
Willy Tarreau0adb2812022-05-27 10:02:48 +02001098 sc_will_read(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001099
Willy Tarreau0adb2812022-05-27 10:02:48 +02001100 sc_chk_rcv(sc);
Willy Tarreaue68bc612022-05-27 11:23:05 +02001101 sc_chk_rcv(sco);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001102
Christopher Faulet5e29b762022-04-04 08:58:34 +02001103 /* wake the task up only when needed */
Christopher Faulet285f7612022-12-12 08:28:55 +01001104 if (/* changes on the production side that must be handled:
Christopher Faulet2e56a732023-01-26 16:18:09 +01001105 * - An error on receipt: SE_FL_ERROR
Christopher Faulet285f7612022-12-12 08:28:55 +01001106 * - A read event: shutdown for reads (CF_READ_EVENT + SHUTR)
Christopher Faulet904763f2023-03-22 14:53:11 +01001107 * end of input (CF_READ_EVENT + SC_FL_EOI)
Christopher Faulet285f7612022-12-12 08:28:55 +01001108 * data received and no fast-forwarding (CF_READ_EVENT + !to_forward)
1109 * read event while consumer side is not established (CF_READ_EVENT + sco->state != SC_ST_EST)
1110 */
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001111 ((ic->flags & CF_READ_EVENT) && ((sc->flags & SC_FL_EOI) || (sc->flags & SC_FL_SHUTR) || !ic->to_forward || sco->state != SC_ST_EST)) ||
Christopher Faulet2e56a732023-01-26 16:18:09 +01001112 sc_ep_test(sc, SE_FL_ERROR) ||
Christopher Faulet5e29b762022-04-04 08:58:34 +02001113
1114 /* changes on the consumption side */
Christopher Faulet2e56a732023-01-26 16:18:09 +01001115 sc_ep_test(sc, SE_FL_ERR_PENDING) ||
Christopher Fauletd8988412022-12-20 18:10:04 +01001116 ((oc->flags & CF_WRITE_EVENT) &&
1117 ((sc->state < SC_ST_EST) ||
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001118 (sc->flags & SC_FL_SHUTW) ||
Christopher Faulet5e29b762022-04-04 08:58:34 +02001119 (((oc->flags & CF_WAKE_WRITE) ||
Christopher Faulet87633c32023-04-03 18:32:50 +02001120 (!(oc->flags & CF_AUTO_CLOSE) &&
Christopher Faulete38534c2023-04-13 15:45:24 +02001121 !(sc->flags & (SC_FL_SHUT_WANTED|SC_FL_SHUTW)))) &&
Christopher Faulet87633c32023-04-03 18:32:50 +02001122 (sco->state != SC_ST_EST ||
1123 (channel_is_empty(oc) && !oc->to_forward)))))) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001124 task_wakeup(task, TASK_WOKEN_IO);
1125 }
Christopher Faulet5e29b762022-04-04 08:58:34 +02001126
Christopher Faulet2e56a732023-01-26 16:18:09 +01001127 if (ic->flags & CF_READ_EVENT)
Christopher Faulet9a790f62023-03-16 14:40:03 +01001128 sc->flags &= ~SC_FL_RCV_ONCE;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001129}
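/* Condensed, purely illustrative view (not from the original sources) of the
 * wake-up rule evaluated at the end of sc_notify() above:
 *
 *     wake = (read_event && (EOI || shut_read || !to_forward || peer != EST))
 *         || rx_error || pending_tx_error
 *         || (write_event && (state < EST || shut_write ||
 *                             ((wake_write || !auto_close_nor_shut_wanted) &&
 *                              (peer != EST || output_drained))));
 *     if (wake)
 *         task_wakeup(task, TASK_WOKEN_IO);
 */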
1130
1131/*
1132 * This function propagates a null read received on a socket-based connection.
Willy Tarreaucb041662022-05-17 19:44:42 +02001133 * It updates the stream connector. If the stream connector has SC_FL_NOHALF,
Christopher Faulet5e29b762022-04-04 08:58:34 +02001134 * the close is also forwarded to the write side as an abort.
1135 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001136static void sc_conn_read0(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001137{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001138 struct channel *ic = sc_ic(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001139
Willy Tarreau0adb2812022-05-27 10:02:48 +02001140 BUG_ON(!sc_conn(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001141
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001142 if (sc->flags & SC_FL_SHUTR)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001143 return;
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001144 sc->flags |= SC_FL_SHUTR;
Christopher Faulet87633c32023-04-03 18:32:50 +02001145 ic->flags |= CF_READ_EVENT;
Christopher Faulet4c135682023-02-16 11:09:31 +01001146 sc_ep_report_read_activity(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001147
Willy Tarreau0adb2812022-05-27 10:02:48 +02001148 if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001149 return;
1150
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001151 if (sc->flags & SC_FL_SHUTW)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001152 goto do_close;
1153
Christopher Fauleteb3f26d2023-02-08 16:18:48 +01001154 if (sc_cond_forward_shutw(sc)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001155 /* we want to immediately forward this close to the write side */
1156 /* force flag on ssl to keep stream in cache */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001157 sc_conn_shutw(sc, CO_SHW_SILENT);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001158 goto do_close;
1159 }
1160
1161 /* otherwise that's just a normal read shutdown */
1162 return;
1163
1164 do_close:
Willy Tarreauf61dd192022-05-27 09:00:19 +02001165 /* OK we completely close the socket here just as if we went through sc_shut[rw]() */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001166 sc_conn_shut(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001167
Christopher Faulete38534c2023-04-13 15:45:24 +02001168 sc->flags &= ~SC_FL_SHUT_WANTED;
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001169 sc->flags |= SC_FL_SHUTW;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001170
Willy Tarreau0adb2812022-05-27 10:02:48 +02001171 sc->state = SC_ST_DIS;
Christopher Fauletca679922022-07-20 13:24:04 +02001172 if (sc->flags & SC_FL_ISBACK)
1173 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001174 return;
1175}
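/* Illustrative note (hypothetical flow, not from the original sources): when
 * SC_FL_NOHALF is set on the stream connector, a read0 reported by the mux is
 * immediately turned into a full close:
 *
 *     sc->flags |= SC_FL_NOHALF;   // e.g. set earlier by the stream setup
 *     ...                          // later the mux reports SE_FL_EOS
 *     sc_conn_read0(sc);           // sc_cond_forward_shutw() then triggers
 *                                  // sc_conn_shutw() + sc_conn_shut()
 */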
1176
1177/*
1178 * This is the callback which is called by the connection layer to receive data
1179 * into the buffer from the connection. It calls the mux layer's
1180 * rcv_buf function in a loop.
1181 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001182static int sc_conn_recv(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001183{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001184 struct connection *conn = __sc_conn(sc);
1185 struct channel *ic = sc_ic(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001186 int ret, max, cur_read = 0;
1187 int read_poll = MAX_READ_POLL_LOOPS;
1188 int flags = 0;
1189
1190 /* If not established yet, do nothing. */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001191 if (sc->state != SC_ST_EST)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001192 return 0;
1193
Willy Tarreau462b9892022-05-18 18:06:53 +02001194 /* If another call to sc_conn_recv() failed, and we subscribed to
Christopher Faulet5e29b762022-04-04 08:58:34 +02001195 * recv events already, give up now.
1196 */
Christopher Faulet95125882023-04-12 18:35:18 +02001197 if ((sc->wait_event.events & SUB_RETRY_RECV) || sc_waiting_room(sc))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001198 return 0;
1199
1200 /* maybe we were called immediately after an asynchronous shutr */
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001201 if (sc->flags & SC_FL_SHUTR)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001202 return 1;
1203
1204 /* we must wait because the mux is not installed yet */
1205 if (!conn->mux)
1206 return 0;
1207
Christopher Faulet5e29b762022-04-04 08:58:34 +02001208 /* stop immediately on errors. Note that we DON'T want to stop on
1209 * POLL_ERR, as the poller might report a write error while there
1210 * are still data available in the recv buffer. This typically
1211 * happens when we send too large a request to a backend server
1212 * which rejects it before reading it all.
1213 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001214 if (!sc_ep_test(sc, SE_FL_RCV_MORE)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001215 if (!conn_xprt_ready(conn))
1216 return 0;
Willy Tarreau0adb2812022-05-27 10:02:48 +02001217 if (sc_ep_test(sc, SE_FL_ERROR))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001218 goto end_recv;
1219 }
1220
1221 /* prepare to detect if the mux needs more room */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001222 sc_ep_clr(sc, SE_FL_WANT_ROOM);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001223
1224 if ((ic->flags & (CF_STREAMER | CF_STREAMER_FAST)) && !co_data(ic) &&
1225 global.tune.idle_timer &&
1226 (unsigned short)(now_ms - ic->last_read) >= global.tune.idle_timer) {
1227 /* The buffer was empty and nothing was transferred for more
1228 * than one second. This was caused by a pause and not by
1229 * congestion. Reset any streaming mode to reduce latency.
1230 */
1231 ic->xfer_small = 0;
1232 ic->xfer_large = 0;
1233 ic->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
1234 }
1235
1236 /* First, let's see if we may splice data across the channel without
1237 * using a buffer.
1238 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001239 if (sc_ep_test(sc, SE_FL_MAY_SPLICE) &&
Christopher Faulet5e29b762022-04-04 08:58:34 +02001240 (ic->pipe || ic->to_forward >= MIN_SPLICE_FORWARD) &&
1241 ic->flags & CF_KERN_SPLICING) {
1242 if (c_data(ic)) {
1243 /* We're embarrassed, there are already data pending in
1244 * the buffer and we don't want to have them at two
1245 * locations at a time. Let's indicate we need some
1246 * place and ask the consumer to hurry.
1247 */
1248 flags |= CO_RFL_BUF_FLUSH;
1249 goto abort_splice;
1250 }
1251
1252 if (unlikely(ic->pipe == NULL)) {
1253 if (pipes_used >= global.maxpipes || !(ic->pipe = get_pipe())) {
1254 ic->flags &= ~CF_KERN_SPLICING;
1255 goto abort_splice;
1256 }
1257 }
1258
Willy Tarreau0adb2812022-05-27 10:02:48 +02001259 ret = conn->mux->rcv_pipe(sc, ic->pipe, ic->to_forward);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001260 if (ret < 0) {
1261 /* splice not supported on this end, let's disable it */
1262 ic->flags &= ~CF_KERN_SPLICING;
1263 goto abort_splice;
1264 }
1265
1266 if (ret > 0) {
1267 if (ic->to_forward != CHN_INFINITE_FORWARD)
1268 ic->to_forward -= ret;
1269 ic->total += ret;
1270 cur_read += ret;
Christopher Faulet285f7612022-12-12 08:28:55 +01001271 ic->flags |= CF_READ_EVENT;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001272 }
1273
Willy Tarreau0adb2812022-05-27 10:02:48 +02001274 if (sc_ep_test(sc, SE_FL_EOS | SE_FL_ERROR))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001275 goto end_recv;
1276
1277 if (conn->flags & CO_FL_WAIT_ROOM) {
1278 /* the pipe is full or we have read enough data that it
1279 * could soon be full. Let's stop before needing to poll.
1280 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001281 sc_need_room(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001282 goto done_recv;
1283 }
1284
1285 /* splice not possible (anymore), let's go on on standard copy */
1286 }
1287
1288 abort_splice:
1289 if (ic->pipe && unlikely(!ic->pipe->data)) {
1290 put_pipe(ic->pipe);
1291 ic->pipe = NULL;
1292 }
1293
Willy Tarreau0adb2812022-05-27 10:02:48 +02001294 if (ic->pipe && ic->to_forward && !(flags & CO_RFL_BUF_FLUSH) && sc_ep_test(sc, SE_FL_MAY_SPLICE)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001295 /* don't break splicing by reading, but still call rcv_buf()
1296 * to pass the flag.
1297 */
1298 goto done_recv;
1299 }
1300
1301 /* now we'll need an input buffer for the stream */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001302 if (!sc_alloc_ibuf(sc, &(__sc_strm(sc)->buffer_wait)))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001303 goto end_recv;
1304
1305 /* For an HTX stream, if the buffer is stuck (no output data with some
1306 * input data) and if the HTX message is fragmented or if its free space
1307 * wraps, we force an HTX defragmentation. It is a way to have a
1308 * contiguous free space and to let the mux copy as much data as
1309 * possible.
1310 *
1311 * NOTE: A possible optimization may be to let the mux decide if defrag is
1312 * required or not, depending on the amount of data to be xferred.
1313 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001314 if (IS_HTX_STRM(__sc_strm(sc)) && !co_data(ic)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001315 struct htx *htx = htxbuf(&ic->buf);
1316
1317 if (htx_is_not_empty(htx) && ((htx->flags & HTX_FL_FRAGMENTED) || htx_space_wraps(htx)))
1318 htx_defrag(htx, NULL, 0);
1319 }
1320
1321 /* Instruct the mux that it must subscribe to read events */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001322 flags |= ((!conn_is_back(conn) && (__sc_strm(sc)->be->options & PR_O_ABRT_CLOSE)) ? CO_RFL_KEEP_RECV : 0);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001323
1324 /* Important note : if we're called with POLL_IN|POLL_HUP, it means the read polling
1325 * was enabled, which implies that the recv buffer was not full. So we have a guarantee
1326 * that if such an event is not handled above in splice, it will be handled here by
1327 * recv().
1328 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001329 while (sc_ep_test(sc, SE_FL_RCV_MORE) ||
Christopher Faulet5e29b762022-04-04 08:58:34 +02001330 (!(conn->flags & CO_FL_HANDSHAKE) &&
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001331 (!sc_ep_test(sc, SE_FL_ERROR | SE_FL_EOS)) && !(sc->flags & SC_FL_SHUTR))) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001332 int cur_flags = flags;
1333
1334 /* Compute transient CO_RFL_* flags */
1335 if (co_data(ic)) {
1336 cur_flags |= (CO_RFL_BUF_WET | CO_RFL_BUF_NOT_STUCK);
1337 }
1338
1339 /* <max> may be null. It is the mux's responsibility to set
Willy Tarreaue68bc612022-05-27 11:23:05 +02001340 * SE_FL_RCV_MORE on the SC if more space is needed.
Christopher Faulet5e29b762022-04-04 08:58:34 +02001341 */
1342 max = channel_recv_max(ic);
Willy Tarreau0adb2812022-05-27 10:02:48 +02001343 ret = conn->mux->rcv_buf(sc, &ic->buf, max, cur_flags);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001344
Willy Tarreau0adb2812022-05-27 10:02:48 +02001345 if (sc_ep_test(sc, SE_FL_WANT_ROOM)) {
Willy Tarreaub605c422022-05-17 17:04:55 +02001346 /* SE_FL_WANT_ROOM must not be reported if the channel's
Christopher Faulet5e29b762022-04-04 08:58:34 +02001347 * buffer is empty.
1348 */
1349 BUG_ON(c_empty(ic));
1350
Willy Tarreau0adb2812022-05-27 10:02:48 +02001351 sc_need_room(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001352 /* Add READ_PARTIAL because some data are pending but
1353 * cannot be xferred to the channel
1354 */
Christopher Faulet285f7612022-12-12 08:28:55 +01001355 ic->flags |= CF_READ_EVENT;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001356 }
1357
1358 if (ret <= 0) {
1359 /* if we refrained from reading because we asked for a
1360 * flush to satisfy rcv_pipe(), we must not subscribe
1361 * and instead report that there's not enough room
1362 * here to proceed.
1363 */
1364 if (flags & CO_RFL_BUF_FLUSH)
Willy Tarreau0adb2812022-05-27 10:02:48 +02001365 sc_need_room(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001366 break;
1367 }
1368
1369 cur_read += ret;
1370
1371 /* if we're allowed to directly forward data, we must update ->o */
Christopher Faulete38534c2023-04-13 15:45:24 +02001372 if (ic->to_forward && !(chn_cons(ic)->flags & (SC_FL_SHUTW|SC_FL_SHUT_WANTED))) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001373 unsigned long fwd = ret;
1374 if (ic->to_forward != CHN_INFINITE_FORWARD) {
1375 if (fwd > ic->to_forward)
1376 fwd = ic->to_forward;
1377 ic->to_forward -= fwd;
1378 }
1379 c_adv(ic, fwd);
1380 }
1381
Christopher Faulet285f7612022-12-12 08:28:55 +01001382 ic->flags |= CF_READ_EVENT;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001383 ic->total += ret;
1384
1385 /* End-of-input reached, we can leave. In this case, it is
Willy Tarreaue68bc612022-05-27 11:23:05 +02001386 * important to break the loop to not block the SC because of
Christopher Faulet5e29b762022-04-04 08:58:34 +02001387 * the channel's policies. This way, we are still able to receive
1388 * shutdowns.
1389 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001390 if (sc_ep_test(sc, SE_FL_EOI))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001391 break;
1392
Christopher Faulet9a790f62023-03-16 14:40:03 +01001393 if ((sc->flags & SC_FL_RCV_ONCE) || --read_poll <= 0) {
1394 /* we don't expect to read more data */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001395 sc_wont_read(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001396 break;
1397 }
1398
1399 /* if too many bytes were missing from last read, it means that
1400 * it's pointless trying to read again because the system does
1401 * not have them in buffers.
1402 */
1403 if (ret < max) {
1404 /* if a streamer has read only a small amount of data, it may be because we
1405 * have exhausted system buffers. It's not worth trying
1406 * again.
1407 */
1408 if (ic->flags & CF_STREAMER) {
1409 /* we're stopped by the channel's policy */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001410 sc_wont_read(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001411 break;
1412 }
1413
1414 /* if we read a large block smaller than what we requested,
1415 * it's almost certain we'll never get anything more.
1416 */
1417 if (ret >= global.tune.recv_enough) {
1418 /* we're stopped by the channel's policy */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001419 sc_wont_read(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001420 break;
1421 }
1422 }
1423
1424 /* if we are waiting for more space, don't try to read more data
1425 * right now.
1426 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001427 if (sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001428 break;
1429 } /* while !flags */
1430
1431 done_recv:
1432 if (cur_read) {
1433 if ((ic->flags & (CF_STREAMER | CF_STREAMER_FAST)) &&
1434 (cur_read <= ic->buf.size / 2)) {
1435 ic->xfer_large = 0;
1436 ic->xfer_small++;
1437 if (ic->xfer_small >= 3) {
1438 /* we have read less than half of the buffer in
1439 * one pass, and this happened at least 3 times.
1440 * This is definitely not a streamer.
1441 */
1442 ic->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
1443 }
1444 else if (ic->xfer_small >= 2) {
1445 /* if the buffer has been at least half full twice,
1446 * we receive faster than we send, so at least it
1447 * is not a "fast streamer".
1448 */
1449 ic->flags &= ~CF_STREAMER_FAST;
1450 }
1451 }
1452 else if (!(ic->flags & CF_STREAMER_FAST) &&
1453 (cur_read >= ic->buf.size - global.tune.maxrewrite)) {
1454 /* we read a full buffer at once */
1455 ic->xfer_small = 0;
1456 ic->xfer_large++;
1457 if (ic->xfer_large >= 3) {
1458 /* we call this buffer a fast streamer if it manages
1459 * to be filled in one call 3 consecutive times.
1460 */
1461 ic->flags |= (CF_STREAMER | CF_STREAMER_FAST);
1462 }
1463 }
1464 else {
1465 ic->xfer_small = 0;
1466 ic->xfer_large = 0;
1467 }
1468 ic->last_read = now_ms;
Christopher Faulet4c135682023-02-16 11:09:31 +01001469 sc_ep_report_read_activity(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001470 }
1471
1472 end_recv:
1473 ret = (cur_read != 0);
1474
1475 /* Report EOI on the channel if it was reached from the mux point of
1476 * view. */
Christopher Faulet904763f2023-03-22 14:53:11 +01001477 if (sc_ep_test(sc, SE_FL_EOI) && !(sc->flags & SC_FL_EOI)) {
Christopher Faulet4c135682023-02-16 11:09:31 +01001478 sc_ep_report_read_activity(sc);
Christopher Faulet904763f2023-03-22 14:53:11 +01001479 sc->flags |= SC_FL_EOI;
1480 ic->flags |= CF_READ_EVENT;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001481 ret = 1;
1482 }
1483
Christopher Fauletb208d8c2023-03-21 11:25:21 +01001484 if (sc_ep_test(sc, SE_FL_EOS)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001485 /* we received a shutdown */
Christopher Faulet5e29b762022-04-04 08:58:34 +02001486 if (ic->flags & CF_AUTO_CLOSE)
Christopher Fauletdf7cd712023-04-13 15:56:26 +02001487 sc_schedule_shutdown(sc_opposite(sc));
Willy Tarreau0adb2812022-05-27 10:02:48 +02001488 sc_conn_read0(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001489 ret = 1;
1490 }
Christopher Fauletb208d8c2023-03-21 11:25:21 +01001491
1492 if (sc_ep_test(sc, SE_FL_ERROR))
1493 ret = 1;
Willy Tarreau0adb2812022-05-27 10:02:48 +02001494 else if (!(sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM)) &&
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001495 !(sc->flags & SC_FL_SHUTR)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001496 /* Subscribe to receive events if we're blocking on I/O */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001497 conn->mux->subscribe(sc, SUB_RETRY_RECV, &sc->wait_event);
1498 se_have_no_more_data(sc->sedesc);
Christopher Fauletb208d8c2023-03-21 11:25:21 +01001499 }
1500 else {
Willy Tarreau0adb2812022-05-27 10:02:48 +02001501 se_have_more_data(sc->sedesc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001502 ret = 1;
1503 }
Christopher Faulet8019f782023-03-23 17:30:29 +01001504
1505 BUG_ON_HOT((sc_ep_get(sc) & (SE_FL_EOI|SE_FL_EOS|SE_FL_ERROR)) == SE_FL_EOS);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001506 return ret;
1507}
1508
Willy Tarreau4596fe22022-05-17 19:07:51 +02001509/* This tries to perform a synchronous receive on the stream connector to
Christopher Faulet5e29b762022-04-04 08:58:34 +02001510 * try to collect last arrived data. In practice it's only implemented on
Willy Tarreau4596fe22022-05-17 19:07:51 +02001511 * stconns. Returns 0 if nothing was done, non-zero if new data or a
Christopher Faulet5e29b762022-04-04 08:58:34 +02001512 * shutdown were collected. This may result in some delayed receive calls
1513 * being programmed and performed later, though it doesn't provide any
1514 * such guarantee.
1515 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001516int sc_conn_sync_recv(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001517{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001518 if (!sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001519 return 0;
1520
Willy Tarreau0adb2812022-05-27 10:02:48 +02001521 if (!sc_mux_ops(sc))
Willy Tarreau4596fe22022-05-17 19:07:51 +02001522 return 0; // only stconns are supported
Christopher Faulet5e29b762022-04-04 08:58:34 +02001523
Willy Tarreau0adb2812022-05-27 10:02:48 +02001524 if (sc->wait_event.events & SUB_RETRY_RECV)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001525 return 0; // already subscribed
1526
Willy Tarreau0adb2812022-05-27 10:02:48 +02001527 if (!sc_is_recv_allowed(sc))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001528 return 0; // already failed
1529
Willy Tarreau0adb2812022-05-27 10:02:48 +02001530 return sc_conn_recv(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001531}
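/* Illustrative sketch (hypothetical caller, not from the original sources):
 * the stream handler may opportunistically pick up data that arrived since the
 * last wake-up instead of waiting for the I/O tasklet:
 *
 *     if (sc_conn_sync_recv(sc))
 *         ;   // new data or a shutdown were collected, analysers may run
 */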
1532
1533/*
1534 * This function is called to send buffer data to a stream socket.
1535 * It calls the mux layer's snd_buf function. It relies on the
1536 * caller to commit polling changes. The caller should check conn->flags
1537 * for errors.
1538 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001539static int sc_conn_send(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001540{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001541 struct connection *conn = __sc_conn(sc);
Christopher Faulet904763f2023-03-22 14:53:11 +01001542 struct stconn *sco = sc_opposite(sc);
Willy Tarreau0adb2812022-05-27 10:02:48 +02001543 struct stream *s = __sc_strm(sc);
1544 struct channel *oc = sc_oc(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001545 int ret;
1546 int did_send = 0;
1547
Willy Tarreau0adb2812022-05-27 10:02:48 +02001548 if (sc_ep_test(sc, SE_FL_ERROR | SE_FL_ERR_PENDING) || sc_is_conn_error(sc)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001549 /* We're probably there because the tasklet was woken up,
1550 * but process_stream() ran before, detected there was an
Willy Tarreaue68bc612022-05-27 11:23:05 +02001551 * error and put the SC back to SC_ST_TAR. There's still
Christopher Faulet5e29b762022-04-04 08:58:34 +02001552 * CO_FL_ERROR on the connection but we don't want to add
Willy Tarreaub605c422022-05-17 17:04:55 +02001553 * SE_FL_ERROR back, so give up
Christopher Faulet5e29b762022-04-04 08:58:34 +02001554 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001555 if (sc->state < SC_ST_CON)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001556 return 0;
Christopher Faulet7f6aa562022-10-17 10:21:19 +02001557 if (sc_ep_test(sc, SE_FL_EOS))
1558 sc_ep_set(sc, SE_FL_ERROR);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001559 return 1;
1560 }
1561
1562 /* We're already waiting to be able to send, give up */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001563 if (sc->wait_event.events & SUB_RETRY_SEND)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001564 return 0;
1565
1566 /* we might have been called just after an asynchronous shutw */
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001567 if (sc->flags & SC_FL_SHUTW)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001568 return 1;
1569
1570 /* we must wait because the mux is not installed yet */
1571 if (!conn->mux)
1572 return 0;
1573
1574 if (oc->pipe && conn->xprt->snd_pipe && conn->mux->snd_pipe) {
Willy Tarreau0adb2812022-05-27 10:02:48 +02001575 ret = conn->mux->snd_pipe(sc, oc->pipe);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001576 if (ret > 0)
1577 did_send = 1;
1578
1579 if (!oc->pipe->data) {
1580 put_pipe(oc->pipe);
1581 oc->pipe = NULL;
1582 }
1583
1584 if (oc->pipe)
1585 goto end;
1586 }
1587
1588 /* At this point, the pipe is empty, but we may still have data pending
1589 * in the normal buffer.
1590 */
1591 if (co_data(oc)) {
1592 /* when we're here, we already know that there is no spliced
1593 * data left, and that there are sendable buffered data.
1594 */
1595
1596 /* check if we want to inform the kernel that we're interested in
1597 * sending more data after this call. We want this if :
1598 * - we're about to close after this last send and want to merge
1599 * the ongoing FIN with the last segment.
1600 * - we know we can't send everything at once and must get back
1601 * here because of unaligned data
1602 * - there is still a finite amount of data to forward
1603 * The test is arranged so that the most common case does only 2
1604 * tests.
1605 */
1606 unsigned int send_flag = 0;
1607
Christopher Faulet68ef2182023-03-17 15:38:18 +01001608 if ((!(sc->flags & (SC_FL_SND_ASAP|SC_FL_SND_NEVERWAIT)) &&
Christopher Faulet5e29b762022-04-04 08:58:34 +02001609 ((oc->to_forward && oc->to_forward != CHN_INFINITE_FORWARD) ||
Christopher Faulet84d3ef92023-03-17 15:45:58 +01001610 (sc->flags & SC_FL_SND_EXP_MORE) ||
Christopher Faulet5e29b762022-04-04 08:58:34 +02001611 (IS_HTX_STRM(s) &&
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001612 (!(sco->flags & (SC_FL_EOI|SC_FL_SHUTR)) && htx_expect_more(htxbuf(&oc->buf)))))) ||
Christopher Faulet5e29b762022-04-04 08:58:34 +02001613 ((oc->flags & CF_ISRESP) &&
Christopher Faulet87633c32023-04-03 18:32:50 +02001614 (oc->flags & CF_AUTO_CLOSE) &&
Christopher Faulete38534c2023-04-13 15:45:24 +02001615 (sc->flags & SC_FL_SHUT_WANTED)))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001616 send_flag |= CO_SFL_MSG_MORE;
1617
1618 if (oc->flags & CF_STREAMER)
1619 send_flag |= CO_SFL_STREAMER;
1620
1621 if (s->txn && s->txn->flags & TX_L7_RETRY && !b_data(&s->txn->l7_buffer)) {
1622 /* If we want to be able to do L7 retries, copy
1623 * the data we're about to send, so that we are able
1624 * to resend them if needed
1625 */
1626 /* Try to allocate a buffer if we had none.
1627 * If it fails, the next test will just
1628 * disable the l7 retries by setting
1629 * l7_conn_retries to 0.
1630 */
1631 if (s->txn->req.msg_state != HTTP_MSG_DONE)
1632 s->txn->flags &= ~TX_L7_RETRY;
1633 else {
1634 if (b_alloc(&s->txn->l7_buffer) == NULL)
1635 s->txn->flags &= ~TX_L7_RETRY;
1636 else {
1637 memcpy(b_orig(&s->txn->l7_buffer),
1638 b_orig(&oc->buf),
1639 b_size(&oc->buf));
1640 s->txn->l7_buffer.head = co_data(oc);
1641 b_add(&s->txn->l7_buffer, co_data(oc));
1642 }
1643
1644 }
1645 }
1646
Willy Tarreau0adb2812022-05-27 10:02:48 +02001647 ret = conn->mux->snd_buf(sc, &oc->buf, co_data(oc), send_flag);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001648 if (ret > 0) {
1649 did_send = 1;
1650 c_rew(oc, ret);
1651 c_realign_if_empty(oc);
1652
1653 if (!co_data(oc)) {
1654 /* Always clear both flags once everything has been sent, they're one-shot */
Christopher Faulet84d3ef92023-03-17 15:45:58 +01001655 sc->flags &= ~(SC_FL_SND_ASAP|SC_FL_SND_EXP_MORE);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001656 }
1657 /* if some data remain in the buffer, it's only because the
1658 * system buffers are full, we will try next time.
1659 */
Christopher Faulet13045f02022-04-01 14:23:38 +02001660 }
1661 }
Christopher Faulet5e29b762022-04-04 08:58:34 +02001662
1663 end:
1664 if (did_send) {
Christopher Fauletd8988412022-12-20 18:10:04 +01001665 oc->flags |= CF_WRITE_EVENT | CF_WROTE_DATA;
Willy Tarreau0adb2812022-05-27 10:02:48 +02001666 if (sc->state == SC_ST_CON)
1667 sc->state = SC_ST_RDY;
Willy Tarreau0adb2812022-05-27 10:02:48 +02001668 sc_have_room(sc_opposite(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001669 }
1670
Willy Tarreau0adb2812022-05-27 10:02:48 +02001671 if (sc_ep_test(sc, SE_FL_ERROR | SE_FL_ERR_PENDING)) {
Christopher Faulet2e56a732023-01-26 16:18:09 +01001672 oc->flags |= CF_WRITE_EVENT;
Christopher Faulet7f6aa562022-10-17 10:21:19 +02001673 if (sc_ep_test(sc, SE_FL_EOS))
Christopher Faulet2e56a732023-01-26 16:18:09 +01001674 sc_ep_set(sc, SE_FL_ERROR);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001675 return 1;
1676 }
1677
Christopher Faulet59b240c2023-02-27 16:38:12 +01001678 if (channel_is_empty(oc))
1679 sc_ep_report_send_activity(sc);
1680 else {
1681 /* We couldn't send all of our data, let the mux know we'd like to send more */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001682 conn->mux->subscribe(sc, SUB_RETRY_SEND, &sc->wait_event);
Christopher Faulet59b240c2023-02-27 16:38:12 +01001683 sc_ep_report_blocked_send(sc);
1684 }
1685
Christopher Faulet5e29b762022-04-04 08:58:34 +02001686 return did_send;
1687}
1688
Christopher Fauletd8988412022-12-20 18:10:04 +01001689/* perform a synchronous send() for the stream connector. The CF_WRITE_EVENT
1690 * flag is cleared prior to the attempt, and will possibly be updated in case
1691 * of success.
Christopher Faulet5e29b762022-04-04 08:58:34 +02001692 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001693void sc_conn_sync_send(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001694{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001695 struct channel *oc = sc_oc(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001696
Christopher Fauletd8988412022-12-20 18:10:04 +01001697 oc->flags &= ~CF_WRITE_EVENT;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001698
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001699 if (sc->flags & SC_FL_SHUTW)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001700 return;
1701
1702 if (channel_is_empty(oc))
1703 return;
1704
Willy Tarreau0adb2812022-05-27 10:02:48 +02001705 if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001706 return;
1707
Willy Tarreau0adb2812022-05-27 10:02:48 +02001708 if (!sc_mux_ops(sc))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001709 return;
1710
Willy Tarreau0adb2812022-05-27 10:02:48 +02001711 sc_conn_send(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001712}
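/* Illustrative sketch (hypothetical caller, not from the original sources):
 * once the analysers have filled the output channel, the stream may attempt an
 * immediate flush rather than waiting for the mux to be scheduled:
 *
 *     sc_conn_sync_send(sc);               // no-op if shut, empty or not ready
 *     if (!channel_is_empty(sc_oc(sc)))
 *         ;   // leftover data will go out when the mux wakes the tasklet again
 */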
1713
1714/* Called by I/O handlers after completion. It propagates
Willy Tarreau4596fe22022-05-17 19:07:51 +02001715 * connection flags to the stream connector, updates the stream (which may or
Christopher Faulet5e29b762022-04-04 08:58:34 +02001716 * may not take this opportunity to try to forward data), then updates the
Willy Tarreau4596fe22022-05-17 19:07:51 +02001717 * connection's polling based on the channels and stream connector's final
Christopher Faulet5e29b762022-04-04 08:58:34 +02001718 * states. The function always returns 0.
1719 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001720static int sc_conn_process(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001721{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001722 struct connection *conn = __sc_conn(sc);
1723 struct channel *ic = sc_ic(sc);
1724 struct channel *oc = sc_oc(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001725
1726 BUG_ON(!conn);
1727
1728 /* If we have data to send, try it now */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001729 if (!channel_is_empty(oc) && !(sc->wait_event.events & SUB_RETRY_SEND))
1730 sc_conn_send(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001731
Willy Tarreau4596fe22022-05-17 19:07:51 +02001732 /* First step, report to the stream connector what was detected at the
Christopher Faulet5e29b762022-04-04 08:58:34 +02001733 * connection layer : errors and connection establishment.
Willy Tarreaub605c422022-05-17 17:04:55 +02001734 * Only add SE_FL_ERROR if we're connected, or we're attempting to
Christopher Faulet5e29b762022-04-04 08:58:34 +02001735 * connect, we may get there because we got woken up, but only run
1736 * after process_stream() noticed there was an error, and decided
1737 * to retry to connect, the connection may still have CO_FL_ERROR,
Willy Tarreaub605c422022-05-17 17:04:55 +02001738 * and we don't want to add SE_FL_ERROR back
Christopher Faulet5e29b762022-04-04 08:58:34 +02001739 *
Willy Tarreau462b9892022-05-18 18:06:53 +02001740 * Note: This test is only required because sc_conn_process is also the SC
1741 * wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
Christopher Faulet5e29b762022-04-04 08:58:34 +02001742 * care of it.
1743 */
1744
Willy Tarreau0adb2812022-05-27 10:02:48 +02001745 if (sc->state >= SC_ST_CON) {
1746 if (sc_is_conn_error(sc))
1747 sc_ep_set(sc, SE_FL_ERROR);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001748 }
1749
1750 /* If we had early data, and the handshake ended, then
1751 * we can remove the flag, and attempt to wake the task up,
1752 * in the event there's an analyser waiting for the end of
1753 * the handshake.
1754 */
1755 if (!(conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS)) &&
Willy Tarreau0adb2812022-05-27 10:02:48 +02001756 sc_ep_test(sc, SE_FL_WAIT_FOR_HS)) {
1757 sc_ep_clr(sc, SE_FL_WAIT_FOR_HS);
1758 task_wakeup(sc_strm_task(sc), TASK_WOKEN_MSG);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001759 }
1760
Willy Tarreau0adb2812022-05-27 10:02:48 +02001761 if (!sc_state_in(sc->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO) &&
Christopher Faulet5e29b762022-04-04 08:58:34 +02001762 (conn->flags & CO_FL_WAIT_XPRT) == 0) {
Christopher Fauletca679922022-07-20 13:24:04 +02001763 if (sc->flags & SC_FL_ISBACK)
1764 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Fauletb96f2aa2022-12-12 08:11:36 +01001765 oc->flags |= CF_WRITE_EVENT;
Willy Tarreau0adb2812022-05-27 10:02:48 +02001766 if (sc->state == SC_ST_CON)
1767 sc->state = SC_ST_RDY;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001768 }
1769
1770 /* Report EOS on the channel if it was reached from the mux point of
1771 * view.
1772 *
Willy Tarreau462b9892022-05-18 18:06:53 +02001773 * Note: This test is only required because sc_conn_process is also the SC
1774 * wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
Christopher Faulet5e29b762022-04-04 08:58:34 +02001775 * care of it.
1776 */
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001777 if (sc_ep_test(sc, SE_FL_EOS) && !(sc->flags & SC_FL_SHUTR)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001778 /* we received a shutdown */
Christopher Faulet5e29b762022-04-04 08:58:34 +02001779 if (ic->flags & CF_AUTO_CLOSE)
Christopher Fauletdf7cd712023-04-13 15:56:26 +02001780 sc_schedule_shutdown(sc_opposite(sc));
Willy Tarreau0adb2812022-05-27 10:02:48 +02001781 sc_conn_read0(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001782 }
1783
1784 /* Report EOI on the channel if it was reached from the mux point of
1785 * view.
1786 *
Willy Tarreau462b9892022-05-18 18:06:53 +02001787 * Note: This test is only required because sc_conn_process is also the SC
1788 * wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
Christopher Faulet5e29b762022-04-04 08:58:34 +02001789 * care of it.
1790 */
Christopher Faulet904763f2023-03-22 14:53:11 +01001791 if (sc_ep_test(sc, SE_FL_EOI) && !(sc->flags & SC_FL_EOI)) {
1792 sc->flags |= SC_FL_EOI;
1793 ic->flags |= CF_READ_EVENT;
1794 }
Christopher Faulet5e29b762022-04-04 08:58:34 +02001795
Willy Tarreau4596fe22022-05-17 19:07:51 +02001796 /* Second step : update the stream connector and channels, try to forward any
Christopher Faulet5e29b762022-04-04 08:58:34 +02001797 * pending data, then possibly wake the stream up based on the new
Willy Tarreau4596fe22022-05-17 19:07:51 +02001798 * stream connector status.
Christopher Faulet5e29b762022-04-04 08:58:34 +02001799 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001800 sc_notify(sc);
1801 stream_release_buffers(__sc_strm(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001802 return 0;
1803}
1804
Willy Tarreau4596fe22022-05-17 19:07:51 +02001805/* This is the ->process() function for any stream connector's wait_event task.
1806 * It's assigned during the stream connector's initialization, for any type of
1807 * stream connector. Thus it is always safe to perform a tasklet_wakeup() on a
Willy Tarreaue68bc612022-05-27 11:23:05 +02001808 * stream connector, as the presence of the SC is checked there.
Christopher Faulet5e29b762022-04-04 08:58:34 +02001809 */
Willy Tarreau462b9892022-05-18 18:06:53 +02001810struct task *sc_conn_io_cb(struct task *t, void *ctx, unsigned int state)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001811{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001812 struct stconn *sc = ctx;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001813 int ret = 0;
1814
Willy Tarreau0adb2812022-05-27 10:02:48 +02001815 if (!sc_conn(sc))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001816 return t;
1817
Willy Tarreau0adb2812022-05-27 10:02:48 +02001818 if (!(sc->wait_event.events & SUB_RETRY_SEND) && !channel_is_empty(sc_oc(sc)))
1819 ret = sc_conn_send(sc);
1820 if (!(sc->wait_event.events & SUB_RETRY_RECV))
1821 ret |= sc_conn_recv(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001822 if (ret != 0)
Willy Tarreau0adb2812022-05-27 10:02:48 +02001823 sc_conn_process(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001824
Willy Tarreau0adb2812022-05-27 10:02:48 +02001825 stream_release_buffers(__sc_strm(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001826 return t;
1827}
1828
1829/* Callback to be used by applet handlers upon completion. It updates the stream
1830 * (which may or may not take this opportunity to try to forward data), then
Willy Tarreau4596fe22022-05-17 19:07:51 +02001831 * may re-enable the applet based on the channels and stream connector's final
Christopher Faulet5e29b762022-04-04 08:58:34 +02001832 * states.
1833 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001834static int sc_applet_process(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001835{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001836 struct channel *ic = sc_ic(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001837
Willy Tarreau0adb2812022-05-27 10:02:48 +02001838 BUG_ON(!sc_appctx(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001839
Christopher Fauletf8fbb6d2023-03-21 11:49:21 +01001840 /* Report EOI on the channel if it was reached from the applet point of
1841 * view. */
Christopher Faulet904763f2023-03-22 14:53:11 +01001842 if (sc_ep_test(sc, SE_FL_EOI) && !(sc->flags & SC_FL_EOI)) {
Christopher Fauletf8fbb6d2023-03-21 11:49:21 +01001843 sc_ep_report_read_activity(sc);
Christopher Faulet904763f2023-03-22 14:53:11 +01001844 sc->flags |= SC_FL_EOI;
1845 ic->flags |= CF_READ_EVENT;
Christopher Fauletf8fbb6d2023-03-21 11:49:21 +01001846 }
1847
Christopher Faulet0ffc9d72023-03-21 14:19:08 +01001848 if (sc_ep_test(sc, SE_FL_EOS)) {
1849 /* we received a shutdown */
1850 sc_shutr(sc);
1851 }
1852
Christopher Faulet5e29b762022-04-04 08:58:34 +02001853 /* If the applet wants to write and the channel is closed, it's a
1854 * broken pipe and it must be reported.
1855 */
Christopher Faulet7faac7c2023-04-04 10:05:27 +02001856 if (!sc_ep_test(sc, SE_FL_HAVE_NO_DATA) && (sc->flags & SC_FL_SHUTR))
Willy Tarreau0adb2812022-05-27 10:02:48 +02001857 sc_ep_set(sc, SE_FL_ERROR);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001858
1859 /* automatically mark the applet as having data available if it reported
1860 * being blocked by the channel.
1861 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001862 if ((sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM)) ||
1863 sc_ep_test(sc, SE_FL_APPLET_NEED_CONN))
1864 applet_have_more_data(__sc_appctx(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001865
Willy Tarreau4596fe22022-05-17 19:07:51 +02001866 /* update the stream connector, channels, and possibly wake the stream up */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001867 sc_notify(sc);
1868 stream_release_buffers(__sc_strm(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001869
Willy Tarreau19c65a92022-05-27 08:49:24 +02001870 /* sc_notify may have passed through chk_snd and released some blocking
Willy Tarreau15252cd2022-05-25 16:36:21 +02001871 * flags. Process_stream will consider those flags to wake up the
Christopher Faulet5e29b762022-04-04 08:58:34 +02001872 * appctx, but in case the task is not in the run queue we may have to
1873 * wake the appctx up immediately.
1874 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001875 if (sc_is_recv_allowed(sc) || sc_is_send_allowed(sc))
1876 appctx_wakeup(__sc_appctx(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001877 return 0;
Christopher Faulet13045f02022-04-01 14:23:38 +02001878}
Christopher Fauletb68f77d2022-06-16 16:24:16 +02001879
1880
1881/* Prepares an endpoint upgrade. We don't know at this stage if the upgrade will
1882 * succeed or not, nor if the stconn will be reused by the new endpoint. Thus,
1883 * for now, only pretend the stconn is detached.
1884 */
1885void sc_conn_prepare_endp_upgrade(struct stconn *sc)
1886{
1887 BUG_ON(!sc_conn(sc) || !sc->app);
1888 sc_ep_clr(sc, SE_FL_T_MUX);
1889 sc_ep_set(sc, SE_FL_DETACHED);
1890}
1891
Ilya Shipitsin3b64a282022-07-29 22:26:53 +05001892/* Endpoint upgrade failed. Restore the stconn state. */
Christopher Fauletb68f77d2022-06-16 16:24:16 +02001893void sc_conn_abort_endp_upgrade(struct stconn *sc)
1894{
1895 sc_ep_set(sc, SE_FL_T_MUX);
1896 sc_ep_clr(sc, SE_FL_DETACHED);
1897}
1898
1899/* Commit the endpoint upgrade. If stconn is attached, it means the new endpoint
1900 * uses it. So we do nothing. Otherwise, the stconn will be destroyed with the
1901 * overlying stream. So, it means we must commit the detach.
1902*/
1903void sc_conn_commit_endp_upgrade(struct stconn *sc)
1904{
1905 if (!sc_ep_test(sc, SE_FL_DETACHED))
1906 return;
1907 sc_detach_endp(&sc);
1908 /* Because it was already set as detached, the sedesc must be preserved */
Willy Tarreau6a378d12022-08-11 13:56:42 +02001909 BUG_ON(!sc);
Christopher Fauletb68f77d2022-06-16 16:24:16 +02001910 BUG_ON(!sc->sedesc);
1911}