Christopher Faulet1329f2a2021-12-16 17:32:56 +01001/*
Willy Tarreau4596fe22022-05-17 19:07:51 +02002 * stream connector management functions
Christopher Faulet1329f2a2021-12-16 17:32:56 +01003 *
4 * Copyright 2021 Christopher Faulet <cfaulet@haproxy.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <haproxy/api.h>
Christopher Faulet37046632022-04-01 11:36:58 +020014#include <haproxy/applet.h>
Christopher Faulet1329f2a2021-12-16 17:32:56 +010015#include <haproxy/connection.h>
Christopher Faulet5e29b762022-04-04 08:58:34 +020016#include <haproxy/check.h>
17#include <haproxy/http_ana.h>
18#include <haproxy/pipe.h>
Christopher Faulet1329f2a2021-12-16 17:32:56 +010019#include <haproxy/pool.h>
Willy Tarreau5edca2f2022-05-27 09:25:10 +020020#include <haproxy/sc_strm.h>
Willy Tarreaucb086c62022-05-27 09:47:12 +020021#include <haproxy/stconn.h>
Christopher Faulet1329f2a2021-12-16 17:32:56 +010022
Willy Tarreau4596fe22022-05-17 19:07:51 +020023DECLARE_POOL(pool_head_connstream, "stconn", sizeof(struct stconn));
Willy Tarreauea59b022022-05-17 17:53:22 +020024DECLARE_POOL(pool_head_sedesc, "sedesc", sizeof(struct sedesc));
Christopher Faulet1329f2a2021-12-16 17:32:56 +010025
Willy Tarreau3a3f4802022-05-17 18:28:19 +020026/* functions used by default on a detached stream connector */
Willy Tarreau0adb2812022-05-27 10:02:48 +020027static void sc_app_shutr(struct stconn *sc);
28static void sc_app_shutw(struct stconn *sc);
29static void sc_app_chk_rcv(struct stconn *sc);
30static void sc_app_chk_snd(struct stconn *sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +020031
Willy Tarreau3a3f4802022-05-17 18:28:19 +020032/* functions used on a mux-based stream connector */
Willy Tarreau0adb2812022-05-27 10:02:48 +020033static void sc_app_shutr_conn(struct stconn *sc);
34static void sc_app_shutw_conn(struct stconn *sc);
35static void sc_app_chk_rcv_conn(struct stconn *sc);
36static void sc_app_chk_snd_conn(struct stconn *sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +020037
Willy Tarreau3a3f4802022-05-17 18:28:19 +020038/* functions used on an applet-based stream connector */
Willy Tarreau0adb2812022-05-27 10:02:48 +020039static void sc_app_shutr_applet(struct stconn *sc);
40static void sc_app_shutw_applet(struct stconn *sc);
41static void sc_app_chk_rcv_applet(struct stconn *sc);
42static void sc_app_chk_snd_applet(struct stconn *sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +020043
Willy Tarreau0adb2812022-05-27 10:02:48 +020044static int sc_conn_process(struct stconn *sc);
45static int sc_conn_recv(struct stconn *sc);
46static int sc_conn_send(struct stconn *sc);
47static int sc_applet_process(struct stconn *sc);
Willy Tarreau2f2318d2022-05-18 10:17:16 +020048
Willy Tarreau3a3f4802022-05-17 18:28:19 +020049/* stream connector operations for connections */
50struct sc_app_ops sc_app_conn_ops = {
51 .chk_rcv = sc_app_chk_rcv_conn,
52 .chk_snd = sc_app_chk_snd_conn,
53 .shutr = sc_app_shutr_conn,
54 .shutw = sc_app_shutw_conn,
Willy Tarreau462b9892022-05-18 18:06:53 +020055 .wake = sc_conn_process,
Willy Tarreau2f2318d2022-05-18 10:17:16 +020056 .name = "STRM",
Christopher Faulet9ffddd52022-04-01 14:04:29 +020057};
58
Willy Tarreau3a3f4802022-05-17 18:28:19 +020059/* stream connector operations for embedded tasks */
60struct sc_app_ops sc_app_embedded_ops = {
61 .chk_rcv = sc_app_chk_rcv,
62 .chk_snd = sc_app_chk_snd,
63 .shutr = sc_app_shutr,
64 .shutw = sc_app_shutw,
Willy Tarreau2f2318d2022-05-18 10:17:16 +020065 .wake = NULL, /* may never be used */
66 .name = "NONE", /* may never be used */
Christopher Faulet9ffddd52022-04-01 14:04:29 +020067};
68
Willy Tarreau2f2318d2022-05-18 10:17:16 +020069/* stream connector operations for applets */
Willy Tarreau3a3f4802022-05-17 18:28:19 +020070struct sc_app_ops sc_app_applet_ops = {
71 .chk_rcv = sc_app_chk_rcv_applet,
72 .chk_snd = sc_app_chk_snd_applet,
73 .shutr = sc_app_shutr_applet,
74 .shutw = sc_app_shutw_applet,
Willy Tarreau19c65a92022-05-27 08:49:24 +020075 .wake = sc_applet_process,
Christopher Faulet5e29b762022-04-04 08:58:34 +020076 .name = "STRM",
77};
78
Willy Tarreau2f2318d2022-05-18 10:17:16 +020079/* stream connector for health checks on connections */
80struct sc_app_ops sc_app_check_ops = {
81 .chk_rcv = NULL,
82 .chk_snd = NULL,
83 .shutr = NULL,
84 .shutw = NULL,
85 .wake = wake_srv_chk,
86 .name = "CHCK",
87};
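/* Hedged illustration (not part of the build): one of these operation tables is
 * installed in sc->app_ops when an endpoint is attached (see the sc_attach_*()
 * functions below), and callers then dispatch through it without knowing the
 * endpoint type, roughly as follows:
 *
 *     if (sc->app_ops->shutw)
 *         sc->app_ops->shutw(sc);    // connection, applet or embedded variant
 */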
Christopher Faulet5e29b762022-04-04 08:58:34 +020088
Christopher Faulet9ed77422022-04-12 08:51:15 +020089/* Initializes an endpoint */
Willy Tarreauea59b022022-05-17 17:53:22 +020090void sedesc_init(struct sedesc *sedesc)
Christopher Fauletdb90f2a2022-03-22 16:06:25 +010091{
Willy Tarreauea59b022022-05-17 17:53:22 +020092 sedesc->se = NULL;
93 sedesc->conn = NULL;
Willy Tarreauc1054922022-05-18 07:43:52 +020094 sedesc->sc = NULL;
Christopher Fauletf8413cb2023-02-07 16:06:14 +010095 sedesc->rex = sedesc->wex = TICK_ETERNITY;
Willy Tarreauea59b022022-05-17 17:53:22 +020096 se_fl_setall(sedesc, SE_FL_NONE);
Christopher Fauletdb90f2a2022-03-22 16:06:25 +010097}
98
Christopher Faulet9ed77422022-04-12 08:51:15 +0200099/* Tries to allocate an endpoint and initialize it. Returns NULL on failure. */
Willy Tarreauea59b022022-05-17 17:53:22 +0200100struct sedesc *sedesc_new()
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100101{
Willy Tarreauea59b022022-05-17 17:53:22 +0200102 struct sedesc *sedesc;
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100103
Willy Tarreauea59b022022-05-17 17:53:22 +0200104 sedesc = pool_alloc(pool_head_sedesc);
105 if (unlikely(!sedesc))
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100106 return NULL;
107
Willy Tarreauea59b022022-05-17 17:53:22 +0200108 sedesc_init(sedesc);
109 return sedesc;
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100110}
111
Christopher Faulet9ed77422022-04-12 08:51:15 +0200112/* Releases an endpoint. It is the caller's responsibility to make sure it is
113 * safe to do so and that it is not shared with another entity.
114 */
Willy Tarreauea59b022022-05-17 17:53:22 +0200115void sedesc_free(struct sedesc *sedesc)
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100116{
Willy Tarreauea59b022022-05-17 17:53:22 +0200117 pool_free(pool_head_sedesc, sedesc);
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100118}
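/* Hedged usage sketch (illustration only): the typical lifecycle of an endpoint
 * descriptor with the helpers above. Error handling is reduced to a NULL check.
 *
 *     struct sedesc *sd = sedesc_new();  // allocates from pool_head_sedesc and
 *                                        // calls sedesc_init() on success
 *     if (!sd)
 *         return NULL;                   // allocation failure
 *     ...                                // attach <sd> to an endpoint and a stconn
 *     sedesc_free(sd);                   // only once it is no longer shared
 */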
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100119
Willy Tarreau4596fe22022-05-17 19:07:51 +0200120/* Tries to allocate a new stconn and initialize its main fields. On
Christopher Faulet9ed77422022-04-12 08:51:15 +0200121 * failure, nothing is allocated and NULL is returned. It is an internal
Willy Tarreaub605c422022-05-17 17:04:55 +0200122 * function. The caller must, at least, set the SE_FL_ORPHAN or SE_FL_DETACHED
Christopher Faulet9ed77422022-04-12 08:51:15 +0200123 * flag.
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100124 */
Willy Tarreaua0b58b52022-05-27 08:33:53 +0200125static struct stconn *sc_new(struct sedesc *sedesc)
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100126{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200127 struct stconn *sc;
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100128
Willy Tarreau0adb2812022-05-27 10:02:48 +0200129 sc = pool_alloc(pool_head_connstream);
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100130
Willy Tarreau0adb2812022-05-27 10:02:48 +0200131 if (unlikely(!sc))
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100132 goto alloc_error;
Christopher Fauletbb772d02022-03-22 15:28:36 +0100133
Willy Tarreau1d2c79a2022-05-27 11:15:19 +0200134 sc->obj_type = OBJ_TYPE_SC;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200135 sc->flags = SC_FL_NONE;
136 sc->state = SC_ST_INI;
Christopher Fauleted7e66f2023-02-07 11:09:15 +0100137 sc->rto = sc->wto = sc->hcto = TICK_ETERNITY;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200138 sc->app = NULL;
139 sc->app_ops = NULL;
140 sc->src = NULL;
141 sc->dst = NULL;
142 sc->wait_event.tasklet = NULL;
143 sc->wait_event.events = 0;
Christopher Faulet2f35e7b2022-03-31 11:09:28 +0200144
Christopher Faulet9ed77422022-04-12 08:51:15 +0200145 /* If there is no endpoint, allocate a new one now */
Willy Tarreauea59b022022-05-17 17:53:22 +0200146 if (!sedesc) {
147 sedesc = sedesc_new();
148 if (unlikely(!sedesc))
Christopher Fauletb669d682022-03-22 18:37:19 +0100149 goto alloc_error;
150 }
Willy Tarreau0adb2812022-05-27 10:02:48 +0200151 sc->sedesc = sedesc;
152 sedesc->sc = sc;
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100153
Willy Tarreau0adb2812022-05-27 10:02:48 +0200154 return sc;
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100155
156 alloc_error:
Willy Tarreau0adb2812022-05-27 10:02:48 +0200157 pool_free(pool_head_connstream, sc);
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100158 return NULL;
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100159}
160
Willy Tarreau31219282022-05-27 16:21:33 +0200161/* Creates a new stream connector and its associated stream from a mux. <sd> must
162 * be defined. It returns NULL on error. On success, the new stream connector is
Willy Tarreaub605c422022-05-17 17:04:55 +0200163 * returned. In this case, SE_FL_ORPHAN flag is removed.
Christopher Faulet9ed77422022-04-12 08:51:15 +0200164 */
Willy Tarreau31219282022-05-27 16:21:33 +0200165struct stconn *sc_new_from_endp(struct sedesc *sd, struct session *sess, struct buffer *input)
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100166{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200167 struct stconn *sc;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100168
Willy Tarreau31219282022-05-27 16:21:33 +0200169 sc = sc_new(sd);
Willy Tarreau0adb2812022-05-27 10:02:48 +0200170 if (unlikely(!sc))
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100171 return NULL;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200172 if (unlikely(!stream_new(sess, sc, input))) {
173 pool_free(pool_head_connstream, sc);
Christopher Faulet3ab72c62022-09-27 09:18:20 +0200174 sd->sc = NULL;
175 se_fl_set(sd, SE_FL_ORPHAN);
176 return NULL;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100177 }
Willy Tarreau31219282022-05-27 16:21:33 +0200178 se_fl_clr(sd, SE_FL_ORPHAN);
Willy Tarreau0adb2812022-05-27 10:02:48 +0200179 return sc;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100180}
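/* Hedged sketch (illustration only): how a mux-side caller might create the
 * stream connector and its stream from an orphan endpoint descriptor. The names
 * <sd>, <sess> and <input> are assumptions for the example.
 *
 *     struct stconn *sc = sc_new_from_endp(sd, sess, input);
 *     if (!sc)
 *         return -1;   // on failure sd->sc is reset and SE_FL_ORPHAN is set again
 *     // on success SE_FL_ORPHAN has been cleared on <sd>
 */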
181
Willy Tarreau4596fe22022-05-17 19:07:51 +0200182/* Creates a new stream connector from a stream. There is no endpoint here, thus it
Willy Tarreaua0b58b52022-05-27 08:33:53 +0200183 * will be created by sc_new(). So the SE_FL_DETACHED flag is set. It returns
Willy Tarreau4596fe22022-05-17 19:07:51 +0200184 * NULL on error. On success, the new stream connector is returned.
Christopher Faulet9ed77422022-04-12 08:51:15 +0200185 */
Willy Tarreaua0b58b52022-05-27 08:33:53 +0200186struct stconn *sc_new_from_strm(struct stream *strm, unsigned int flags)
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100187{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200188 struct stconn *sc;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100189
Willy Tarreau0adb2812022-05-27 10:02:48 +0200190 sc = sc_new(NULL);
191 if (unlikely(!sc))
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100192 return NULL;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200193 sc->flags |= flags;
194 sc_ep_set(sc, SE_FL_DETACHED);
195 sc->app = &strm->obj_type;
196 sc->app_ops = &sc_app_embedded_ops;
197 return sc;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100198}
199
Willy Tarreau4596fe22022-05-17 19:07:51 +0200200/* Creates a new stream connector from a health-check. There is no endpoint here,
Willy Tarreaua0b58b52022-05-27 08:33:53 +0200201 * thus it will be created by sc_new(). So the SE_FL_DETACHED flag is set. It
Willy Tarreau4596fe22022-05-17 19:07:51 +0200202 * returns NULL on error. On success, the new stream connector is returned.
Christopher Faulet9ed77422022-04-12 08:51:15 +0200203 */
Willy Tarreaua0b58b52022-05-27 08:33:53 +0200204struct stconn *sc_new_from_check(struct check *check, unsigned int flags)
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100205{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200206 struct stconn *sc;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100207
Willy Tarreau0adb2812022-05-27 10:02:48 +0200208 sc = sc_new(NULL);
209 if (unlikely(!sc))
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100210 return NULL;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200211 sc->flags |= flags;
212 sc_ep_set(sc, SE_FL_DETACHED);
213 sc->app = &check->obj_type;
214 sc->app_ops = &sc_app_check_ops;
215 return sc;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100216}
217
Willy Tarreaua0b58b52022-05-27 08:33:53 +0200218/* Releases a stconn previously allocated by sc_new(), as well as its
Christopher Faulet9ed77422022-04-12 08:51:15 +0200219 * endpoint, if it exists. This function is called internally or on the error path.
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100220 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200221void sc_free(struct stconn *sc)
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100222{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200223 sockaddr_free(&sc->src);
224 sockaddr_free(&sc->dst);
225 if (sc->sedesc) {
226 BUG_ON(!sc_ep_test(sc, SE_FL_DETACHED));
227 sedesc_free(sc->sedesc);
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100228 }
Willy Tarreau0adb2812022-05-27 10:02:48 +0200229 if (sc->wait_event.tasklet)
230 tasklet_free(sc->wait_event.tasklet);
231 pool_free(pool_head_connstream, sc);
Christopher Faulet1329f2a2021-12-16 17:32:56 +0100232}
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100233
Willy Tarreau4596fe22022-05-17 19:07:51 +0200234/* Conditionally removes a stream connector if it is detached and if there is no app
Christopher Fauleteb50c012022-04-21 14:22:53 +0200235 * layer defined. Except on the error path, this function must be used. If the SC
Willy Tarreaue68bc612022-05-27 11:23:05 +0200236 * is released, the pointer to it is set to NULL.
Christopher Fauletaa69d8f2022-04-12 18:09:48 +0200237 */
Willy Tarreaue68bc612022-05-27 11:23:05 +0200238static void sc_free_cond(struct stconn **scp)
Christopher Fauletaa69d8f2022-04-12 18:09:48 +0200239{
Willy Tarreaue68bc612022-05-27 11:23:05 +0200240 struct stconn *sc = *scp;
Christopher Fauleteb50c012022-04-21 14:22:53 +0200241
Willy Tarreau0adb2812022-05-27 10:02:48 +0200242 if (!sc->app && (!sc->sedesc || sc_ep_test(sc, SE_FL_DETACHED))) {
243 sc_free(sc);
Willy Tarreaue68bc612022-05-27 11:23:05 +0200244 *scp = NULL;
Christopher Fauleteb50c012022-04-21 14:22:53 +0200245 }
Christopher Fauletaa69d8f2022-04-12 18:09:48 +0200246}
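/* Hedged note (illustration only): sc_free_cond() takes the address of the
 * pointer so the caller's variable is cleared when the SC is actually released,
 * as done by sc_detach_endp() and sc_detach_app() below:
 *
 *     sc_free_cond(&sc);   // frees the SC only if no app layer and no endpoint
 *     if (!sc)
 *         ...              // it was released, the pointer is now NULL
 */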
247
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100248
Willy Tarreau4596fe22022-05-17 19:07:51 +0200249/* Attaches a stconn to a mux endpoint and sets the endpoint ctx. Returns
Ilya Shipitsin3b64a282022-07-29 22:26:53 +0500250 * -1 on error and 0 on success. SE_FL_DETACHED flag is removed. This function is
Christopher Faulet9ed77422022-04-12 08:51:15 +0200251 * called from a mux when it is attached to a stream or a health-check.
252 */
Willy Tarreau31219282022-05-27 16:21:33 +0200253int sc_attach_mux(struct stconn *sc, void *sd, void *ctx)
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100254{
Christopher Faulet93882042022-01-19 14:56:50 +0100255 struct connection *conn = ctx;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200256 struct sedesc *sedesc = sc->sedesc;
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100257
Willy Tarreau31219282022-05-27 16:21:33 +0200258 sedesc->se = sd;
Willy Tarreau798465b2022-05-17 18:20:02 +0200259 sedesc->conn = ctx;
260 se_fl_set(sedesc, SE_FL_T_MUX);
261 se_fl_clr(sedesc, SE_FL_DETACHED);
Christopher Faulet93882042022-01-19 14:56:50 +0100262 if (!conn->ctx)
Willy Tarreau0adb2812022-05-27 10:02:48 +0200263 conn->ctx = sc;
264 if (sc_strm(sc)) {
265 if (!sc->wait_event.tasklet) {
266 sc->wait_event.tasklet = tasklet_new();
267 if (!sc->wait_event.tasklet)
Christopher Faulet2f35e7b2022-03-31 11:09:28 +0200268 return -1;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200269 sc->wait_event.tasklet->process = sc_conn_io_cb;
270 sc->wait_event.tasklet->context = sc;
271 sc->wait_event.events = 0;
Christopher Faulet2f35e7b2022-03-31 11:09:28 +0200272 }
273
Willy Tarreau0adb2812022-05-27 10:02:48 +0200274 sc->app_ops = &sc_app_conn_ops;
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100275 }
Willy Tarreau0adb2812022-05-27 10:02:48 +0200276 else if (sc_check(sc)) {
277 if (!sc->wait_event.tasklet) {
278 sc->wait_event.tasklet = tasklet_new();
279 if (!sc->wait_event.tasklet)
Christopher Fauletc95eaef2022-05-18 15:57:15 +0200280 return -1;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200281 sc->wait_event.tasklet->process = srv_chk_io_cb;
282 sc->wait_event.tasklet->context = sc;
283 sc->wait_event.events = 0;
Christopher Fauletc95eaef2022-05-18 15:57:15 +0200284 }
285
Willy Tarreau0adb2812022-05-27 10:02:48 +0200286 sc->app_ops = &sc_app_check_ops;
Christopher Fauletc95eaef2022-05-18 15:57:15 +0200287 }
Christopher Faulet070b91b2022-03-31 19:27:18 +0200288 return 0;
Christopher Faulet93882042022-01-19 14:56:50 +0100289}
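/* Hedged sketch (illustration only): a mux attaching one of its streams to a
 * stconn, roughly as done on stream or health-check setup. <my_sd> (the mux
 * stream) and <conn> are assumptions for the example.
 *
 *     if (sc_attach_mux(sc, my_sd, conn) < 0)
 *         return -1;   // tasklet allocation failed
 *     // SE_FL_T_MUX is now set and SE_FL_DETACHED cleared on the endpoint
 */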
290
Willy Tarreau4596fe22022-05-17 19:07:51 +0200291/* Attaches a stconn to an applet endpoint and sets the endpoint
Ilya Shipitsin3b64a282022-07-29 22:26:53 +0500292 * ctx. The SE_FL_DETACHED flag is
Christopher Faulet9ed77422022-04-12 08:51:15 +0200293 * removed. This function is called by a stream when a backend applet is
294 * registered.
295 */
Willy Tarreau31219282022-05-27 16:21:33 +0200296static void sc_attach_applet(struct stconn *sc, void *sd)
Christopher Faulet93882042022-01-19 14:56:50 +0100297{
Willy Tarreau31219282022-05-27 16:21:33 +0200298 sc->sedesc->se = sd;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200299 sc_ep_set(sc, SE_FL_T_APPLET);
300 sc_ep_clr(sc, SE_FL_DETACHED);
301 if (sc_strm(sc))
302 sc->app_ops = &sc_app_applet_ops;
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100303}
304
Willy Tarreau4596fe22022-05-17 19:07:51 +0200305/* Attaches a stconn to an app layer and sets the relevant
Willy Tarreaub605c422022-05-17 17:04:55 +0200306 * callbacks. Returns -1 on error and 0 on success. SE_FL_ORPHAN flag is
Christopher Faulet9ed77422022-04-12 08:51:15 +0200307 * removed. This function is called by a stream when it is created to attach it
Willy Tarreau4596fe22022-05-17 19:07:51 +0200308 * on the stream connector on the client side.
Christopher Faulet9ed77422022-04-12 08:51:15 +0200309 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200310int sc_attach_strm(struct stconn *sc, struct stream *strm)
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100311{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200312 sc->app = &strm->obj_type;
313 sc_ep_clr(sc, SE_FL_ORPHAN);
314 if (sc_ep_test(sc, SE_FL_T_MUX)) {
315 sc->wait_event.tasklet = tasklet_new();
316 if (!sc->wait_event.tasklet)
Christopher Faulet2f35e7b2022-03-31 11:09:28 +0200317 return -1;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200318 sc->wait_event.tasklet->process = sc_conn_io_cb;
319 sc->wait_event.tasklet->context = sc;
320 sc->wait_event.events = 0;
Christopher Faulet2f35e7b2022-03-31 11:09:28 +0200321
Willy Tarreau0adb2812022-05-27 10:02:48 +0200322 sc->app_ops = &sc_app_conn_ops;
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100323 }
Willy Tarreau0adb2812022-05-27 10:02:48 +0200324 else if (sc_ep_test(sc, SE_FL_T_APPLET)) {
325 sc->app_ops = &sc_app_applet_ops;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100326 }
327 else {
Willy Tarreau0adb2812022-05-27 10:02:48 +0200328 sc->app_ops = &sc_app_embedded_ops;
Christopher Fauleta9e8b392022-03-23 11:01:09 +0100329 }
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100330 return 0;
331}
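/* Hedged sketch (illustration only): attaching a freshly created stream to one
 * of its connectors, roughly as a stream constructor would do. <scf> is an
 * assumption for the example; error unrolling is omitted.
 *
 *     if (sc_attach_strm(scf, strm) < 0)
 *         goto fail;   // tasklet allocation failed for a mux-based endpoint
 *     // scf->app_ops now matches the endpoint type (mux, applet or embedded)
 */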
332
Willy Tarreau4596fe22022-05-17 19:07:51 +0200333/* Detaches the stconn from the endpoint, if any. For a connection, if a
Christopher Faulet9ed77422022-04-12 08:51:15 +0200334 * mux owns the connection, its ->detach() callback is called. Otherwise, it means
Willy Tarreau4596fe22022-05-17 19:07:51 +0200335 * the stream connector owns the connection. In this case the connection is closed
Christopher Faulet9ed77422022-04-12 08:51:15 +0200336 * and released. For an applet, the appctx is released. If still allocated, the
337 * endpoint is reset and flagged as detached. If the app layer is also detached,
Willy Tarreau4596fe22022-05-17 19:07:51 +0200338 * the stream connector is released.
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100339 */
Willy Tarreaue68bc612022-05-27 11:23:05 +0200340static void sc_detach_endp(struct stconn **scp)
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100341{
Willy Tarreaue68bc612022-05-27 11:23:05 +0200342 struct stconn *sc = *scp;
Christopher Fauleteb50c012022-04-21 14:22:53 +0200343
Willy Tarreau0adb2812022-05-27 10:02:48 +0200344 if (!sc)
Christopher Fauleteb50c012022-04-21 14:22:53 +0200345 return;
346
Willy Tarreau0adb2812022-05-27 10:02:48 +0200347 if (sc_ep_test(sc, SE_FL_T_MUX)) {
348 struct connection *conn = __sc_conn(sc);
349 struct sedesc *sedesc = sc->sedesc;
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100350
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100351 if (conn->mux) {
Willy Tarreau0adb2812022-05-27 10:02:48 +0200352 if (sc->wait_event.events != 0)
353 conn->mux->unsubscribe(sc, sc->wait_event.events, &sc->wait_event);
Willy Tarreau798465b2022-05-17 18:20:02 +0200354 se_fl_set(sedesc, SE_FL_ORPHAN);
Willy Tarreauc1054922022-05-18 07:43:52 +0200355 sedesc->sc = NULL;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200356 sc->sedesc = NULL;
Willy Tarreau798465b2022-05-17 18:20:02 +0200357 conn->mux->detach(sedesc);
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100358 }
359 else {
360 /* It's too early to have a mux, let's just destroy
361 * the connection
362 */
363 conn_stop_tracking(conn);
364 conn_full_close(conn);
365 if (conn->destroy_cb)
366 conn->destroy_cb(conn);
367 conn_free(conn);
368 }
369 }
Willy Tarreau0adb2812022-05-27 10:02:48 +0200370 else if (sc_ep_test(sc, SE_FL_T_APPLET)) {
371 struct appctx *appctx = __sc_appctx(sc);
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100372
Willy Tarreau0adb2812022-05-27 10:02:48 +0200373 sc_ep_set(sc, SE_FL_ORPHAN);
374 sc->sedesc->sc = NULL;
375 sc->sedesc = NULL;
Willy Tarreau1c3ead42022-05-10 19:42:22 +0200376 appctx_shut(appctx);
377 appctx_free(appctx);
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100378 }
379
Willy Tarreau0adb2812022-05-27 10:02:48 +0200380 if (sc->sedesc) {
Willy Tarreauda59c892022-05-27 17:03:34 +0200381 /* the SD wasn't used and can be recycled */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200382 sc->sedesc->se = NULL;
383 sc->sedesc->conn = NULL;
Willy Tarreauda59c892022-05-27 17:03:34 +0200384 sc->sedesc->flags = 0;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200385 sc_ep_set(sc, SE_FL_DETACHED);
Christopher Fauletdb90f2a2022-03-22 16:06:25 +0100386 }
387
Willy Tarreaue68bc612022-05-27 11:23:05 +0200388 /* FIXME: Reset the SC for now but this must be reviewed. SC flags are only
Christopher Fauletc36de9d2022-01-06 08:44:58 +0100389 * connection related for now but this will evolve
390 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200391 sc->flags &= SC_FL_ISBACK;
392 if (sc_strm(sc))
393 sc->app_ops = &sc_app_embedded_ops;
Willy Tarreau2f2318d2022-05-18 10:17:16 +0200394 else
Willy Tarreau0adb2812022-05-27 10:02:48 +0200395 sc->app_ops = NULL;
Willy Tarreaue68bc612022-05-27 11:23:05 +0200396 sc_free_cond(scp);
Christopher Fauletc36de9d2022-01-06 08:44:58 +0100397}
398
Willy Tarreau4596fe22022-05-17 19:07:51 +0200399/* Detaches the stconn from the app layer. If there is no endpoint attached
400 * to the stconn, it is also released.
Christopher Faulet9ed77422022-04-12 08:51:15 +0200401 */
Willy Tarreaue68bc612022-05-27 11:23:05 +0200402static void sc_detach_app(struct stconn **scp)
Christopher Fauletc36de9d2022-01-06 08:44:58 +0100403{
Willy Tarreaue68bc612022-05-27 11:23:05 +0200404 struct stconn *sc = *scp;
Christopher Fauleteb50c012022-04-21 14:22:53 +0200405
Willy Tarreau0adb2812022-05-27 10:02:48 +0200406 if (!sc)
Christopher Fauleteb50c012022-04-21 14:22:53 +0200407 return;
408
Willy Tarreau0adb2812022-05-27 10:02:48 +0200409 sc->app = NULL;
410 sc->app_ops = NULL;
411 sockaddr_free(&sc->src);
412 sockaddr_free(&sc->dst);
Christopher Faulet2f35e7b2022-03-31 11:09:28 +0200413
Willy Tarreau0adb2812022-05-27 10:02:48 +0200414 if (sc->wait_event.tasklet)
415 tasklet_free(sc->wait_event.tasklet);
416 sc->wait_event.tasklet = NULL;
417 sc->wait_event.events = 0;
Willy Tarreaue68bc612022-05-27 11:23:05 +0200418 sc_free_cond(scp);
Christopher Fauleteb50c012022-04-21 14:22:53 +0200419}
420
Willy Tarreau4596fe22022-05-17 19:07:51 +0200421/* Destroys the stconn. It is detached from its endpoint and its
422 * application. After this call, the stconn must be considered as released.
Christopher Fauleteb50c012022-04-21 14:22:53 +0200423 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200424void sc_destroy(struct stconn *sc)
Christopher Fauleteb50c012022-04-21 14:22:53 +0200425{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200426 sc_detach_endp(&sc);
427 sc_detach_app(&sc);
428 BUG_ON_HOT(sc);
Christopher Fauletcda94ac2021-12-23 17:28:17 +0100429}
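/* Hedged sketch (illustration only): releasing a stream's connectors at the end
 * of its life, assuming <scf> and <scb> are its front and back connectors.
 *
 *     sc_destroy(scf);   // detaches the endpoint and the app layer, then frees
 *     sc_destroy(scb);   // after this the pointers must not be dereferenced
 */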
Christopher Faulet9ec2f4d2022-03-23 15:15:29 +0100430
Willy Tarreau4596fe22022-05-17 19:07:51 +0200431/* Resets the stream connector endpoint. It happens when the app layer wants to
Christopher Faulet9ed77422022-04-12 08:51:15 +0200432 * renew its endpoint, for a connection retry for instance. If a mux or an applet is
Ilya Shipitsin3b64a282022-07-29 22:26:53 +0500433 * attached, a new endpoint is created. Returns -1 on error and 0 on success.
Christopher Fauleta6c4a482022-04-28 18:25:24 +0200434 *
Willy Tarreaub605c422022-05-17 17:04:55 +0200435 * Only the SE_FL_ERROR flag is removed on the endpoint. Other flags are preserved.
Christopher Fauleta6c4a482022-04-28 18:25:24 +0200436 * It is the caller's responsibility to remove other flags if needed.
Christopher Faulet9ed77422022-04-12 08:51:15 +0200437 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200438int sc_reset_endp(struct stconn *sc)
Christopher Faulet9ec2f4d2022-03-23 15:15:29 +0100439{
Willy Tarreau31219282022-05-27 16:21:33 +0200440 struct sedesc *new_sd;
Christopher Fauletb041b232022-03-24 10:27:02 +0100441
Willy Tarreau0adb2812022-05-27 10:02:48 +0200442 BUG_ON(!sc->app);
Christopher Fauleta6c4a482022-04-28 18:25:24 +0200443
Willy Tarreau0adb2812022-05-27 10:02:48 +0200444 sc_ep_clr(sc, SE_FL_ERROR);
445 if (!__sc_endp(sc)) {
Christopher Fauletb041b232022-03-24 10:27:02 +0100446 /* endpoint not attached or attached to a mux with no
447 * target. Thus the endpoint will not be released but just
Willy Tarreau0adb2812022-05-27 10:02:48 +0200448 * reset. The app is still attached, the sc will not be
Christopher Fauleteb50c012022-04-21 14:22:53 +0200449 * released.
Christopher Fauletb041b232022-03-24 10:27:02 +0100450 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200451 sc_detach_endp(&sc);
Christopher Fauletb041b232022-03-24 10:27:02 +0100452 return 0;
Christopher Faulet9ec2f4d2022-03-23 15:15:29 +0100453 }
Christopher Fauletb041b232022-03-24 10:27:02 +0100454
455 /* allocate the new endpoint first to be able to set error if it
456 * fails */
Willy Tarreau31219282022-05-27 16:21:33 +0200457 new_sd = sedesc_new();
458 if (unlikely(!new_sd)) {
Willy Tarreau0adb2812022-05-27 10:02:48 +0200459 sc_ep_set(sc, SE_FL_ERROR);
Christopher Fauletb041b232022-03-24 10:27:02 +0100460 return -1;
461 }
462
Willy Tarreau0adb2812022-05-27 10:02:48 +0200463 /* The app is still attached, the sc will not be released */
464 sc_detach_endp(&sc);
Willy Tarreau6a378d12022-08-11 13:56:42 +0200465 BUG_ON(!sc);
Willy Tarreau0adb2812022-05-27 10:02:48 +0200466 BUG_ON(sc->sedesc);
Willy Tarreau31219282022-05-27 16:21:33 +0200467 sc->sedesc = new_sd;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200468 sc->sedesc->sc = sc;
469 sc_ep_set(sc, SE_FL_DETACHED);
Christopher Faulet9ec2f4d2022-03-23 15:15:29 +0100470 return 0;
471}
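/* Hedged sketch (illustration only): renewing the endpoint before a connection
 * retry, as described above. Only SE_FL_ERROR is cleared by the call itself.
 *
 *     if (sc_reset_endp(sc) < 0)
 *         return -1;                      // allocation failed, SE_FL_ERROR is set
 *     sc_ep_clr(sc, SE_FL_ERR_PENDING);   // example: the caller clears extra flags
 */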
Christopher Faulet37046632022-04-01 11:36:58 +0200472
473
Willy Tarreaue68bc612022-05-27 11:23:05 +0200474/* Creates an applet to handle a stream connector as a new appctx. The SC will
Christopher Faulet37046632022-04-01 11:36:58 +0200475 * wake it up every time it is solicited. The appctx must be deleted by the task
Willy Tarreau19c65a92022-05-27 08:49:24 +0200476 * handler using sc_detach_endp(), possibly from within the function itself.
Christopher Faulet37046632022-04-01 11:36:58 +0200477 * It also pre-initializes the applet's context and returns it (or NULL in case
478 * it could not be allocated).
479 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200480struct appctx *sc_applet_create(struct stconn *sc, struct applet *app)
Christopher Faulet37046632022-04-01 11:36:58 +0200481{
482 struct appctx *appctx;
483
Willy Tarreau0adb2812022-05-27 10:02:48 +0200484 DPRINTF(stderr, "registering handler %p for sc %p (was %p)\n", app, sc, sc_strm_task(sc));
Christopher Faulet37046632022-04-01 11:36:58 +0200485
Willy Tarreau0adb2812022-05-27 10:02:48 +0200486 appctx = appctx_new_here(app, sc->sedesc);
Christopher Faulet37046632022-04-01 11:36:58 +0200487 if (!appctx)
488 return NULL;
Willy Tarreau0adb2812022-05-27 10:02:48 +0200489 sc_attach_applet(sc, appctx);
490 appctx->t->nice = __sc_strm(sc)->task->nice;
Willy Tarreau90e8b452022-05-25 18:21:43 +0200491 applet_need_more_data(appctx);
Christopher Faulet37046632022-04-01 11:36:58 +0200492 appctx_wakeup(appctx);
Christopher Fauleta33ff7a2022-04-21 11:52:07 +0200493
Willy Tarreau0adb2812022-05-27 10:02:48 +0200494 sc->state = SC_ST_RDY;
Christopher Faulet37046632022-04-01 11:36:58 +0200495 return appctx;
496}
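/* Hedged sketch (illustration only): registering a backend applet on an SC,
 * assuming <my_applet> points to a properly initialized struct applet.
 *
 *     struct appctx *appctx = sc_applet_create(sc, my_applet);
 *     if (!appctx)
 *         return -1;   // allocation failure, nothing was attached
 *     // the appctx has been woken up and the SC switched to SC_ST_RDY
 */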
497
Christopher Fauleteb3f26d2023-02-08 16:18:48 +0100498/* Conditionally forwards the close to the write side. It returns 1 if it can be
499 * forwarded. It is the caller's responsibility to forward the close to the write
500 * side. Otherwise, 0 is returned. In this case, the CF_SHUTW_NOW flag may be set on
501 * the channel if we are only waiting for the outgoing data to be flushed.
502 */
503static inline int sc_cond_forward_shutw(struct stconn *sc)
504{
505 /* The close must not be forwarded */
506 if (!(sc_ic(sc)->flags & CF_SHUTR) || !(sc->flags & SC_FL_NOHALF))
507 return 0;
508
509 if (!channel_is_empty(sc_ic(sc))) {
510 /* the close to the write side cannot be forwarded now because
511 * we should flush outgoing data first. But instruct the output
512 * channel it should be done ASAP.
513 */
514 channel_shutw_now(sc_oc(sc));
515 return 0;
516 }
517
518 /* the close can be immediately forwarded to the write side */
519 return 1;
520}
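/* Hedged sketch (illustration only): how the sc_app_shutr*() functions below use
 * sc_cond_forward_shutw().
 *
 *     if (sc_cond_forward_shutw(sc))
 *         sc_app_shutw(sc);   // SC_FL_NOHALF set and no more data left to flush
 *     // otherwise either the close must not be forwarded, or CF_SHUTW_NOW was
 *     // set on the output channel to request it once pending data are flushed
 */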
521
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200522/*
Willy Tarreau4596fe22022-05-17 19:07:51 +0200523 * This function performs a shutdown-read on a detached stream connector in a
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200524 * connected or init state (it does nothing for other states). It either shuts
525 * the read side or marks itself as closed. The buffer flags are updated to
Willy Tarreaucb041662022-05-17 19:44:42 +0200526 * reflect the new state. If the stream connector has SC_FL_NOHALF, we also
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200527 * forward the close to the write side. The owner task is woken up if it exists.
528 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200529static void sc_app_shutr(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200530{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200531 struct channel *ic = sc_ic(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200532
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200533 if (ic->flags & CF_SHUTR)
534 return;
535 ic->flags |= CF_SHUTR;
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100536 sc_ep_reset_rex(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200537
Willy Tarreau0adb2812022-05-27 10:02:48 +0200538 if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200539 return;
540
Willy Tarreau0adb2812022-05-27 10:02:48 +0200541 if (sc_oc(sc)->flags & CF_SHUTW) {
542 sc->state = SC_ST_DIS;
Christopher Fauletca679922022-07-20 13:24:04 +0200543 if (sc->flags & SC_FL_ISBACK)
544 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200545 }
Christopher Fauleteb3f26d2023-02-08 16:18:48 +0100546 else if (sc_cond_forward_shutw(sc))
Willy Tarreau0adb2812022-05-27 10:02:48 +0200547 return sc_app_shutw(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200548
549 /* note that if the task exists, it must unregister itself once it runs */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200550 if (!(sc->flags & SC_FL_DONT_WAKE))
551 task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200552}
553
554/*
Willy Tarreau4596fe22022-05-17 19:07:51 +0200555 * This function performs a shutdown-write on a detached stream connector in a
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200556 * connected or init state (it does nothing for other states). It either shuts
557 * the write side or marks itself as closed. The buffer flags are updated to
Willy Tarreaue68bc612022-05-27 11:23:05 +0200558 * reflect the new state. It also closes everything if the SC was marked as
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200559 * being in error state. The owner task is woken up if it exists.
560 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200561static void sc_app_shutw(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200562{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200563 struct channel *ic = sc_ic(sc);
564 struct channel *oc = sc_oc(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200565
566 oc->flags &= ~CF_SHUTW_NOW;
567 if (oc->flags & CF_SHUTW)
568 return;
569 oc->flags |= CF_SHUTW;
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100570 sc_ep_reset_wex(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200571
Willy Tarreau0adb2812022-05-27 10:02:48 +0200572 if (tick_isset(sc->hcto)) {
Christopher Fauleted7e66f2023-02-07 11:09:15 +0100573 sc->rto = sc->hcto;
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100574 sc_ep_set_rex(sc, sc->rto);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200575 }
576
Willy Tarreau0adb2812022-05-27 10:02:48 +0200577 switch (sc->state) {
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200578 case SC_ST_RDY:
579 case SC_ST_EST:
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200580 /* we have to shut before closing, otherwise some short messages
581 * may never leave the system, especially when there are remaining
582 * unread data in the socket input buffer, or when nolinger is set.
Willy Tarreaucb041662022-05-17 19:44:42 +0200583 * However, if SC_FL_NOLINGER is explicitly set, we know there is
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200584 * no risk so we close both sides immediately.
585 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200586 if (!sc_ep_test(sc, SE_FL_ERROR) && !(sc->flags & SC_FL_NOLINGER) &&
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200587 !(ic->flags & (CF_SHUTR|CF_DONT_READ)))
588 return;
589
Willy Tarreau476c2802022-11-14 07:36:42 +0100590 __fallthrough;
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200591 case SC_ST_CON:
592 case SC_ST_CER:
593 case SC_ST_QUE:
594 case SC_ST_TAR:
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200595 /* Note that none of these states may happen with applets */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200596 sc->state = SC_ST_DIS;
Willy Tarreau476c2802022-11-14 07:36:42 +0100597 __fallthrough;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200598 default:
Willy Tarreau0adb2812022-05-27 10:02:48 +0200599 sc->flags &= ~SC_FL_NOLINGER;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200600 ic->flags |= CF_SHUTR;
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100601 sc_ep_reset_rex(sc);
Christopher Fauletca679922022-07-20 13:24:04 +0200602 if (sc->flags & SC_FL_ISBACK)
603 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200604 }
605
606 /* note that if the task exists, it must unregister itself once it runs */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200607 if (!(sc->flags & SC_FL_DONT_WAKE))
608 task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200609}
610
611/* default chk_rcv function for scheduled tasks */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200612static void sc_app_chk_rcv(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200613{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200614 struct channel *ic = sc_ic(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200615
Willy Tarreau0adb2812022-05-27 10:02:48 +0200616 DPRINTF(stderr, "%s: sc=%p, sc->state=%d ic->flags=%08x oc->flags=%08x\n",
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200617 __FUNCTION__,
Willy Tarreau0adb2812022-05-27 10:02:48 +0200618 sc, sc->state, ic->flags, sc_oc(sc)->flags);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200619
620 if (ic->pipe) {
621 /* stop reading */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200622 sc_need_room(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200623 }
624 else {
625 /* (re)start reading */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200626 if (!(sc->flags & SC_FL_DONT_WAKE))
627 task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200628 }
629}
630
631/* default chk_snd function for scheduled tasks */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200632static void sc_app_chk_snd(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200633{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200634 struct channel *oc = sc_oc(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200635
Willy Tarreau0adb2812022-05-27 10:02:48 +0200636 DPRINTF(stderr, "%s: sc=%p, sc->state=%d ic->flags=%08x oc->flags=%08x\n",
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200637 __FUNCTION__,
Willy Tarreau0adb2812022-05-27 10:02:48 +0200638 sc, sc->state, sc_ic(sc)->flags, oc->flags);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200639
Willy Tarreau0adb2812022-05-27 10:02:48 +0200640 if (unlikely(sc->state != SC_ST_EST || (oc->flags & CF_SHUTW)))
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200641 return;
642
Willy Tarreau0adb2812022-05-27 10:02:48 +0200643 if (!sc_ep_test(sc, SE_FL_WAIT_DATA) || /* not waiting for data */
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200644 channel_is_empty(oc)) /* called with nothing to send ! */
645 return;
646
647 /* Otherwise there are remaining data to be sent in the buffer,
648 * so we tell the handler.
649 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200650 sc_ep_clr(sc, SE_FL_WAIT_DATA);
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100651 if (!tick_isset(sc_ep_wex(sc)))
652 sc_ep_set_wex(sc, sc->wto);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200653
Willy Tarreau0adb2812022-05-27 10:02:48 +0200654 if (!(sc->flags & SC_FL_DONT_WAKE))
655 task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200656}
657
658/*
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200659 * This function performs a shutdown-read on a stream connector attached to
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200660 * a connection in a connected or init state (it does nothing for other
661 * states). It either shuts the read side or marks itself as closed. The buffer
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200662 * flags are updated to reflect the new state. If the stream connector has
Willy Tarreaucb041662022-05-17 19:44:42 +0200663 * SC_FL_NOHALF, we also forward the close to the write side. If a control
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200664 * layer is defined, then it is supposed to be a socket layer and file
665 * descriptors are then shutdown or closed accordingly. The function
666 * automatically disables polling if needed.
667 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200668static void sc_app_shutr_conn(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200669{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200670 struct channel *ic = sc_ic(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200671
Willy Tarreau0adb2812022-05-27 10:02:48 +0200672 BUG_ON(!sc_conn(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200673
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200674 if (ic->flags & CF_SHUTR)
675 return;
676 ic->flags |= CF_SHUTR;
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100677 sc_ep_reset_rex(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200678
Willy Tarreau0adb2812022-05-27 10:02:48 +0200679 if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200680 return;
681
Willy Tarreau0adb2812022-05-27 10:02:48 +0200682 if (sc_oc(sc)->flags & CF_SHUTW) {
683 sc_conn_shut(sc);
684 sc->state = SC_ST_DIS;
Christopher Fauletca679922022-07-20 13:24:04 +0200685 if (sc->flags & SC_FL_ISBACK)
686 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200687 }
Christopher Fauleteb3f26d2023-02-08 16:18:48 +0100688 else if (sc_cond_forward_shutw(sc))
Willy Tarreau0adb2812022-05-27 10:02:48 +0200689 return sc_app_shutw_conn(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200690}
691
692/*
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200693 * This function performs a shutdown-write on a stream connector attached to
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200694 * a connection in a connected or init state (it does nothing for other
695 * states). It either shuts the write side or marks itself as closed. The
696 * buffer flags are updated to reflect the new state. It also closes
Willy Tarreaue68bc612022-05-27 11:23:05 +0200697 * everything if the SC was marked as being in error state. If there is a
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200698 * data-layer shutdown, it is called.
699 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200700static void sc_app_shutw_conn(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200701{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200702 struct channel *ic = sc_ic(sc);
703 struct channel *oc = sc_oc(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200704
Willy Tarreau0adb2812022-05-27 10:02:48 +0200705 BUG_ON(!sc_conn(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200706
707 oc->flags &= ~CF_SHUTW_NOW;
708 if (oc->flags & CF_SHUTW)
709 return;
710 oc->flags |= CF_SHUTW;
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100711 sc_ep_reset_wex(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200712
Willy Tarreau0adb2812022-05-27 10:02:48 +0200713 if (tick_isset(sc->hcto)) {
Christopher Fauleted7e66f2023-02-07 11:09:15 +0100714 sc->rto = sc->hcto;
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100715 sc_ep_set_rex(sc, sc->rto);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200716 }
717
Willy Tarreau0adb2812022-05-27 10:02:48 +0200718 switch (sc->state) {
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200719 case SC_ST_RDY:
720 case SC_ST_EST:
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200721 /* we have to shut before closing, otherwise some short messages
722 * may never leave the system, especially when there are remaining
723 * unread data in the socket input buffer, or when nolinger is set.
Willy Tarreaucb041662022-05-17 19:44:42 +0200724 * However, if SC_FL_NOLINGER is explicitly set, we know there is
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200725 * no risk so we close both sides immediately.
726 */
727
Willy Tarreau0adb2812022-05-27 10:02:48 +0200728 if (sc_ep_test(sc, SE_FL_ERROR)) {
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200729 /* quick close, the socket is already shut anyway */
730 }
Willy Tarreau0adb2812022-05-27 10:02:48 +0200731 else if (sc->flags & SC_FL_NOLINGER) {
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200732 /* unclean data-layer shutdown, typically an aborted request
733 * or a forwarded shutdown from a client to a server due to
734 * option abortonclose. No need for the TLS layer to try to
735 * emit a shutdown message.
736 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200737 sc_conn_shutw(sc, CO_SHW_SILENT);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200738 }
739 else {
740 /* clean data-layer shutdown. This only happens on the
741 * frontend side, or on the backend side when forwarding
742 * a client close in TCP mode or in HTTP TUNNEL mode
743 * while option abortonclose is set. We want the TLS
744 * layer to try to signal it to the peer before we close.
745 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200746 sc_conn_shutw(sc, CO_SHW_NORMAL);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200747
748 if (!(ic->flags & (CF_SHUTR|CF_DONT_READ)))
749 return;
750 }
751
Willy Tarreau476c2802022-11-14 07:36:42 +0100752 __fallthrough;
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200753 case SC_ST_CON:
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200754 /* we may have to close a pending connection, and mark the
755 * response buffer as shutr
756 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200757 sc_conn_shut(sc);
Willy Tarreau476c2802022-11-14 07:36:42 +0100758 __fallthrough;
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200759 case SC_ST_CER:
760 case SC_ST_QUE:
761 case SC_ST_TAR:
Willy Tarreau0adb2812022-05-27 10:02:48 +0200762 sc->state = SC_ST_DIS;
Willy Tarreau476c2802022-11-14 07:36:42 +0100763 __fallthrough;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200764 default:
Willy Tarreau0adb2812022-05-27 10:02:48 +0200765 sc->flags &= ~SC_FL_NOLINGER;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200766 ic->flags |= CF_SHUTR;
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100767 sc_ep_reset_rex(sc);
Christopher Fauletca679922022-07-20 13:24:04 +0200768 if (sc->flags & SC_FL_ISBACK)
769 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200770 }
771}
772
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200773/* This function is used for inter-stream connector calls. It is called by the
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200774 * consumer to inform the producer side that it may be interested in checking
775 * for free space in the buffer. Note that it intentionally does not update
776 * timeouts, so that we can still check them later at wake-up. This function is
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200777 * dedicated to connection-based stream connectors.
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200778 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200779static void sc_app_chk_rcv_conn(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200780{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200781 BUG_ON(!sc_conn(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200782
783 /* (re)start reading */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200784 if (sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
785 tasklet_wakeup(sc->wait_event.tasklet);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200786}
787
788
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200789/* This function is used for inter-stream connector calls. It is called by the
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200790 * producer to inform the consumer side that it may be interested in checking
791 * for data in the buffer. Note that it intentionally does not update timeouts,
792 * so that we can still check them later at wake-up.
793 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200794static void sc_app_chk_snd_conn(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200795{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200796 struct channel *oc = sc_oc(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200797
Willy Tarreau0adb2812022-05-27 10:02:48 +0200798 BUG_ON(!sc_conn(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200799
Willy Tarreau0adb2812022-05-27 10:02:48 +0200800 if (unlikely(!sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST) ||
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200801 (oc->flags & CF_SHUTW)))
802 return;
803
804 if (unlikely(channel_is_empty(oc))) /* called with nothing to send ! */
805 return;
806
807 if (!oc->pipe && /* spliced data wants to be forwarded ASAP */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200808 !sc_ep_test(sc, SE_FL_WAIT_DATA)) /* not waiting for data */
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200809 return;
810
Willy Tarreau0adb2812022-05-27 10:02:48 +0200811 if (!(sc->wait_event.events & SUB_RETRY_SEND) && !channel_is_empty(sc_oc(sc)))
812 sc_conn_send(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200813
Willy Tarreau0adb2812022-05-27 10:02:48 +0200814 if (sc_ep_test(sc, SE_FL_ERROR | SE_FL_ERR_PENDING) || sc_is_conn_error(sc)) {
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200815 /* Write error on the file descriptor */
Christopher Faulet7f6aa562022-10-17 10:21:19 +0200816 if (sc->state >= SC_ST_CON && sc_ep_test(sc, SE_FL_EOS))
Willy Tarreau0adb2812022-05-27 10:02:48 +0200817 sc_ep_set(sc, SE_FL_ERROR);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200818 goto out_wakeup;
819 }
820
821 /* OK, so now we know that some data might have been sent, and that we may
822 * have to poll first. We have to do that too if the buffer is not empty.
823 */
824 if (channel_is_empty(oc)) {
825 /* the connection is established but we can't write. Either the
826 * buffer is empty, or we just refrain from sending because the
827 * ->o limit was reached. Maybe we just wrote the last
828 * chunk and need to close.
829 */
830 if (((oc->flags & (CF_SHUTW|CF_AUTO_CLOSE|CF_SHUTW_NOW)) ==
831 (CF_AUTO_CLOSE|CF_SHUTW_NOW)) &&
Willy Tarreau0adb2812022-05-27 10:02:48 +0200832 sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST)) {
833 sc_shutw(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200834 goto out_wakeup;
835 }
836
837 if ((oc->flags & (CF_SHUTW|CF_SHUTW_NOW)) == 0)
Willy Tarreau0adb2812022-05-27 10:02:48 +0200838 sc_ep_set(sc, SE_FL_WAIT_DATA);
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100839 sc_ep_reset_wex(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200840 }
841 else {
842 /* Otherwise there are remaining data to be sent in the buffer,
843 * which means we have to poll before doing so.
844 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200845 sc_ep_clr(sc, SE_FL_WAIT_DATA);
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100846 if (!tick_isset(sc_ep_wex(sc)))
847 sc_ep_set_wex(sc, sc->wto);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200848 }
849
Christopher Faulet2e56a732023-01-26 16:18:09 +0100850 if (likely(oc->flags & CF_WRITE_EVENT)) {
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200851 /* update timeout if we have written something */
Christopher Faulet2e56a732023-01-26 16:18:09 +0100852 if (!(oc->flags & CF_SHUTW) && !channel_is_empty(oc))
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100853 sc_ep_set_wex(sc, sc->wto);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200854
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100855 if (tick_isset(sc_ep_rex(sc)) && !(sc->flags & SC_FL_INDEP_STR)) {
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200856 /* Note: to prevent the client from expiring read timeouts
857 * during writes, we refresh it. We only do this if the
858 * interface is not configured for "independent streams",
859 * because for some applications it's better not to do this,
860 * for instance when continuously exchanging small amounts
861 * of data which can fill the socket buffers long before a
862 * write timeout is detected.
863 */
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100864 sc_ep_set_rex(sc, sc->rto);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200865 }
866 }
867
868 /* in case of special condition (error, shutdown, end of write...), we
869 * have to notify the task.
870 */
Christopher Faulet71c486b2023-02-09 14:14:38 +0100871 if (likely((oc->flags & CF_SHUTW) ||
872 ((oc->flags & CF_WRITE_EVENT) && sc->state < SC_ST_EST) ||
873 ((oc->flags & CF_WAKE_WRITE) &&
874 ((channel_is_empty(oc) && !oc->to_forward) ||
875 !sc_state_in(sc->state, SC_SB_EST))))) {
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200876 out_wakeup:
Willy Tarreau0adb2812022-05-27 10:02:48 +0200877 if (!(sc->flags & SC_FL_DONT_WAKE))
878 task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200879 }
880}
881
882/*
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200883 * This function performs a shutdown-read on a stream connector attached to an
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200884 * applet in a connected or init state (it does nothing for other states). It
885 * either shuts the read side or marks itself as closed. The buffer flags are
Willy Tarreaucb041662022-05-17 19:44:42 +0200886 * updated to reflect the new state. If the stream connector has SC_FL_NOHALF,
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200887 * we also forward the close to the write side. The owner task is woken up if
888 * it exists.
889 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200890static void sc_app_shutr_applet(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200891{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200892 struct channel *ic = sc_ic(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200893
Willy Tarreau0adb2812022-05-27 10:02:48 +0200894 BUG_ON(!sc_appctx(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200895
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200896 if (ic->flags & CF_SHUTR)
897 return;
898 ic->flags |= CF_SHUTR;
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100899 sc_ep_reset_rex(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200900
901 /* Note: on shutr, we don't call the applet */
902
Willy Tarreau0adb2812022-05-27 10:02:48 +0200903 if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200904 return;
905
Willy Tarreau0adb2812022-05-27 10:02:48 +0200906 if (sc_oc(sc)->flags & CF_SHUTW) {
907 appctx_shut(__sc_appctx(sc));
908 sc->state = SC_ST_DIS;
Christopher Fauletca679922022-07-20 13:24:04 +0200909 if (sc->flags & SC_FL_ISBACK)
910 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200911 }
Christopher Fauleteb3f26d2023-02-08 16:18:48 +0100912 else if (sc_cond_forward_shutw(sc))
Willy Tarreau0adb2812022-05-27 10:02:48 +0200913 return sc_app_shutw_applet(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200914}
915
916/*
Willy Tarreau3a3f4802022-05-17 18:28:19 +0200917 * This function performs a shutdown-write on a stream connector attached to an
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200918 * applet in a connected or init state (it does nothing for other states). It
919 * either shuts the write side or marks itself as closed. The buffer flags are
920 * updated to reflect the new state. It also closes everything if the SC
921 * was marked as being in error state. The owner task is woken up if it exists.
922 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200923static void sc_app_shutw_applet(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200924{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200925 struct channel *ic = sc_ic(sc);
926 struct channel *oc = sc_oc(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200927
Willy Tarreau0adb2812022-05-27 10:02:48 +0200928 BUG_ON(!sc_appctx(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200929
930 oc->flags &= ~CF_SHUTW_NOW;
931 if (oc->flags & CF_SHUTW)
932 return;
933 oc->flags |= CF_SHUTW;
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100934 sc_ep_reset_wex(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200935
Willy Tarreau0adb2812022-05-27 10:02:48 +0200936 if (tick_isset(sc->hcto)) {
Christopher Fauleted7e66f2023-02-07 11:09:15 +0100937 sc->rto = sc->hcto;
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100938 sc_ep_set_rex(sc, sc->rto);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200939 }
940
941 /* on shutw we always wake the applet up */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200942 appctx_wakeup(__sc_appctx(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200943
Willy Tarreau0adb2812022-05-27 10:02:48 +0200944 switch (sc->state) {
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200945 case SC_ST_RDY:
946 case SC_ST_EST:
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200947 /* we have to shut before closing, otherwise some short messages
948 * may never leave the system, especially when there are remaining
949 * unread data in the socket input buffer, or when nolinger is set.
Willy Tarreaucb041662022-05-17 19:44:42 +0200950 * However, if SC_FL_NOLINGER is explicitly set, we know there is
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200951 * no risk so we close both sides immediately.
952 */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200953 if (!sc_ep_test(sc, SE_FL_ERROR) && !(sc->flags & SC_FL_NOLINGER) &&
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200954 !(ic->flags & (CF_SHUTR|CF_DONT_READ)))
955 return;
956
Willy Tarreau476c2802022-11-14 07:36:42 +0100957 __fallthrough;
Willy Tarreau026e8fb2022-05-17 19:47:17 +0200958 case SC_ST_CON:
959 case SC_ST_CER:
960 case SC_ST_QUE:
961 case SC_ST_TAR:
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200962 /* Note that none of these states may happen with applets */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200963 appctx_shut(__sc_appctx(sc));
964 sc->state = SC_ST_DIS;
Willy Tarreau476c2802022-11-14 07:36:42 +0100965 __fallthrough;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200966 default:
Willy Tarreau0adb2812022-05-27 10:02:48 +0200967 sc->flags &= ~SC_FL_NOLINGER;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200968 ic->flags |= CF_SHUTR;
Christopher Fauletf8413cb2023-02-07 16:06:14 +0100969 sc_ep_reset_rex(sc);
Christopher Fauletca679922022-07-20 13:24:04 +0200970 if (sc->flags & SC_FL_ISBACK)
971 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200972 }
973}
974
975/* chk_rcv function for applets */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200976static void sc_app_chk_rcv_applet(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200977{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200978 struct channel *ic = sc_ic(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200979
Willy Tarreau0adb2812022-05-27 10:02:48 +0200980 BUG_ON(!sc_appctx(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200981
Willy Tarreau0adb2812022-05-27 10:02:48 +0200982 DPRINTF(stderr, "%s: sc=%p, sc->state=%d ic->flags=%08x oc->flags=%08x\n",
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200983 __FUNCTION__,
Willy Tarreau0adb2812022-05-27 10:02:48 +0200984 sc, sc->state, ic->flags, sc_oc(sc)->flags);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200985
986 if (!ic->pipe) {
987 /* (re)start reading */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200988 appctx_wakeup(__sc_appctx(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200989 }
990}
991
992/* chk_snd function for applets */
Willy Tarreau0adb2812022-05-27 10:02:48 +0200993static void sc_app_chk_snd_applet(struct stconn *sc)
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200994{
Willy Tarreau0adb2812022-05-27 10:02:48 +0200995 struct channel *oc = sc_oc(sc);
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200996
Willy Tarreau0adb2812022-05-27 10:02:48 +0200997 BUG_ON(!sc_appctx(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +0200998
Willy Tarreau0adb2812022-05-27 10:02:48 +0200999 DPRINTF(stderr, "%s: sc=%p, sc->state=%d ic->flags=%08x oc->flags=%08x\n",
Christopher Faulet9ffddd52022-04-01 14:04:29 +02001000 __FUNCTION__,
Willy Tarreau0adb2812022-05-27 10:02:48 +02001001 sc, sc->state, sc_ic(sc)->flags, oc->flags);
Christopher Faulet9ffddd52022-04-01 14:04:29 +02001002
Willy Tarreau0adb2812022-05-27 10:02:48 +02001003 if (unlikely(sc->state != SC_ST_EST || (oc->flags & CF_SHUTW)))
Christopher Faulet9ffddd52022-04-01 14:04:29 +02001004 return;
1005
Christopher Faulet04f03e12022-06-01 17:35:34 +02001006 /* we only wake the applet up if it was waiting for some data and is ready to consume it */
1007 if (!sc_ep_test(sc, SE_FL_WAIT_DATA) || sc_ep_test(sc, SE_FL_WONT_CONSUME))
Christopher Faulet9ffddd52022-04-01 14:04:29 +02001008 return;
1009
Christopher Fauletf8413cb2023-02-07 16:06:14 +01001010 if (!tick_isset(sc_ep_wex(sc)))
1011 sc_ep_set_wex(sc, sc->wto);
Christopher Faulet9ffddd52022-04-01 14:04:29 +02001012
1013 if (!channel_is_empty(oc)) {
1014 /* (re)start sending */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001015 appctx_wakeup(__sc_appctx(sc));
Christopher Faulet9ffddd52022-04-01 14:04:29 +02001016 }
1017}
Christopher Faulet13045f02022-04-01 14:23:38 +02001018
1019
1020/* This function is designed to be called from within the stream handler to
Willy Tarreau4596fe22022-05-17 19:07:51 +02001021 * update the input channel's expiration timer and the stream connector's
Christopher Faulet13045f02022-04-01 14:23:38 +02001022 * Rx flags based on the channel's flags. It needs to be called only once
1023 * after the channel's flags have settled down, and before they are cleared,
1024 * though it doesn't harm to call it as often as desired (it just slightly
1025 * hurts performance). It must not be called from outside of the stream
1026 * handler, as what it does will be used to compute the stream task's
1027 * expiration.
1028 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001029void sc_update_rx(struct stconn *sc)
Christopher Faulet13045f02022-04-01 14:23:38 +02001030{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001031 struct channel *ic = sc_ic(sc);
Christopher Faulet13045f02022-04-01 14:23:38 +02001032
Willy Tarreau676c8db2022-05-24 16:22:24 +02001033 if (ic->flags & CF_SHUTR)
Christopher Faulet13045f02022-04-01 14:23:38 +02001034 return;
Christopher Faulet13045f02022-04-01 14:23:38 +02001035
1036 /* Read not closed, update FD status and timeout for reads */
1037 if (ic->flags & CF_DONT_READ)
Willy Tarreau0adb2812022-05-27 10:02:48 +02001038 sc_wont_read(sc);
Christopher Faulet13045f02022-04-01 14:23:38 +02001039 else
Willy Tarreau0adb2812022-05-27 10:02:48 +02001040 sc_will_read(sc);
Christopher Faulet13045f02022-04-01 14:23:38 +02001041
Christopher Faulet407210a2023-02-14 11:01:51 +01001042 if ((ic->flags & CF_EOI) || sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM))
Christopher Fauletf8413cb2023-02-07 16:06:14 +01001043 sc_ep_reset_rex(sc);
1044 else if (!tick_isset(sc_ep_rex(sc)))
1045 sc_ep_set_rex(sc, sc->rto);
Christopher Faulet13045f02022-04-01 14:23:38 +02001046
Willy Tarreau0adb2812022-05-27 10:02:48 +02001047 sc_chk_rcv(sc);
Christopher Faulet13045f02022-04-01 14:23:38 +02001048}
1049
1050/* This function is designed to be called from within the stream handler to
Willy Tarreau4596fe22022-05-17 19:07:51 +02001051 * update the output channel's expiration timer and the stream connector's
Christopher Faulet13045f02022-04-01 14:23:38 +02001052 * Tx flags based on the channel's flags. It needs to be called only once
1053 * after the channel's flags have settled down, and before they are cleared,
1054 * though it doesn't harm to call it as often as desired (it just slightly
1055 * hurts performance). It must not be called from outside of the stream
1056 * handler, as what it does will be used to compute the stream task's
1057 * expiration.
1058 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001059void sc_update_tx(struct stconn *sc)
Christopher Faulet13045f02022-04-01 14:23:38 +02001060{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001061 struct channel *oc = sc_oc(sc);
Christopher Faulet13045f02022-04-01 14:23:38 +02001062
1063 if (oc->flags & CF_SHUTW)
1064 return;
1065
1066 /* Write not closed, update FD status and timeout for writes */
1067 if (channel_is_empty(oc)) {
1068 /* stop writing */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001069 if (!sc_ep_test(sc, SE_FL_WAIT_DATA)) {
Christopher Faulet13045f02022-04-01 14:23:38 +02001070 if ((oc->flags & CF_SHUTW_NOW) == 0)
Willy Tarreau0adb2812022-05-27 10:02:48 +02001071 sc_ep_set(sc, SE_FL_WAIT_DATA);
Christopher Fauletf8413cb2023-02-07 16:06:14 +01001072 sc_ep_reset_wex(sc);
Christopher Faulet13045f02022-04-01 14:23:38 +02001073 }
1074 return;
1075 }
1076
1077 /* (re)start writing and update timeout. Note: we don't recompute the timeout
1078 * every time we get here, otherwise it would risk never expiring. We only
1079 * update it if it was not yet set. The stream socket handler will already
1080 * have updated it if there has been a completed I/O.
1081 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001082 sc_ep_clr(sc, SE_FL_WAIT_DATA);
Christopher Fauletf8413cb2023-02-07 16:06:14 +01001083 if (!tick_isset(sc_ep_wex(sc))) {
1084 sc_ep_set_wex(sc, sc->wto);
1085 if (tick_isset(sc_ep_rex(sc)) && !(sc->flags & SC_FL_INDEP_STR)) {
Christopher Faulet13045f02022-04-01 14:23:38 +02001086 /* Note: depending on the protocol, we don't know if we're waiting
1087 * for incoming data or not. So in order to prevent the socket from
1088 * expiring read timeouts during writes, we refresh the read timeout,
1089 * except if it was already infinite or if we have explicitly setup
1090 * independent streams.
1091 */
Christopher Fauletf8413cb2023-02-07 16:06:14 +01001092 sc_ep_set_rex(sc, sc->rto);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001093 }
1094 }
1095}
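
/* Illustrative usage sketch (not part of the build): both helpers above are
 * meant to be called from the stream handler once the analysers have settled
 * the channel flags, typically once per direction and per wakeup, e.g.:
 *
 *     sc_update_rx(scf);
 *     sc_update_tx(scf);
 *
 * where <scf> stands for the stream connector being refreshed; the name and
 * the exact call site are only assumptions used for illustration.
 */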
1096
Willy Tarreau19c65a92022-05-27 08:49:24 +02001097/* This function is the equivalent of sc_update() except that it's
Christopher Faulet5e29b762022-04-04 08:58:34 +02001098 * designed to be called from outside the stream handlers, typically the lower
 1099 * layers (applets, connections) after I/O completion. After updating the stream
 1100 * connector and timeouts, it will try to forward what can be forwarded, then
 1101 * wake the associated task up if an important event requires special handling.
Willy Tarreau15252cd2022-05-25 16:36:21 +02001102 * It may update SE_FL_WAIT_DATA and/or SC_FL_NEED_ROOM, which the callers are
Christopher Faulet5e29b762022-04-04 08:58:34 +02001103 * encouraged to watch to take appropriate action.
Willy Tarreau19c65a92022-05-27 08:49:24 +02001104 * It should not be called from within the stream itself, sc_update()
Christopher Faulet5e29b762022-04-04 08:58:34 +02001105 * is designed for this.
1106 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001107static void sc_notify(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001108{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001109 struct channel *ic = sc_ic(sc);
1110 struct channel *oc = sc_oc(sc);
Willy Tarreaue68bc612022-05-27 11:23:05 +02001111 struct stconn *sco = sc_opposite(sc);
Willy Tarreau0adb2812022-05-27 10:02:48 +02001112 struct task *task = sc_strm_task(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001113
1114 /* process consumer side */
1115 if (channel_is_empty(oc)) {
Willy Tarreau0adb2812022-05-27 10:02:48 +02001116 struct connection *conn = sc_conn(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001117
1118 if (((oc->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW) &&
Willy Tarreau0adb2812022-05-27 10:02:48 +02001119 (sc->state == SC_ST_EST) && (!conn || !(conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS))))
1120 sc_shutw(sc);
Christopher Fauletf8413cb2023-02-07 16:06:14 +01001121 sc_ep_reset_wex(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001122 }
1123
1124 /* indicate that we may be waiting for data from the output channel or
1125 * we're about to close and can't expect more data if SHUTW_NOW is there.
1126 */
1127 if (!(oc->flags & (CF_SHUTW|CF_SHUTW_NOW)))
Willy Tarreau0adb2812022-05-27 10:02:48 +02001128 sc_ep_set(sc, SE_FL_WAIT_DATA);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001129 else if ((oc->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW)
Willy Tarreau0adb2812022-05-27 10:02:48 +02001130 sc_ep_clr(sc, SE_FL_WAIT_DATA);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001131
1132 /* update OC timeouts and wake the other side up if it's waiting for room */
Christopher Faulet2e56a732023-01-26 16:18:09 +01001133 if (oc->flags & (CF_WRITE_EVENT)) {
1134 if (sc_ep_test(sc, SE_FL_ERR_PENDING|SE_FL_ERROR) &&
Christopher Faulet5e29b762022-04-04 08:58:34 +02001135 !channel_is_empty(oc))
Christopher Fauletf8413cb2023-02-07 16:06:14 +01001136 if (tick_isset(sc_ep_wex(sc)))
1137 sc_ep_set_wex(sc, sc->wto);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001138
Willy Tarreau0adb2812022-05-27 10:02:48 +02001139 if (!(sc->flags & SC_FL_INDEP_STR))
Christopher Fauletf8413cb2023-02-07 16:06:14 +01001140 if (tick_isset(sc_ep_rex(sc)))
1141 sc_ep_set_rex(sc, sc->rto);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001142 }
1143
1144 if (oc->flags & CF_DONT_READ)
Willy Tarreaue68bc612022-05-27 11:23:05 +02001145 sc_wont_read(sco);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001146 else
Willy Tarreaue68bc612022-05-27 11:23:05 +02001147 sc_will_read(sco);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001148
1149 /* Notify the other side when we've injected data into the IC that
1150 * needs to be forwarded. We can do fast-forwarding as soon as there
1151 * are output data, but we avoid doing this if some of the data are
1152 * not yet scheduled for being forwarded, because it is very likely
1153 * that it will be done again immediately afterwards once the following
Willy Tarreau15252cd2022-05-25 16:36:21 +02001154 * data are parsed (eg: HTTP chunking). We only clear SC_FL_NEED_ROOM
1155 * once we've emptied *some* of the output buffer, and not just when
1156 * there is available room, because applets are often forced to stop
1157 * before the buffer is full. We must not stop based on input data
1158 * alone because an HTTP parser might need more data to complete the
1159 * parsing.
Christopher Faulet5e29b762022-04-04 08:58:34 +02001160 */
1161 if (!channel_is_empty(ic) &&
Willy Tarreaue68bc612022-05-27 11:23:05 +02001162 sc_ep_test(sco, SE_FL_WAIT_DATA) &&
Christopher Faulet5e29b762022-04-04 08:58:34 +02001163 (!(ic->flags & CF_EXPECT_MORE) || c_full(ic) || ci_data(ic) == 0 || ic->pipe)) {
1164 int new_len, last_len;
1165
1166 last_len = co_data(ic);
1167 if (ic->pipe)
1168 last_len += ic->pipe->data;
1169
Willy Tarreaue68bc612022-05-27 11:23:05 +02001170 sc_chk_snd(sco);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001171
1172 new_len = co_data(ic);
1173 if (ic->pipe)
1174 new_len += ic->pipe->data;
1175
1176 /* check if the consumer has freed some space either in the
1177 * buffer or in the pipe.
1178 */
1179 if (new_len < last_len)
Willy Tarreau0adb2812022-05-27 10:02:48 +02001180 sc_have_room(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001181 }
1182
1183 if (!(ic->flags & CF_DONT_READ))
Willy Tarreau0adb2812022-05-27 10:02:48 +02001184 sc_will_read(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001185
Willy Tarreau0adb2812022-05-27 10:02:48 +02001186 sc_chk_rcv(sc);
Willy Tarreaue68bc612022-05-27 11:23:05 +02001187 sc_chk_rcv(sco);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001188
Christopher Faulet407210a2023-02-14 11:01:51 +01001189 if (ic->flags & (CF_EOI|CF_SHUTR) || sc_ep_test(sc, SE_FL_APPLET_NEED_CONN) ||
Willy Tarreau0adb2812022-05-27 10:02:48 +02001190 (sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM))) {
Christopher Fauletf8413cb2023-02-07 16:06:14 +01001191 sc_ep_reset_rex(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001192 }
Christopher Faulet285f7612022-12-12 08:28:55 +01001193 else if ((ic->flags & (CF_SHUTR|CF_READ_EVENT)) == CF_READ_EVENT) {
Willy Tarreauf61dd192022-05-27 09:00:19 +02001194 /* we must re-enable reading if sc_chk_snd() has freed some space */
Christopher Fauletf8413cb2023-02-07 16:06:14 +01001195 if (tick_isset(sc_ep_rex(sc)))
1196 sc_ep_set_rex(sc, sc->rto);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001197 }
1198
1199 /* wake the task up only when needed */
Christopher Faulet285f7612022-12-12 08:28:55 +01001200 if (/* changes on the production side that must be handled:
Christopher Faulet2e56a732023-01-26 16:18:09 +01001201 * - An error on receipt: SE_FL_ERROR
Christopher Faulet285f7612022-12-12 08:28:55 +01001202 * - A read event: shutdown for reads (CF_READ_EVENT + SHUTR)
1203 * end of input (CF_READ_EVENT + CF_EOI)
1204 * data received and no fast-forwarding (CF_READ_EVENT + !to_forward)
1205 * read event while consumer side is not established (CF_READ_EVENT + sco->state != SC_ST_EST)
1206 */
1207 ((ic->flags & CF_READ_EVENT) && ((ic->flags & (CF_SHUTR|CF_EOI)) || !ic->to_forward || sco->state != SC_ST_EST)) ||
Christopher Faulet2e56a732023-01-26 16:18:09 +01001208 sc_ep_test(sc, SE_FL_ERROR) ||
Christopher Faulet5e29b762022-04-04 08:58:34 +02001209
1210 /* changes on the consumption side */
Christopher Faulet2e56a732023-01-26 16:18:09 +01001211 sc_ep_test(sc, SE_FL_ERR_PENDING) ||
Christopher Fauletd8988412022-12-20 18:10:04 +01001212 ((oc->flags & CF_WRITE_EVENT) &&
1213 ((sc->state < SC_ST_EST) ||
1214 (oc->flags & CF_SHUTW) ||
Christopher Faulet5e29b762022-04-04 08:58:34 +02001215 (((oc->flags & CF_WAKE_WRITE) ||
Christopher Fauletd8988412022-12-20 18:10:04 +01001216 !(oc->flags & (CF_AUTO_CLOSE|CF_SHUTW_NOW|CF_SHUTW))) &&
1217 (sco->state != SC_ST_EST ||
1218 (channel_is_empty(oc) && !oc->to_forward)))))) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001219 task_wakeup(task, TASK_WOKEN_IO);
1220 }
Christopher Faulet5e29b762022-04-04 08:58:34 +02001221
Christopher Faulet2e56a732023-01-26 16:18:09 +01001222 if (ic->flags & CF_READ_EVENT)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001223 ic->flags &= ~CF_READ_DONTWAIT;
1224}
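
/* Call-path sketch (informational only, derived from this file): sc_notify()
 * is the common tail of the two lower-layer completion paths defined below:
 *
 *     I/O completion on a mux or applet
 *       -> sc_conn_process(sc) or sc_applet_process(sc)
 *            -> sc_notify(sc)                       (flags, timeouts, forwarding)
 *            -> stream_release_buffers(__sc_strm(sc))
 *
 * The stream task itself is only woken up when one of the conditions checked
 * at the end of sc_notify() is met.
 */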
1225
1226/*
1227 * This function propagates a null read received on a socket-based connection.
Willy Tarreaucb041662022-05-17 19:44:42 +02001228 * It updates the stream connector. If the stream connector has SC_FL_NOHALF,
Christopher Faulet5e29b762022-04-04 08:58:34 +02001229 * the close is also forwarded to the write side as an abort.
1230 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001231static void sc_conn_read0(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001232{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001233 struct channel *ic = sc_ic(sc);
1234 struct channel *oc = sc_oc(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001235
Willy Tarreau0adb2812022-05-27 10:02:48 +02001236 BUG_ON(!sc_conn(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001237
Christopher Faulet5e29b762022-04-04 08:58:34 +02001238 if (ic->flags & CF_SHUTR)
1239 return;
1240 ic->flags |= CF_SHUTR;
Christopher Fauletf8413cb2023-02-07 16:06:14 +01001241 sc_ep_reset_rex(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001242
Willy Tarreau0adb2812022-05-27 10:02:48 +02001243 if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001244 return;
1245
1246 if (oc->flags & CF_SHUTW)
1247 goto do_close;
1248
Christopher Fauleteb3f26d2023-02-08 16:18:48 +01001249 if (sc_cond_forward_shutw(sc)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001250 /* we want to immediately forward this close to the write side */
1251 /* force flag on ssl to keep stream in cache */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001252 sc_conn_shutw(sc, CO_SHW_SILENT);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001253 goto do_close;
1254 }
1255
1256 /* otherwise that's just a normal read shutdown */
1257 return;
1258
1259 do_close:
Willy Tarreauf61dd192022-05-27 09:00:19 +02001260 /* OK we completely close the socket here just as if we went through sc_shut[rw]() */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001261 sc_conn_shut(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001262
1263 oc->flags &= ~CF_SHUTW_NOW;
1264 oc->flags |= CF_SHUTW;
Christopher Fauletf8413cb2023-02-07 16:06:14 +01001265 sc_ep_reset_wex(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001266
Willy Tarreau0adb2812022-05-27 10:02:48 +02001267 sc->state = SC_ST_DIS;
Christopher Fauletca679922022-07-20 13:24:04 +02001268 if (sc->flags & SC_FL_ISBACK)
1269 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001270 return;
1271}
1272
1273/*
1274 * This is the callback which is called by the connection layer to receive data
1275 * into the buffer from the connection. It repeatedly calls the mux layer's
1276 * rcv_buf function.
1277 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001278static int sc_conn_recv(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001279{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001280 struct connection *conn = __sc_conn(sc);
1281 struct channel *ic = sc_ic(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001282 int ret, max, cur_read = 0;
1283 int read_poll = MAX_READ_POLL_LOOPS;
1284 int flags = 0;
1285
1286 /* If not established yet, do nothing. */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001287 if (sc->state != SC_ST_EST)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001288 return 0;
1289
Willy Tarreau462b9892022-05-18 18:06:53 +02001290 /* If another call to sc_conn_recv() failed, and we subscribed to
Christopher Faulet5e29b762022-04-04 08:58:34 +02001291 * recv events already, give up now.
1292 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001293 if (sc->wait_event.events & SUB_RETRY_RECV)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001294 return 0;
1295
1296 /* maybe we were called immediately after an asynchronous shutr */
1297 if (ic->flags & CF_SHUTR)
1298 return 1;
1299
1300 /* we must wait because the mux is not installed yet */
1301 if (!conn->mux)
1302 return 0;
1303
1304 /* stop here if we reached the end of data */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001305 if (sc_ep_test(sc, SE_FL_EOS))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001306 goto end_recv;
1307
1308 /* stop immediately on errors. Note that we DON'T want to stop on
1309 * POLL_ERR, as the poller might report a write error while there
1310 * are still data available in the recv buffer. This typically
1311 * happens when we send too large a request to a backend server
1312 * which rejects it before reading it all.
1313 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001314 if (!sc_ep_test(sc, SE_FL_RCV_MORE)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001315 if (!conn_xprt_ready(conn))
1316 return 0;
Willy Tarreau0adb2812022-05-27 10:02:48 +02001317 if (sc_ep_test(sc, SE_FL_ERROR))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001318 goto end_recv;
1319 }
1320
1321 /* prepare to detect if the mux needs more room */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001322 sc_ep_clr(sc, SE_FL_WANT_ROOM);
Christopher Faulet341a5782023-02-10 17:37:11 +01001323 BUG_ON(sc_waiting_room(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001324
1325 if ((ic->flags & (CF_STREAMER | CF_STREAMER_FAST)) && !co_data(ic) &&
1326 global.tune.idle_timer &&
1327 (unsigned short)(now_ms - ic->last_read) >= global.tune.idle_timer) {
1328 /* The buffer was empty and nothing was transferred for more
1329 * than one second. This was caused by a pause and not by
1330 * congestion. Reset any streaming mode to reduce latency.
1331 */
1332 ic->xfer_small = 0;
1333 ic->xfer_large = 0;
1334 ic->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
1335 }
1336
1337 /* First, let's see if we may splice data across the channel without
1338 * using a buffer.
1339 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001340 if (sc_ep_test(sc, SE_FL_MAY_SPLICE) &&
Christopher Faulet5e29b762022-04-04 08:58:34 +02001341 (ic->pipe || ic->to_forward >= MIN_SPLICE_FORWARD) &&
1342 ic->flags & CF_KERN_SPLICING) {
1343 if (c_data(ic)) {
1344 /* We're embarrassed, there are already data pending in
1345 * the buffer and we don't want to have them at two
1346 * locations at a time. Let's indicate we need some
1347 * place and ask the consumer to hurry.
1348 */
1349 flags |= CO_RFL_BUF_FLUSH;
1350 goto abort_splice;
1351 }
1352
1353 if (unlikely(ic->pipe == NULL)) {
1354 if (pipes_used >= global.maxpipes || !(ic->pipe = get_pipe())) {
1355 ic->flags &= ~CF_KERN_SPLICING;
1356 goto abort_splice;
1357 }
1358 }
1359
Willy Tarreau0adb2812022-05-27 10:02:48 +02001360 ret = conn->mux->rcv_pipe(sc, ic->pipe, ic->to_forward);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001361 if (ret < 0) {
1362 /* splice not supported on this end, let's disable it */
1363 ic->flags &= ~CF_KERN_SPLICING;
1364 goto abort_splice;
1365 }
1366
1367 if (ret > 0) {
1368 if (ic->to_forward != CHN_INFINITE_FORWARD)
1369 ic->to_forward -= ret;
1370 ic->total += ret;
1371 cur_read += ret;
Christopher Faulet285f7612022-12-12 08:28:55 +01001372 ic->flags |= CF_READ_EVENT;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001373 }
1374
Willy Tarreau0adb2812022-05-27 10:02:48 +02001375 if (sc_ep_test(sc, SE_FL_EOS | SE_FL_ERROR))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001376 goto end_recv;
1377
1378 if (conn->flags & CO_FL_WAIT_ROOM) {
1379 /* the pipe is full or we have read enough data that it
1380 * could soon be full. Let's stop before needing to poll.
1381 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001382 sc_need_room(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001383 goto done_recv;
1384 }
1385
1386 /* splice not possible (anymore), let's go on on standard copy */
1387 }
1388
1389 abort_splice:
1390 if (ic->pipe && unlikely(!ic->pipe->data)) {
1391 put_pipe(ic->pipe);
1392 ic->pipe = NULL;
1393 }
1394
Willy Tarreau0adb2812022-05-27 10:02:48 +02001395 if (ic->pipe && ic->to_forward && !(flags & CO_RFL_BUF_FLUSH) && sc_ep_test(sc, SE_FL_MAY_SPLICE)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001396 /* don't break splicing by reading, but still call rcv_buf()
1397 * to pass the flag.
1398 */
1399 goto done_recv;
1400 }
1401
1402 	/* now we'll need an input buffer for the stream */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001403 if (!sc_alloc_ibuf(sc, &(__sc_strm(sc)->buffer_wait)))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001404 goto end_recv;
1405
1406 /* For an HTX stream, if the buffer is stuck (no output data with some
1407 * input data) and if the HTX message is fragmented or if its free space
1408 	 * wraps, we force an HTX defragmentation. It is a way to get a
1409 	 * contiguous free space and to let the mux copy as much data as
1410 * possible.
1411 *
1412 	 * NOTE: A possible optimization may be to let the mux decide if defrag is
1413 	 * required or not, depending on the amount of data to be xferred.
1414 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001415 if (IS_HTX_STRM(__sc_strm(sc)) && !co_data(ic)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001416 struct htx *htx = htxbuf(&ic->buf);
1417
1418 if (htx_is_not_empty(htx) && ((htx->flags & HTX_FL_FRAGMENTED) || htx_space_wraps(htx)))
1419 htx_defrag(htx, NULL, 0);
1420 }
1421
1422 	/* Instruct the mux that it must subscribe to read events */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001423 flags |= ((!conn_is_back(conn) && (__sc_strm(sc)->be->options & PR_O_ABRT_CLOSE)) ? CO_RFL_KEEP_RECV : 0);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001424
1425 /* Important note : if we're called with POLL_IN|POLL_HUP, it means the read polling
1426 * was enabled, which implies that the recv buffer was not full. So we have a guarantee
1427 * that if such an event is not handled above in splice, it will be handled here by
1428 * recv().
1429 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001430 while (sc_ep_test(sc, SE_FL_RCV_MORE) ||
Christopher Faulet5e29b762022-04-04 08:58:34 +02001431 (!(conn->flags & CO_FL_HANDSHAKE) &&
Willy Tarreau0adb2812022-05-27 10:02:48 +02001432 (!sc_ep_test(sc, SE_FL_ERROR | SE_FL_EOS)) && !(ic->flags & CF_SHUTR))) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001433 int cur_flags = flags;
1434
1435 /* Compute transient CO_RFL_* flags */
1436 if (co_data(ic)) {
1437 cur_flags |= (CO_RFL_BUF_WET | CO_RFL_BUF_NOT_STUCK);
1438 }
1439
1440 		/* <max> may be null. It is the mux's responsibility to set
Willy Tarreaue68bc612022-05-27 11:23:05 +02001441 * SE_FL_RCV_MORE on the SC if more space is needed.
Christopher Faulet5e29b762022-04-04 08:58:34 +02001442 */
1443 max = channel_recv_max(ic);
Willy Tarreau0adb2812022-05-27 10:02:48 +02001444 ret = conn->mux->rcv_buf(sc, &ic->buf, max, cur_flags);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001445
Willy Tarreau0adb2812022-05-27 10:02:48 +02001446 if (sc_ep_test(sc, SE_FL_WANT_ROOM)) {
Willy Tarreaub605c422022-05-17 17:04:55 +02001447 /* SE_FL_WANT_ROOM must not be reported if the channel's
Christopher Faulet5e29b762022-04-04 08:58:34 +02001448 * buffer is empty.
1449 */
1450 BUG_ON(c_empty(ic));
1451
Willy Tarreau0adb2812022-05-27 10:02:48 +02001452 sc_need_room(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001453 			/* Add CF_READ_EVENT because some data are pending but
1454 * cannot be xferred to the channel
1455 */
Christopher Faulet285f7612022-12-12 08:28:55 +01001456 ic->flags |= CF_READ_EVENT;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001457 }
1458
1459 if (ret <= 0) {
1460 /* if we refrained from reading because we asked for a
1461 * flush to satisfy rcv_pipe(), we must not subscribe
1462 * and instead report that there's not enough room
1463 * here to proceed.
1464 */
1465 if (flags & CO_RFL_BUF_FLUSH)
Willy Tarreau0adb2812022-05-27 10:02:48 +02001466 sc_need_room(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001467 break;
1468 }
1469
1470 cur_read += ret;
1471
1472 /* if we're allowed to directly forward data, we must update ->o */
1473 if (ic->to_forward && !(ic->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
1474 unsigned long fwd = ret;
1475 if (ic->to_forward != CHN_INFINITE_FORWARD) {
1476 if (fwd > ic->to_forward)
1477 fwd = ic->to_forward;
1478 ic->to_forward -= fwd;
1479 }
1480 c_adv(ic, fwd);
1481 }
1482
Christopher Faulet285f7612022-12-12 08:28:55 +01001483 ic->flags |= CF_READ_EVENT;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001484 ic->total += ret;
1485
1486 /* End-of-input reached, we can leave. In this case, it is
Willy Tarreaue68bc612022-05-27 11:23:05 +02001487 * important to break the loop to not block the SC because of
Christopher Faulet5e29b762022-04-04 08:58:34 +02001488 		 * the channel's policies. This way, we are still able to receive
1489 * shutdowns.
1490 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001491 if (sc_ep_test(sc, SE_FL_EOI))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001492 break;
1493
1494 if ((ic->flags & CF_READ_DONTWAIT) || --read_poll <= 0) {
1495 /* we're stopped by the channel's policy */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001496 sc_wont_read(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001497 break;
1498 }
1499
1500 /* if too many bytes were missing from last read, it means that
1501 * it's pointless trying to read again because the system does
1502 * not have them in buffers.
1503 */
1504 if (ret < max) {
1505 /* if a streamer has read few data, it may be because we
1506 * have exhausted system buffers. It's not worth trying
1507 * again.
1508 */
1509 if (ic->flags & CF_STREAMER) {
1510 /* we're stopped by the channel's policy */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001511 sc_wont_read(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001512 break;
1513 }
1514
1515 /* if we read a large block smaller than what we requested,
1516 * it's almost certain we'll never get anything more.
1517 */
1518 if (ret >= global.tune.recv_enough) {
1519 /* we're stopped by the channel's policy */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001520 sc_wont_read(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001521 break;
1522 }
1523 }
1524
1525 /* if we are waiting for more space, don't try to read more data
1526 * right now.
1527 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001528 if (sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001529 break;
1530 } /* while !flags */
1531
1532 done_recv:
1533 if (cur_read) {
1534 if ((ic->flags & (CF_STREAMER | CF_STREAMER_FAST)) &&
1535 (cur_read <= ic->buf.size / 2)) {
1536 ic->xfer_large = 0;
1537 ic->xfer_small++;
1538 if (ic->xfer_small >= 3) {
1539 /* we have read less than half of the buffer in
1540 * one pass, and this happened at least 3 times.
1541 * This is definitely not a streamer.
1542 */
1543 ic->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
1544 }
1545 else if (ic->xfer_small >= 2) {
1546 /* if the buffer has been at least half full twice,
1547 * we receive faster than we send, so at least it
1548 * is not a "fast streamer".
1549 */
1550 ic->flags &= ~CF_STREAMER_FAST;
1551 }
1552 }
1553 else if (!(ic->flags & CF_STREAMER_FAST) &&
1554 (cur_read >= ic->buf.size - global.tune.maxrewrite)) {
1555 /* we read a full buffer at once */
1556 ic->xfer_small = 0;
1557 ic->xfer_large++;
1558 if (ic->xfer_large >= 3) {
1559 /* we call this buffer a fast streamer if it manages
1560 * to be filled in one call 3 consecutive times.
1561 */
1562 ic->flags |= (CF_STREAMER | CF_STREAMER_FAST);
1563 }
1564 }
1565 else {
1566 ic->xfer_small = 0;
1567 ic->xfer_large = 0;
1568 }
1569 ic->last_read = now_ms;
1570 }
1571
1572 end_recv:
1573 ret = (cur_read != 0);
1574
1575 /* Report EOI on the channel if it was reached from the mux point of
1576 * view. */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001577 if (sc_ep_test(sc, SE_FL_EOI) && !(ic->flags & CF_EOI)) {
Christopher Faulet285f7612022-12-12 08:28:55 +01001578 ic->flags |= (CF_EOI|CF_READ_EVENT);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001579 ret = 1;
1580 }
1581
Willy Tarreau0adb2812022-05-27 10:02:48 +02001582 if (sc_ep_test(sc, SE_FL_ERROR))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001583 ret = 1;
Willy Tarreau0adb2812022-05-27 10:02:48 +02001584 else if (sc_ep_test(sc, SE_FL_EOS)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001585 /* we received a shutdown */
Christopher Faulet6e1bbc42022-12-12 08:08:15 +01001586 ic->flags |= CF_READ_EVENT;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001587 if (ic->flags & CF_AUTO_CLOSE)
1588 channel_shutw_now(ic);
Willy Tarreau0adb2812022-05-27 10:02:48 +02001589 sc_conn_read0(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001590 ret = 1;
1591 }
Willy Tarreau0adb2812022-05-27 10:02:48 +02001592 else if (!(sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM)) &&
Willy Tarreau15252cd2022-05-25 16:36:21 +02001593 !(ic->flags & CF_SHUTR)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001594 /* Subscribe to receive events if we're blocking on I/O */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001595 conn->mux->subscribe(sc, SUB_RETRY_RECV, &sc->wait_event);
1596 se_have_no_more_data(sc->sedesc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001597 } else {
Willy Tarreau0adb2812022-05-27 10:02:48 +02001598 se_have_more_data(sc->sedesc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001599 ret = 1;
1600 }
1601 return ret;
1602}
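
/* Summary of the streamer heuristic above (informational only): with reads
 * measured against the input buffer size, the done_recv block roughly does:
 *
 *     read <= size/2, 3 times in a row            -> clear CF_STREAMER|CF_STREAMER_FAST
 *     read <= size/2, 2 times in a row            -> clear CF_STREAMER_FAST only
 *     read >= size - maxrewrite, 3 times in a row -> set CF_STREAMER|CF_STREAMER_FAST
 *                                                    (when not already a fast streamer)
 *     anything in between                         -> reset both counters
 */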
1603
Willy Tarreau4596fe22022-05-17 19:07:51 +02001604/* This tries to perform a synchronous receive on the stream connector to
Christopher Faulet5e29b762022-04-04 08:58:34 +02001605 * try to collect last arrived data. In practice it's only implemented on
Willy Tarreau4596fe22022-05-17 19:07:51 +02001606 * stconns. Returns 0 if nothing was done, non-zero if new data or a
Christopher Faulet5e29b762022-04-04 08:58:34 +02001607 * shutdown were collected. This may result in some delayed receive calls
1608 * being programmed and performed later, though it doesn't provide any
1609 * such guarantee.
1610 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001611int sc_conn_sync_recv(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001612{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001613 if (!sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001614 return 0;
1615
Willy Tarreau0adb2812022-05-27 10:02:48 +02001616 if (!sc_mux_ops(sc))
Willy Tarreau4596fe22022-05-17 19:07:51 +02001617 return 0; // only stconns are supported
Christopher Faulet5e29b762022-04-04 08:58:34 +02001618
Willy Tarreau0adb2812022-05-27 10:02:48 +02001619 if (sc->wait_event.events & SUB_RETRY_RECV)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001620 return 0; // already subscribed
1621
Willy Tarreau0adb2812022-05-27 10:02:48 +02001622 if (!sc_is_recv_allowed(sc))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001623 return 0; // already failed
1624
Willy Tarreau0adb2812022-05-27 10:02:48 +02001625 return sc_conn_recv(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001626}
1627
1628/*
1629 * This function is called to send buffer data to a stream socket.
1630 * It calls the mux layer's snd_buf function. It relies on the
1631 * caller to commit polling changes. The caller should check conn->flags
1632 * for errors.
1633 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001634static int sc_conn_send(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001635{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001636 struct connection *conn = __sc_conn(sc);
1637 struct stream *s = __sc_strm(sc);
1638 struct channel *oc = sc_oc(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001639 int ret;
1640 int did_send = 0;
1641
Willy Tarreau0adb2812022-05-27 10:02:48 +02001642 if (sc_ep_test(sc, SE_FL_ERROR | SE_FL_ERR_PENDING) || sc_is_conn_error(sc)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001643 /* We're probably there because the tasklet was woken up,
1644 		 * but process_stream() ran before, detected there was an
Willy Tarreaue68bc612022-05-27 11:23:05 +02001645 * error and put the SC back to SC_ST_TAR. There's still
Christopher Faulet5e29b762022-04-04 08:58:34 +02001646 * CO_FL_ERROR on the connection but we don't want to add
Willy Tarreaub605c422022-05-17 17:04:55 +02001647 * SE_FL_ERROR back, so give up
Christopher Faulet5e29b762022-04-04 08:58:34 +02001648 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001649 if (sc->state < SC_ST_CON)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001650 return 0;
Christopher Faulet7f6aa562022-10-17 10:21:19 +02001651 if (sc_ep_test(sc, SE_FL_EOS))
1652 sc_ep_set(sc, SE_FL_ERROR);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001653 return 1;
1654 }
1655
1656 /* We're already waiting to be able to send, give up */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001657 if (sc->wait_event.events & SUB_RETRY_SEND)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001658 return 0;
1659
1660 /* we might have been called just after an asynchronous shutw */
1661 if (oc->flags & CF_SHUTW)
1662 return 1;
1663
1664 /* we must wait because the mux is not installed yet */
1665 if (!conn->mux)
1666 return 0;
1667
1668 if (oc->pipe && conn->xprt->snd_pipe && conn->mux->snd_pipe) {
Willy Tarreau0adb2812022-05-27 10:02:48 +02001669 ret = conn->mux->snd_pipe(sc, oc->pipe);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001670 if (ret > 0)
1671 did_send = 1;
1672
1673 if (!oc->pipe->data) {
1674 put_pipe(oc->pipe);
1675 oc->pipe = NULL;
1676 }
1677
1678 if (oc->pipe)
1679 goto end;
1680 }
1681
1682 /* At this point, the pipe is empty, but we may still have data pending
1683 * in the normal buffer.
1684 */
1685 if (co_data(oc)) {
1686 /* when we're here, we already know that there is no spliced
1687 * data left, and that there are sendable buffered data.
1688 */
1689
1690 /* check if we want to inform the kernel that we're interested in
1691 * sending more data after this call. We want this if :
1692 * - we're about to close after this last send and want to merge
1693 * the ongoing FIN with the last segment.
1694 * - we know we can't send everything at once and must get back
1695 * here because of unaligned data
1696 * - there is still a finite amount of data to forward
1697 * The test is arranged so that the most common case does only 2
1698 * tests.
1699 */
1700 unsigned int send_flag = 0;
1701
1702 if ((!(oc->flags & (CF_NEVER_WAIT|CF_SEND_DONTWAIT)) &&
1703 ((oc->to_forward && oc->to_forward != CHN_INFINITE_FORWARD) ||
1704 (oc->flags & CF_EXPECT_MORE) ||
1705 (IS_HTX_STRM(s) &&
1706 (!(oc->flags & (CF_EOI|CF_SHUTR)) && htx_expect_more(htxbuf(&oc->buf)))))) ||
1707 ((oc->flags & CF_ISRESP) &&
1708 ((oc->flags & (CF_AUTO_CLOSE|CF_SHUTW_NOW)) == (CF_AUTO_CLOSE|CF_SHUTW_NOW))))
1709 send_flag |= CO_SFL_MSG_MORE;
1710
1711 if (oc->flags & CF_STREAMER)
1712 send_flag |= CO_SFL_STREAMER;
1713
1714 if (s->txn && s->txn->flags & TX_L7_RETRY && !b_data(&s->txn->l7_buffer)) {
1715 /* If we want to be able to do L7 retries, copy
1716 * the data we're about to send, so that we are able
1717 * to resend them if needed
1718 */
1719 /* Try to allocate a buffer if we had none.
1720 			 * If it fails, the code below will just
1721 			 * disable the L7 retries by clearing
1722 			 * the TX_L7_RETRY flag.
1723 */
1724 if (s->txn->req.msg_state != HTTP_MSG_DONE)
1725 s->txn->flags &= ~TX_L7_RETRY;
1726 else {
1727 if (b_alloc(&s->txn->l7_buffer) == NULL)
1728 s->txn->flags &= ~TX_L7_RETRY;
1729 else {
1730 memcpy(b_orig(&s->txn->l7_buffer),
1731 b_orig(&oc->buf),
1732 b_size(&oc->buf));
1733 s->txn->l7_buffer.head = co_data(oc);
1734 b_add(&s->txn->l7_buffer, co_data(oc));
1735 }
1736
1737 }
1738 }
1739
Willy Tarreau0adb2812022-05-27 10:02:48 +02001740 ret = conn->mux->snd_buf(sc, &oc->buf, co_data(oc), send_flag);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001741 if (ret > 0) {
1742 did_send = 1;
1743 c_rew(oc, ret);
1744 c_realign_if_empty(oc);
1745
1746 if (!co_data(oc)) {
1747 /* Always clear both flags once everything has been sent, they're one-shot */
1748 oc->flags &= ~(CF_EXPECT_MORE | CF_SEND_DONTWAIT);
1749 }
1750 /* if some data remain in the buffer, it's only because the
1751 			 * system buffers are full; we will try again next time.
1752 */
Christopher Faulet13045f02022-04-01 14:23:38 +02001753 }
1754 }
Christopher Faulet5e29b762022-04-04 08:58:34 +02001755
1756 end:
1757 if (did_send) {
Christopher Fauletd8988412022-12-20 18:10:04 +01001758 oc->flags |= CF_WRITE_EVENT | CF_WROTE_DATA;
Willy Tarreau0adb2812022-05-27 10:02:48 +02001759 if (sc->state == SC_ST_CON)
1760 sc->state = SC_ST_RDY;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001761
Willy Tarreau0adb2812022-05-27 10:02:48 +02001762 sc_have_room(sc_opposite(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001763 }
1764
Willy Tarreau0adb2812022-05-27 10:02:48 +02001765 if (sc_ep_test(sc, SE_FL_ERROR | SE_FL_ERR_PENDING)) {
Christopher Faulet2e56a732023-01-26 16:18:09 +01001766 oc->flags |= CF_WRITE_EVENT;
Christopher Faulet7f6aa562022-10-17 10:21:19 +02001767 if (sc_ep_test(sc, SE_FL_EOS))
Christopher Faulet2e56a732023-01-26 16:18:09 +01001768 sc_ep_set(sc, SE_FL_ERROR);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001769 return 1;
1770 }
1771
1772 /* We couldn't send all of our data, let the mux know we'd like to send more */
1773 if (!channel_is_empty(oc))
Willy Tarreau0adb2812022-05-27 10:02:48 +02001774 conn->mux->subscribe(sc, SUB_RETRY_SEND, &sc->wait_event);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001775 return did_send;
1776}
1777
Christopher Fauletd8988412022-12-20 18:10:04 +01001778/* perform a synchronous send() for the stream connector. The CF_WRITE_EVENT
1779 * flag is cleared prior to the attempt, and will possibly be updated in case
1780 * of success.
Christopher Faulet5e29b762022-04-04 08:58:34 +02001781 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001782void sc_conn_sync_send(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001783{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001784 struct channel *oc = sc_oc(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001785
Christopher Fauletd8988412022-12-20 18:10:04 +01001786 oc->flags &= ~CF_WRITE_EVENT;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001787
1788 if (oc->flags & CF_SHUTW)
1789 return;
1790
1791 if (channel_is_empty(oc))
1792 return;
1793
Willy Tarreau0adb2812022-05-27 10:02:48 +02001794 if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001795 return;
1796
Willy Tarreau0adb2812022-05-27 10:02:48 +02001797 if (!sc_mux_ops(sc))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001798 return;
1799
Willy Tarreau0adb2812022-05-27 10:02:48 +02001800 sc_conn_send(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001801}
1802
1803/* Called by I/O handlers after completion. It propagates
Willy Tarreau4596fe22022-05-17 19:07:51 +02001804 * connection flags to the stream connector, updates the stream (which may or
Christopher Faulet5e29b762022-04-04 08:58:34 +02001805 * may not take this opportunity to try to forward data), then update the
Willy Tarreau4596fe22022-05-17 19:07:51 +02001806 * connection's polling based on the channels and stream connector's final
Christopher Faulet5e29b762022-04-04 08:58:34 +02001807 * states. The function always returns 0.
1808 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001809static int sc_conn_process(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001810{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001811 struct connection *conn = __sc_conn(sc);
1812 struct channel *ic = sc_ic(sc);
1813 struct channel *oc = sc_oc(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001814
1815 BUG_ON(!conn);
1816
1817 /* If we have data to send, try it now */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001818 if (!channel_is_empty(oc) && !(sc->wait_event.events & SUB_RETRY_SEND))
1819 sc_conn_send(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001820
Willy Tarreau4596fe22022-05-17 19:07:51 +02001821 /* First step, report to the stream connector what was detected at the
Christopher Faulet5e29b762022-04-04 08:58:34 +02001822 * connection layer : errors and connection establishment.
Willy Tarreaub605c422022-05-17 17:04:55 +02001823 * Only add SE_FL_ERROR if we're connected, or we're attempting to
Christopher Faulet5e29b762022-04-04 08:58:34 +02001824 * connect, we may get there because we got woken up, but only run
 1825 * after process_stream() noticed there was an error, and decided
1826 * to retry to connect, the connection may still have CO_FL_ERROR,
Willy Tarreaub605c422022-05-17 17:04:55 +02001827 * and we don't want to add SE_FL_ERROR back
Christopher Faulet5e29b762022-04-04 08:58:34 +02001828 *
Willy Tarreau462b9892022-05-18 18:06:53 +02001829	 * Note: This test is only required because sc_conn_process is also the SC
1830 * wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
Christopher Faulet5e29b762022-04-04 08:58:34 +02001831 * care of it.
1832 */
1833
Willy Tarreau0adb2812022-05-27 10:02:48 +02001834 if (sc->state >= SC_ST_CON) {
1835 if (sc_is_conn_error(sc))
1836 sc_ep_set(sc, SE_FL_ERROR);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001837 }
1838
1839 /* If we had early data, and the handshake ended, then
1840 * we can remove the flag, and attempt to wake the task up,
1841 * in the event there's an analyser waiting for the end of
1842 * the handshake.
1843 */
1844 if (!(conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS)) &&
Willy Tarreau0adb2812022-05-27 10:02:48 +02001845 sc_ep_test(sc, SE_FL_WAIT_FOR_HS)) {
1846 sc_ep_clr(sc, SE_FL_WAIT_FOR_HS);
1847 task_wakeup(sc_strm_task(sc), TASK_WOKEN_MSG);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001848 }
1849
Willy Tarreau0adb2812022-05-27 10:02:48 +02001850 if (!sc_state_in(sc->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO) &&
Christopher Faulet5e29b762022-04-04 08:58:34 +02001851 (conn->flags & CO_FL_WAIT_XPRT) == 0) {
Christopher Fauletca679922022-07-20 13:24:04 +02001852 if (sc->flags & SC_FL_ISBACK)
1853 __sc_strm(sc)->conn_exp = TICK_ETERNITY;
Christopher Fauletb96f2aa2022-12-12 08:11:36 +01001854 oc->flags |= CF_WRITE_EVENT;
Willy Tarreau0adb2812022-05-27 10:02:48 +02001855 if (sc->state == SC_ST_CON)
1856 sc->state = SC_ST_RDY;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001857 }
1858
1859 /* Report EOS on the channel if it was reached from the mux point of
1860 * view.
1861 *
Willy Tarreau462b9892022-05-18 18:06:53 +02001862	 * Note: This test is only required because sc_conn_process is also the SC
1863 * wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
Christopher Faulet5e29b762022-04-04 08:58:34 +02001864 * care of it.
1865 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001866 if (sc_ep_test(sc, SE_FL_EOS) && !(ic->flags & CF_SHUTR)) {
Christopher Faulet5e29b762022-04-04 08:58:34 +02001867 /* we received a shutdown */
Christopher Faulet6e1bbc42022-12-12 08:08:15 +01001868 ic->flags |= CF_READ_EVENT;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001869 if (ic->flags & CF_AUTO_CLOSE)
1870 channel_shutw_now(ic);
Willy Tarreau0adb2812022-05-27 10:02:48 +02001871 sc_conn_read0(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001872 }
1873
1874 /* Report EOI on the channel if it was reached from the mux point of
1875 * view.
1876 *
Willy Tarreau462b9892022-05-18 18:06:53 +02001877	 * Note: This test is only required because sc_conn_process is also the SC
1878 * wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
Christopher Faulet5e29b762022-04-04 08:58:34 +02001879 * care of it.
1880 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001881 if (sc_ep_test(sc, SE_FL_EOI) && !(ic->flags & CF_EOI))
Christopher Faulet285f7612022-12-12 08:28:55 +01001882 ic->flags |= (CF_EOI|CF_READ_EVENT);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001883
Willy Tarreau4596fe22022-05-17 19:07:51 +02001884 /* Second step : update the stream connector and channels, try to forward any
Christopher Faulet5e29b762022-04-04 08:58:34 +02001885 * pending data, then possibly wake the stream up based on the new
Willy Tarreau4596fe22022-05-17 19:07:51 +02001886 * stream connector status.
Christopher Faulet5e29b762022-04-04 08:58:34 +02001887 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001888 sc_notify(sc);
1889 stream_release_buffers(__sc_strm(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001890 return 0;
1891}
1892
Willy Tarreau4596fe22022-05-17 19:07:51 +02001893/* This is the ->process() function for any stream connector's wait_event task.
1894 * It's assigned during the stream connector's initialization, for any type of
1895 * stream connector. Thus it is always safe to perform a tasklet_wakeup() on a
Willy Tarreaue68bc612022-05-27 11:23:05 +02001896 * stream connector, as the presence of the SC is checked there.
Christopher Faulet5e29b762022-04-04 08:58:34 +02001897 */
Willy Tarreau462b9892022-05-18 18:06:53 +02001898struct task *sc_conn_io_cb(struct task *t, void *ctx, unsigned int state)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001899{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001900 struct stconn *sc = ctx;
Christopher Faulet5e29b762022-04-04 08:58:34 +02001901 int ret = 0;
1902
Willy Tarreau0adb2812022-05-27 10:02:48 +02001903 if (!sc_conn(sc))
Christopher Faulet5e29b762022-04-04 08:58:34 +02001904 return t;
1905
Willy Tarreau0adb2812022-05-27 10:02:48 +02001906 if (!(sc->wait_event.events & SUB_RETRY_SEND) && !channel_is_empty(sc_oc(sc)))
1907 ret = sc_conn_send(sc);
1908 if (!(sc->wait_event.events & SUB_RETRY_RECV))
1909 ret |= sc_conn_recv(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001910 if (ret != 0)
Willy Tarreau0adb2812022-05-27 10:02:48 +02001911 sc_conn_process(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001912
Willy Tarreau0adb2812022-05-27 10:02:48 +02001913 stream_release_buffers(__sc_strm(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001914 return t;
1915}
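
/* Wiring sketch (assumption, for illustration only): this callback is
 * expected to be installed on the stream connector's wait_event tasklet at
 * initialization time, along these lines:
 *
 *     sc->wait_event.tasklet->process = sc_conn_io_cb;
 *     sc->wait_event.tasklet->context = sc;
 *     sc->wait_event.events = 0;
 *
 * so that a mux performing tasklet_wakeup(sc->wait_event.tasklet) ends up
 * here after I/O completion.
 */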
1916
1917/* Callback to be used by applet handlers upon completion. It updates the stream
1918 * (which may or may not take this opportunity to try to forward data), then
Willy Tarreau4596fe22022-05-17 19:07:51 +02001919 * may re-enable the applet based on the channels and stream connector's final
Christopher Faulet5e29b762022-04-04 08:58:34 +02001920 * states.
1921 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001922static int sc_applet_process(struct stconn *sc)
Christopher Faulet5e29b762022-04-04 08:58:34 +02001923{
Willy Tarreau0adb2812022-05-27 10:02:48 +02001924 struct channel *ic = sc_ic(sc);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001925
Willy Tarreau0adb2812022-05-27 10:02:48 +02001926 BUG_ON(!sc_appctx(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001927
1928 /* If the applet wants to write and the channel is closed, it's a
1929 * broken pipe and it must be reported.
1930 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001931 if (!sc_ep_test(sc, SE_FL_HAVE_NO_DATA) && (ic->flags & CF_SHUTR))
1932 sc_ep_set(sc, SE_FL_ERROR);
Christopher Faulet5e29b762022-04-04 08:58:34 +02001933
1934	/* automatically mark the applet as having data available if it reported
1935	 * being blocked by the channel.
1936 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001937 if ((sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM)) ||
1938 sc_ep_test(sc, SE_FL_APPLET_NEED_CONN))
1939 applet_have_more_data(__sc_appctx(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001940
Willy Tarreau4596fe22022-05-17 19:07:51 +02001941 /* update the stream connector, channels, and possibly wake the stream up */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001942 sc_notify(sc);
1943 stream_release_buffers(__sc_strm(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001944
Willy Tarreau19c65a92022-05-27 08:49:24 +02001945 /* sc_notify may have passed through chk_snd and released some blocking
Willy Tarreau15252cd2022-05-25 16:36:21 +02001946 * flags. Process_stream will consider those flags to wake up the
Christopher Faulet5e29b762022-04-04 08:58:34 +02001947	 * appctx but, if the task is not in the run queue, we may have to
1948 * wakeup the appctx immediately.
1949 */
Willy Tarreau0adb2812022-05-27 10:02:48 +02001950 if (sc_is_recv_allowed(sc) || sc_is_send_allowed(sc))
1951 appctx_wakeup(__sc_appctx(sc));
Christopher Faulet5e29b762022-04-04 08:58:34 +02001952 return 0;
Christopher Faulet13045f02022-04-01 14:23:38 +02001953}
Christopher Fauletb68f77d2022-06-16 16:24:16 +02001954
1955
1956/* Prepares an endpoint upgrade. We don't know at this stage if the upgrade will
1957 * succeed or not and if the stconn will be reused by the new endpoint. Thus,
1958 * for now, only pretend the stconn is detached.
1959 */
1960void sc_conn_prepare_endp_upgrade(struct stconn *sc)
1961{
1962 BUG_ON(!sc_conn(sc) || !sc->app);
1963 sc_ep_clr(sc, SE_FL_T_MUX);
1964 sc_ep_set(sc, SE_FL_DETACHED);
1965}
1966
Ilya Shipitsin3b64a282022-07-29 22:26:53 +05001967/* Endpoint upgrade failed. Restore the stconn state. */
Christopher Fauletb68f77d2022-06-16 16:24:16 +02001968void sc_conn_abort_endp_upgrade(struct stconn *sc)
1969{
1970 sc_ep_set(sc, SE_FL_T_MUX);
1971 sc_ep_clr(sc, SE_FL_DETACHED);
1972}
1973
1974/* Commit the endpoint upgrade. If stconn is attached, it means the new endpoint
1975 * uses it. So we do nothing. Otherwise, the stconn will be destroyed with the
1976 * overlying stream. So, it means we must commit the detach.
1977*/
1978void sc_conn_commit_endp_upgrade(struct stconn *sc)
1979{
1980 if (!sc_ep_test(sc, SE_FL_DETACHED))
1981 return;
1982 sc_detach_endp(&sc);
1983 /* Because it was already set as detached, the sedesc must be preserved */
Willy Tarreau6a378d12022-08-11 13:56:42 +02001984 BUG_ON(!sc);
Christopher Fauletb68f77d2022-06-16 16:24:16 +02001985 BUG_ON(!sc->sedesc);
1986}
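
/* Usage sketch for the three helpers above (illustrative only, the real
 * caller lives in the endpoint/mux upgrade code): an upgrade attempt is
 * expected to be bracketed as follows:
 *
 *     sc_conn_prepare_endp_upgrade(sc);
 *     if (install_new_endpoint(sc) < 0) {   // hypothetical upgrade step
 *             sc_conn_abort_endp_upgrade(sc);
 *             return -1;
 *     }
 *     sc_conn_commit_endp_upgrade(sc);
 *
 * where install_new_endpoint() stands for whatever attaches the new mux or
 * applet to the stconn.
 */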