blob: 96bf2125aaa69185e879e7d28ed80224e7a79a24 [file] [log] [blame]
Willy Tarreaubaaee002006-06-26 02:48:02 +02001/*
Willy Tarreauc7e42382012-08-24 19:22:53 +02002 * include/proto/channel.h
3 * Channel management definitions, macros and inline functions.
Willy Tarreau7c3c5412009-12-13 15:53:05 +01004 *
Willy Tarreaua27dc192014-11-27 22:10:04 +01005 * Copyright (C) 2000-2014 Willy Tarreau - w@1wt.eu
Willy Tarreau7c3c5412009-12-13 15:53:05 +01006 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation, version 2.1
10 * exclusively.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
Willy Tarreaubaaee002006-06-26 02:48:02 +020021
Willy Tarreauc7e42382012-08-24 19:22:53 +020022#ifndef _PROTO_CHANNEL_H
23#define _PROTO_CHANNEL_H
Willy Tarreaubaaee002006-06-26 02:48:02 +020024
Willy Tarreaua1bd1fa2019-03-29 17:26:33 +010025#include <inttypes.h>
Willy Tarreau7341d942007-05-13 19:56:02 +020026#include <stdio.h>
Willy Tarreau0f772532006-12-23 20:51:41 +010027#include <stdlib.h>
Willy Tarreau7341d942007-05-13 19:56:02 +020028#include <string.h>
Willy Tarreau0f772532006-12-23 20:51:41 +010029
Willy Tarreau4c7e4b72020-05-27 12:58:42 +020030#include <haproxy/api.h>
Willy Tarreauc13ed532020-06-02 10:22:45 +020031#include <haproxy/chunk.h>
Willy Tarreau2741c8c2020-06-02 11:28:02 +020032#include <haproxy/dynbuf.h>
Willy Tarreauf268ee82020-06-04 17:05:57 +020033#include <haproxy/global.h>
Willy Tarreau16f958c2020-06-03 08:44:35 +020034#include <haproxy/htx.h>
Willy Tarreauc2f7c582020-06-02 18:15:32 +020035#include <haproxy/ticks.h>
Willy Tarreau92b4f132020-06-01 11:05:15 +020036#include <haproxy/time.h>
Willy Tarreaufa645582007-06-03 15:59:52 +020037
Thierry FOURNIERac836ba2014-12-16 15:41:18 +010038#include <types/channel.h>
Willy Tarreau87b09662015-04-03 00:22:06 +020039#include <types/stream.h>
Willy Tarreau73796532014-11-28 14:10:28 +010040#include <types/stream_interface.h>
Willy Tarreaubaaee002006-06-26 02:48:02 +020041
Christopher Fauletaad45852019-05-14 22:14:03 +020042#include <proto/stream.h>
Willy Tarreaucea0e1b2020-06-04 17:25:40 +020043#include <haproxy/task.h>
Christopher Fauleta73e59b2016-12-09 17:30:18 +010044
/* perform minimal initializations, report 0 in case of error, 1 if OK. */
Willy Tarreau8263d2b2012-08-28 00:06:31 +020046int init_channel();
Willy Tarreau7341d942007-05-13 19:56:02 +020047
Willy Tarreau55a69062012-10-26 00:21:52 +020048unsigned long long __channel_forward(struct channel *chn, unsigned long long bytes);
Willy Tarreau8263d2b2012-08-28 00:06:31 +020049
50/* SI-to-channel functions working with buffers */
Willy Tarreau06d80a92017-10-19 14:32:15 +020051int ci_putblk(struct channel *chn, const char *str, int len);
Willy Tarreau06d80a92017-10-19 14:32:15 +020052int ci_putchr(struct channel *chn, char c);
Willy Tarreau55f3ce12018-07-18 11:49:27 +020053int ci_getline_nc(const struct channel *chn, char **blk1, size_t *len1, char **blk2, size_t *len2);
54int ci_getblk_nc(const struct channel *chn, char **blk1, size_t *len1, char **blk2, size_t *len2);
Willy Tarreau4d893d42018-07-12 15:43:32 +020055int ci_insert_line2(struct channel *c, int pos, const char *str, int len);
Willy Tarreau06d80a92017-10-19 14:32:15 +020056int co_inject(struct channel *chn, const char *msg, int len);
Willy Tarreau41ab8682017-10-19 14:58:40 +020057int co_getline(const struct channel *chn, char *str, int len);
58int co_getblk(const struct channel *chn, char *blk, int len, int offset);
Willy Tarreau55f3ce12018-07-18 11:49:27 +020059int co_getline_nc(const struct channel *chn, const char **blk1, size_t *len1, const char **blk2, size_t *len2);
60int co_getblk_nc(const struct channel *chn, const char **blk1, size_t *len1, const char **blk2, size_t *len2);
Thierry FOURNIERca16b032015-02-16 19:26:48 +010061
Willy Tarreau74b08c92010-09-08 17:04:31 +020062
Willy Tarreau87b09662015-04-03 00:22:06 +020063/* returns a pointer to the stream the channel belongs to */
Thierry FOURNIER27929fb2015-09-25 08:36:11 +020064static inline struct stream *chn_strm(const struct channel *chn)
Willy Tarreaud5ccfa32014-12-28 13:03:53 +010065{
66 if (chn->flags & CF_ISRESP)
Willy Tarreau87b09662015-04-03 00:22:06 +020067 return LIST_ELEM(chn, struct stream *, res);
Willy Tarreaud5ccfa32014-12-28 13:03:53 +010068 else
Willy Tarreau87b09662015-04-03 00:22:06 +020069 return LIST_ELEM(chn, struct stream *, req);
Willy Tarreaud5ccfa32014-12-28 13:03:53 +010070}
71
Willy Tarreau73796532014-11-28 14:10:28 +010072/* returns a pointer to the stream interface feeding the channel (producer) */
73static inline struct stream_interface *chn_prod(const struct channel *chn)
74{
Willy Tarreau5decc052014-11-28 14:22:12 +010075 if (chn->flags & CF_ISRESP)
Willy Tarreau87b09662015-04-03 00:22:06 +020076 return &LIST_ELEM(chn, struct stream *, res)->si[1];
Willy Tarreau5decc052014-11-28 14:22:12 +010077 else
Willy Tarreau87b09662015-04-03 00:22:06 +020078 return &LIST_ELEM(chn, struct stream *, req)->si[0];
Willy Tarreau73796532014-11-28 14:10:28 +010079}
80
/* returns a pointer to the stream interface consuming the channel (consumer).
 * The response channel is consumed by the client side (si[0]) while the
 * request channel is consumed by the server side (si[1]).
 */
static inline struct stream_interface *chn_cons(const struct channel *chn)
{
	if (chn->flags & CF_ISRESP)
		return &LIST_ELEM(chn, struct stream *, res)->si[0];
	else
		return &LIST_ELEM(chn, struct stream *, req)->si[1];
}
89
/* c_orig() : returns the pointer to the channel buffer's origin */
static inline char *c_orig(const struct channel *c)
{
	return b_orig(&c->buf);
}

/* c_size() : returns the size of the channel's buffer */
static inline size_t c_size(const struct channel *c)
{
	return b_size(&c->buf);
}

/* c_wrap() : returns the pointer to the channel buffer's wrapping point */
static inline char *c_wrap(const struct channel *c)
{
	return b_wrap(&c->buf);
}

/* c_data() : returns the amount of data in the channel's buffer */
static inline size_t c_data(const struct channel *c)
{
	return b_data(&c->buf);
}

/* c_room() : returns the room left in the channel's buffer */
static inline size_t c_room(const struct channel *c)
{
	return b_size(&c->buf) - b_data(&c->buf);
}

/* c_empty() : returns a boolean indicating if the channel's buffer is empty
 * (no data at all, input nor output).
 */
static inline size_t c_empty(const struct channel *c)
{
	return !c_data(c);
}

/* c_full() : returns a boolean indicating if the channel's buffer is full
 * (no room left at all).
 */
static inline size_t c_full(const struct channel *c)
{
	return !c_room(c);
}
131
/* co_data() : returns the amount of output data in the channel's buffer,
 * i.e. data already scheduled for forwarding.
 */
static inline size_t co_data(const struct channel *c)
{
	return c->output;
}

/* ci_data() : returns the amount of input data in the channel's buffer,
 * i.e. total buffer data minus what is already scheduled for output.
 */
static inline size_t ci_data(const struct channel *c)
{
	return c_data(c) - co_data(c);
}
143
/* ci_next() : for an absolute pointer <p> or a relative offset <o> pointing to
 * a valid location within channel <c>'s buffer, returns either the absolute
 * pointer or the relative offset pointing to the next byte, which usually is
 * at (p + 1) unless p reaches the wrapping point and wrapping is needed.
 */
static inline size_t ci_next_ofs(const struct channel *c, size_t o)
{
	return b_next_ofs(&c->buf, o);
}
static inline char *ci_next(const struct channel *c, const char *p)
{
	return b_next(&c->buf, p);
}
157
158
/* c_ptr() : returns a pointer to an offset relative to the beginning of the
 * input data in the buffer. If instead the offset is negative, a pointer to
 * existing output data is returned. The function only takes care of wrapping,
 * it's up to the caller to ensure the offset is always within byte count
 * bounds.
 */
static inline char *c_ptr(const struct channel *c, ssize_t ofs)
{
	/* input starts co_data(c) bytes after the buffer's head */
	return b_peek(&c->buf, co_data(c) + ofs);
}
169
/* c_adv() : advances the channel's buffer by <adv> bytes, which means that the
 * buffer's pointer advances, and that as many bytes from in are transferred
 * from in to out. The caller is responsible for ensuring that adv is always
 * smaller than or equal to b->i.
 */
static inline void c_adv(struct channel *c, size_t adv)
{
	c->output += adv;
}

/* c_rew() : rewinds the channel's buffer by <adv> bytes, which means that the
 * buffer's pointer goes backwards, and that as many bytes from out are moved
 * to in. The caller is responsible for ensuring that adv is always smaller
 * than or equal to b->o.
 */
static inline void c_rew(struct channel *c, size_t adv)
{
	c->output -= adv;
}
189
/* c_realign_if_empty() : realign the channel's buffer if it's empty, so that
 * subsequent reads start at the buffer's origin.
 */
static inline void c_realign_if_empty(struct channel *chn)
{
	b_realign_if_empty(&chn->buf);
}
195
/* Sets the amount of output (forwardable) data for the channel. No check is
 * performed, the caller must pass a value consistent with the buffer contents.
 */
static inline void co_set_data(struct channel *c, size_t output)
{
	c->output = output;
}
201
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200202
/* co_head() : returns a pointer to the beginning of output data in the buffer.
 * The "__" variants don't support wrapping, "ofs" are relative to
 * the buffer's origin.
 */
static inline size_t __co_head_ofs(const struct channel *c)
{
	return __b_peek_ofs(&c->buf, 0);
}
static inline char *__co_head(const struct channel *c)
{
	return __b_peek(&c->buf, 0);
}
static inline size_t co_head_ofs(const struct channel *c)
{
	return b_peek_ofs(&c->buf, 0);
}
static inline char *co_head(const struct channel *c)
{
	return b_peek(&c->buf, 0);
}
223
224
/* co_tail() : returns a pointer to the end of output data in the buffer,
 * which is also the beginning of input data.
 * The "__" variants don't support wrapping, "ofs" are relative to
 * the buffer's origin.
 */
static inline size_t __co_tail_ofs(const struct channel *c)
{
	return __b_peek_ofs(&c->buf, co_data(c));
}
static inline char *__co_tail(const struct channel *c)
{
	return __b_peek(&c->buf, co_data(c));
}
static inline size_t co_tail_ofs(const struct channel *c)
{
	return b_peek_ofs(&c->buf, co_data(c));
}
static inline char *co_tail(const struct channel *c)
{
	return b_peek(&c->buf, co_data(c));
}
245
246
/* ci_head() : returns a pointer to the beginning of input data in the buffer
 * (i.e. just past the output data).
 * The "__" variants don't support wrapping, "ofs" are relative to
 * the buffer's origin.
 */
static inline size_t __ci_head_ofs(const struct channel *c)
{
	return __b_peek_ofs(&c->buf, co_data(c));
}
static inline char *__ci_head(const struct channel *c)
{
	return __b_peek(&c->buf, co_data(c));
}
static inline size_t ci_head_ofs(const struct channel *c)
{
	return b_peek_ofs(&c->buf, co_data(c));
}
static inline char *ci_head(const struct channel *c)
{
	return b_peek(&c->buf, co_data(c));
}
267
268
/* ci_tail() : returns a pointer to the end of input data in the buffer
 * (i.e. the end of all present data).
 * The "__" variants don't support wrapping, "ofs" are relative to
 * the buffer's origin.
 */
static inline size_t __ci_tail_ofs(const struct channel *c)
{
	return __b_peek_ofs(&c->buf, c_data(c));
}
static inline char *__ci_tail(const struct channel *c)
{
	return __b_peek(&c->buf, c_data(c));
}
static inline size_t ci_tail_ofs(const struct channel *c)
{
	return b_peek_ofs(&c->buf, c_data(c));
}
static inline char *ci_tail(const struct channel *c)
{
	return b_peek(&c->buf, c_data(c));
}
289
290
/* ci_stop() : returns the pointer to the byte following the end of input data
 * in the channel buffer. It may be out of the buffer. It's used to
 * compute lengths or stop pointers.
 */
static inline size_t __ci_stop_ofs(const struct channel *c)
{
	return __b_stop_ofs(&c->buf);
}
static inline const char *__ci_stop(const struct channel *c)
{
	return __b_stop(&c->buf);
}
static inline size_t ci_stop_ofs(const struct channel *c)
{
	return b_stop_ofs(&c->buf);
}
static inline const char *ci_stop(const struct channel *c)
{
	return b_stop(&c->buf);
}
311
312
/* Returns the amount of input data that can contiguously be read at once,
 * i.e. without crossing the buffer's wrapping point.
 */
static inline size_t ci_contig_data(const struct channel *c)
{
	return b_contig_data(&c->buf, co_data(c));
}
318
/* Initialize all fields in the channel. */
static inline void channel_init(struct channel *chn)
{
	chn->buf = BUF_NULL;                   /* no buffer allocated yet */
	chn->to_forward = 0;                   /* nothing scheduled for forwarding */
	chn->last_read = now_ms;               /* consider the channel freshly read */
	chn->xfer_small = chn->xfer_large = 0; /* transfer size counters */
	chn->total = 0;                        /* total bytes ever transferred */
	chn->pipe = NULL;                      /* no splice pipe attached */
	chn->analysers = 0;
	chn->flags = 0;
	chn->output = 0;                       /* no output data in the buffer */
}
332
/* Schedule up to <bytes> more bytes to be forwarded via the channel without
 * notifying the owner task. Any data pending in the buffer are scheduled to be
 * sent as well, in the limit of the number of bytes to forward. This must be
 * the only method to use to schedule bytes to be forwarded. If the requested
 * number is too large, it is automatically adjusted. The number of bytes taken
 * into account is returned. Directly touching ->to_forward will cause lockups
 * when buf->o goes down to zero if nobody is ready to push the remaining data.
 */
static inline unsigned long long channel_forward(struct channel *chn, unsigned long long bytes)
{
	/* hint: avoid comparisons on long long for the fast case, since if the
	 * length does not fit in an unsigned int, it will never be forwarded at
	 * once anyway.
	 */
	if (bytes <= ~0U) {
		unsigned int bytes32 = bytes;

		if (bytes32 <= ci_data(chn)) {
			/* OK this amount of bytes might be forwarded at once */
			c_adv(chn, bytes32);
			return bytes;
		}
	}
	/* slow path: large requests or partial data go through the
	 * out-of-line helper which updates ->to_forward.
	 */
	return __channel_forward(chn, bytes);
}
358
/* Forwards any input data and marks the channel for permanent forwarding:
 * all current input is moved to output and ->to_forward is set to
 * CHN_INFINITE_FORWARD.
 */
static inline void channel_forward_forever(struct channel *chn)
{
	c_adv(chn, ci_data(chn));
	chn->to_forward = CHN_INFINITE_FORWARD;
}
365
/* <len> bytes of input data were added into the channel <chn>. This function
 * must be called to update the channel state. It also handles the fast
 * forwarding: any pending ->to_forward budget is immediately consumed by
 * advancing the output.
 */
static inline void channel_add_input(struct channel *chn, unsigned int len)
{
	if (chn->to_forward) {
		unsigned long fwd = len;
		if (chn->to_forward != CHN_INFINITE_FORWARD) {
			/* bounded forwarding: clip to the remaining budget */
			if (fwd > chn->to_forward)
				fwd = chn->to_forward;
			chn->to_forward -= fwd;
		}
		c_adv(chn, fwd);
	}
	/* notify that some data was read */
	chn->total += len;
	chn->flags |= CF_READ_PARTIAL;
}
384
/* HTX version of channel_forward(). Schedules up to <bytes> bytes of the HTX
 * payload carried by <htx> to be forwarded via channel <chn>. The buffer's
 * apparent data length is temporarily set to the HTX payload size so that
 * channel_forward() accounts for real payload only, then restored to the full
 * buffer size. Returns the number of bytes taken into account (0 when the HTX
 * message carries no data).
 */
static inline unsigned long long channel_htx_forward(struct channel *chn, struct htx *htx, unsigned long long bytes)
{
	unsigned long long ret = 0;

	if (htx->data) {
		b_set_data(&chn->buf, htx->data);
		ret = channel_forward(chn, bytes);
		b_set_data(&chn->buf, b_size(&chn->buf));
	}
	return ret;
}
396
397
/* HTX version of channel_forward_forever(). Schedules all current HTX payload
 * for forwarding (advances the output up to htx->data) and marks the channel
 * for permanent forwarding.
 */
static inline void channel_htx_forward_forever(struct channel *chn, struct htx *htx)
{
	c_adv(chn, htx->data - co_data(chn));
	chn->to_forward = CHN_INFINITE_FORWARD;
}
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200403/*********************************************************************/
404/* These functions are used to compute various channel content sizes */
405/*********************************************************************/
Willy Tarreau4b517ca2011-11-25 20:33:58 +0100406
/* Reports non-zero if the channel is empty, which means both its
 * buffer and pipe are empty. The construct looks strange but is
 * jump-less and much more efficient on both 32 and 64-bit than
 * the boolean test.
 */
static inline unsigned int channel_is_empty(const struct channel *c)
{
	return !(co_data(c) | (long)c->pipe);
}
416
/* Returns non-zero if the channel is rewritable, which means that the buffer
 * it is attached to has at least <maxrewrite> bytes immediately available.
 * This is used to decide when a request or response may be parsed when some
 * data from a previous exchange might still be present.
 */
static inline int channel_is_rewritable(const struct channel *chn)
{
	int rem = chn->buf.size;

	rem -= b_data(&chn->buf);
	rem -= global.tune.maxrewrite;
	return rem >= 0;
}
430
/* Tells whether data are likely to leave the buffer. This is used to know when
 * we can safely ignore the reserve since we know we cannot retry a connection.
 * It returns zero if data are blocked, non-zero otherwise. The consumer-side
 * stream interface must be established (SI_ST_EST) for data to flow out.
 */
static inline int channel_may_send(const struct channel *chn)
{
	return chn_cons(chn)->state == SI_ST_EST;
}
439
/* Returns non-zero if the channel can still receive data. This is used to
 * decide when to stop reading into a buffer when we want to ensure that we
 * leave the reserve untouched after all pending outgoing data are forwarded.
 * The reserved space is taken into account if ->to_forward indicates that an
 * end of transfer is close to happen. Note that both ->buf.o and ->to_forward
 * are considered as available since they're supposed to leave the buffer. The
 * test is optimized to avoid as many operations as possible for the fast case
 * and to be used as an "if" condition. Just like channel_recv_limit(), we
 * never allow to overwrite the reserve until the output stream interface is
 * connected, otherwise we could spin on a POST with http-send-name-header.
 */
static inline int channel_may_recv(const struct channel *chn)
{
	int rem = chn->buf.size;

	/* a not-yet-allocated buffer may always receive */
	if (b_is_null(&chn->buf))
		return 1;

	rem -= b_data(&chn->buf);
	if (!rem)
		return 0; /* buffer already full */

	if (rem > global.tune.maxrewrite)
		return 1; /* reserve not yet reached */

	if (!channel_may_send(chn))
		return 0; /* don't touch reserve until we can send */

	/* Now we know there's some room left in the reserve and we may
	 * forward. As long as i-to_fwd < size-maxrw, we may still
	 * receive. This is equivalent to i+maxrw-size < to_fwd,
	 * which is logical since i+maxrw-size is what overlaps with
	 * the reserve, and we want to ensure they're covered by scheduled
	 * forwards.
	 */
	rem = ci_data(chn) + global.tune.maxrewrite - chn->buf.size;
	return rem < 0 || (unsigned int)rem < chn->to_forward;
}
478
/* HTX version of channel_may_recv(). Returns non-zero if the channel can still
 * receive data. The free space is evaluated on the HTX message instead of the
 * raw buffer.
 */
static inline int channel_htx_may_recv(const struct channel *chn, const struct htx *htx)
{
	uint32_t rem;

	/* an unallocated HTX message may always receive */
	if (!htx->size)
		return 1;

	if (!channel_may_send(chn))
		return 0; /* don't touch reserve until we can send */

	rem = htx_free_data_space(htx);
	if (!rem)
		return 0; /* htx already full */

	if (rem > global.tune.maxrewrite)
		return 1; /* reserve not yet reached */

	/* Now we know there's some room left in the reserve and we may
	 * forward. As long as i-to_fwd < size-maxrw, we may still
	 * receive. This is equivalent to i+maxrw-size < to_fwd,
	 * which is logical since i+maxrw-size is what overlaps with
	 * the reserve, and we want to ensure they're covered by scheduled
	 * forwards.
	 */
	rem += co_data(chn);
	if (rem > global.tune.maxrewrite)
		return 1;

	return (global.tune.maxrewrite - rem < chn->to_forward);
}
511
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200512/* Returns true if the channel's input is already closed */
Willy Tarreau974ced62012-10-12 23:11:02 +0200513static inline int channel_input_closed(struct channel *chn)
Willy Tarreau74b08c92010-09-08 17:04:31 +0200514{
Willy Tarreau974ced62012-10-12 23:11:02 +0200515 return ((chn->flags & CF_SHUTR) != 0);
Willy Tarreau74b08c92010-09-08 17:04:31 +0200516}
517
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200518/* Returns true if the channel's output is already closed */
Willy Tarreau974ced62012-10-12 23:11:02 +0200519static inline int channel_output_closed(struct channel *chn)
Willy Tarreau74b08c92010-09-08 17:04:31 +0200520{
Willy Tarreau974ced62012-10-12 23:11:02 +0200521 return ((chn->flags & CF_SHUTW) != 0);
Willy Tarreau74b08c92010-09-08 17:04:31 +0200522}
523
/* Check channel timeouts, and set the corresponding flags. The likely/unlikely
 * have been optimized for fastest normal path. The read/write timeouts are not
 * set if there was activity on the channel. That way, we don't have to update
 * the timeout on every I/O. Note that the analyser timeout is always checked.
 */
static inline void channel_check_timeouts(struct channel *chn)
{
	if (likely(!(chn->flags & (CF_SHUTR|CF_READ_TIMEOUT|CF_READ_ACTIVITY|CF_READ_NOEXP))) &&
	    unlikely(tick_is_expired(chn->rex, now_ms)))
		chn->flags |= CF_READ_TIMEOUT;

	if (likely(!(chn->flags & (CF_SHUTW|CF_WRITE_TIMEOUT|CF_WRITE_ACTIVITY))) &&
	    unlikely(tick_is_expired(chn->wex, now_ms)))
		chn->flags |= CF_WRITE_TIMEOUT;

	if (likely(!(chn->flags & CF_ANA_TIMEOUT)) &&
	    unlikely(tick_is_expired(chn->analyse_exp, now_ms)))
		chn->flags |= CF_ANA_TIMEOUT;
}
543
/* Erase any content from channel <chn> and adjusts flags accordingly. Note
 * that any spliced data is not affected since we may not have any access to
 * it.
 */
static inline void channel_erase(struct channel *chn)
{
	chn->to_forward = 0;
	chn->output = 0;
	b_reset(&chn->buf);
}
554
/* HTX version of channel_erase(): resets the HTX message <htx> then erases
 * the channel <chn>.
 */
static inline void channel_htx_erase(struct channel *chn, struct htx *htx)
{
	htx_reset(htx);
	channel_erase(chn);
}
560
/* marks the channel as "shutdown" ASAP for reads */
static inline void channel_shutr_now(struct channel *chn)
{
	chn->flags |= CF_SHUTR_NOW;
}

/* marks the channel as "shutdown" ASAP for writes */
static inline void channel_shutw_now(struct channel *chn)
{
	chn->flags |= CF_SHUTW_NOW;
}

/* marks the channel as "shutdown" ASAP in both directions and disables
 * automatic connection attempts (CF_AUTO_CONNECT is cleared).
 */
static inline void channel_abort(struct channel *chn)
{
	chn->flags |= CF_SHUTR_NOW | CF_SHUTW_NOW;
	chn->flags &= ~CF_AUTO_CONNECT;
}
579
/* allow the consumer to try to establish a new connection. */
static inline void channel_auto_connect(struct channel *chn)
{
	chn->flags |= CF_AUTO_CONNECT;
}

/* prevent the consumer from trying to establish a new connection, and also
 * disable auto shutdown forwarding.
 */
static inline void channel_dont_connect(struct channel *chn)
{
	chn->flags &= ~(CF_AUTO_CONNECT|CF_AUTO_CLOSE);
}

/* allow the producer to forward shutdown requests */
static inline void channel_auto_close(struct channel *chn)
{
	chn->flags |= CF_AUTO_CLOSE;
}

/* prevent the producer from forwarding shutdown requests */
static inline void channel_dont_close(struct channel *chn)
{
	chn->flags &= ~CF_AUTO_CLOSE;
}
605
Willy Tarreau90deb182010-01-07 00:20:41 +0100606/* allow the producer to read / poll the input */
Willy Tarreau974ced62012-10-12 23:11:02 +0200607static inline void channel_auto_read(struct channel *chn)
Willy Tarreau90deb182010-01-07 00:20:41 +0100608{
Willy Tarreau974ced62012-10-12 23:11:02 +0200609 chn->flags &= ~CF_DONT_READ;
Willy Tarreau90deb182010-01-07 00:20:41 +0100610}
611
612/* prevent the producer from read / poll the input */
Willy Tarreau974ced62012-10-12 23:11:02 +0200613static inline void channel_dont_read(struct channel *chn)
Willy Tarreau90deb182010-01-07 00:20:41 +0100614{
Willy Tarreau974ced62012-10-12 23:11:02 +0200615 chn->flags |= CF_DONT_READ;
Willy Tarreau90deb182010-01-07 00:20:41 +0100616}
617
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200618
619/*************************************************/
620/* Buffer operations in the context of a channel */
621/*************************************************/
622
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200623
624/* Return the max number of bytes the buffer can contain so that once all the
Willy Tarreau169c4702016-04-20 18:05:17 +0200625 * pending bytes are forwarded, the buffer still has global.tune.maxrewrite
Willy Tarreau974ced62012-10-12 23:11:02 +0200626 * bytes free. The result sits between chn->size - maxrewrite and chn->size.
Willy Tarreau169c4702016-04-20 18:05:17 +0200627 * It is important to mention that if buf->i is already larger than size-maxrw
628 * the condition above cannot be satisfied and the lowest size will be returned
629 * anyway. The principles are the following :
630 * 0) the empty buffer has a limit of zero
631 * 1) a non-connected buffer cannot touch the reserve
632 * 2) infinite forward can always fill the buffer since all data will leave
633 * 3) all output bytes are considered in transit since they're leaving
634 * 4) all input bytes covered by to_forward are considered in transit since
635 * they'll be converted to output bytes.
636 * 5) all input bytes not covered by to_forward as considered remaining
637 * 6) all bytes scheduled to be forwarded minus what is already in the input
638 * buffer will be in transit during future rounds.
639 * 7) 4+5+6 imply that the amount of input bytes (i) is irrelevant to the max
640 * usable length, only to_forward and output count. The difference is
641 * visible when to_forward > i.
642 * 8) the reserve may be covered up to the amount of bytes in transit since
643 * these bytes will only take temporary space.
Willy Tarreau999f6432016-01-25 01:09:11 +0100644 *
Willy Tarreau169c4702016-04-20 18:05:17 +0200645 * A typical buffer looks like this :
Willy Tarreau999f6432016-01-25 01:09:11 +0100646 *
Willy Tarreau169c4702016-04-20 18:05:17 +0200647 * <-------------- max_len ----------->
648 * <---- o ----><----- i -----> <--- 0..maxrewrite --->
649 * +------------+--------------+-------+----------------------+
650 * |////////////|\\\\\\\\\\\\\\|xxxxxxx| reserve |
651 * +------------+--------+-----+-------+----------------------+
652 * <- fwd -> <-avail->
653 *
654 * Or when to_forward > i :
655 *
656 * <-------------- max_len ----------->
657 * <---- o ----><----- i -----> <--- 0..maxrewrite --->
658 * +------------+--------------+-------+----------------------+
659 * |////////////|\\\\\\\\\\\\\\|xxxxxxx| reserve |
660 * +------------+--------+-----+-------+----------------------+
661 * <-avail->
662 * <------------------ fwd ---------------->
663 *
664 * - the amount of buffer bytes in transit is : min(i, fwd) + o
665 * - some scheduled bytes may be in transit (up to fwd - i)
666 * - the reserve is max(0, maxrewrite - transit)
667 * - the maximum usable buffer length is size - reserve.
668 * - the available space is max_len - i - o
669 *
670 * So the formula to compute the buffer's maximum length to protect the reserve
671 * when reading new data is :
672 *
673 * max = size - maxrewrite + min(maxrewrite, transit)
674 * = size - max(maxrewrite - transit, 0)
675 *
676 * But WARNING! The conditions might change during the transfer and it could
677 * very well happen that a buffer would contain more bytes than max_len due to
678 * i+o already walking over the reserve (eg: after a header rewrite), including
679 * i or o alone hitting the limit. So it is critical to always consider that
680 * bounds may have already been crossed and that available space may be negative
681 * for example. Due to this it is perfectly possible for this function to return
682 * a value that is lower than current i+o.
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200683 */
static inline int channel_recv_limit(const struct channel *chn)
{
	unsigned int transit;
	int reserve;

	/* unallocated buffer: reserve everything so that size - reserve = 0 */
	reserve = chn->buf.size;
	if (b_is_null(&chn->buf))
		goto end;

	/* cannot send: the full maxrewrite reserve must be preserved */
	reserve = global.tune.maxrewrite;
	if (unlikely(!channel_may_send(chn)))
		goto end;

	/* We need to check what remains of the reserve after o and to_forward
	 * have been transmitted, but they can overflow together and they can
	 * cause an integer underflow in the comparison since both are unsigned
	 * while maxrewrite is signed.
	 * The code below has been verified for being a valid check for this :
	 *   - if (o + to_forward) overflow => return size  [ large enough ]
	 *   - if o + to_forward >= maxrw   => return size  [ large enough ]
	 *   - otherwise return size - (maxrw - (o + to_forward))
	 */
	transit = co_data(chn) + chn->to_forward;
	reserve -= transit; /* may go negative; only used on the non-overflow path */
	if (transit < chn->to_forward ||           // addition overflow
	    transit >= (unsigned)global.tune.maxrewrite) // enough transit data
		return chn->buf.size;
 end:
	return chn->buf.size - reserve;
}
716
/* HTX version of channel_recv_limit(). Return the max number of bytes the HTX
 * buffer can contain so that once all the pending bytes are forwarded, the
 * buffer still has global.tune.maxrewrite bytes free.
 */
static inline int channel_htx_recv_limit(const struct channel *chn, const struct htx *htx)
{
	unsigned int transit;
	int reserve;

	/* return zero if not allocated */
	if (!htx->size)
		return 0;

	/* return htx->size - maxrewrite if we can't send */
	reserve = global.tune.maxrewrite;
	if (unlikely(!channel_may_send(chn)))
		goto end;

	/* We need to check what remains of the reserve after o and to_forward
	 * have been transmitted, but they can overflow together and they can
	 * cause an integer underflow in the comparison since both are unsigned
	 * while maxrewrite is signed.
	 * The code below has been verified for being a valid check for this :
	 *   - if (o + to_forward) overflow => return htx->size  [ large enough ]
	 *   - if o + to_forward >= maxrw   => return htx->size  [ large enough ]
	 *   - otherwise return htx->size - (maxrw - (o + to_forward))
	 */
	transit = co_data(chn) + chn->to_forward;
	reserve -= transit; /* may go negative; only used on the non-overflow path */
	if (transit < chn->to_forward ||     // addition overflow
	    transit >= (unsigned)global.tune.maxrewrite) // enough transit data
		return htx->size;
 end:
	return (htx->size - reserve);
}
752
Christopher Faulet87ebe942019-06-11 14:14:49 +0200753/* HTX version of channel_full(). Instead of checking if INPUT data exceeds
754 * (size - reserve), this function checks if the free space for data in <htx>
755 * and the data scheduled for output are lower to the reserve. In such case, the
756 * channel is considered as full.
757 */
758static inline int channel_htx_full(const struct channel *c, const struct htx *htx,
759 unsigned int reserve)
760{
761 if (!htx->size)
762 return 0;
763 return (htx_free_data_space(htx) + co_data(c) <= reserve);
764}
765
Willy Tarreau23752332018-06-15 14:54:53 +0200766/* Returns non-zero if the channel's INPUT buffer's is considered full, which
767 * means that it holds at least as much INPUT data as (size - reserve). This
768 * also means that data that are scheduled for output are considered as potential
769 * free space, and that the reserved space is always considered as not usable.
770 * This information alone cannot be used as a general purpose free space indicator.
771 * However it accurately indicates that too many data were fed in the buffer
772 * for an analyzer for instance. See the channel_may_recv() function for a more
773 * generic function taking everything into account.
774 */
775static inline int channel_full(const struct channel *c, unsigned int reserve)
776{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200777 if (b_is_null(&c->buf))
Willy Tarreau23752332018-06-15 14:54:53 +0200778 return 0;
779
Christopher Faulet87ebe942019-06-11 14:14:49 +0200780 if (IS_HTX_STRM(chn_strm(c)))
781 return channel_htx_full(c, htxbuf(&c->buf), reserve);
Willy Tarreau23752332018-06-15 14:54:53 +0200782
Christopher Faulet87ebe942019-06-11 14:14:49 +0200783 return (ci_data(c) + reserve >= c_size(c));
Christopher Faulet5811db02019-01-07 13:57:01 +0100784}
785
/* HTX version of channel_recv_max(): room left at the input of the HTX
 * message, never negative.
 */
static inline int channel_htx_recv_max(const struct channel *chn, const struct htx *htx)
{
	int room;

	room = channel_htx_recv_limit(chn, htx) - htx_used_space(htx);
	return (room > 0) ? room : 0;
}
796
Christopher Fauletaad45852019-05-14 22:14:03 +0200797/* Returns the amount of space available at the input of the buffer, taking the
798 * reserved space into account if ->to_forward indicates that an end of transfer
799 * is close to happen. The test is optimized to avoid as many operations as
800 * possible for the fast case.
801 */
802static inline int channel_recv_max(const struct channel *chn)
Christopher Faulet5811db02019-01-07 13:57:01 +0100803{
804 int ret;
805
Christopher Fauletaad45852019-05-14 22:14:03 +0200806 if (IS_HTX_STRM(chn_strm(chn)))
807 return channel_htx_recv_max(chn, htxbuf(&chn->buf));
808
809 ret = channel_recv_limit(chn) - b_data(&chn->buf);
Christopher Faulet5811db02019-01-07 13:57:01 +0100810 if (ret < 0)
811 ret = 0;
812 return ret;
813}
814
Willy Tarreau3f679992018-06-15 15:06:42 +0200815/* Returns the amount of bytes that can be written over the input data at once,
816 * including reserved space which may be overwritten. This is used by Lua to
817 * insert data in the input side just before the other data using buffer_replace().
818 * The goal is to transfer these new data in the output buffer.
819 */
820static inline int ci_space_for_replace(const struct channel *chn)
821{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200822 const struct buffer *buf = &chn->buf;
Willy Tarreau3f679992018-06-15 15:06:42 +0200823 const char *end;
824
825 /* If the input side data overflows, we cannot insert data contiguously. */
826 if (b_head(buf) + b_data(buf) >= b_wrap(buf))
827 return 0;
828
829 /* Check the last byte used in the buffer, it may be a byte of the output
830 * side if the buffer wraps, or its the end of the buffer.
831 */
832 end = b_head(buf);
833 if (end <= ci_head(chn))
834 end = b_wrap(buf);
835
836 /* Compute the amount of bytes which can be written. */
837 return end - ci_tail(chn);
838}
839
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100840/* Allocates a buffer for channel <chn>, but only if it's guaranteed that it's
841 * not the last available buffer or it's the response buffer. Unless the buffer
842 * is the response buffer, an extra control is made so that we always keep
843 * <tune.buffers.reserved> buffers available after this allocation. Returns 0 in
844 * case of failure, non-zero otherwise.
845 *
846 * If no buffer are available, the requester, represented by <wait> pointer,
847 * will be added in the list of objects waiting for an available buffer.
848 */
849static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *wait)
850{
851 int margin = 0;
852
853 if (!(chn->flags & CF_ISRESP))
854 margin = global.tune.reserved_bufs;
855
856 if (b_alloc_margin(&chn->buf, margin) != NULL)
857 return 1;
858
Willy Tarreau21046592020-02-26 10:39:36 +0100859 if (!MT_LIST_ADDED(&wait->list))
860 MT_LIST_ADDQ(&buffer_wq, &wait->list);
Emeric Bruna1dd2432017-06-21 15:42:52 +0200861
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100862 return 0;
863}
864
865/* Releases a possibly allocated buffer for channel <chn>. If it was not
866 * allocated, this function does nothing. Else the buffer is released and we try
867 * to wake up as many streams/applets as possible. */
868static inline void channel_release_buffer(struct channel *chn, struct buffer_wait *wait)
869{
Willy Tarreau0c7ed5d2018-07-10 09:53:31 +0200870 if (c_size(chn) && c_empty(chn)) {
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100871 b_free(&chn->buf);
Olivier Houchard673867c2018-05-25 16:58:52 +0200872 offer_buffers(wait->target, tasks_run_queue);
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100873 }
874}
875
Willy Tarreau319f7452015-01-14 20:32:59 +0100876/* Truncate any unread data in the channel's buffer, and disable forwarding.
877 * Outgoing data are left intact. This is mainly to be used to send error
878 * messages after existing data.
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200879 */
Willy Tarreau319f7452015-01-14 20:32:59 +0100880static inline void channel_truncate(struct channel *chn)
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200881{
Willy Tarreau3ee83442018-06-15 16:42:02 +0200882 if (!co_data(chn))
Willy Tarreau974ced62012-10-12 23:11:02 +0200883 return channel_erase(chn);
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200884
Willy Tarreau974ced62012-10-12 23:11:02 +0200885 chn->to_forward = 0;
Willy Tarreau3ee83442018-06-15 16:42:02 +0200886 if (!ci_data(chn))
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200887 return;
888
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200889 chn->buf.data = co_data(chn);
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200890}
891
Christopher Fauletf7ed1952019-01-07 14:55:10 +0100892static inline void channel_htx_truncate(struct channel *chn, struct htx *htx)
893{
894 if (!co_data(chn))
895 return channel_htx_erase(chn, htx);
896
897 chn->to_forward = 0;
898 if (htx->data == co_data(chn))
899 return;
900 htx_truncate(htx, co_data(chn));
901}
902
Willy Tarreau4cf13002018-06-06 06:53:15 +0200903/* This function realigns a possibly wrapping channel buffer so that the input
904 * part is contiguous and starts at the beginning of the buffer and the output
905 * part ends at the end of the buffer. This provides the best conditions since
906 * it allows the largest inputs to be processed at once and ensures that once
907 * the output data leaves, the whole buffer is available at once.
908 */
Willy Tarreaufd8d42f2018-07-12 10:57:15 +0200909static inline void channel_slow_realign(struct channel *chn, char *swap)
Willy Tarreau4cf13002018-06-06 06:53:15 +0200910{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200911 return b_slow_realign(&chn->buf, swap, co_data(chn));
Willy Tarreau4cf13002018-06-06 06:53:15 +0200912}
913
Christopher Fauletb2f4e832019-05-23 10:01:34 +0200914
915/* Forward all headers of an HTX message, starting from the SL to the EOH. This
Christopher Faulet421e7692019-06-13 11:16:45 +0200916 * function returns the position of the block after the EOH, if
917 * found. Otherwise, it returns -1.
Christopher Fauletb2f4e832019-05-23 10:01:34 +0200918 */
Christopher Faulet421e7692019-06-13 11:16:45 +0200919static inline int32_t channel_htx_fwd_headers(struct channel *chn, struct htx *htx)
Christopher Fauletb2f4e832019-05-23 10:01:34 +0200920{
921 int32_t pos;
922 size_t data = 0;
923
924 for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
925 struct htx_blk *blk = htx_get_blk(htx, pos);
926 data += htx_get_blksz(blk);
927 if (htx_get_blk_type(blk) == HTX_BLK_EOH) {
Christopher Faulet421e7692019-06-13 11:16:45 +0200928 pos = htx_get_next(htx, pos);
Christopher Fauletb2f4e832019-05-23 10:01:34 +0200929 break;
930 }
931 }
932 c_adv(chn, data);
Christopher Faulet421e7692019-06-13 11:16:45 +0200933 return pos;
Christopher Fauletb2f4e832019-05-23 10:01:34 +0200934}
935
/*
 * Advance the channel buffer's read pointer by <len> bytes. This is useful
 * when data have been read directly from the buffer. It is illegal to call
 * this function with <len> causing a wrapping at the end of the buffer. It's
 * the caller's responsibility to ensure that <len> is never larger than
 * chn->o. Channel flags WRITE_PARTIAL and WROTE_DATA are set.
 */
static inline void co_skip(struct channel *chn, int len)
{
	b_del(&chn->buf, len);       /* drop <len> bytes from the buffer's head */
	chn->output -= len;          /* they were all output (already-sent) bytes */
	c_realign_if_empty(chn);     /* reset pointers if everything was consumed */

	/* notify that some data was written to the SI from the buffer */
	chn->flags |= CF_WRITE_PARTIAL | CF_WROTE_DATA;
	chn_prod(chn)->flags &= ~SI_FL_RXBLK_ROOM; // si_rx_room_rdy()
}
Willy Tarreaubaaee002006-06-26 02:48:02 +0200953
Christopher Fauletc6827d52019-02-25 10:44:51 +0100954/* HTX version of co_skip(). This function skips at most <len> bytes from the
955 * output of the channel <chn>. Depending on how data are stored in <htx> less
956 * than <len> bytes can be skipped. Channel flags WRITE_PARTIAL and WROTE_DATA
957 * are set.
958 */
959static inline void co_htx_skip(struct channel *chn, struct htx *htx, int len)
960{
961 struct htx_ret htxret;
962
963 htxret = htx_drain(htx, len);
964 if (htxret.ret) {
965 chn->output -= htxret.ret;
966
967 /* notify that some data was written to the SI from the buffer */
968 chn->flags |= CF_WRITE_PARTIAL | CF_WROTE_DATA;
Christopher Faulet037b3eb2019-07-05 13:44:29 +0200969 chn_prod(chn)->flags &= ~SI_FL_RXBLK_ROOM; // si_rx_room_rdy()
Christopher Fauletc6827d52019-02-25 10:44:51 +0100970 }
971}
Christopher Faulet729b5b32019-02-25 15:50:12 +0100972
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200973/* Tries to copy chunk <chunk> into the channel's buffer after length controls.
Willy Tarreau974ced62012-10-12 23:11:02 +0200974 * The chn->o and to_forward pointers are updated. If the channel's input is
Willy Tarreau74b08c92010-09-08 17:04:31 +0200975 * closed, -2 is returned. If the block is too large for this buffer, -3 is
976 * returned. If there is not enough room left in the buffer, -1 is returned.
977 * Otherwise the number of bytes copied is returned (0 being a valid number).
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200978 * Channel flag READ_PARTIAL is updated if some data can be transferred. The
Willy Tarreauf941cf22012-08-27 20:53:34 +0200979 * chunk's length is updated with the number of bytes sent.
Willy Tarreauaeac3192009-08-31 08:09:57 +0200980 */
Willy Tarreau83061a82018-07-13 11:56:34 +0200981static inline int ci_putchk(struct channel *chn, struct buffer *chunk)
Willy Tarreauaeac3192009-08-31 08:09:57 +0200982{
983 int ret;
984
Willy Tarreau843b7cb2018-07-13 10:54:26 +0200985 ret = ci_putblk(chn, chunk->area, chunk->data);
Willy Tarreau74b08c92010-09-08 17:04:31 +0200986 if (ret > 0)
Willy Tarreau843b7cb2018-07-13 10:54:26 +0200987 chunk->data -= ret;
Willy Tarreauaeac3192009-08-31 08:09:57 +0200988 return ret;
989}
990
/* Tries to copy string <str> at once into the channel's buffer after length
 * controls. The chn->o and to_forward pointers are updated. If the channel's
 * input is closed, -2 is returned. If the block is too large for this buffer,
 * -3 is returned. If there is not enough room left in the buffer, -1 is
 * returned. Otherwise the number of bytes copied is returned (0 being a valid
 * number). Channel flag READ_PARTIAL is updated if some data can be
 * transferred.
 */
static inline int ci_putstr(struct channel *chn, const char *str)
{
	size_t len = strlen(str);

	return ci_putblk(chn, str, len);
}
1003
1004/*
Willy Tarreau8263d2b2012-08-28 00:06:31 +02001005 * Return one char from the channel's buffer. If the buffer is empty and the
1006 * channel is closed, return -2. If the buffer is just empty, return -1. The
Willy Tarreau06d80a92017-10-19 14:32:15 +02001007 * buffer's pointer is not advanced, it's up to the caller to call co_skip(buf,
Willy Tarreau8263d2b2012-08-28 00:06:31 +02001008 * 1) when it has consumed the char. Also note that this function respects the
Willy Tarreau974ced62012-10-12 23:11:02 +02001009 * chn->o limit.
Willy Tarreau74b08c92010-09-08 17:04:31 +02001010 */
Willy Tarreau06d80a92017-10-19 14:32:15 +02001011static inline int co_getchr(struct channel *chn)
Willy Tarreau74b08c92010-09-08 17:04:31 +02001012{
1013 /* closed or empty + imminent close = -2; empty = -1 */
Willy Tarreau974ced62012-10-12 23:11:02 +02001014 if (unlikely((chn->flags & CF_SHUTW) || channel_is_empty(chn))) {
1015 if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
Willy Tarreau74b08c92010-09-08 17:04:31 +02001016 return -2;
1017 return -1;
1018 }
Willy Tarreau50227f92018-06-15 15:18:17 +02001019 return *co_head(chn);
Willy Tarreau74b08c92010-09-08 17:04:31 +02001020}
1021
Willy Tarreaubaaee002006-06-26 02:48:02 +02001022
Willy Tarreauc7e42382012-08-24 19:22:53 +02001023#endif /* _PROTO_CHANNEL_H */
Willy Tarreaubaaee002006-06-26 02:48:02 +02001024
1025/*
1026 * Local variables:
1027 * c-indent-level: 8
1028 * c-basic-offset: 8
1029 * End:
1030 */