blob: 4872aa3cc1aca203d513184770407065eb126354 [file] [log] [blame]
Willy Tarreaubaaee002006-06-26 02:48:02 +02001/*
Willy Tarreauc7e42382012-08-24 19:22:53 +02002 * include/proto/channel.h
3 * Channel management definitions, macros and inline functions.
Willy Tarreau7c3c5412009-12-13 15:53:05 +01004 *
Willy Tarreaua27dc192014-11-27 22:10:04 +01005 * Copyright (C) 2000-2014 Willy Tarreau - w@1wt.eu
Willy Tarreau7c3c5412009-12-13 15:53:05 +01006 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation, version 2.1
10 * exclusively.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
Willy Tarreaubaaee002006-06-26 02:48:02 +020021
Willy Tarreauc7e42382012-08-24 19:22:53 +020022#ifndef _PROTO_CHANNEL_H
23#define _PROTO_CHANNEL_H
Willy Tarreaubaaee002006-06-26 02:48:02 +020024
Willy Tarreaua1bd1fa2019-03-29 17:26:33 +010025#include <inttypes.h>
Willy Tarreau7341d942007-05-13 19:56:02 +020026#include <stdio.h>
Willy Tarreau0f772532006-12-23 20:51:41 +010027#include <stdlib.h>
Willy Tarreau7341d942007-05-13 19:56:02 +020028#include <string.h>
Willy Tarreau0f772532006-12-23 20:51:41 +010029
Willy Tarreaue3ba5f02006-06-29 18:54:54 +020030#include <common/config.h>
Willy Tarreauc7e42382012-08-24 19:22:53 +020031#include <common/chunk.h>
Willy Tarreaub96b77e2018-12-11 10:22:41 +010032#include <common/htx.h>
Willy Tarreau0c303ee2008-07-07 00:09:58 +020033#include <common/ticks.h>
Willy Tarreaufa645582007-06-03 15:59:52 +020034#include <common/time.h>
35
Thierry FOURNIERac836ba2014-12-16 15:41:18 +010036#include <types/channel.h>
Willy Tarreau7c3c5412009-12-13 15:53:05 +010037#include <types/global.h>
Willy Tarreau87b09662015-04-03 00:22:06 +020038#include <types/stream.h>
Willy Tarreau73796532014-11-28 14:10:28 +010039#include <types/stream_interface.h>
Willy Tarreaubaaee002006-06-26 02:48:02 +020040
Christopher Fauletaad45852019-05-14 22:14:03 +020041#include <proto/stream.h>
Christopher Fauleta73e59b2016-12-09 17:30:18 +010042#include <proto/task.h>
43
/* perform minimal initializations, report 0 in case of error, 1 if OK. */
Willy Tarreau8263d2b2012-08-28 00:06:31 +020045int init_channel();
Willy Tarreau7341d942007-05-13 19:56:02 +020046
Willy Tarreau55a69062012-10-26 00:21:52 +020047unsigned long long __channel_forward(struct channel *chn, unsigned long long bytes);
Willy Tarreau8263d2b2012-08-28 00:06:31 +020048
49/* SI-to-channel functions working with buffers */
Willy Tarreau06d80a92017-10-19 14:32:15 +020050int ci_putblk(struct channel *chn, const char *str, int len);
Willy Tarreau06d80a92017-10-19 14:32:15 +020051int ci_putchr(struct channel *chn, char c);
Willy Tarreau55f3ce12018-07-18 11:49:27 +020052int ci_getline_nc(const struct channel *chn, char **blk1, size_t *len1, char **blk2, size_t *len2);
53int ci_getblk_nc(const struct channel *chn, char **blk1, size_t *len1, char **blk2, size_t *len2);
Willy Tarreau4d893d42018-07-12 15:43:32 +020054int ci_insert_line2(struct channel *c, int pos, const char *str, int len);
Willy Tarreau06d80a92017-10-19 14:32:15 +020055int co_inject(struct channel *chn, const char *msg, int len);
Willy Tarreau41ab8682017-10-19 14:58:40 +020056int co_getline(const struct channel *chn, char *str, int len);
57int co_getblk(const struct channel *chn, char *blk, int len, int offset);
Willy Tarreau55f3ce12018-07-18 11:49:27 +020058int co_getline_nc(const struct channel *chn, const char **blk1, size_t *len1, const char **blk2, size_t *len2);
59int co_getblk_nc(const struct channel *chn, const char **blk1, size_t *len1, const char **blk2, size_t *len2);
Thierry FOURNIERca16b032015-02-16 19:26:48 +010060
Willy Tarreau74b08c92010-09-08 17:04:31 +020061
Willy Tarreau87b09662015-04-03 00:22:06 +020062/* returns a pointer to the stream the channel belongs to */
Thierry FOURNIER27929fb2015-09-25 08:36:11 +020063static inline struct stream *chn_strm(const struct channel *chn)
Willy Tarreaud5ccfa32014-12-28 13:03:53 +010064{
65 if (chn->flags & CF_ISRESP)
Willy Tarreau87b09662015-04-03 00:22:06 +020066 return LIST_ELEM(chn, struct stream *, res);
Willy Tarreaud5ccfa32014-12-28 13:03:53 +010067 else
Willy Tarreau87b09662015-04-03 00:22:06 +020068 return LIST_ELEM(chn, struct stream *, req);
Willy Tarreaud5ccfa32014-12-28 13:03:53 +010069}
70
Willy Tarreau73796532014-11-28 14:10:28 +010071/* returns a pointer to the stream interface feeding the channel (producer) */
72static inline struct stream_interface *chn_prod(const struct channel *chn)
73{
Willy Tarreau5decc052014-11-28 14:22:12 +010074 if (chn->flags & CF_ISRESP)
Willy Tarreau87b09662015-04-03 00:22:06 +020075 return &LIST_ELEM(chn, struct stream *, res)->si[1];
Willy Tarreau5decc052014-11-28 14:22:12 +010076 else
Willy Tarreau87b09662015-04-03 00:22:06 +020077 return &LIST_ELEM(chn, struct stream *, req)->si[0];
Willy Tarreau73796532014-11-28 14:10:28 +010078}
79
80/* returns a pointer to the stream interface consuming the channel (producer) */
81static inline struct stream_interface *chn_cons(const struct channel *chn)
82{
Willy Tarreau5decc052014-11-28 14:22:12 +010083 if (chn->flags & CF_ISRESP)
Willy Tarreau87b09662015-04-03 00:22:06 +020084 return &LIST_ELEM(chn, struct stream *, res)->si[0];
Willy Tarreau5decc052014-11-28 14:22:12 +010085 else
Willy Tarreau87b09662015-04-03 00:22:06 +020086 return &LIST_ELEM(chn, struct stream *, req)->si[1];
Willy Tarreau73796532014-11-28 14:10:28 +010087}
88
Willy Tarreau08d5ac82018-06-06 15:09:28 +020089/* c_orig() : returns the pointer to the channel buffer's origin */
90static inline char *c_orig(const struct channel *c)
91{
Willy Tarreauc9fa0482018-07-10 17:43:27 +020092 return b_orig(&c->buf);
Willy Tarreau08d5ac82018-06-06 15:09:28 +020093}
94
95/* c_size() : returns the size of the channel's buffer */
96static inline size_t c_size(const struct channel *c)
97{
Willy Tarreauc9fa0482018-07-10 17:43:27 +020098 return b_size(&c->buf);
Willy Tarreau08d5ac82018-06-06 15:09:28 +020099}
100
101/* c_wrap() : returns the pointer to the channel buffer's wrapping point */
102static inline char *c_wrap(const struct channel *c)
103{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200104 return b_wrap(&c->buf);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200105}
106
107/* c_data() : returns the amount of data in the channel's buffer */
108static inline size_t c_data(const struct channel *c)
109{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200110 return b_data(&c->buf);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200111}
112
113/* c_room() : returns the room left in the channel's buffer */
114static inline size_t c_room(const struct channel *c)
115{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200116 return b_size(&c->buf) - b_data(&c->buf);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200117}
118
119/* c_empty() : returns a boolean indicating if the channel's buffer is empty */
120static inline size_t c_empty(const struct channel *c)
121{
122 return !c_data(c);
123}
124
125/* c_full() : returns a boolean indicating if the channel's buffer is full */
126static inline size_t c_full(const struct channel *c)
127{
128 return !c_room(c);
129}
130
/* co_data() : returns the amount of output data in the channel's buffer,
 * i.e. bytes already scheduled to be sent out. */
static inline size_t co_data(const struct channel *c)
{
	return c->output;
}
136
/* ci_data() : returns the amount of input data in the channel's buffer,
 * i.e. total buffered data minus the part already scheduled for output. */
static inline size_t ci_data(const struct channel *c)
{
	return c_data(c) - co_data(c);
}
142
143/* ci_next() : for an absolute pointer <p> or a relative offset <o> pointing to
144 * a valid location within channel <c>'s buffer, returns either the absolute
145 * pointer or the relative offset pointing to the next byte, which usually is
146 * at (p + 1) unless p reaches the wrapping point and wrapping is needed.
147 */
148static inline size_t ci_next_ofs(const struct channel *c, size_t o)
149{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200150 return b_next_ofs(&c->buf, o);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200151}
152static inline char *ci_next(const struct channel *c, const char *p)
153{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200154 return b_next(&c->buf, p);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200155}
156
157
158/* c_ptr() : returns a pointer to an offset relative to the beginning of the
159 * input data in the buffer. If instead the offset is negative, a pointer to
160 * existing output data is returned. The function only takes care of wrapping,
161 * it's up to the caller to ensure the offset is always within byte count
162 * bounds.
163 */
164static inline char *c_ptr(const struct channel *c, ssize_t ofs)
165{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200166 return b_peek(&c->buf, co_data(c) + ofs);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200167}
168
/* c_adv() : advances the channel's buffer by <adv> bytes, which means that as
 * many bytes are transferred from input to output. The caller is responsible
 * for ensuring that <adv> is always smaller than or equal to the amount of
 * input data (ci_data()), otherwise the output count becomes inconsistent.
 */
static inline void c_adv(struct channel *c, size_t adv)
{
	c->output += adv;
}
178
/* c_rew() : rewinds the channel's buffer by <adv> bytes, which means that as
 * many bytes are moved back from output to input. The caller is responsible
 * for ensuring that <adv> is always smaller than or equal to the amount of
 * output data (co_data()), otherwise the output count underflows.
 */
static inline void c_rew(struct channel *c, size_t adv)
{
	c->output -= adv;
}
188
189/* c_realign_if_empty() : realign the channel's buffer if it's empty */
190static inline void c_realign_if_empty(struct channel *chn)
191{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200192 b_realign_if_empty(&chn->buf);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200193}
194
/* Sets the amount of output data for the channel (bytes considered already
 * scheduled for sending). Overwrites any previous value without checks. */
static inline void co_set_data(struct channel *c, size_t output)
{
	c->output = output;
}
200
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200201
202/* co_head() : returns a pointer to the beginning of output data in the buffer.
203 * The "__" variants don't support wrapping, "ofs" are relative to
204 * the buffer's origin.
205 */
206static inline size_t __co_head_ofs(const struct channel *c)
207{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200208 return __b_peek_ofs(&c->buf, 0);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200209}
210static inline char *__co_head(const struct channel *c)
211{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200212 return __b_peek(&c->buf, 0);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200213}
214static inline size_t co_head_ofs(const struct channel *c)
215{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200216 return b_peek_ofs(&c->buf, 0);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200217}
218static inline char *co_head(const struct channel *c)
219{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200220 return b_peek(&c->buf, 0);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200221}
222
223
224/* co_tail() : returns a pointer to the end of output data in the buffer.
225 * The "__" variants don't support wrapping, "ofs" are relative to
226 * the buffer's origin.
227 */
228static inline size_t __co_tail_ofs(const struct channel *c)
229{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200230 return __b_peek_ofs(&c->buf, co_data(c));
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200231}
232static inline char *__co_tail(const struct channel *c)
233{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200234 return __b_peek(&c->buf, co_data(c));
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200235}
236static inline size_t co_tail_ofs(const struct channel *c)
237{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200238 return b_peek_ofs(&c->buf, co_data(c));
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200239}
240static inline char *co_tail(const struct channel *c)
241{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200242 return b_peek(&c->buf, co_data(c));
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200243}
244
245
246/* ci_head() : returns a pointer to the beginning of input data in the buffer.
247 * The "__" variants don't support wrapping, "ofs" are relative to
248 * the buffer's origin.
249 */
250static inline size_t __ci_head_ofs(const struct channel *c)
251{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200252 return __b_peek_ofs(&c->buf, co_data(c));
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200253}
254static inline char *__ci_head(const struct channel *c)
255{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200256 return __b_peek(&c->buf, co_data(c));
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200257}
258static inline size_t ci_head_ofs(const struct channel *c)
259{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200260 return b_peek_ofs(&c->buf, co_data(c));
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200261}
262static inline char *ci_head(const struct channel *c)
263{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200264 return b_peek(&c->buf, co_data(c));
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200265}
266
267
268/* ci_tail() : returns a pointer to the end of input data in the buffer.
269 * The "__" variants don't support wrapping, "ofs" are relative to
270 * the buffer's origin.
271 */
272static inline size_t __ci_tail_ofs(const struct channel *c)
273{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200274 return __b_peek_ofs(&c->buf, c_data(c));
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200275}
276static inline char *__ci_tail(const struct channel *c)
277{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200278 return __b_peek(&c->buf, c_data(c));
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200279}
280static inline size_t ci_tail_ofs(const struct channel *c)
281{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200282 return b_peek_ofs(&c->buf, c_data(c));
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200283}
284static inline char *ci_tail(const struct channel *c)
285{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200286 return b_peek(&c->buf, c_data(c));
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200287}
288
289
290/* ci_stop() : returns the pointer to the byte following the end of input data
291 * in the channel buffer. It may be out of the buffer. It's used to
292 * compute lengths or stop pointers.
293 */
294static inline size_t __ci_stop_ofs(const struct channel *c)
295{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200296 return __b_stop_ofs(&c->buf);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200297}
298static inline const char *__ci_stop(const struct channel *c)
299{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200300 return __b_stop(&c->buf);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200301}
302static inline size_t ci_stop_ofs(const struct channel *c)
303{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200304 return b_stop_ofs(&c->buf);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200305}
306static inline const char *ci_stop(const struct channel *c)
307{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200308 return b_stop(&c->buf);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200309}
310
311
Willy Tarreau7194d3c2018-06-06 16:55:45 +0200312/* Returns the amount of input data that can contiguously be read at once */
313static inline size_t ci_contig_data(const struct channel *c)
314{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200315 return b_contig_data(&c->buf, co_data(c));
Willy Tarreau7194d3c2018-06-06 16:55:45 +0200316}
317
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200318/* Initialize all fields in the channel. */
Willy Tarreau974ced62012-10-12 23:11:02 +0200319static inline void channel_init(struct channel *chn)
Willy Tarreau54469402006-07-29 16:59:06 +0200320{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200321 chn->buf = BUF_NULL;
Willy Tarreau974ced62012-10-12 23:11:02 +0200322 chn->to_forward = 0;
Willy Tarreaub145c782014-02-09 17:45:16 +0100323 chn->last_read = now_ms;
Willy Tarreau8f39dcd2014-02-09 08:31:49 +0100324 chn->xfer_small = chn->xfer_large = 0;
Willy Tarreau974ced62012-10-12 23:11:02 +0200325 chn->total = 0;
326 chn->pipe = NULL;
327 chn->analysers = 0;
Willy Tarreau974ced62012-10-12 23:11:02 +0200328 chn->flags = 0;
Olivier Houchard08afac02018-06-22 19:26:39 +0200329 chn->output = 0;
Willy Tarreau54469402006-07-29 16:59:06 +0200330}
331
/* Schedule up to <bytes> more bytes to be forwarded via the channel without
 * notifying the owner task. Any data pending in the buffer are scheduled to be
 * sent as well, in the limit of the number of bytes to forward. This must be
 * the only method to use to schedule bytes to be forwarded. If the requested
 * number is too large, it is automatically adjusted. The number of bytes taken
 * into account is returned. Directly touching ->to_forward will cause lockups
 * when the output goes down to zero if nobody is ready to push the remaining
 * data.
 */
static inline unsigned long long channel_forward(struct channel *chn, unsigned long long bytes)
{
	/* fast path: if the count fits in an unsigned int and all of it is
	 * already present in the buffer, advance directly and avoid the
	 * long long arithmetic of the slow path.
	 */
	if (bytes <= ~0U) {
		unsigned int b32 = bytes;

		if (b32 <= ci_data(chn)) {
			c_adv(chn, b32);
			return bytes;
		}
	}
	/* slow path handles partial availability and infinite forwarding */
	return __channel_forward(chn, bytes);
}
357
/* Forwards any input data and marks the channel for permanent forwarding:
 * all currently buffered input is scheduled for output, and ->to_forward is
 * set to CHN_INFINITE_FORWARD so future input is forwarded automatically. */
static inline void channel_forward_forever(struct channel *chn)
{
	c_adv(chn, ci_data(chn));
	chn->to_forward = CHN_INFINITE_FORWARD;
}
364
Christopher Faulete6458292019-01-02 14:24:35 +0100365/* <len> bytes of input data was added into the channel <chn>. This functions
366 * must be called to update the channel state. It also handles the fast
367 * forwarding. */
368static inline void channel_add_input(struct channel *chn, unsigned int len)
369{
370 if (chn->to_forward) {
371 unsigned long fwd = len;
372 if (chn->to_forward != CHN_INFINITE_FORWARD) {
373 if (fwd > chn->to_forward)
374 fwd = chn->to_forward;
375 chn->to_forward -= fwd;
376 }
377 c_adv(chn, fwd);
378 }
379 /* notify that some data was read */
380 chn->total += len;
381 chn->flags |= CF_READ_PARTIAL;
382}
383
/* HTX version of channel_forward(). The buffer's apparent length is
 * temporarily set to the HTX data count so that channel_forward() operates on
 * the real payload size, then the buffer is restored to its full size.
 * Returns the number of bytes taken into account (0 if the HTX message
 * carries no data). */
static inline unsigned long long channel_htx_forward(struct channel *chn, struct htx *htx, unsigned long long bytes)
{
	unsigned long long ret = 0;

	if (htx->data) {
		b_set_data(&chn->buf, htx->data);
		ret = channel_forward(chn, bytes);
		b_set_data(&chn->buf, b_size(&chn->buf));
	}
	return ret;
}
395
396
/* HTX version of channel_forward_forever(): schedules all HTX data not yet
 * counted as output for forwarding, and marks the channel for permanent
 * forwarding. */
static inline void channel_htx_forward_forever(struct channel *chn, struct htx *htx)
{
	c_adv(chn, htx->data - co_data(chn));
	chn->to_forward = CHN_INFINITE_FORWARD;
}
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200402/*********************************************************************/
403/* These functions are used to compute various channel content sizes */
404/*********************************************************************/
Willy Tarreau4b517ca2011-11-25 20:33:58 +0100405
Willy Tarreau8e21bb92012-08-24 22:40:29 +0200406/* Reports non-zero if the channel is empty, which means both its
407 * buffer and pipe are empty. The construct looks strange but is
408 * jump-less and much more efficient on both 32 and 64-bit than
409 * the boolean test.
410 */
Willy Tarreau41ab8682017-10-19 14:58:40 +0200411static inline unsigned int channel_is_empty(const struct channel *c)
Willy Tarreau8e21bb92012-08-24 22:40:29 +0200412{
Willy Tarreau3ee83442018-06-15 16:42:02 +0200413 return !(co_data(c) | (long)c->pipe);
Willy Tarreau8e21bb92012-08-24 22:40:29 +0200414}
415
Willy Tarreauba0902e2015-01-13 14:39:16 +0100416/* Returns non-zero if the channel is rewritable, which means that the buffer
417 * it is attached to has at least <maxrewrite> bytes immediately available.
418 * This is used to decide when a request or response may be parsed when some
419 * data from a previous exchange might still be present.
Willy Tarreau379357a2013-06-08 12:55:46 +0200420 */
Willy Tarreauba0902e2015-01-13 14:39:16 +0100421static inline int channel_is_rewritable(const struct channel *chn)
Willy Tarreau379357a2013-06-08 12:55:46 +0200422{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200423 int rem = chn->buf.size;
Willy Tarreau379357a2013-06-08 12:55:46 +0200424
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200425 rem -= b_data(&chn->buf);
Willy Tarreau379357a2013-06-08 12:55:46 +0200426 rem -= global.tune.maxrewrite;
427 return rem >= 0;
428}
429
/* Tells whether data are likely to leave the buffer. This is used to know when
 * we can safely ignore the reserve since we know we cannot retry a connection.
 * It returns zero if data are blocked, non-zero otherwise.
 */
static inline int channel_may_send(const struct channel *chn)
{
	/* data can only leave once the consuming stream interface is fully
	 * established */
	return chn_cons(chn)->state == SI_ST_EST;
}
438
/* Returns non-zero if the channel can still receive data. This is used to
 * decide when to stop reading into a buffer when we want to ensure that we
 * leave the reserve untouched after all pending outgoing data are forwarded.
 * The reserved space is taken into account if ->to_forward indicates that an
 * end of transfer is close to happen. Note that both ->buf.o and ->to_forward
 * are considered as available since they're supposed to leave the buffer. The
 * test is optimized to avoid as many operations as possible for the fast case
 * and to be used as an "if" condition. Just like channel_recv_limit(), we
 * never allow to overwrite the reserve until the output stream interface is
 * connected, otherwise we could spin on a POST with http-send-name-header.
 */
static inline int channel_may_recv(const struct channel *chn)
{
	int rem = chn->buf.size;

	/* a not-yet-allocated buffer can always receive once allocated */
	if (b_is_null(&chn->buf))
		return 1;

	rem -= b_data(&chn->buf);
	if (!rem)
		return 0; /* buffer already full */

	if (rem > global.tune.maxrewrite)
		return 1; /* reserve not yet reached */

	if (!channel_may_send(chn))
		return 0; /* don't touch reserve until we can send */

	/* Now we know there's some room left in the reserve and we may
	 * forward. As long as i-to_fwd < size-maxrw, we may still
	 * receive. This is equivalent to i+maxrw-size < to_fwd,
	 * which is logical since i+maxrw-size is what overlaps with
	 * the reserve, and we want to ensure they're covered by scheduled
	 * forwards.
	 */
	rem = ci_data(chn) + global.tune.maxrewrite - chn->buf.size;
	return rem < 0 || (unsigned int)rem < chn->to_forward;
}
477
/* HTX version of channel_may_recv(). Returns non-zero if the channel can still
 * receive data. The reserve (global.tune.maxrewrite) is measured against the
 * free space reported by the HTX message rather than the raw buffer. */
static inline int channel_htx_may_recv(const struct channel *chn, const struct htx *htx)
{
	uint32_t rem;

	/* a zero-sized (unallocated) HTX message can always be allocated */
	if (!htx->size)
		return 1;

	if (!channel_may_send(chn))
		return 0; /* don't touch reserve until we can send */

	rem = htx_free_data_space(htx);
	if (!rem)
		return 0; /* htx already full */

	if (rem > global.tune.maxrewrite)
		return 1; /* reserve not yet reached */

	/* Now we know there's some room left in the reserve and we may
	 * forward. As long as i-to_fwd < size-maxrw, we may still
	 * receive. This is equivalent to i+maxrw-size < to_fwd,
	 * which is logical since i+maxrw-size is what overlaps with
	 * the reserve, and we want to ensure they're covered by scheduled
	 * forwards.
	 */
	rem += co_data(chn);
	if (rem > global.tune.maxrewrite)
		return 1;

	return (global.tune.maxrewrite - rem < chn->to_forward);
}
510
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200511/* Returns true if the channel's input is already closed */
Willy Tarreau974ced62012-10-12 23:11:02 +0200512static inline int channel_input_closed(struct channel *chn)
Willy Tarreau74b08c92010-09-08 17:04:31 +0200513{
Willy Tarreau974ced62012-10-12 23:11:02 +0200514 return ((chn->flags & CF_SHUTR) != 0);
Willy Tarreau74b08c92010-09-08 17:04:31 +0200515}
516
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200517/* Returns true if the channel's output is already closed */
Willy Tarreau974ced62012-10-12 23:11:02 +0200518static inline int channel_output_closed(struct channel *chn)
Willy Tarreau74b08c92010-09-08 17:04:31 +0200519{
Willy Tarreau974ced62012-10-12 23:11:02 +0200520 return ((chn->flags & CF_SHUTW) != 0);
Willy Tarreau74b08c92010-09-08 17:04:31 +0200521}
522
/* Check channel timeouts, and set the corresponding flags. The likely/unlikely
 * have been optimized for fastest normal path. The read/write timeouts are not
 * set if there was activity on the channel. That way, we don't have to update
 * the timeout on every I/O. Note that the analyser timeout is always checked.
 */
static inline void channel_check_timeouts(struct channel *chn)
{
	/* read timeout: skipped when already shut, already reported, recently
	 * active, or explicitly exempted from expiration */
	if (likely(!(chn->flags & (CF_SHUTR|CF_READ_TIMEOUT|CF_READ_ACTIVITY|CF_READ_NOEXP))) &&
	    unlikely(tick_is_expired(chn->rex, now_ms)))
		chn->flags |= CF_READ_TIMEOUT;

	/* write timeout: same principle in the other direction */
	if (likely(!(chn->flags & (CF_SHUTW|CF_WRITE_TIMEOUT|CF_WRITE_ACTIVITY))) &&
	    unlikely(tick_is_expired(chn->wex, now_ms)))
		chn->flags |= CF_WRITE_TIMEOUT;

	/* analyser timeout: always checked, only gated by its own flag */
	if (likely(!(chn->flags & CF_ANA_TIMEOUT)) &&
	    unlikely(tick_is_expired(chn->analyse_exp, now_ms)))
		chn->flags |= CF_ANA_TIMEOUT;
}
542
/* Erase any content from channel <chn> and adjusts flags accordingly. Note
 * that any spliced data is not affected since we may not have any access to
 * it.
 */
static inline void channel_erase(struct channel *chn)
{
	chn->to_forward = 0;
	chn->output = 0; /* also drop the pending output byte count */
	b_reset(&chn->buf);
}
553
/* HTX version of channel_erase(): resets the HTX message, then erases the
 * channel itself. */
static inline void channel_htx_erase(struct channel *chn, struct htx *htx)
{
	htx_reset(htx);
	channel_erase(chn);
}
559
/* marks the channel as "shutdown" ASAP for reads (sets CF_SHUTR_NOW) */
static inline void channel_shutr_now(struct channel *chn)
{
	chn->flags |= CF_SHUTR_NOW;
}
565
/* marks the channel as "shutdown" ASAP for writes (sets CF_SHUTW_NOW) */
static inline void channel_shutw_now(struct channel *chn)
{
	chn->flags |= CF_SHUTW_NOW;
}
571
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200572/* marks the channel as "shutdown" ASAP in both directions */
Willy Tarreau974ced62012-10-12 23:11:02 +0200573static inline void channel_abort(struct channel *chn)
Willy Tarreaufa7e1022008-10-19 07:30:41 +0200574{
Willy Tarreau974ced62012-10-12 23:11:02 +0200575 chn->flags |= CF_SHUTR_NOW | CF_SHUTW_NOW;
576 chn->flags &= ~CF_AUTO_CONNECT;
Willy Tarreaufa7e1022008-10-19 07:30:41 +0200577}
578
/* allow the consumer to try to establish a new connection (sets CF_AUTO_CONNECT) */
static inline void channel_auto_connect(struct channel *chn)
{
	chn->flags |= CF_AUTO_CONNECT;
}
584
/* prevent the consumer from trying to establish a new connection, and also
 * disable auto shutdown forwarding (clears CF_AUTO_CONNECT and CF_AUTO_CLOSE).
 */
static inline void channel_dont_connect(struct channel *chn)
{
	chn->flags &= ~(CF_AUTO_CONNECT|CF_AUTO_CLOSE);
}
592
/* allow the producer to forward shutdown requests (sets CF_AUTO_CLOSE) */
static inline void channel_auto_close(struct channel *chn)
{
	chn->flags |= CF_AUTO_CLOSE;
}
598
/* prevent the producer from forwarding shutdown requests (clears
 * CF_AUTO_CLOSE).
 */
static inline void channel_dont_close(struct channel *chn)
{
	chn->flags &= ~CF_AUTO_CLOSE;
}
604
/* allow the producer to read / poll the input again (clears CF_DONT_READ) */
static inline void channel_auto_read(struct channel *chn)
{
	chn->flags &= ~CF_DONT_READ;
}
610
/* prevent the producer from reading / polling the input (sets CF_DONT_READ) */
static inline void channel_dont_read(struct channel *chn)
{
	chn->flags |= CF_DONT_READ;
}
616
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200617
618/*************************************************/
619/* Buffer operations in the context of a channel */
620/*************************************************/
621
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200622
/* Return the max number of bytes the buffer can contain so that once all the
 * pending bytes are forwarded, the buffer still has global.tune.maxrewrite
 * bytes free. The result sits between chn->size - maxrewrite and chn->size.
 * It is important to mention that if buf->i is already larger than size-maxrw
 * the condition above cannot be satisfied and the lowest size will be returned
 * anyway. In the description below, "i" is the amount of input (not yet sent)
 * data and "o" the amount of output (already sent) data. The principles are
 * the following :
 *   0) the empty buffer has a limit of zero
 *   1) a non-connected buffer cannot touch the reserve
 *   2) infinite forward can always fill the buffer since all data will leave
 *   3) all output bytes are considered in transit since they're leaving
 *   4) all input bytes covered by to_forward are considered in transit since
 *      they'll be converted to output bytes.
 *   5) all input bytes not covered by to_forward are considered remaining
 *   6) all bytes scheduled to be forwarded minus what is already in the input
 *      buffer will be in transit during future rounds.
 *   7) 4+5+6 imply that the amount of input bytes (i) is irrelevant to the max
 *      usable length, only to_forward and output count. The difference is
 *      visible when to_forward > i.
 *   8) the reserve may be covered up to the amount of bytes in transit since
 *      these bytes will only take temporary space.
 *
 * A typical buffer looks like this :
 *
 *      <-------------- max_len ----------->
 *      <---- o ----><----- i -----> <--- 0..maxrewrite --->
 *      +------------+--------------+-------+----------------------+
 *      |////////////|\\\\\\\\\\\\\\|xxxxxxx|        reserve       |
 *      +------------+--------+-----+-------+----------------------+
 *                   <- fwd ->      <-avail->
 *
 * Or when to_forward > i :
 *
 *      <-------------- max_len ----------->
 *      <---- o ----><----- i -----> <--- 0..maxrewrite --->
 *      +------------+--------------+-------+----------------------+
 *      |////////////|\\\\\\\\\\\\\\|xxxxxxx|        reserve       |
 *      +------------+--------+-----+-------+----------------------+
 *                                  <-avail->
 *                   <------------------ fwd ---------------->
 *
 *  - the amount of buffer bytes in transit is : min(i, fwd) + o
 *  - some scheduled bytes may be in transit (up to fwd - i)
 *  - the reserve is max(0, maxrewrite - transit)
 *  - the maximum usable buffer length is size - reserve.
 *  - the available space is max_len - i - o
 *
 * So the formula to compute the buffer's maximum length to protect the reserve
 * when reading new data is :
 *
 *    max = size - maxrewrite + min(maxrewrite, transit)
 *        = size - max(maxrewrite - transit, 0)
 *
 * But WARNING! The conditions might change during the transfer and it could
 * very well happen that a buffer would contain more bytes than max_len due to
 * i+o already walking over the reserve (eg: after a header rewrite), including
 * i or o alone hitting the limit. So it is critical to always consider that
 * bounds may have already been crossed and that available space may be negative
 * for example. Due to this it is perfectly possible for this function to return
 * a value that is lower than current i+o.
 */
static inline int channel_recv_limit(const struct channel *chn)
{
	unsigned int transit;
	int reserve;

	/* return zero if empty */
	reserve = chn->buf.size;
	if (b_is_null(&chn->buf))
		goto end;

	/* return size - maxrewrite if we can't send */
	reserve = global.tune.maxrewrite;
	if (unlikely(!channel_may_send(chn)))
		goto end;

	/* We need to check what remains of the reserve after o and to_forward
	 * have been transmitted, but they can overflow together and they can
	 * cause an integer underflow in the comparison since both are unsigned
	 * while maxrewrite is signed.
	 * The code below has been verified for being a valid check for this :
	 *   - if (o + to_forward) overflow => return size  [ large enough ]
	 *   - if o + to_forward >= maxrw   => return size  [ large enough ]
	 *   - otherwise return size - (maxrw - (o + to_forward))
	 */
	transit = co_data(chn) + chn->to_forward;
	reserve -= transit;
	if (transit < chn->to_forward ||                 // addition overflow
	    transit >= (unsigned)global.tune.maxrewrite) // enough transit data
		return chn->buf.size;
 end:
	return chn->buf.size - reserve;
}
715
/* HTX version of channel_recv_limit(). Return the max number of bytes the HTX
 * buffer can contain so that once all the pending bytes are forwarded, the
 * buffer still has global.tune.maxrewrite bytes free.
 */
static inline int channel_htx_recv_limit(const struct channel *chn, const struct htx *htx)
{
	unsigned int transit;
	int reserve;

	/* return zero if not allocated */
	if (!htx->size)
		return 0;

	/* return htx->size - maxrewrite if we can't send */
	reserve = global.tune.maxrewrite;
	if (unlikely(!channel_may_send(chn)))
		goto end;

	/* We need to check what remains of the reserve after o and to_forward
	 * have been transmitted, but they can overflow together and they can
	 * cause an integer underflow in the comparison since both are unsigned
	 * while maxrewrite is signed.
	 * The code below has been verified for being a valid check for this :
	 *   - if (o + to_forward) overflow => return htx->size  [ large enough ]
	 *   - if o + to_forward >= maxrw   => return htx->size  [ large enough ]
	 *   - otherwise return htx->size - (maxrw - (o + to_forward))
	 */
	transit = co_data(chn) + chn->to_forward;
	reserve -= transit;
	if (transit < chn->to_forward ||                 // addition overflow
	    transit >= (unsigned)global.tune.maxrewrite) // enough transit data
		return htx->size;
 end:
	return (htx->size - reserve);
}
751
Christopher Faulet87ebe942019-06-11 14:14:49 +0200752/* HTX version of channel_full(). Instead of checking if INPUT data exceeds
753 * (size - reserve), this function checks if the free space for data in <htx>
754 * and the data scheduled for output are lower to the reserve. In such case, the
755 * channel is considered as full.
756 */
757static inline int channel_htx_full(const struct channel *c, const struct htx *htx,
758 unsigned int reserve)
759{
760 if (!htx->size)
761 return 0;
762 return (htx_free_data_space(htx) + co_data(c) <= reserve);
763}
764
Willy Tarreau23752332018-06-15 14:54:53 +0200765/* Returns non-zero if the channel's INPUT buffer's is considered full, which
766 * means that it holds at least as much INPUT data as (size - reserve). This
767 * also means that data that are scheduled for output are considered as potential
768 * free space, and that the reserved space is always considered as not usable.
769 * This information alone cannot be used as a general purpose free space indicator.
770 * However it accurately indicates that too many data were fed in the buffer
771 * for an analyzer for instance. See the channel_may_recv() function for a more
772 * generic function taking everything into account.
773 */
774static inline int channel_full(const struct channel *c, unsigned int reserve)
775{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200776 if (b_is_null(&c->buf))
Willy Tarreau23752332018-06-15 14:54:53 +0200777 return 0;
778
Christopher Faulet87ebe942019-06-11 14:14:49 +0200779 if (IS_HTX_STRM(chn_strm(c)))
780 return channel_htx_full(c, htxbuf(&c->buf), reserve);
Willy Tarreau23752332018-06-15 14:54:53 +0200781
Christopher Faulet87ebe942019-06-11 14:14:49 +0200782 return (ci_data(c) + reserve >= c_size(c));
Christopher Faulet5811db02019-01-07 13:57:01 +0100783}
784
/* HTX version of channel_recv_max(): returns the amount of space still
 * available for input data, never negative.
 */
static inline int channel_htx_recv_max(const struct channel *chn, const struct htx *htx)
{
	int room = channel_htx_recv_limit(chn, htx) - htx_used_space(htx);

	return (room > 0) ? room : 0;
}
795
Christopher Fauletaad45852019-05-14 22:14:03 +0200796/* Returns the amount of space available at the input of the buffer, taking the
797 * reserved space into account if ->to_forward indicates that an end of transfer
798 * is close to happen. The test is optimized to avoid as many operations as
799 * possible for the fast case.
800 */
801static inline int channel_recv_max(const struct channel *chn)
Christopher Faulet5811db02019-01-07 13:57:01 +0100802{
803 int ret;
804
Christopher Fauletaad45852019-05-14 22:14:03 +0200805 if (IS_HTX_STRM(chn_strm(chn)))
806 return channel_htx_recv_max(chn, htxbuf(&chn->buf));
807
808 ret = channel_recv_limit(chn) - b_data(&chn->buf);
Christopher Faulet5811db02019-01-07 13:57:01 +0100809 if (ret < 0)
810 ret = 0;
811 return ret;
812}
813
/* Returns the amount of bytes that can be written over the input data at once,
 * including reserved space which may be overwritten. This is used by Lua to
 * insert data in the input side just before the other data using buffer_replace().
 * The goal is to transfer these new data in the output buffer.
 */
static inline int ci_space_for_replace(const struct channel *chn)
{
	const struct buffer *buf = &chn->buf;
	const char *end;

	/* If the input side data overflows, we cannot insert data contiguously. */
	if (b_head(buf) + b_data(buf) >= b_wrap(buf))
		return 0;

	/* Check the last byte used in the buffer, it may be a byte of the output
	 * side if the buffer wraps, or it's the end of the buffer.
	 */
	end = b_head(buf);
	if (end <= ci_head(chn))
		end = b_wrap(buf);

	/* Compute the amount of bytes which can be written. */
	return end - ci_tail(chn);
}
838
/* Allocates a buffer for channel <chn>, but only if it's guaranteed that it's
 * not the last available buffer or it's the response buffer. Unless the buffer
 * is the response buffer, an extra control is made so that we always keep
 * <tune.buffers.reserved> buffers available after this allocation. Returns 0 in
 * case of failure, non-zero otherwise.
 *
 * If no buffer is available, the requester, represented by the <wait> pointer,
 * will be added to the list of objects waiting for an available buffer.
 */
static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *wait)
{
	int margin = 0;

	/* only request channels (CF_ISRESP unset) must preserve the reserve */
	if (!(chn->flags & CF_ISRESP))
		margin = global.tune.reserved_bufs;

	if (b_alloc_margin(&chn->buf, margin) != NULL)
		return 1;

	/* allocation failed: enqueue <wait> under the wait-queue lock, unless
	 * it is already linked into a list.
	 */
	if (LIST_ISEMPTY(&wait->list)) {
		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		LIST_ADDQ(&buffer_wq, &wait->list);
		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
	}

	return 0;
}
866
/* Releases a possibly allocated buffer for channel <chn>. If it was not
 * allocated, this function does nothing. Else the buffer is released and we try
 * to wake up as many streams/applets as possible. */
static inline void channel_release_buffer(struct channel *chn, struct buffer_wait *wait)
{
	/* free only when a buffer is allocated (non-zero size) and fully empty */
	if (c_size(chn) && c_empty(chn)) {
		b_free(&chn->buf);
		offer_buffers(wait->target, tasks_run_queue);
	}
}
877
Willy Tarreau319f7452015-01-14 20:32:59 +0100878/* Truncate any unread data in the channel's buffer, and disable forwarding.
879 * Outgoing data are left intact. This is mainly to be used to send error
880 * messages after existing data.
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200881 */
Willy Tarreau319f7452015-01-14 20:32:59 +0100882static inline void channel_truncate(struct channel *chn)
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200883{
Willy Tarreau3ee83442018-06-15 16:42:02 +0200884 if (!co_data(chn))
Willy Tarreau974ced62012-10-12 23:11:02 +0200885 return channel_erase(chn);
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200886
Willy Tarreau974ced62012-10-12 23:11:02 +0200887 chn->to_forward = 0;
Willy Tarreau3ee83442018-06-15 16:42:02 +0200888 if (!ci_data(chn))
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200889 return;
890
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200891 chn->buf.data = co_data(chn);
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200892}
893
Christopher Fauletf7ed1952019-01-07 14:55:10 +0100894static inline void channel_htx_truncate(struct channel *chn, struct htx *htx)
895{
896 if (!co_data(chn))
897 return channel_htx_erase(chn, htx);
898
899 chn->to_forward = 0;
900 if (htx->data == co_data(chn))
901 return;
902 htx_truncate(htx, co_data(chn));
903}
904
Willy Tarreau4cf13002018-06-06 06:53:15 +0200905/* This function realigns a possibly wrapping channel buffer so that the input
906 * part is contiguous and starts at the beginning of the buffer and the output
907 * part ends at the end of the buffer. This provides the best conditions since
908 * it allows the largest inputs to be processed at once and ensures that once
909 * the output data leaves, the whole buffer is available at once.
910 */
Willy Tarreaufd8d42f2018-07-12 10:57:15 +0200911static inline void channel_slow_realign(struct channel *chn, char *swap)
Willy Tarreau4cf13002018-06-06 06:53:15 +0200912{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200913 return b_slow_realign(&chn->buf, swap, co_data(chn));
Willy Tarreau4cf13002018-06-06 06:53:15 +0200914}
915
Christopher Fauletb2f4e832019-05-23 10:01:34 +0200916
917/* Forward all headers of an HTX message, starting from the SL to the EOH. This
Christopher Faulet421e7692019-06-13 11:16:45 +0200918 * function returns the position of the block after the EOH, if
919 * found. Otherwise, it returns -1.
Christopher Fauletb2f4e832019-05-23 10:01:34 +0200920 */
Christopher Faulet421e7692019-06-13 11:16:45 +0200921static inline int32_t channel_htx_fwd_headers(struct channel *chn, struct htx *htx)
Christopher Fauletb2f4e832019-05-23 10:01:34 +0200922{
923 int32_t pos;
924 size_t data = 0;
925
926 for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
927 struct htx_blk *blk = htx_get_blk(htx, pos);
928 data += htx_get_blksz(blk);
929 if (htx_get_blk_type(blk) == HTX_BLK_EOH) {
Christopher Faulet421e7692019-06-13 11:16:45 +0200930 pos = htx_get_next(htx, pos);
Christopher Fauletb2f4e832019-05-23 10:01:34 +0200931 break;
932 }
933 }
934 c_adv(chn, data);
Christopher Faulet421e7692019-06-13 11:16:45 +0200935 return pos;
Christopher Fauletb2f4e832019-05-23 10:01:34 +0200936}
937
/*
 * Advance the channel buffer's read pointer by <len> bytes. This is useful
 * when data have been read directly from the buffer. It is illegal to call
 * this function with <len> causing a wrapping at the end of the buffer. It's
 * the caller's responsibility to ensure that <len> is never larger than
 * chn->o. Channel flags WRITE_PARTIAL and WROTE_DATA are set.
 */
static inline void co_skip(struct channel *chn, int len)
{
	b_del(&chn->buf, len);
	chn->output -= len;
	c_realign_if_empty(chn);

	/* notify that some data was written to the SI from the buffer */
	chn->flags |= CF_WRITE_PARTIAL | CF_WROTE_DATA;
	chn_prod(chn)->flags &= ~SI_FL_RXBLK_ROOM; // si_rx_room_rdy()
}
Willy Tarreaubaaee002006-06-26 02:48:02 +0200955
Christopher Fauletc6827d52019-02-25 10:44:51 +0100956/* HTX version of co_skip(). This function skips at most <len> bytes from the
957 * output of the channel <chn>. Depending on how data are stored in <htx> less
958 * than <len> bytes can be skipped. Channel flags WRITE_PARTIAL and WROTE_DATA
959 * are set.
960 */
961static inline void co_htx_skip(struct channel *chn, struct htx *htx, int len)
962{
963 struct htx_ret htxret;
964
965 htxret = htx_drain(htx, len);
966 if (htxret.ret) {
967 chn->output -= htxret.ret;
968
969 /* notify that some data was written to the SI from the buffer */
970 chn->flags |= CF_WRITE_PARTIAL | CF_WROTE_DATA;
Christopher Fauletab1300b2019-07-05 13:44:29 +0200971 chn_prod(chn)->flags &= ~SI_FL_RXBLK_ROOM; // si_rx_room_rdy()
Christopher Fauletc6827d52019-02-25 10:44:51 +0100972 }
973}
Christopher Faulet729b5b32019-02-25 15:50:12 +0100974
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200975/* Tries to copy chunk <chunk> into the channel's buffer after length controls.
Willy Tarreau974ced62012-10-12 23:11:02 +0200976 * The chn->o and to_forward pointers are updated. If the channel's input is
Willy Tarreau74b08c92010-09-08 17:04:31 +0200977 * closed, -2 is returned. If the block is too large for this buffer, -3 is
978 * returned. If there is not enough room left in the buffer, -1 is returned.
979 * Otherwise the number of bytes copied is returned (0 being a valid number).
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200980 * Channel flag READ_PARTIAL is updated if some data can be transferred. The
Willy Tarreauf941cf22012-08-27 20:53:34 +0200981 * chunk's length is updated with the number of bytes sent.
Willy Tarreauaeac3192009-08-31 08:09:57 +0200982 */
Willy Tarreau83061a82018-07-13 11:56:34 +0200983static inline int ci_putchk(struct channel *chn, struct buffer *chunk)
Willy Tarreauaeac3192009-08-31 08:09:57 +0200984{
985 int ret;
986
Willy Tarreau843b7cb2018-07-13 10:54:26 +0200987 ret = ci_putblk(chn, chunk->area, chunk->data);
Willy Tarreau74b08c92010-09-08 17:04:31 +0200988 if (ret > 0)
Willy Tarreau843b7cb2018-07-13 10:54:26 +0200989 chunk->data -= ret;
Willy Tarreauaeac3192009-08-31 08:09:57 +0200990 return ret;
991}
992
/* Tries to copy string <str> at once into the channel's buffer after length
 * controls. The chn->o and to_forward pointers are updated. If the channel's
 * input is closed, -2 is returned. If the block is too large for this buffer,
 * -3 is returned. If there is not enough room left in the buffer, -1 is
 * returned. Otherwise the number of bytes copied is returned (0 being a valid
 * number). Channel flag READ_PARTIAL is updated if some data can be
 * transferred. The terminating NUL byte is not copied.
 */
static inline int ci_putstr(struct channel *chn, const char *str)
{
	return ci_putblk(chn, str, strlen(str));
}
1005
1006/*
Willy Tarreau8263d2b2012-08-28 00:06:31 +02001007 * Return one char from the channel's buffer. If the buffer is empty and the
1008 * channel is closed, return -2. If the buffer is just empty, return -1. The
Willy Tarreau06d80a92017-10-19 14:32:15 +02001009 * buffer's pointer is not advanced, it's up to the caller to call co_skip(buf,
Willy Tarreau8263d2b2012-08-28 00:06:31 +02001010 * 1) when it has consumed the char. Also note that this function respects the
Willy Tarreau974ced62012-10-12 23:11:02 +02001011 * chn->o limit.
Willy Tarreau74b08c92010-09-08 17:04:31 +02001012 */
Willy Tarreau06d80a92017-10-19 14:32:15 +02001013static inline int co_getchr(struct channel *chn)
Willy Tarreau74b08c92010-09-08 17:04:31 +02001014{
1015 /* closed or empty + imminent close = -2; empty = -1 */
Willy Tarreau974ced62012-10-12 23:11:02 +02001016 if (unlikely((chn->flags & CF_SHUTW) || channel_is_empty(chn))) {
1017 if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
Willy Tarreau74b08c92010-09-08 17:04:31 +02001018 return -2;
1019 return -1;
1020 }
Willy Tarreau50227f92018-06-15 15:18:17 +02001021 return *co_head(chn);
Willy Tarreau74b08c92010-09-08 17:04:31 +02001022}
1023
Willy Tarreaubaaee002006-06-26 02:48:02 +02001024
Willy Tarreauc7e42382012-08-24 19:22:53 +02001025#endif /* _PROTO_CHANNEL_H */
Willy Tarreaubaaee002006-06-26 02:48:02 +02001026
1027/*
1028 * Local variables:
1029 * c-indent-level: 8
1030 * c-basic-offset: 8
1031 * End:
1032 */