Willy Tarreaubaaee002006-06-26 02:48:02 +02001/*
Willy Tarreauc7e42382012-08-24 19:22:53 +02002 * include/proto/channel.h
3 * Channel management definitions, macros and inline functions.
Willy Tarreau7c3c5412009-12-13 15:53:05 +01004 *
Willy Tarreaua27dc192014-11-27 22:10:04 +01005 * Copyright (C) 2000-2014 Willy Tarreau - w@1wt.eu
Willy Tarreau7c3c5412009-12-13 15:53:05 +01006 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation, version 2.1
10 * exclusively.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
Willy Tarreaubaaee002006-06-26 02:48:02 +020021
Willy Tarreauc7e42382012-08-24 19:22:53 +020022#ifndef _PROTO_CHANNEL_H
23#define _PROTO_CHANNEL_H
Willy Tarreaubaaee002006-06-26 02:48:02 +020024
Willy Tarreau08d5ac82018-06-06 15:09:28 +020025#include <stdint.h>
Willy Tarreau7341d942007-05-13 19:56:02 +020026#include <stdio.h>
Willy Tarreau0f772532006-12-23 20:51:41 +010027#include <stdlib.h>
Willy Tarreau7341d942007-05-13 19:56:02 +020028#include <string.h>
Willy Tarreau0f772532006-12-23 20:51:41 +010029
Willy Tarreaue3ba5f02006-06-29 18:54:54 +020030#include <common/config.h>
Willy Tarreauc7e42382012-08-24 19:22:53 +020031#include <common/chunk.h>
Willy Tarreau0c303ee2008-07-07 00:09:58 +020032#include <common/ticks.h>
Willy Tarreaufa645582007-06-03 15:59:52 +020033#include <common/time.h>
34
Thierry FOURNIERac836ba2014-12-16 15:41:18 +010035#include <types/channel.h>
Willy Tarreau7c3c5412009-12-13 15:53:05 +010036#include <types/global.h>
Willy Tarreau87b09662015-04-03 00:22:06 +020037#include <types/stream.h>
Willy Tarreau73796532014-11-28 14:10:28 +010038#include <types/stream_interface.h>
Willy Tarreaubaaee002006-06-26 02:48:02 +020039
Christopher Fauleta73e59b2016-12-09 17:30:18 +010040#include <proto/task.h>
41
Willy Tarreau7341d942007-05-13 19:56:02 +020042/* perform minimal initializations, report 0 in case of error, 1 if OK. */
Willy Tarreau8263d2b2012-08-28 00:06:31 +020043int init_channel();
Willy Tarreau7341d942007-05-13 19:56:02 +020044
Willy Tarreau55a69062012-10-26 00:21:52 +020045unsigned long long __channel_forward(struct channel *chn, unsigned long long bytes);
Willy Tarreau8263d2b2012-08-28 00:06:31 +020046
47/* SI-to-channel functions working with buffers */
Willy Tarreau06d80a92017-10-19 14:32:15 +020048int ci_putblk(struct channel *chn, const char *str, int len);
Willy Tarreau06d80a92017-10-19 14:32:15 +020049int ci_putchr(struct channel *chn, char c);
Willy Tarreau55f3ce12018-07-18 11:49:27 +020050int ci_getline_nc(const struct channel *chn, char **blk1, size_t *len1, char **blk2, size_t *len2);
51int ci_getblk_nc(const struct channel *chn, char **blk1, size_t *len1, char **blk2, size_t *len2);
Willy Tarreau06d80a92017-10-19 14:32:15 +020052int co_inject(struct channel *chn, const char *msg, int len);
Willy Tarreau41ab8682017-10-19 14:58:40 +020053int co_getline(const struct channel *chn, char *str, int len);
54int co_getblk(const struct channel *chn, char *blk, int len, int offset);
Willy Tarreau55f3ce12018-07-18 11:49:27 +020055int co_getline_nc(const struct channel *chn, const char **blk1, size_t *len1, const char **blk2, size_t *len2);
56int co_getblk_nc(const struct channel *chn, const char **blk1, size_t *len1, const char **blk2, size_t *len2);
Thierry FOURNIERca16b032015-02-16 19:26:48 +010057
Willy Tarreau74b08c92010-09-08 17:04:31 +020058
Willy Tarreau87b09662015-04-03 00:22:06 +020059/* returns a pointer to the stream the channel belongs to */
Thierry FOURNIER27929fb2015-09-25 08:36:11 +020060static inline struct stream *chn_strm(const struct channel *chn)
Willy Tarreaud5ccfa32014-12-28 13:03:53 +010061{
62 if (chn->flags & CF_ISRESP)
Willy Tarreau87b09662015-04-03 00:22:06 +020063 return LIST_ELEM(chn, struct stream *, res);
Willy Tarreaud5ccfa32014-12-28 13:03:53 +010064 else
Willy Tarreau87b09662015-04-03 00:22:06 +020065 return LIST_ELEM(chn, struct stream *, req);
Willy Tarreaud5ccfa32014-12-28 13:03:53 +010066}
67
Willy Tarreau73796532014-11-28 14:10:28 +010068/* returns a pointer to the stream interface feeding the channel (producer) */
69static inline struct stream_interface *chn_prod(const struct channel *chn)
70{
Willy Tarreau5decc052014-11-28 14:22:12 +010071 if (chn->flags & CF_ISRESP)
Willy Tarreau87b09662015-04-03 00:22:06 +020072 return &LIST_ELEM(chn, struct stream *, res)->si[1];
Willy Tarreau5decc052014-11-28 14:22:12 +010073 else
Willy Tarreau87b09662015-04-03 00:22:06 +020074 return &LIST_ELEM(chn, struct stream *, req)->si[0];
Willy Tarreau73796532014-11-28 14:10:28 +010075}
76
77/* returns a pointer to the stream interface consuming the channel (consumer) */
78static inline struct stream_interface *chn_cons(const struct channel *chn)
79{
Willy Tarreau5decc052014-11-28 14:22:12 +010080 if (chn->flags & CF_ISRESP)
Willy Tarreau87b09662015-04-03 00:22:06 +020081 return &LIST_ELEM(chn, struct stream *, res)->si[0];
Willy Tarreau5decc052014-11-28 14:22:12 +010082 else
Willy Tarreau87b09662015-04-03 00:22:06 +020083 return &LIST_ELEM(chn, struct stream *, req)->si[1];
Willy Tarreau73796532014-11-28 14:10:28 +010084}
85
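/* Illustrative sketch (not part of the API) : on the request channel the
 * producer is the client-side interface si[0] and the consumer the server-side
 * si[1]; on the response channel the roles are swapped :
 *
 *	struct stream_interface *from = chn_prod(&s->req);  // s->si[0], fed by the client
 *	struct stream_interface *to   = chn_cons(&s->req);  // s->si[1], drains to the server
 */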
Willy Tarreau08d5ac82018-06-06 15:09:28 +020086/* c_orig() : returns the pointer to the channel buffer's origin */
87static inline char *c_orig(const struct channel *c)
88{
89 return b_orig(c->buf);
90}
91
92/* c_size() : returns the size of the channel's buffer */
93static inline size_t c_size(const struct channel *c)
94{
95 return b_size(c->buf);
96}
97
98/* c_wrap() : returns the pointer to the channel buffer's wrapping point */
99static inline char *c_wrap(const struct channel *c)
100{
101 return b_wrap(c->buf);
102}
103
104/* c_data() : returns the amount of data in the channel's buffer */
105static inline size_t c_data(const struct channel *c)
106{
107 return b_data(c->buf);
108}
109
110/* c_room() : returns the room left in the channel's buffer */
111static inline size_t c_room(const struct channel *c)
112{
113 return b_size(c->buf) - b_data(c->buf);
114}
115
116/* c_empty() : returns a boolean indicating if the channel's buffer is empty */
117static inline size_t c_empty(const struct channel *c)
118{
119 return !c_data(c);
120}
121
122/* c_full() : returns a boolean indicating if the channel's buffer is full */
123static inline size_t c_full(const struct channel *c)
124{
125 return !c_room(c);
126}
127
128/* co_data() : returns the amount of output data in the channel's buffer */
129static inline size_t co_data(const struct channel *c)
130{
Willy Tarreaud54a8ce2018-06-29 18:42:02 +0200131 return c->buf->output;
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200132}
133
134/* ci_data() : returns the amount of input data in the channel's buffer */
135static inline size_t ci_data(const struct channel *c)
136{
Willy Tarreau3ee83442018-06-15 16:42:02 +0200137 return c_data(c) - co_data(c);
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200138}
139
140/* ci_next() : for an absolute pointer <p> or a relative offset <o> pointing to
141 * a valid location within channel <c>'s buffer, returns either the absolute
142 * pointer or the relative offset pointing to the next byte, which usually is
143 * at (p + 1) unless p reaches the wrapping point and wrapping is needed.
144 */
145static inline size_t ci_next_ofs(const struct channel *c, size_t o)
146{
147 return b_next_ofs(c->buf, o);
148}
149static inline char *ci_next(const struct channel *c, const char *p)
150{
151 return b_next(c->buf, p);
152}
153
154
155/* c_ptr() : returns a pointer to an offset relative to the beginning of the
156 * input data in the buffer. If instead the offset is negative, a pointer to
157 * existing output data is returned. The function only takes care of wrapping,
158 * it's up to the caller to ensure the offset is always within byte count
159 * bounds.
160 */
161static inline char *c_ptr(const struct channel *c, ssize_t ofs)
162{
163 return b_peek(c->buf, co_data(c) + ofs);
164}
165
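/* Illustrative example : assuming co_data(c) == 10 and ci_data(c) == 30,
 *
 *	char *in  = c_ptr(c, 0);    // first input byte, same as ci_head(c)
 *	char *out = c_ptr(c, -10);  // first output byte, same as co_head(c)
 *
 * Offsets outside that range are the caller's responsibility.
 */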
166/* c_adv() : advances the channel's buffer by <adv> bytes, which means that the
167 * buffer's pointer advances, and that as many bytes from in are transferred
168 * from in to out. The caller is responsible for ensuring that adv is always
169 * smaller than or equal to b->i.
170 */
171static inline void c_adv(struct channel *c, size_t adv)
172{
Willy Tarreaud54a8ce2018-06-29 18:42:02 +0200173 c->buf->output += adv;
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200174}
175
176/* c_rew() : rewinds the channel's buffer by <adv> bytes, which means that the
177 * buffer's pointer goes backwards, and that as many bytes from out are moved
178 * to in. The caller is responsible for ensuring that adv is always smaller
179 * than or equal to b->o.
180 */
181static inline void c_rew(struct channel *c, size_t adv)
182{
Willy Tarreaud54a8ce2018-06-29 18:42:02 +0200183 c->buf->output -= adv;
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200184}
185
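/* Illustrative sketch : c_adv()/c_rew() only move the input/output boundary,
 * no byte is moved and the total amount of data does not change :
 *
 *	size_t in = ci_data(chn), out = co_data(chn);
 *	c_adv(chn, 5);   // now ci_data(chn) == in - 5 and co_data(chn) == out + 5
 *	c_rew(chn, 5);   // back to the initial state
 */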
186/* c_realign_if_empty() : realign the channel's buffer if it's empty */
187static inline void c_realign_if_empty(struct channel *chn)
188{
189 b_realign_if_empty(chn->buf);
190}
191
Olivier Houchardd4251a72018-06-29 16:17:34 +0200192/* Sets the amount of output for the channel */
193static inline void co_set_data(struct channel *c, size_t output)
194{
Willy Tarreaud54a8ce2018-06-29 18:42:02 +0200195 c->buf->len += output - c->buf->output;
196 c->buf->output = output;
Olivier Houchardd4251a72018-06-29 16:17:34 +0200197}
198
Willy Tarreau08d5ac82018-06-06 15:09:28 +0200199
200/* co_head() : returns a pointer to the beginning of output data in the buffer.
201 * The "__" variants don't support wrapping, "ofs" are relative to
202 * the buffer's origin.
203 */
204static inline size_t __co_head_ofs(const struct channel *c)
205{
206 return __b_peek_ofs(c->buf, 0);
207}
208static inline char *__co_head(const struct channel *c)
209{
210 return __b_peek(c->buf, 0);
211}
212static inline size_t co_head_ofs(const struct channel *c)
213{
214 return b_peek_ofs(c->buf, 0);
215}
216static inline char *co_head(const struct channel *c)
217{
218 return b_peek(c->buf, 0);
219}
220
221
222/* co_tail() : returns a pointer to the end of output data in the buffer.
223 * The "__" variants don't support wrapping, "ofs" are relative to
224 * the buffer's origin.
225 */
226static inline size_t __co_tail_ofs(const struct channel *c)
227{
228 return __b_peek_ofs(c->buf, co_data(c));
229}
230static inline char *__co_tail(const struct channel *c)
231{
232 return __b_peek(c->buf, co_data(c));
233}
234static inline size_t co_tail_ofs(const struct channel *c)
235{
236 return b_peek_ofs(c->buf, co_data(c));
237}
238static inline char *co_tail(const struct channel *c)
239{
240 return b_peek(c->buf, co_data(c));
241}
242
243
244/* ci_head() : returns a pointer to the beginning of input data in the buffer.
245 * The "__" variants don't support wrapping, "ofs" are relative to
246 * the buffer's origin.
247 */
248static inline size_t __ci_head_ofs(const struct channel *c)
249{
250 return __b_peek_ofs(c->buf, co_data(c));
251}
252static inline char *__ci_head(const struct channel *c)
253{
254 return __b_peek(c->buf, co_data(c));
255}
256static inline size_t ci_head_ofs(const struct channel *c)
257{
258 return b_peek_ofs(c->buf, co_data(c));
259}
260static inline char *ci_head(const struct channel *c)
261{
262 return b_peek(c->buf, co_data(c));
263}
264
265
266/* ci_tail() : returns a pointer to the end of input data in the buffer.
267 * The "__" variants don't support wrapping, "ofs" are relative to
268 * the buffer's origin.
269 */
270static inline size_t __ci_tail_ofs(const struct channel *c)
271{
272 return __b_peek_ofs(c->buf, c_data(c));
273}
274static inline char *__ci_tail(const struct channel *c)
275{
276 return __b_peek(c->buf, c_data(c));
277}
278static inline size_t ci_tail_ofs(const struct channel *c)
279{
280 return b_peek_ofs(c->buf, c_data(c));
281}
282static inline char *ci_tail(const struct channel *c)
283{
284 return b_peek(c->buf, c_data(c));
285}
286
287
288/* ci_stop() : returns the pointer to the byte following the end of input data
289 * in the channel buffer. It may be out of the buffer. It's used to
290 * compute lengths or stop pointers.
291 */
292static inline size_t __ci_stop_ofs(const struct channel *c)
293{
294 return __b_stop_ofs(c->buf);
295}
296static inline const char *__ci_stop(const struct channel *c)
297{
298 return __b_stop(c->buf);
299}
300static inline size_t ci_stop_ofs(const struct channel *c)
301{
302 return b_stop_ofs(c->buf);
303}
304static inline const char *ci_stop(const struct channel *c)
305{
306 return b_stop(c->buf);
307}
308
309
Willy Tarreau7194d3c2018-06-06 16:55:45 +0200310/* Returns the amount of input data that can contiguously be read at once */
311static inline size_t ci_contig_data(const struct channel *c)
312{
313 return b_contig_data(c->buf, co_data(c));
314}
315
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200316/* Initialize all fields in the channel. */
Willy Tarreau974ced62012-10-12 23:11:02 +0200317static inline void channel_init(struct channel *chn)
Willy Tarreau54469402006-07-29 16:59:06 +0200318{
Willy Tarreau2a4b5432014-11-24 11:39:34 +0100319 chn->buf = &buf_empty;
Willy Tarreau974ced62012-10-12 23:11:02 +0200320 chn->to_forward = 0;
Willy Tarreaub145c782014-02-09 17:45:16 +0100321 chn->last_read = now_ms;
Willy Tarreau8f39dcd2014-02-09 08:31:49 +0100322 chn->xfer_small = chn->xfer_large = 0;
Willy Tarreau974ced62012-10-12 23:11:02 +0200323 chn->total = 0;
324 chn->pipe = NULL;
325 chn->analysers = 0;
Willy Tarreau974ced62012-10-12 23:11:02 +0200326 chn->flags = 0;
Willy Tarreau54469402006-07-29 16:59:06 +0200327}
328
Willy Tarreau55a69062012-10-26 00:21:52 +0200329/* Schedule up to <bytes> more bytes to be forwarded via the channel without
330 * notifying the owner task. Any data pending in the buffer are scheduled to be
331 * sent as well, in the limit of the number of bytes to forward. This must be
332 * the only method to use to schedule bytes to be forwarded. If the requested
333 * number is too large, it is automatically adjusted. The number of bytes taken
334 * into account is returned. Directly touching ->to_forward will cause lockups
335 * when buf->o goes down to zero if nobody is ready to push the remaining data.
336 */
337static inline unsigned long long channel_forward(struct channel *chn, unsigned long long bytes)
338{
339 /* hint: avoid comparisons on long long for the fast case, since if the
340 * length does not fit in an unsigned int, it will never be forwarded at
341 * once anyway.
342 */
343 if (bytes <= ~0U) {
344 unsigned int bytes32 = bytes;
345
Willy Tarreau3ee83442018-06-15 16:42:02 +0200346 if (bytes32 <= ci_data(chn)) {
Willy Tarreau55a69062012-10-26 00:21:52 +0200347 /* OK this amount of bytes might be forwarded at once */
Willy Tarreaubcbd3932018-06-06 07:13:22 +0200348 c_adv(chn, bytes32);
Willy Tarreau55a69062012-10-26 00:21:52 +0200349 return bytes;
350 }
351 }
352 return __channel_forward(chn, bytes);
353}
354
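/* Usage sketch (hypothetical analyser code) : once <body_len> bytes of payload
 * have been validated, schedule them for forwarding so that they leave without
 * waking the analyser up again for each chunk :
 *
 *	unsigned long long taken = channel_forward(chn, body_len);
 *	// <taken> reports how many bytes were actually taken into account
 */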
Willy Tarreau8bf242b2016-05-04 14:05:58 +0200355/* Forwards any input data and marks the channel for permanent forwarding */
356static inline void channel_forward_forever(struct channel *chn)
357{
Willy Tarreau3ee83442018-06-15 16:42:02 +0200358 c_adv(chn, ci_data(chn));
Willy Tarreau8bf242b2016-05-04 14:05:58 +0200359 chn->to_forward = CHN_INFINITE_FORWARD;
360}
361
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200362/*********************************************************************/
363/* These functions are used to compute various channel content sizes */
364/*********************************************************************/
Willy Tarreau4b517ca2011-11-25 20:33:58 +0100365
Willy Tarreau8e21bb92012-08-24 22:40:29 +0200366/* Reports non-zero if the channel is empty, which means both its
367 * buffer and pipe are empty. The construct looks strange but is
368 * jump-less and much more efficient on both 32 and 64-bit than
369 * the boolean test.
370 */
Willy Tarreau41ab8682017-10-19 14:58:40 +0200371static inline unsigned int channel_is_empty(const struct channel *c)
Willy Tarreau8e21bb92012-08-24 22:40:29 +0200372{
Willy Tarreau3ee83442018-06-15 16:42:02 +0200373 return !(co_data(c) | (long)c->pipe);
Willy Tarreau8e21bb92012-08-24 22:40:29 +0200374}
375
Willy Tarreauba0902e2015-01-13 14:39:16 +0100376/* Returns non-zero if the channel is rewritable, which means that the buffer
377 * it is attached to has at least <maxrewrite> bytes immediately available.
378 * This is used to decide when a request or response may be parsed when some
379 * data from a previous exchange might still be present.
Willy Tarreau379357a2013-06-08 12:55:46 +0200380 */
Willy Tarreauba0902e2015-01-13 14:39:16 +0100381static inline int channel_is_rewritable(const struct channel *chn)
Willy Tarreau379357a2013-06-08 12:55:46 +0200382{
383 int rem = chn->buf->size;
384
Willy Tarreau3ee83442018-06-15 16:42:02 +0200385 rem -= b_data(chn->buf);
Willy Tarreau379357a2013-06-08 12:55:46 +0200386 rem -= global.tune.maxrewrite;
387 return rem >= 0;
388}
389
Willy Tarreau9c06ee42015-01-14 16:08:45 +0100390/* Tells whether data are likely to leave the buffer. This is used to know when
391 * we can safely ignore the reserve since we know we cannot retry a connection.
392 * It returns zero if data are blocked, non-zero otherwise.
393 */
394static inline int channel_may_send(const struct channel *chn)
395{
Willy Tarreau73796532014-11-28 14:10:28 +0100396 return chn_cons(chn)->state == SI_ST_EST;
Willy Tarreau9c06ee42015-01-14 16:08:45 +0100397}
398
Willy Tarreau3889fff2015-01-13 20:20:10 +0100399/* Returns non-zero if the channel can still receive data. This is used to
Willy Tarreau379357a2013-06-08 12:55:46 +0200400 * decide when to stop reading into a buffer when we want to ensure that we
401 * leave the reserve untouched after all pending outgoing data are forwarded.
402 * The reserved space is taken into account if ->to_forward indicates that an
403 * end of transfer is close to happen. Note that both ->buf->o and ->to_forward
404 * are considered as available since they're supposed to leave the buffer. The
405 * test is optimized to avoid as many operations as possible for the fast case
Willy Tarreau4b46a3e2016-04-20 20:09:22 +0200406 * and to be used as an "if" condition. Just like channel_recv_limit(), we
407 * never allow to overwrite the reserve until the output stream interface is
408 * connected, otherwise we could spin on a POST with http-send-name-header.
Willy Tarreau4b517ca2011-11-25 20:33:58 +0100409 */
Willy Tarreau3889fff2015-01-13 20:20:10 +0100410static inline int channel_may_recv(const struct channel *chn)
Willy Tarreau4b517ca2011-11-25 20:33:58 +0100411{
Willy Tarreau9b28e032012-10-12 23:49:43 +0200412 int rem = chn->buf->size;
Willy Tarreau9dab5fc2012-05-07 11:56:55 +0200413
Willy Tarreau4428a292014-11-28 20:54:13 +0100414 if (chn->buf == &buf_empty)
Willy Tarreau3889fff2015-01-13 20:20:10 +0100415 return 1;
Willy Tarreau4428a292014-11-28 20:54:13 +0100416
Willy Tarreau3ee83442018-06-15 16:42:02 +0200417 rem -= b_data(chn->buf);
Willy Tarreau9dab5fc2012-05-07 11:56:55 +0200418 if (!rem)
Willy Tarreau3889fff2015-01-13 20:20:10 +0100419 return 0; /* buffer already full */
Willy Tarreau9dab5fc2012-05-07 11:56:55 +0200420
Willy Tarreau93dc4782016-04-21 12:12:45 +0200421 if (rem > global.tune.maxrewrite)
422 return 1; /* reserve not yet reached */
Willy Tarreau4b46a3e2016-04-20 20:09:22 +0200423
Willy Tarreau93dc4782016-04-21 12:12:45 +0200424 if (!channel_may_send(chn))
425 return 0; /* don't touch reserve until we can send */
Willy Tarreau9dab5fc2012-05-07 11:56:55 +0200426
Willy Tarreau93dc4782016-04-21 12:12:45 +0200427 /* Now we know there's some room left in the reserve and we may
428 * forward. As long as i-to_fwd < size-maxrw, we may still
429 * receive. This is equivalent to i+maxrw-size < to_fwd,
430 * which is logical since i+maxrw-size is what overlaps with
431 * the reserve, and we want to ensure they're covered by scheduled
432 * forwards.
433 */
Willy Tarreau3ee83442018-06-15 16:42:02 +0200434 rem = ci_data(chn) + global.tune.maxrewrite - chn->buf->size;
Willy Tarreau93dc4782016-04-21 12:12:45 +0200435 return rem < 0 || (unsigned int)rem < chn->to_forward;
Willy Tarreau4b517ca2011-11-25 20:33:58 +0100436}
437
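/* Worked example (assuming a 16384-byte buffer and tune.maxrewrite == 1024) :
 * with 15000 bytes present, rem == 1384 > 1024 so receiving is always allowed;
 * with 15500 bytes, rem == 884 and receiving is only allowed if the consumer
 * is connected and ci_data(chn) + 1024 - 16384 < chn->to_forward, i.e. the
 * overlap with the reserve is fully covered by scheduled forwards.
 */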
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200438/* Returns true if the channel's input is already closed */
Willy Tarreau974ced62012-10-12 23:11:02 +0200439static inline int channel_input_closed(struct channel *chn)
Willy Tarreau74b08c92010-09-08 17:04:31 +0200440{
Willy Tarreau974ced62012-10-12 23:11:02 +0200441 return ((chn->flags & CF_SHUTR) != 0);
Willy Tarreau74b08c92010-09-08 17:04:31 +0200442}
443
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200444/* Returns true if the channel's output is already closed */
Willy Tarreau974ced62012-10-12 23:11:02 +0200445static inline int channel_output_closed(struct channel *chn)
Willy Tarreau74b08c92010-09-08 17:04:31 +0200446{
Willy Tarreau974ced62012-10-12 23:11:02 +0200447 return ((chn->flags & CF_SHUTW) != 0);
Willy Tarreau74b08c92010-09-08 17:04:31 +0200448}
449
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200450/* Check channel timeouts, and set the corresponding flags. The likely/unlikely
451 * have been optimized for fastest normal path. The read/write timeouts are not
452 * set if there was activity on the channel. That way, we don't have to update
453 * the timeout on every I/O. Note that the analyser timeout is always checked.
Willy Tarreau2eb52f02008-09-04 09:14:08 +0200454 */
Willy Tarreau974ced62012-10-12 23:11:02 +0200455static inline void channel_check_timeouts(struct channel *chn)
Willy Tarreau2eb52f02008-09-04 09:14:08 +0200456{
Willy Tarreau974ced62012-10-12 23:11:02 +0200457 if (likely(!(chn->flags & (CF_SHUTR|CF_READ_TIMEOUT|CF_READ_ACTIVITY|CF_READ_NOEXP))) &&
458 unlikely(tick_is_expired(chn->rex, now_ms)))
459 chn->flags |= CF_READ_TIMEOUT;
Willy Tarreau2eb52f02008-09-04 09:14:08 +0200460
Christopher Fauletc5a9d5b2017-11-09 09:36:43 +0100461 if (likely(!(chn->flags & (CF_SHUTW|CF_WRITE_TIMEOUT|CF_WRITE_ACTIVITY|CF_WRITE_EVENT))) &&
Willy Tarreau974ced62012-10-12 23:11:02 +0200462 unlikely(tick_is_expired(chn->wex, now_ms)))
463 chn->flags |= CF_WRITE_TIMEOUT;
Willy Tarreau2eb52f02008-09-04 09:14:08 +0200464
Willy Tarreau974ced62012-10-12 23:11:02 +0200465 if (likely(!(chn->flags & CF_ANA_TIMEOUT)) &&
466 unlikely(tick_is_expired(chn->analyse_exp, now_ms)))
467 chn->flags |= CF_ANA_TIMEOUT;
Willy Tarreau2eb52f02008-09-04 09:14:08 +0200468}
469
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200470/* Erase any content from channel <chn> and adjust flags accordingly. Note
Willy Tarreau0abebcc2009-01-08 00:09:41 +0100471 * that any spliced data is not affected since we may not have any access to
472 * it.
Willy Tarreaue393fe22008-08-16 22:18:07 +0200473 */
Willy Tarreau974ced62012-10-12 23:11:02 +0200474static inline void channel_erase(struct channel *chn)
Willy Tarreaubaaee002006-06-26 02:48:02 +0200475{
Willy Tarreau974ced62012-10-12 23:11:02 +0200476 chn->to_forward = 0;
Willy Tarreau474cf542014-11-24 10:54:47 +0100477 b_reset(chn->buf);
Willy Tarreaubaaee002006-06-26 02:48:02 +0200478}
479
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200480/* marks the channel as "shutdown" ASAP for reads */
Willy Tarreau974ced62012-10-12 23:11:02 +0200481static inline void channel_shutr_now(struct channel *chn)
Willy Tarreaufa7e1022008-10-19 07:30:41 +0200482{
Willy Tarreau974ced62012-10-12 23:11:02 +0200483 chn->flags |= CF_SHUTR_NOW;
Willy Tarreaufa7e1022008-10-19 07:30:41 +0200484}
485
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200486/* marks the channel as "shutdown" ASAP for writes */
Willy Tarreau974ced62012-10-12 23:11:02 +0200487static inline void channel_shutw_now(struct channel *chn)
Willy Tarreaufa7e1022008-10-19 07:30:41 +0200488{
Willy Tarreau974ced62012-10-12 23:11:02 +0200489 chn->flags |= CF_SHUTW_NOW;
Willy Tarreaufa7e1022008-10-19 07:30:41 +0200490}
491
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200492/* marks the channel as "shutdown" ASAP in both directions */
Willy Tarreau974ced62012-10-12 23:11:02 +0200493static inline void channel_abort(struct channel *chn)
Willy Tarreaufa7e1022008-10-19 07:30:41 +0200494{
Willy Tarreau974ced62012-10-12 23:11:02 +0200495 chn->flags |= CF_SHUTR_NOW | CF_SHUTW_NOW;
496 chn->flags &= ~CF_AUTO_CONNECT;
Willy Tarreaufa7e1022008-10-19 07:30:41 +0200497}
498
Willy Tarreau520d95e2009-09-19 21:04:57 +0200499/* allow the consumer to try to establish a new connection. */
Willy Tarreau974ced62012-10-12 23:11:02 +0200500static inline void channel_auto_connect(struct channel *chn)
Willy Tarreau3da77c52008-08-29 09:58:42 +0200501{
Willy Tarreau974ced62012-10-12 23:11:02 +0200502 chn->flags |= CF_AUTO_CONNECT;
Willy Tarreau3da77c52008-08-29 09:58:42 +0200503}
504
Willy Tarreau520d95e2009-09-19 21:04:57 +0200505/* prevent the consumer from trying to establish a new connection, and also
506 * disable auto shutdown forwarding.
507 */
Willy Tarreau974ced62012-10-12 23:11:02 +0200508static inline void channel_dont_connect(struct channel *chn)
Willy Tarreau3da77c52008-08-29 09:58:42 +0200509{
Willy Tarreau974ced62012-10-12 23:11:02 +0200510 chn->flags &= ~(CF_AUTO_CONNECT|CF_AUTO_CLOSE);
Willy Tarreau3da77c52008-08-29 09:58:42 +0200511}
512
Willy Tarreau520d95e2009-09-19 21:04:57 +0200513/* allow the producer to forward shutdown requests */
Willy Tarreau974ced62012-10-12 23:11:02 +0200514static inline void channel_auto_close(struct channel *chn)
Willy Tarreau0a5d5dd2008-11-23 19:31:35 +0100515{
Willy Tarreau974ced62012-10-12 23:11:02 +0200516 chn->flags |= CF_AUTO_CLOSE;
Willy Tarreau0a5d5dd2008-11-23 19:31:35 +0100517}
518
Willy Tarreau520d95e2009-09-19 21:04:57 +0200519/* prevent the producer from forwarding shutdown requests */
Willy Tarreau974ced62012-10-12 23:11:02 +0200520static inline void channel_dont_close(struct channel *chn)
Willy Tarreau0a5d5dd2008-11-23 19:31:35 +0100521{
Willy Tarreau974ced62012-10-12 23:11:02 +0200522 chn->flags &= ~CF_AUTO_CLOSE;
Willy Tarreau0a5d5dd2008-11-23 19:31:35 +0100523}
524
Willy Tarreau90deb182010-01-07 00:20:41 +0100525/* allow the producer to read / poll the input */
Willy Tarreau974ced62012-10-12 23:11:02 +0200526static inline void channel_auto_read(struct channel *chn)
Willy Tarreau90deb182010-01-07 00:20:41 +0100527{
Willy Tarreau974ced62012-10-12 23:11:02 +0200528 chn->flags &= ~CF_DONT_READ;
Willy Tarreau90deb182010-01-07 00:20:41 +0100529}
530
531/* prevent the producer from reading / polling the input */
Willy Tarreau974ced62012-10-12 23:11:02 +0200532static inline void channel_dont_read(struct channel *chn)
Willy Tarreau90deb182010-01-07 00:20:41 +0100533{
Willy Tarreau974ced62012-10-12 23:11:02 +0200534 chn->flags |= CF_DONT_READ;
Willy Tarreau90deb182010-01-07 00:20:41 +0100535}
536
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200537
538/*************************************************/
539/* Buffer operations in the context of a channel */
540/*************************************************/
541
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200542
543/* Return the max number of bytes the buffer can contain so that once all the
Willy Tarreau169c4702016-04-20 18:05:17 +0200544 * pending bytes are forwarded, the buffer still has global.tune.maxrewrite
Willy Tarreau974ced62012-10-12 23:11:02 +0200545 * bytes free. The result sits between chn->size - maxrewrite and chn->size.
Willy Tarreau169c4702016-04-20 18:05:17 +0200546 * It is important to mention that if buf->i is already larger than size-maxrw
547 * the condition above cannot be satisfied and the lowest size will be returned
548 * anyway. The principles are the following :
549 * 0) the empty buffer has a limit of zero
550 * 1) a non-connected buffer cannot touch the reserve
551 * 2) infinite forward can always fill the buffer since all data will leave
552 * 3) all output bytes are considered in transit since they're leaving
553 * 4) all input bytes covered by to_forward are considered in transit since
554 * they'll be converted to output bytes.
555 * 5) all input bytes not covered by to_forward are considered remaining
556 * 6) all bytes scheduled to be forwarded minus what is already in the input
557 * buffer will be in transit during future rounds.
558 * 7) 4+5+6 imply that the amount of input bytes (i) is irrelevant to the max
559 * usable length, only to_forward and output count. The difference is
560 * visible when to_forward > i.
561 * 8) the reserve may be covered up to the amount of bytes in transit since
562 * these bytes will only take temporary space.
Willy Tarreau999f6432016-01-25 01:09:11 +0100563 *
Willy Tarreau169c4702016-04-20 18:05:17 +0200564 * A typical buffer looks like this :
Willy Tarreau999f6432016-01-25 01:09:11 +0100565 *
Willy Tarreau169c4702016-04-20 18:05:17 +0200566 * <-------------- max_len ----------->
567 * <---- o ----><----- i -----> <--- 0..maxrewrite --->
568 * +------------+--------------+-------+----------------------+
569 * |////////////|\\\\\\\\\\\\\\|xxxxxxx| reserve |
570 * +------------+--------+-----+-------+----------------------+
571 * <- fwd -> <-avail->
572 *
573 * Or when to_forward > i :
574 *
575 * <-------------- max_len ----------->
576 * <---- o ----><----- i -----> <--- 0..maxrewrite --->
577 * +------------+--------------+-------+----------------------+
578 * |////////////|\\\\\\\\\\\\\\|xxxxxxx| reserve |
579 * +------------+--------+-----+-------+----------------------+
580 * <-avail->
581 * <------------------ fwd ---------------->
582 *
583 * - the amount of buffer bytes in transit is : min(i, fwd) + o
584 * - some scheduled bytes may be in transit (up to fwd - i)
585 * - the reserve is max(0, maxrewrite - transit)
586 * - the maximum usable buffer length is size - reserve.
587 * - the available space is max_len - i - o
588 *
589 * So the formula to compute the buffer's maximum length to protect the reserve
590 * when reading new data is :
591 *
592 * max = size - maxrewrite + min(maxrewrite, transit)
593 * = size - max(maxrewrite - transit, 0)
594 *
595 * But WARNING! The conditions might change during the transfer and it could
596 * very well happen that a buffer would contain more bytes than max_len due to
597 * i+o already walking over the reserve (eg: after a header rewrite), including
598 * i or o alone hitting the limit. So it is critical to always consider that
599 * bounds may have already been crossed and that available space may be negative
600 * for example. Due to this it is perfectly possible for this function to return
601 * a value that is lower than current i+o.
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200602 */
Willy Tarreau3f5096d2015-01-14 20:21:43 +0100603static inline int channel_recv_limit(const struct channel *chn)
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200604{
Willy Tarreauef907fe2016-05-03 17:46:24 +0200605 unsigned int transit;
Willy Tarreau999f6432016-01-25 01:09:11 +0100606 int reserve;
607
608 /* return zero if empty */
609 reserve = chn->buf->size;
610 if (chn->buf == &buf_empty)
611 goto end;
612
613 /* return size - maxrewrite if we can't send */
614 reserve = global.tune.maxrewrite;
615 if (unlikely(!channel_may_send(chn)))
616 goto end;
617
Willy Tarreauef907fe2016-05-03 17:46:24 +0200618 /* We need to check what remains of the reserve after o and to_forward
619 * have been transmitted, but they can overflow together and they can
620 * cause an integer underflow in the comparison since both are unsigned
621 * while maxrewrite is signed.
622 * The code below has been verified for being a valid check for this :
623 * - if (o + to_forward) overflow => return size [ large enough ]
624 * - if o + to_forward >= maxrw => return size [ large enough ]
625 * - otherwise return size - (maxrw - (o + to_forward))
Willy Tarreau999f6432016-01-25 01:09:11 +0100626 */
Willy Tarreau3ee83442018-06-15 16:42:02 +0200627 transit = co_data(chn) + chn->to_forward;
Willy Tarreauef907fe2016-05-03 17:46:24 +0200628 reserve -= transit;
629 if (transit < chn->to_forward || // addition overflow
630 transit >= (unsigned)global.tune.maxrewrite) // enough transit data
Willy Tarreau169c4702016-04-20 18:05:17 +0200631 return chn->buf->size;
Willy Tarreau999f6432016-01-25 01:09:11 +0100632 end:
633 return chn->buf->size - reserve;
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200634}
635
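/* Worked example (assuming a 16384-byte buffer, tune.maxrewrite == 1024 and a
 * connected consumer) : with co_data == 200 and to_forward == 300, transit is
 * 500 < 1024 so the limit is 16384 - (1024 - 500) = 15860. As soon as transit
 * reaches 1024 (or the addition overflows), the whole buffer size is returned.
 */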
Willy Tarreau23752332018-06-15 14:54:53 +0200636/* Returns non-zero if the channel's INPUT buffer is considered full, which
637 * means that it holds at least as much INPUT data as (size - reserve). This
638 * also means that data that are scheduled for output are considered as potential
639 * free space, and that the reserved space is always considered as not usable.
640 * This information alone cannot be used as a general purpose free space indicator.
641 * However it accurately indicates that too many data were fed in the buffer
642 * for an analyzer for instance. See the channel_may_recv() function for a more
643 * generic function taking everything into account.
644 */
645static inline int channel_full(const struct channel *c, unsigned int reserve)
646{
647 if (c->buf == &buf_empty)
648 return 0;
649
Willy Tarreau3ee83442018-06-15 16:42:02 +0200650 return (ci_data(c) + reserve >= c_size(c));
Willy Tarreau23752332018-06-15 14:54:53 +0200651}
652
653
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200654/* Returns the amount of space available at the input of the buffer, taking the
655 * reserved space into account if ->to_forward indicates that an end of transfer
656 * is close to happen. The test is optimized to avoid as many operations as
657 * possible for the fast case.
658 */
Willy Tarreaub5051f82015-01-14 20:25:34 +0100659static inline int channel_recv_max(const struct channel *chn)
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200660{
Willy Tarreau27bb0e12015-01-14 15:56:50 +0100661 int ret;
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200662
Willy Tarreau3ee83442018-06-15 16:42:02 +0200663 ret = channel_recv_limit(chn) - b_data(chn->buf);
Willy Tarreau27bb0e12015-01-14 15:56:50 +0100664 if (ret < 0)
665 ret = 0;
666 return ret;
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200667}
668
Willy Tarreau3f679992018-06-15 15:06:42 +0200669/* Returns the amount of bytes that can be written over the input data at once,
670 * including reserved space which may be overwritten. This is used by Lua to
671 * insert data in the input side just before the other data using buffer_replace().
672 * The goal is to transfer these new data in the output buffer.
673 */
674static inline int ci_space_for_replace(const struct channel *chn)
675{
676 const struct buffer *buf = chn->buf;
677 const char *end;
678
679 /* If the input side data overflows, we cannot insert data contiguously. */
680 if (b_head(buf) + b_data(buf) >= b_wrap(buf))
681 return 0;
682
683 /* Check the last byte used in the buffer, it may be a byte of the output
684 * side if the buffer wraps, or it's the end of the buffer.
685 */
686 end = b_head(buf);
687 if (end <= ci_head(chn))
688 end = b_wrap(buf);
689
690 /* Compute the amount of bytes which can be written. */
691 return end - ci_tail(chn);
692}
693
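/* Illustrative example : in a 16384-byte buffer whose data do not wrap and
 * start at the origin, holding 100 output bytes followed by 400 input bytes,
 * ci_space_for_replace() returns 16384 - 500 = 15884, i.e. everything between
 * the end of the input data and the buffer's wrapping point.
 */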
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100694/* Allocates a buffer for channel <chn>, but only if it's guaranteed that it's
695 * not the last available buffer or it's the response buffer. Unless the buffer
696 * is the response buffer, an extra control is made so that we always keep
697 * <tune.buffers.reserved> buffers available after this allocation. Returns 0 in
698 * case of failure, non-zero otherwise.
699 *
700 * If no buffer is available, the requester, represented by the <wait> pointer,
701 * will be added in the list of objects waiting for an available buffer.
702 */
703static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *wait)
704{
705 int margin = 0;
706
707 if (!(chn->flags & CF_ISRESP))
708 margin = global.tune.reserved_bufs;
709
710 if (b_alloc_margin(&chn->buf, margin) != NULL)
711 return 1;
712
Emeric Bruna1dd2432017-06-21 15:42:52 +0200713 if (LIST_ISEMPTY(&wait->list)) {
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100714 HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100715 LIST_ADDQ(&buffer_wq, &wait->list);
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100716 HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
Emeric Bruna1dd2432017-06-21 15:42:52 +0200717 }
718
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100719 return 0;
720}
721
722/* Releases a possibly allocated buffer for channel <chn>. If it was not
723 * allocated, this function does nothing. Else the buffer is released and we try
724 * to wake up as many streams/applets as possible. */
725static inline void channel_release_buffer(struct channel *chn, struct buffer_wait *wait)
726{
Willy Tarreau0c7ed5d2018-07-10 09:53:31 +0200727 if (c_size(chn) && c_empty(chn)) {
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100728 b_free(&chn->buf);
Olivier Houchard673867c2018-05-25 16:58:52 +0200729 offer_buffers(wait->target, tasks_run_queue);
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100730 }
731}
732
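/* Usage sketch (hypothetical applet code, the <buffer_wait> field is only
 * illustrative) :
 *
 *	if (!channel_alloc_buffer(chn, &appctx->buffer_wait))
 *		return 0;    // queued on buffer_wq, we'll be woken up later
 *	...                  // fill chn->buf
 *	channel_release_buffer(chn, &appctx->buffer_wait);
 */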
Willy Tarreau319f7452015-01-14 20:32:59 +0100733/* Truncate any unread data in the channel's buffer, and disable forwarding.
734 * Outgoing data are left intact. This is mainly to be used to send error
735 * messages after existing data.
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200736 */
Willy Tarreau319f7452015-01-14 20:32:59 +0100737static inline void channel_truncate(struct channel *chn)
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200738{
Willy Tarreau3ee83442018-06-15 16:42:02 +0200739 if (!co_data(chn))
Willy Tarreau974ced62012-10-12 23:11:02 +0200740 return channel_erase(chn);
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200741
Willy Tarreau974ced62012-10-12 23:11:02 +0200742 chn->to_forward = 0;
Willy Tarreau3ee83442018-06-15 16:42:02 +0200743 if (!ci_data(chn))
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200744 return;
745
Willy Tarreaud54a8ce2018-06-29 18:42:02 +0200746 chn->buf->len = co_data(chn);
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200747}
748
Willy Tarreau4cf13002018-06-06 06:53:15 +0200749/* This function realigns a possibly wrapping channel buffer so that the input
750 * part is contiguous and starts at the beginning of the buffer and the output
751 * part ends at the end of the buffer. This provides the best conditions since
752 * it allows the largest inputs to be processed at once and ensures that once
753 * the output data leaves, the whole buffer is available at once.
754 */
Willy Tarreaufd8d42f2018-07-12 10:57:15 +0200755static inline void channel_slow_realign(struct channel *chn, char *swap)
Willy Tarreau4cf13002018-06-06 06:53:15 +0200756{
Willy Tarreaufd8d42f2018-07-12 10:57:15 +0200757 return b_slow_realign(chn->buf, swap, co_data(chn));
Willy Tarreau4cf13002018-06-06 06:53:15 +0200758}
759
Willy Tarreaubaaee002006-06-26 02:48:02 +0200760/*
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200761 * Advance the channel buffer's read pointer by <len> bytes. This is useful
762 * when data have been read directly from the buffer. It is illegal to call
763 * this function with <len> causing a wrapping at the end of the buffer. It's
764 * the caller's responsibility to ensure that <len> is never larger than
Willy Tarreau974ced62012-10-12 23:11:02 +0200765 * chn->o. Channel flag WRITE_PARTIAL is set.
Willy Tarreau2b7addc2009-08-31 07:37:22 +0200766 */
Willy Tarreau06d80a92017-10-19 14:32:15 +0200767static inline void co_skip(struct channel *chn, int len)
Willy Tarreau2b7addc2009-08-31 07:37:22 +0200768{
Willy Tarreaue5f12ce2018-06-15 10:28:05 +0200769 b_del(chn->buf, len);
Willy Tarreauabed1e72018-07-09 11:39:49 +0200770 chn->buf->output -= len;
Willy Tarreau0c7ed5d2018-07-10 09:53:31 +0200771 c_realign_if_empty(chn);
Willy Tarreau2b7addc2009-08-31 07:37:22 +0200772
Willy Tarreaufb0e9202009-09-23 23:47:55 +0200773 /* notify that some data was written to the SI from the buffer */
Christopher Fauletc5a9d5b2017-11-09 09:36:43 +0100774 chn->flags |= CF_WRITE_PARTIAL | CF_WRITE_EVENT;
Willy Tarreau2b7addc2009-08-31 07:37:22 +0200775}
Willy Tarreaubaaee002006-06-26 02:48:02 +0200776
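/* Consumer-side sketch : output data may be read in place from co_head() and
 * must then be acknowledged with co_skip() so the room can be reused. The
 * send_somewhere() consumer below is hypothetical :
 *
 *	size_t len = b_contig_data(chn->buf, 0);  // contiguous bytes at the head
 *	if (len > co_data(chn))
 *		len = co_data(chn);               // only consume output data
 *	if (len) {
 *		send_somewhere(co_head(chn), len);
 *		co_skip(chn, len);
 *	}
 */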
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200777/* Tries to copy chunk <chunk> into the channel's buffer after length controls.
Willy Tarreau974ced62012-10-12 23:11:02 +0200778 * The chn->o and to_forward pointers are updated. If the channel's input is
Willy Tarreau74b08c92010-09-08 17:04:31 +0200779 * closed, -2 is returned. If the block is too large for this buffer, -3 is
780 * returned. If there is not enough room left in the buffer, -1 is returned.
781 * Otherwise the number of bytes copied is returned (0 being a valid number).
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200782 * Channel flag READ_PARTIAL is updated if some data can be transferred. The
Willy Tarreauf941cf22012-08-27 20:53:34 +0200783 * chunk's length is updated with the number of bytes sent.
Willy Tarreauaeac3192009-08-31 08:09:57 +0200784 */
Willy Tarreau06d80a92017-10-19 14:32:15 +0200785static inline int ci_putchk(struct channel *chn, struct chunk *chunk)
Willy Tarreauaeac3192009-08-31 08:09:57 +0200786{
787 int ret;
788
Willy Tarreau06d80a92017-10-19 14:32:15 +0200789 ret = ci_putblk(chn, chunk->str, chunk->len);
Willy Tarreau74b08c92010-09-08 17:04:31 +0200790 if (ret > 0)
791 chunk->len -= ret;
Willy Tarreauaeac3192009-08-31 08:09:57 +0200792 return ret;
793}
794
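/* Producer-side sketch : push a pre-formatted message into the channel's input
 * using the chunk API from common/chunk.h; -1 simply means "buffer full, retry
 * once some room has been made" :
 *
 *	struct chunk *msg = get_trash_chunk();
 *	chunk_printf(msg, "hello\n");
 *	if (ci_putchk(chn, msg) == -1)
 *		return 0;    // come back later once some data left the buffer
 */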
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200795/* Tries to copy string <str> at once into the channel's buffer after length
Willy Tarreau974ced62012-10-12 23:11:02 +0200796 * controls. The chn->o and to_forward pointers are updated. If the channel's
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200797 * input is closed, -2 is returned. If the block is too large for this buffer,
798 * -3 is returned. If there is not enough room left in the buffer, -1 is
799 * returned. Otherwise the number of bytes copied is returned (0 being a valid
800 * number). Channel flag READ_PARTIAL is updated if some data can be
801 * transferred.
Willy Tarreau74b08c92010-09-08 17:04:31 +0200802 */
Willy Tarreau06d80a92017-10-19 14:32:15 +0200803static inline int ci_putstr(struct channel *chn, const char *str)
Willy Tarreau74b08c92010-09-08 17:04:31 +0200804{
Willy Tarreau06d80a92017-10-19 14:32:15 +0200805 return ci_putblk(chn, str, strlen(str));
Willy Tarreau74b08c92010-09-08 17:04:31 +0200806}
807
808/*
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200809 * Return one char from the channel's buffer. If the buffer is empty and the
810 * channel is closed, return -2. If the buffer is just empty, return -1. The
Willy Tarreau06d80a92017-10-19 14:32:15 +0200811 * buffer's pointer is not advanced, it's up to the caller to call co_skip(chn,
Willy Tarreau8263d2b2012-08-28 00:06:31 +0200812 * 1) when it has consumed the char. Also note that this function respects the
Willy Tarreau974ced62012-10-12 23:11:02 +0200813 * chn->o limit.
Willy Tarreau74b08c92010-09-08 17:04:31 +0200814 */
Willy Tarreau06d80a92017-10-19 14:32:15 +0200815static inline int co_getchr(struct channel *chn)
Willy Tarreau74b08c92010-09-08 17:04:31 +0200816{
817 /* closed or empty + imminent close = -2; empty = -1 */
Willy Tarreau974ced62012-10-12 23:11:02 +0200818 if (unlikely((chn->flags & CF_SHUTW) || channel_is_empty(chn))) {
819 if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW))
Willy Tarreau74b08c92010-09-08 17:04:31 +0200820 return -2;
821 return -1;
822 }
Willy Tarreau50227f92018-06-15 15:18:17 +0200823 return *co_head(chn);
Willy Tarreau74b08c92010-09-08 17:04:31 +0200824}
825
Willy Tarreaubaaee002006-06-26 02:48:02 +0200826
Willy Tarreauc7e42382012-08-24 19:22:53 +0200827#endif /* _PROTO_CHANNEL_H */
Willy Tarreaubaaee002006-06-26 02:48:02 +0200828
829/*
830 * Local variables:
831 * c-indent-level: 8
832 * c-basic-offset: 8
833 * End:
834 */