/*
 * HTTP/1 mux-demux for connections
 *
 * Copyright 2018 Christopher Faulet <cfaulet@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <common/cfgparse.h>
#include <common/config.h>

#include <types/pipe.h>
#include <types/proxy.h>
#include <types/session.h>

#include <proto/connection.h>
#include <proto/h1.h>
#include <proto/log.h>
#include <proto/stream.h>
#include <proto/stream_interface.h>

/*
 * H1 Connection flags (32 bits)
 */
#define H1C_F_NONE           0x00000000

/* Flags indicating why writing output data is blocked */
#define H1C_F_OUT_ALLOC      0x00000001 /* mux is blocked on lack of output buffer */
#define H1C_F_OUT_FULL       0x00000002 /* mux is blocked on output buffer full */
/* 0x00000004 - 0x00000008 unused */

/* Flags indicating why reading input data is blocked. */
#define H1C_F_IN_ALLOC       0x00000010 /* mux is blocked on lack of input buffer */
#define H1C_F_IN_FULL        0x00000020 /* mux is blocked on input buffer full */
/* 0x00000040 - 0x00000080 unused */

/* Flags indicating why parsing data is blocked */
#define H1C_F_RX_ALLOC       0x00000100 /* mux is blocked on lack of rx buffer */
#define H1C_F_RX_FULL        0x00000200 /* mux is blocked on rx buffer full */
/* 0x00000400 - 0x00000800 unused */

#define H1C_F_CS_ERROR       0x00001000 /* connection must be closed ASAP because an error occurred */
#define H1C_F_CS_SHUTW_NOW   0x00002000 /* connection must be shut down for writes ASAP */
#define H1C_F_CS_SHUTW       0x00004000 /* connection is already shut down */

#define H1C_F_WAIT_NEXT_REQ  0x00010000 /* waiting for the next request to start, use keep-alive timeout */

/*
 * H1 Stream flags (32 bits)
 */
#define H1S_F_NONE           0x00000000
#define H1S_F_ERROR          0x00000001 /* An error occurred on the H1 stream */
#define H1S_F_REQ_ERROR      0x00000002 /* An error occurred during the request parsing/xfer */
#define H1S_F_RES_ERROR      0x00000004 /* An error occurred during the response parsing/xfer */
#define H1S_F_MSG_XFERED     0x00000008 /* current message was transferred to the data layer */
#define H1S_F_WANT_KAL       0x00000010
#define H1S_F_WANT_TUN       0x00000020
#define H1S_F_WANT_CLO       0x00000040
#define H1S_F_WANT_MSK       0x00000070
#define H1S_F_NOT_FIRST      0x00000080 /* The H1 stream is not the first one */
#define H1S_F_BUF_FLUSH      0x00000100 /* Flush input buffers (ibuf and rxbuf) and don't read more data */
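
/* Note: the H1S_F_WANT_* values above are ordered numerically (KAL 0x10 <
 * TUN 0x20 < CLO 0x40) so that the connection-mode merging code below can
 * upgrade to a stricter mode with a simple comparison:
 *
 *	if ((h1s->flags & H1S_F_WANT_MSK) < flag)
 *		h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | flag;
 *
 * i.e. "close" always wins over "tunnel", which wins over "keep-alive".
 */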


/* H1 connection descriptor */
struct h1c {
	struct connection *conn;
	struct proxy *px;
	uint32_t flags;                  /* Connection flags: H1C_F_* */

	struct buffer ibuf;              /* Input buffer to store data before parsing */
	struct buffer obuf;              /* Output buffer to store data after reformatting */

	struct buffer_wait buf_wait;     /* Wait list for buffer allocation */
	struct wait_event wait_event;    /* To be used if we're waiting for I/Os */

	struct h1s *h1s;                 /* H1 stream descriptor */
	struct task *task;               /* timeout management task */

	int idle_exp;                    /* expiration date for idle connections, in ticks (client-side only) */
	int http_exp;                    /* expiration date for HTTP headers parsing (client-side only) */
};

/* H1 stream descriptor */
struct h1s {
	struct h1c *h1c;
	struct conn_stream *cs;
	uint32_t flags;                  /* Stream flags: H1S_F_* */

	struct buffer rxbuf;             /* receive buffer, always valid (buf_empty or real buffer) */

	struct wait_event *recv_wait;    /* Address of the wait_event the associated conn_stream is waiting on */
	struct wait_event *send_wait;    /* Address of the wait_event the associated conn_stream is waiting on */

	struct h1m req;
	struct h1m res;

	enum http_meth_t meth;           /* HTTP request method */
	uint16_t status;                 /* HTTP response status */
};

/* the h1c and h1s pools */
static struct pool_head *pool_head_h1c;
static struct pool_head *pool_head_h1s;

static struct task *h1_timeout_task(struct task *t, void *context, unsigned short state);
static int h1_recv(struct h1c *h1c);
static int h1_send(struct h1c *h1c);
static int h1_process(struct h1c *h1c);
static struct task *h1_io_cb(struct task *t, void *ctx, unsigned short state);
static void h1_shutw_conn(struct connection *conn);

/*****************************************************/
/* functions below are for dynamic buffer management */
/*****************************************************/
/*
 * Indicates whether or not we may call the h1_recv() function to attempt
 * to receive data into the buffer and/or parse pending data. The condition
 * is a bit complex due to some API limits for now. The rules are the
 * following :
 *   - if an error or a shutdown was detected on the connection and the buffer
 *     is empty, we must not attempt to receive
 *   - if the input buffer failed to be allocated, we must not try to receive
 *     and we know there is nothing pending
 *   - if no flag indicates a blocking condition, we may attempt to receive,
 *     regardless of whether the input buffer is full or not, so that only the
 *     receiving part decides whether or not to block. This is needed because
 *     the connection API indeed prevents us from re-enabling receipt that is
 *     already enabled in a polled state, so we must always immediately stop as
 *     soon as the mux can't proceed so as never to hit an end of read with data
 *     pending in the buffers.
 *   - otherwise we must not attempt to receive
 */
static inline int h1_recv_allowed(const struct h1c *h1c)
{
	if (b_data(&h1c->ibuf) == 0 &&
	    (h1c->flags & (H1C_F_CS_ERROR|H1C_F_CS_SHUTW) ||
	     h1c->conn->flags & CO_FL_ERROR ||
	     conn_xprt_read0_pending(h1c->conn)))
		return 0;

	if (!(h1c->flags & (H1C_F_IN_ALLOC|H1C_F_IN_FULL)))
		return 1;

	return 0;
}

/*
 * Tries to grab a buffer and to re-enable processing on mux <target>. The h1
 * flags are used to figure out which buffer was requested. It returns 1 if the
 * allocation succeeds, in which case the connection is woken up, or 0 if it's
 * impossible to wake up and we prefer to be woken up later.
 */
static int h1_buf_available(void *target)
{
	struct h1c *h1c = target;

	if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc_margin(&h1c->ibuf, 0)) {
		h1c->flags &= ~H1C_F_IN_ALLOC;
		if (h1_recv_allowed(h1c))
			tasklet_wakeup(h1c->wait_event.task);
		return 1;
	}

	if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc_margin(&h1c->obuf, 0)) {
		h1c->flags &= ~H1C_F_OUT_ALLOC;
		tasklet_wakeup(h1c->wait_event.task);
		return 1;
	}

	if ((h1c->flags & H1C_F_RX_ALLOC) && h1c->h1s && b_alloc_margin(&h1c->h1s->rxbuf, 0)) {
		h1c->flags &= ~H1C_F_RX_ALLOC;
		if (h1_recv_allowed(h1c))
			tasklet_wakeup(h1c->wait_event.task);
		return 1;
	}

	return 0;
}

/*
 * Allocate a buffer. If it fails, it adds the mux to the buffer wait queue.
 */
static inline struct buffer *h1_get_buf(struct h1c *h1c, struct buffer *bptr)
{
	struct buffer *buf = NULL;

	if (likely(LIST_ISEMPTY(&h1c->buf_wait.list)) &&
	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
		h1c->buf_wait.target = h1c;
		h1c->buf_wait.wakeup_cb = h1_buf_available;
		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		LIST_ADDQ(&buffer_wq, &h1c->buf_wait.list);
		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		__conn_xprt_stop_recv(h1c->conn);
	}
	return buf;
}

/*
 * Release a buffer, if any, and try to wake up entities waiting in the buffer
 * wait queue.
 */
static inline void h1_release_buf(struct h1c *h1c, struct buffer *bptr)
{
	if (bptr->size) {
		b_free(bptr);
		offer_buffers(h1c->buf_wait.target, tasks_run_queue);
	}
}

static int h1_avail_streams(struct connection *conn)
{
	struct h1c *h1c = conn->mux_ctx;

	return h1c->h1s ? 0 : 1;
}
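
/* (HTTP/1 carries a single transaction at a time: at most one h1s may exist
 * per connection, so a free slot is reported only when no stream is
 * currently attached.)
 */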


/*****************************************************************/
/* functions below are dedicated to the mux setup and management */
/*****************************************************************/
static struct h1s *h1s_create(struct h1c *h1c, struct conn_stream *cs)
{
	struct h1s *h1s;

	h1s = pool_alloc(pool_head_h1s);
	if (!h1s)
		goto end;

	h1s->h1c = h1c;
	h1c->h1s = h1s;

	h1s->cs    = NULL;
	h1s->rxbuf = BUF_NULL;
	h1s->flags = H1S_F_NONE;

	h1s->recv_wait = NULL;
	h1s->send_wait = NULL;

	h1m_init_req(&h1s->req);
	h1m_init_res(&h1s->res);

	h1s->status = 0;
	h1s->meth   = HTTP_METH_OTHER;

	if (!conn_is_back(h1c->conn)) {
		if (h1c->px->options2 & PR_O2_REQBUG_OK)
			h1s->req.err_pos = -1;

		if (h1c->flags & H1C_F_WAIT_NEXT_REQ)
			h1s->flags |= H1S_F_NOT_FIRST;
		h1c->flags &= ~H1C_F_WAIT_NEXT_REQ;
		h1c->http_exp = tick_add_ifset(now_ms, h1c->px->timeout.httpreq);
	}
	else {
		if (h1c->px->options2 & PR_O2_RSPBUG_OK)
			h1s->res.err_pos = -1;
	}

	/* If a conn_stream already exists, attach it to this H1S */
	if (cs) {
		cs->ctx = h1s;
		h1s->cs = cs;
	}
  end:
	return h1s;
}

static void h1s_destroy(struct h1s *h1s)
{
	if (h1s) {
		struct h1c *h1c = h1s->h1c;

		h1c->h1s = NULL;
		h1c->flags &= ~(H1C_F_RX_FULL|H1C_F_RX_ALLOC);

		if (h1s->recv_wait != NULL)
			h1s->recv_wait->wait_reason &= ~SUB_CAN_RECV;
		if (h1s->send_wait != NULL)
			h1s->send_wait->wait_reason &= ~SUB_CAN_SEND;

		if (!conn_is_back(h1c->conn)) {
			h1c->flags |= H1C_F_WAIT_NEXT_REQ;
			h1c->http_exp = tick_add_ifset(now_ms, h1c->px->timeout.httpka);
		}

		h1_release_buf(h1c, &h1s->rxbuf);
		cs_free(h1s->cs);
		pool_free(pool_head_h1s, h1s);
	}
}

static struct conn_stream *h1s_new_cs(struct h1s *h1s)
{
	struct conn_stream *cs;

	cs = cs_new(h1s->h1c->conn);
	if (!cs)
		goto err;
	h1s->cs = cs;
	cs->ctx = h1s;

	if (h1s->flags & H1S_F_NOT_FIRST)
		cs->flags |= CS_FL_NOT_FIRST;

	if (stream_create_from_cs(cs) < 0)
		goto err;
	return cs;

  err:
	cs_free(cs);
	h1s->cs = NULL;
	return NULL;
}

/*
 * Initialize the mux once it's attached. It is expected that conn->mux_ctx
 * points to the existing conn_stream (for outgoing connections) or NULL (for
 * incoming ones). Returns < 0 on error.
 */
static int h1_init(struct connection *conn, struct proxy *proxy)
{
	struct h1c *h1c;
	struct task *t = NULL;

	h1c = pool_alloc(pool_head_h1c);
	if (!h1c)
		goto fail_h1c;
	h1c->conn = conn;
	h1c->px   = proxy;

	h1c->flags = H1C_F_NONE;
	h1c->ibuf  = BUF_NULL;
	h1c->obuf  = BUF_NULL;
	h1c->h1s   = NULL;

	t = task_new(tid_bit);
	if (!t)
		goto fail;
	h1c->task  = t;
	t->process = h1_timeout_task;
	t->context = h1c;
	t->expire  = TICK_ETERNITY;

	h1c->idle_exp = TICK_ETERNITY;
	h1c->http_exp = TICK_ETERNITY;

	LIST_INIT(&h1c->buf_wait.list);
	h1c->wait_event.task = tasklet_new();
	if (!h1c->wait_event.task)
		goto fail;
	h1c->wait_event.task->process = h1_io_cb;
	h1c->wait_event.task->context = h1c;
	h1c->wait_event.wait_reason   = 0;

	/* Always create a new H1S */
	if (!h1s_create(h1c, conn->mux_ctx))
		goto fail;

	conn->mux_ctx = h1c;
	task_wakeup(t, TASK_WOKEN_INIT);

	/* Try to read, if nothing is available yet we'll just subscribe */
	if (h1_recv(h1c))
		h1_process(h1c);

	/* mux->wake will be called soon to complete the operation */
	return 0;

  fail:
	if (t)
		task_free(t);
	if (h1c && h1c->wait_event.task)
		tasklet_free(h1c->wait_event.task);
	pool_free(pool_head_h1c, h1c);
  fail_h1c:
	return -1;
}


/* release function for a connection. This one should be called to free all
 * resources allocated to the mux.
 */
static void h1_release(struct connection *conn)
{
	struct h1c *h1c = conn->mux_ctx;

	LIST_DEL(&conn->list);

	if (h1c) {
		if (!LIST_ISEMPTY(&h1c->buf_wait.list)) {
			HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
			LIST_DEL(&h1c->buf_wait.list);
			LIST_INIT(&h1c->buf_wait.list);
			HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		}

		h1_release_buf(h1c, &h1c->ibuf);
		h1_release_buf(h1c, &h1c->obuf);

		if (h1c->task) {
			h1c->task->context = NULL;
			task_wakeup(h1c->task, TASK_WOKEN_OTHER);
			h1c->task = NULL;
		}
		if (h1c->wait_event.task)
			tasklet_free(h1c->wait_event.task);

		h1s_destroy(h1c->h1s);
		if (h1c->wait_event.wait_reason != 0)
			conn->xprt->unsubscribe(conn, h1c->wait_event.wait_reason,
						&h1c->wait_event);
		pool_free(pool_head_h1c, h1c);
	}

	conn->mux = NULL;
	conn->mux_ctx = NULL;

	conn_stop_tracking(conn);
	conn_full_close(conn);
	if (conn->destroy_cb)
		conn->destroy_cb(conn);
	conn_free(conn);
}

/******************************************************/
/* functions below are for the H1 protocol processing */
/******************************************************/
/*
 * Set the appropriate error message. It first tries to get it from the proxy
 * if it exists. Otherwise, it falls back on the default one.
 */
static void h1_cpy_error_message(struct h1c *h1c, struct buffer *dst, int status)
{
	const int msgnum = http_get_status_idx(status);
	const struct buffer *err;

	err = (h1c->px->errmsg[msgnum].area
	       ? &h1c->px->errmsg[msgnum]
	       : &http_err_chunks[msgnum]);
	b_putblk(dst, b_head(err), b_data(err));
}

/* Remove all "Connection:" headers from the buffer <buf>, using the array of
 * parsed headers <hdrs>. It returns the number of bytes removed. This should
 * happen just after the headers parsing, so the buffer should not wrap. In
 * the end, all entries of <hdrs> remain valid.
 */
static int h1_remove_conn_hdrs(struct h1m *h1m, struct http_hdr *hdrs, struct buffer *buf)
{
	int src, dst, delta;

	delta = 0;
	for (src = 0, dst = 0; hdrs[src].n.len; src++) {

		if (hdrs[src].n.ptr >= buf->area && hdrs[src].n.ptr < buf->area + buf->size)
			hdrs[src].n.ptr += delta;
		hdrs[src].v.ptr += delta;

		if (!isteqi(hdrs[src].n, ist("Connection"))) {
			if (src != dst)
				hdrs[dst] = hdrs[src];
			dst++;
			continue;
		}
		delta += b_rep_blk(buf, hdrs[src].n.ptr, hdrs[src+1].n.ptr+delta, NULL, 0);
	}

	/* Don't forget to copy EOH */
	hdrs[src].n.ptr += delta;
	hdrs[dst] = hdrs[src];

	h1m->flags &= ~(H1_MF_CONN_KAL|H1_MF_CONN_CLO);
	return delta;
}
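
/* (A sketch of the mechanics above, assuming b_rep_blk() returns the size
 * variation it applied to the buffer: each "Connection:" header is erased by
 * replacing the block running from its name up to the next header's name
 * with nothing, so <delta> accumulates a negative offset which is applied to
 * the pointers of every following header before it is kept in <hdrs>. E.g.
 * dropping "Connection: close\r\n" (19 bytes) shifts all later headers,
 * including the final EOH entry, back by 19 bytes.)
 */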

/* Add a "Connection:" header into the buffer <buf>. If <type> is 0, the header
 * is set to "keep-alive", otherwise it is set to "close". It returns the number
 * of bytes added. This should happen just after the headers parsing, so the
 * buffer should not wrap. In the end, all entries of <hdrs> remain valid.
 */
static int h1_add_conn_hdrs(struct h1m *h1m, struct http_hdr *hdrs, struct buffer *buf,
			    int type)
{
	const char *conn_hdr;
	size_t nlen, vlen;
	int i, delta;

	if (type == 0) { /* keep-alive */
		conn_hdr = "Connection: keep-alive\r\n";
		nlen = 10; vlen = 10;
	}
	else { /* close */
		conn_hdr = "Connection: close\r\n";
		nlen = 10; vlen = 5;
	}

	/* Find EOH */
	for (i = 0; hdrs[i].n.len; i++);

	/* Insert the "Connection: " header */
	delta = b_rep_blk(buf, hdrs[i].n.ptr, hdrs[i].n.ptr, conn_hdr, nlen+vlen+4);

	/* Update the header list */
	http_set_hdr(&hdrs[i], ist2(hdrs[i].n.ptr, nlen), ist2(hdrs[i].n.ptr+nlen+2, vlen));
	http_set_hdr(&hdrs[i+1], ist2(hdrs[i].n.ptr+delta, 0), ist(""));

	return delta;
}
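
/* (Arithmetic check on the insertion above: nlen+vlen+4 covers the whole
 * header line, the extra 4 bytes being the ": " separator and the trailing
 * CRLF: 10+10+4 = 24 bytes for "Connection: keep-alive\r\n" and 10+5+4 = 19
 * for "Connection: close\r\n"; the value starts at nlen+2, right past ": ".)
 */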

/* Deduce the connection mode of the client connection, depending on the
 * configuration and the H1 message flags. This function is called twice, the
 * first time when the request is parsed and the second time when the response
 * is parsed.
 */
static void h1_set_cli_conn_mode(struct h1s *h1s, struct h1m *h1m)
{
	struct proxy *fe = h1s->h1c->px;
	int flag = H1S_F_WANT_KAL; /* For client connection: server-close == keepalive */

	/* Tunnel mode can only be set on the frontend */
	if ((fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_TUN)
		flag = H1S_F_WANT_TUN;
	else if ((fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_CLO)
		flag = H1S_F_WANT_CLO;

	/* flags order: CLO > SCL > TUN > KAL */
	if ((h1s->flags & H1S_F_WANT_MSK) < flag)
		h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | flag;

	if (h1m->flags & H1_MF_RESP) {
		/* Either we've established an explicit tunnel, or we're
		 * switching the protocol. In both cases, we're very unlikely to
		 * understand the next protocols. We have to switch to tunnel
		 * mode, so that we transfer the request and responses then let
		 * this protocol pass unmodified. When we later implement
		 * specific parsers for such protocols, we'll want to check the
		 * Upgrade header which contains information about that protocol
		 * for responses with status 101 (eg: see RFC2817 about TLS).
		 */
		if ((h1s->meth == HTTP_METH_CONNECT && h1s->status == 200) ||
		    h1s->status == 101)
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_TUN;
		else if (!(h1m->flags & H1_MF_XFER_LEN)) /* no length known => close */
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
	}
	else {
		if (h1s->flags & H1S_F_WANT_KAL &&
		    (!(h1m->flags & (H1_MF_VER_11|H1_MF_CONN_KAL)) || /* no KA in HTTP/1.0 */
		     h1m->flags & H1_MF_CONN_CLO))                    /* explicit close */
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
	}

	/* If KAL, check if the frontend is stopping. If yes, switch to CLO mode */
	if (h1s->flags & H1S_F_WANT_KAL && fe->state == PR_STSTOPPED)
		h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
}

/* Deduce the connection mode of the server connection, depending on the
 * configuration and the H1 message flags. This function is called twice, the
 * first time when the request is parsed and the second time when the response
 * is parsed.
 */
static void h1_set_srv_conn_mode(struct h1s *h1s, struct h1m *h1m)
{
	struct proxy *be = h1s->h1c->px;
	struct proxy *fe = strm_fe(si_strm(h1s->cs->data));
	int flag = H1S_F_WANT_KAL;

	/* Tunnel mode can only be set on the frontend */
	if ((fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_TUN)
		flag = H1S_F_WANT_TUN;

	/* For the server connection: server-close == httpclose */
	if ((fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_SCL ||
	    (be->options & PR_O_HTTP_MODE) == PR_O_HTTP_SCL ||
	    (fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_CLO ||
	    (be->options & PR_O_HTTP_MODE) == PR_O_HTTP_CLO)
		flag = H1S_F_WANT_CLO;

	/* flags order: CLO > SCL > TUN > KAL */
	if ((h1s->flags & H1S_F_WANT_MSK) < flag)
		h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | flag;

	if (h1m->flags & H1_MF_RESP) {
		/* Either we've established an explicit tunnel, or we're
		 * switching the protocol. In both cases, we're very unlikely to
		 * understand the next protocols. We have to switch to tunnel
		 * mode, so that we transfer the request and responses then let
		 * this protocol pass unmodified. When we later implement
		 * specific parsers for such protocols, we'll want to check the
		 * Upgrade header which contains information about that protocol
		 * for responses with status 101 (eg: see RFC2817 about TLS).
		 */
		if ((h1s->meth == HTTP_METH_CONNECT && h1s->status == 200) ||
		    h1s->status == 101)
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_TUN;
		else if (!(h1m->flags & H1_MF_XFER_LEN)) /* no length known => close */
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
		else if (h1s->flags & H1S_F_WANT_KAL &&
			 (!(h1m->flags & (H1_MF_VER_11|H1_MF_CONN_KAL)) || /* no KA in HTTP/1.0 */
			  h1m->flags & H1_MF_CONN_CLO))                    /* explicit close */
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
	}

	/* If KAL, check if the backend is stopping. If yes, switch to CLO mode */
	if (h1s->flags & H1S_F_WANT_KAL && be->state == PR_STSTOPPED)
		h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;

	/* TODO: For now on the server-side, we disable keep-alive */
	if (h1s->flags & H1S_F_WANT_KAL)
		h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
}

static int h1_update_req_conn_hdr(struct h1s *h1s, struct h1m *h1m,
				  struct http_hdr *hdrs, struct buffer *buf)
{
	struct proxy *px = h1s->h1c->px;
	int ret = 0;

	/* Don't update "Connection:" header in TUNNEL mode or if the "Upgrade"
	 * token is found
	 */
	if (h1s->flags & H1S_F_WANT_TUN || h1m->flags & H1_MF_CONN_UPG)
		goto end;

	if (h1s->flags & H1S_F_WANT_KAL || px->options2 & PR_O2_FAKE_KA) {
		if (h1m->flags & H1_MF_CONN_CLO)
			ret += h1_remove_conn_hdrs(h1m, hdrs, buf);
		if (!(h1m->flags & (H1_MF_VER_11|H1_MF_CONN_KAL)))
			ret += h1_add_conn_hdrs(h1m, hdrs, buf, 0);
	}
	else { /* H1S_F_WANT_CLO && !PR_O2_FAKE_KA */
		if (h1m->flags & H1_MF_CONN_KAL)
			ret += h1_remove_conn_hdrs(h1m, hdrs, buf);
		if ((h1m->flags & (H1_MF_VER_11|H1_MF_CONN_CLO)) == H1_MF_VER_11)
			ret += h1_add_conn_hdrs(h1m, hdrs, buf, 1);
	}

  end:
	return ret;
}

static int h1_update_res_conn_hdr(struct h1s *h1s, struct h1m *h1m,
				  struct http_hdr *hdrs, struct buffer *buf)
{
	int ret = 0;

	/* Don't update "Connection:" header in TUNNEL mode or if the "Upgrade"
	 * token is found
	 */
	if (h1s->flags & H1S_F_WANT_TUN || h1m->flags & H1_MF_CONN_UPG)
		goto end;

	if (h1s->flags & H1S_F_WANT_KAL) {
		if (h1m->flags & H1_MF_CONN_CLO)
			ret += h1_remove_conn_hdrs(h1m, hdrs, buf);
		if (!(h1m->flags & (H1_MF_VER_11|H1_MF_CONN_KAL)))
			ret += h1_add_conn_hdrs(h1m, hdrs, buf, 0);
	}
	else { /* H1S_F_WANT_CLO */
		if (h1m->flags & H1_MF_CONN_KAL)
			ret += h1_remove_conn_hdrs(h1m, hdrs, buf);
		if ((h1m->flags & (H1_MF_VER_11|H1_MF_CONN_CLO)) == H1_MF_VER_11)
			ret += h1_add_conn_hdrs(h1m, hdrs, buf, 1);
	}

  end:
	return ret;
}

/*
 * Parse HTTP/1 headers. It returns the number of bytes parsed if > 0, or 0 if
 * it couldn't proceed. Parsing errors are reported by setting the H1S_F_*_ERROR
 * flag and filling the h1m->err_pos and h1m->err_state fields. This function is
 * responsible for updating the parser state <h1m>.
 */
static size_t h1_process_headers(struct h1s *h1s, struct h1m *h1m,
				 struct buffer *buf, size_t *ofs, size_t max)
{
	struct http_hdr hdrs[MAX_HTTP_HDR];
	union h1_sl sl;
	int ret = 0;

	/* Realign the input buffer if necessary */
	if (b_head(buf) + b_data(buf) > b_wrap(buf))
		b_slow_realign(buf, trash.area, 0);

	ret = h1_headers_to_hdr_list(b_peek(buf, *ofs), b_peek(buf, *ofs) + max,
				     hdrs, sizeof(hdrs)/sizeof(hdrs[0]), h1m, &sl);
	if (ret <= 0) {
		/* Incomplete or invalid message. If the buffer is full, it's an
		 * error because headers are too large to be handled by the
		 * parser. */
		if (ret < 0 || (!ret && b_full(buf)))
			goto error;
		goto end;
	}

	/* message headers fully parsed, do some checks to prepare the body
	 * parsing.
	 */

	/* Be sure to keep some space to do headers rewriting */
	if (ret > (b_size(buf) - global.tune.maxrewrite))
		goto error;

	/* Save the request's method or the response's status and check if the
	 * body length is known */
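	/* (These rules follow RFC 7230 §3.3.3: a request without Content-Length
	 * nor chunked encoding has a zero-length body, while responses to HEAD,
	 * 1xx, 204 and 304 responses, and 2xx replies to CONNECT never carry a
	 * body; a response without any known length runs until the connection
	 * closes, hence the TUNNEL state below.)
	 */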
	if (!(h1m->flags & H1_MF_RESP)) {
		h1s->meth = sl.rq.meth;
		/* Requests always have a known length */
		h1m->flags |= H1_MF_XFER_LEN;
		if (!(h1m->flags & H1_MF_CHNK) && !h1m->body_len)
			h1m->state = H1_MSG_DONE;
	}
	else {
		h1s->status = sl.st.status;

		if ((h1s->meth == HTTP_METH_HEAD) ||
		    (h1s->status >= 100 && h1s->status < 200) ||
		    (h1s->status == 204) || (h1s->status == 304) ||
		    (h1s->meth == HTTP_METH_CONNECT && h1s->status == 200)) {
			h1m->flags &= ~(H1_MF_CLEN|H1_MF_CHNK);
			h1m->flags |= H1_MF_XFER_LEN;
			h1m->curr_len = h1m->body_len = 0;
			h1m->state = H1_MSG_DONE;
		}
		else if (h1m->flags & (H1_MF_CLEN|H1_MF_CHNK)) {
			h1m->flags |= H1_MF_XFER_LEN;
			if ((h1m->flags & H1_MF_CLEN) && !h1m->body_len)
				h1m->state = H1_MSG_DONE;
		}
		else
			h1m->state = H1_MSG_TUNNEL;
	}

	*ofs += ret;
	if (!conn_is_back(h1s->h1c->conn)) {
		h1_set_cli_conn_mode(h1s, h1m);
		if (h1m->flags & H1_MF_RESP)
			*ofs += h1_update_res_conn_hdr(h1s, h1m, hdrs, buf);
	}
	else {
		h1_set_srv_conn_mode(h1s, h1m);
		if (!(h1m->flags & H1_MF_RESP))
			*ofs += h1_update_req_conn_hdr(h1s, h1m, hdrs, buf);
	}
  end:
	return ret;

  error:
	h1s->flags |= (!(h1m->flags & H1_MF_RESP) ? H1S_F_REQ_ERROR : H1S_F_RES_ERROR);
	h1m->err_state = h1m->state;
	h1m->err_pos = h1m->next;
	ret = 0;
	goto end;
}

/*
 * Parse HTTP/1 body. It returns the number of bytes parsed if > 0, or 0 if it
 * couldn't proceed. Parsing errors are reported by setting the H1S_F_*_ERROR
 * flag and filling the h1m->err_pos and h1m->err_state fields. This function is
 * responsible for updating the parser state <h1m>.
 */
static size_t h1_process_data(struct h1s *h1s, struct h1m *h1m,
			      struct buffer *buf, size_t *ofs, size_t max)
{
	size_t total = 0;
	int ret = 0;

	if (h1m->flags & H1_MF_XFER_LEN) {
		if (h1m->flags & H1_MF_CLEN) {
			/* content-length: read only h1m->body_len */
			ret = max;
			if ((uint64_t)ret > h1m->curr_len)
				ret = h1m->curr_len;
			h1m->curr_len -= ret;
			*ofs += ret;
			total += ret;
			if (!h1m->curr_len)
				h1m->state = H1_MSG_DONE;
		}
		else if (h1m->flags & H1_MF_CHNK) {
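			/* (Chunk parsing below cycles CHUNK_CRLF -> CHUNK_SIZE
			 * -> DATA and back for every chunk, until a zero-sized
			 * chunk switches the parser to the TRAILERS state.)
			 */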
		  new_chunk:
			/* te:chunked : parse chunks */
			if (h1m->state == H1_MSG_CHUNK_CRLF) {
				ret = h1_skip_chunk_crlf(buf, *ofs, *ofs + max);
				if (ret <= 0)
					goto end;
				max -= ret;
				*ofs += ret;
				total += ret;
				h1m->state = H1_MSG_CHUNK_SIZE;
			}

			if (h1m->state == H1_MSG_CHUNK_SIZE) {
				unsigned int chksz;

				ret = h1_parse_chunk_size(buf, *ofs, *ofs + max, &chksz);
				if (ret <= 0)
					goto end;
				h1m->curr_len  = chksz;
				h1m->body_len += chksz;
				max -= ret;
				*ofs += ret;
				total += ret;
				h1m->state = (!chksz ? H1_MSG_TRAILERS : H1_MSG_DATA);
			}

			if (h1m->state == H1_MSG_DATA) {
				ret = max;
				if (!ret)
					goto end;
				if ((uint64_t)ret > h1m->curr_len)
					ret = h1m->curr_len;
				h1m->curr_len -= ret;
				max -= ret;
				*ofs += ret;
				total += ret;
				if (h1m->curr_len)
					goto end;
				h1m->state = H1_MSG_CHUNK_CRLF;
				goto new_chunk;
			}

			if (h1m->state == H1_MSG_TRAILERS) {
				ret = h1_measure_trailers(buf, *ofs, *ofs + max);
				if (ret <= 0)
					goto end;
				max -= ret;
				*ofs += ret;
				total += ret;
				h1m->state = H1_MSG_DONE;
			}
		}
		else {
			/* XFER_LEN is set but not CLEN nor CHNK, it means there
			 * is no body. Switch the message to the DONE state
			 */
			h1m->state = H1_MSG_DONE;
		}
	}
	else {
		/* no content length, read till SHUTW */
		*ofs += max;
		total = max;
	}

  end:
	if (ret < 0) {
		h1s->flags |= (!(h1m->flags & H1_MF_RESP) ? H1S_F_REQ_ERROR : H1S_F_RES_ERROR);
		h1m->err_state = h1m->state;
		h1m->err_pos = *ofs + max + ret;
		return 0;
	}

	return total;
}

/*
 * Synchronize the request and the response before resetting them. Except for
 * 1xx responses, we wait until the request and the response are both in the
 * DONE state and all data have been forwarded for both. For 1xx responses,
 * only the response is reset, waiting for the final one. Many 1xx messages
 * can be sent.
 */
static void h1_sync_messages(struct h1c *h1c)
{
	struct h1s *h1s = h1c->h1s;

	if (!h1s)
		return;

	if (h1s->res.state == H1_MSG_DONE &&
	    (h1s->status < 200 && (h1s->status == 100 || h1s->status >= 102)) &&
	    ((conn_is_back(h1c->conn) && !b_data(&h1c->obuf)) || !b_data(&h1s->rxbuf))) {
		/* For a 100-Continue response or any other informational 1xx
		 * response which is non-final, don't reset the request, the
		 * transaction is not finished. We just make sure the response
		 * was fully transferred first.
		 */
		h1m_init_res(&h1s->res);
	}
	else if (!b_data(&h1s->rxbuf) && !b_data(&h1c->obuf) &&
		 h1s->req.state == H1_MSG_DONE && h1s->res.state == H1_MSG_DONE) {
		if (h1s->flags & H1S_F_WANT_TUN) {
			h1s->req.state = H1_MSG_TUNNEL;
			h1s->res.state = H1_MSG_TUNNEL;
		}
	}
}

/*
 * Process incoming data. It parses data and transfers them from h1c->ibuf into
 * h1s->rxbuf. It returns the number of bytes parsed and transferred if > 0, or
 * 0 if it couldn't proceed.
 */
static size_t h1_process_input(struct h1c *h1c, struct buffer *buf, size_t count)
{
	struct h1s *h1s = NULL;
	struct h1m *h1m;
	size_t total = 0;
	size_t ret = 0;
	size_t max;
	int errflag;

	if (h1c->flags & H1C_F_CS_ERROR)
		goto end;

	/* Create a new H1S without CS if not already done */
	if (!h1c->h1s && !h1s_create(h1c, NULL))
		goto err;
	h1s = h1c->h1s;

#if 0
	// FIXME: Use a proxy option to enable early creation of the CS
	/* Create the CS if not already attached to the H1S */
	if (!h1s->cs && !h1s_new_cs(h1s))
		goto err;
#endif

	if (!h1_get_buf(h1c, &h1s->rxbuf)) {
		h1c->flags |= H1C_F_RX_ALLOC;
		goto end;
	}

	if (count > b_room(&h1s->rxbuf))
		count = b_room(&h1s->rxbuf);
	max = count;

	if (!conn_is_back(h1c->conn)) {
		h1m = &h1s->req;
		errflag = H1S_F_REQ_ERROR;
	}
	else {
		h1m = &h1s->res;
		errflag = H1S_F_RES_ERROR;
	}
	while (!(h1s->flags & errflag) && max) {
		if (h1m->state <= H1_MSG_LAST_LF) {
			ret = h1_process_headers(h1s, h1m, buf, &total, max);
			if (!ret)
				break;

			/* Reset request timeout */
			h1s->h1c->http_exp = TICK_ETERNITY;

			/* Create the CS if not already attached to the H1S */
			if (!h1s->cs && !h1s_new_cs(h1s))
				goto err;
		}
		else if (h1m->state <= H1_MSG_TRAILERS) {
			/* Do not parse the body if the header part is not yet
			 * transferred to the stream.
			 */
			if (!(h1s->flags & H1S_F_MSG_XFERED))
				break;
			ret = h1_process_data(h1s, h1m, buf, &total, max);
			if (!ret)
				break;
		}
		else if (h1m->state == H1_MSG_DONE)
			break;
		else if (h1m->state == H1_MSG_TUNNEL) {
			total += max;
			max = 0;
			break;
		}
		else {
			h1s->flags |= errflag;
			break;
		}

		max -= ret;
	}

	if (h1s->flags & errflag) {
		/* For now, if an error occurred during the message parsing when
		 * a stream is already attached to the mux, we transfer
		 * everything to let the stream handle the error itself. We
		 * assume the stream will detect the same error, of course.
		 * Otherwise, we generate the error here.
		 */
		if (!h1s->cs) {
			if (!h1_get_buf(h1c, &h1c->obuf)) {
				h1c->flags |= H1C_F_OUT_ALLOC;
				goto err;
			}
			h1_cpy_error_message(h1c, &h1c->obuf, 400);
			goto err;
		}
		total += max;
		max = 0;
	}

	b_xfer(&h1s->rxbuf, buf, total);

	if (b_data(&h1s->rxbuf)) {
		h1s->cs->flags |= CS_FL_RCV_MORE;
		if (b_full(&h1s->rxbuf))
			h1c->flags |= H1C_F_RX_FULL;
	}
	ret = count - max;
  end:
	return ret;

  err:
	h1s_destroy(h1s);
	h1c->flags |= H1C_F_CS_ERROR;
	sess_log(h1c->conn->owner);
	ret = 0;
	goto end;
}

/*
 * Process outgoing data. It parses data and transfers them from the channel
 * buffer into h1c->obuf. It returns the number of bytes parsed and transferred
 * if > 0, or 0 if it couldn't proceed.
 */
static size_t h1_process_output(struct h1c *h1c, struct buffer *buf, size_t count)
{
	struct h1s *h1s = h1c->h1s;
	struct h1m *h1m;
	size_t max;
	size_t total = 0;
	size_t ret = 0;
	int errflag;

	if (!h1_get_buf(h1c, &h1c->obuf)) {
		h1c->flags |= H1C_F_OUT_ALLOC;
		goto end;
	}
	if (count > b_room(&h1c->obuf))
		count = b_room(&h1c->obuf);

	max = count;
	if (!conn_is_back(h1c->conn)) {
		h1m = &h1s->res;
		errflag = H1S_F_RES_ERROR;
	}
	else {
		h1m = &h1s->req;
		errflag = H1S_F_REQ_ERROR;
	}
	while (!(h1s->flags & errflag) && max) {
		if (h1m->state <= H1_MSG_LAST_LF) {
			ret = h1_process_headers(h1s, h1m, buf, &total, max);
			if (!ret) {
				/* incomplete or invalid response, this is abnormal coming from
				 * haproxy and may only result in a bad errorfile or bad Lua code
				 * so that won't be fixed, raise an error now.
				 */
				h1s->flags |= errflag;
				break;
			}
		}
		else if (h1m->state <= H1_MSG_TRAILERS) {
			ret = h1_process_data(h1s, h1m, buf, &total, max);
			if (!ret)
				break;
		}
		else if (h1m->state == H1_MSG_DONE)
			break;
		else if (h1m->state == H1_MSG_TUNNEL) {
			total += max;
			max = 0;
			break;
		}
		else {
			h1s->flags |= errflag;
			break;
		}

		max -= ret;
	}

	// TODO: Handle H1S errors
	b_xfer(&h1c->obuf, buf, total);

	if (b_full(&h1c->obuf))
		h1c->flags |= H1C_F_OUT_FULL;
	ret = count - max;
  end:
	return ret;
}

/*
 * Transfer data from h1s->rxbuf into the channel buffer. It returns the number
 * of bytes transferred.
 */
static size_t h1_xfer(struct h1s *h1s, struct buffer *buf, size_t count)
{
	struct h1c *h1c = h1s->h1c;
	struct conn_stream *cs = h1s->cs;
	size_t ret = 0;

	/* transfer possibly pending data to the upper layer */
	ret = b_xfer(buf, &h1s->rxbuf, count);

	if (b_data(&h1s->rxbuf)) {
		if (!b_full(&h1s->rxbuf)) {
			h1c->flags &= ~H1C_F_RX_FULL;
		}
		cs->flags |= CS_FL_RCV_MORE;
	}
	else {
		if (!(h1s->flags & H1S_F_MSG_XFERED))
			h1s->flags |= H1S_F_MSG_XFERED;

		h1c->flags &= ~H1C_F_RX_FULL;
		h1_release_buf(h1c, &h1s->rxbuf);
		h1_sync_messages(h1c);

		cs->flags &= ~CS_FL_RCV_MORE;
		if (!b_data(&h1c->ibuf) && (cs->flags & CS_FL_REOS))
			cs->flags |= CS_FL_EOS;
	}
	return ret;
}
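
/* (Note on CS_FL_RCV_MORE above: the flag stays set as long as rxbuf still
 * holds data, telling the stream layer to call rcv_buf() again; once the
 * buffer is fully drained, the message is marked as transferred to the data
 * layer and the request/response pair may be resynchronized.)
 */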

/*********************************************************/
/* functions below are I/O callbacks from the connection */
/*********************************************************/
/*
 * Attempt to read data, and subscribe if none available
 */
static int h1_recv(struct h1c *h1c)
{
	struct connection *conn = h1c->conn;
	size_t ret, max;
	int rcvd = 0;

	if (h1c->wait_event.wait_reason & SUB_CAN_RECV)
		return 0;

	if (!h1_recv_allowed(h1c)) {
		if (h1c->h1s && b_data(&h1c->h1s->rxbuf))
			return 1;
		return 0;
	}

	if (h1c->h1s && (h1c->h1s->flags & H1S_F_BUF_FLUSH))
		return 1;

	if (!h1_get_buf(h1c, &h1c->ibuf)) {
		h1c->flags |= H1C_F_IN_ALLOC;
		return 0;
	}

	ret = 0;
	max = b_room(&h1c->ibuf);
	if (max) {
		h1c->flags &= ~H1C_F_IN_FULL;
		ret = conn->xprt->rcv_buf(conn, &h1c->ibuf, max, 0);
	}
	if (ret > 0)
		rcvd = 1;

	if (h1_recv_allowed(h1c))
		conn->xprt->subscribe(conn, SUB_CAN_RECV, &h1c->wait_event);

	if (!b_data(&h1c->ibuf))
		h1_release_buf(h1c, &h1c->ibuf);
	else if (b_full(&h1c->ibuf))
		h1c->flags |= H1C_F_IN_FULL;
	return rcvd;
}


/*
 * Try to send data if possible
 */
static int h1_send(struct h1c *h1c)
{
	struct connection *conn = h1c->conn;
	unsigned int flags = 0;
	size_t ret;
	int sent = 0;

	if (conn->flags & CO_FL_ERROR)
		return 0;

	if (!b_data(&h1c->obuf))
		goto end;

	if (h1c->flags & H1C_F_OUT_FULL)
		flags |= CO_SFL_MSG_MORE;

	ret = conn->xprt->snd_buf(conn, &h1c->obuf, b_data(&h1c->obuf), flags);
	if (ret > 0) {
		h1c->flags &= ~H1C_F_OUT_FULL;
		b_del(&h1c->obuf, ret);
		sent = 1;
	}

  end:
	/* We're done, no more to send */
	if (!b_data(&h1c->obuf)) {
		h1_release_buf(h1c, &h1c->obuf);
		h1_sync_messages(h1c);
		if (h1c->flags & H1C_F_CS_SHUTW_NOW)
			h1_shutw_conn(conn);
	}
	else if (!(h1c->wait_event.wait_reason & SUB_CAN_SEND))
		conn->xprt->subscribe(conn, SUB_CAN_SEND, &h1c->wait_event);

	return sent;
}


static void h1_wake_stream(struct h1c *h1c)
{
	struct connection *conn = h1c->conn;
	struct h1s *h1s = h1c->h1s;
	uint32_t flags = 0;
	int dont_wake = 0;

	if (!h1s || !h1s->cs)
		return;

	if ((h1c->flags & H1C_F_CS_ERROR) || (conn->flags & CO_FL_ERROR))
		flags |= CS_FL_ERROR;
	if (conn_xprt_read0_pending(conn))
		flags |= CS_FL_REOS;

	h1s->cs->flags |= flags;
	if (h1s->recv_wait) {
		h1s->recv_wait->wait_reason &= ~SUB_CAN_RECV;
		tasklet_wakeup(h1s->recv_wait->task);
		h1s->recv_wait = NULL;
		dont_wake = 1;
	}
	if (h1s->send_wait) {
		h1s->send_wait->wait_reason &= ~SUB_CAN_SEND;
		tasklet_wakeup(h1s->send_wait->task);
		h1s->send_wait = NULL;
		dont_wake = 1;
	}
	if (!dont_wake && h1s->cs->data_cb->wake)
		h1s->cs->data_cb->wake(h1s->cs);
}

/* callback called on any event by the connection handler.
 * It applies changes and returns zero, or < 0 if it wants immediate
 * destruction of the connection.
 */
static int h1_process(struct h1c *h1c)
{
	struct connection *conn = h1c->conn;

	if (b_data(&h1c->ibuf) && !(h1c->flags & (H1C_F_RX_FULL|H1C_F_RX_ALLOC))) {
		size_t ret;

		ret = h1_process_input(h1c, &h1c->ibuf, b_data(&h1c->ibuf));
		if (ret > 0) {
			h1c->flags &= ~H1C_F_IN_FULL;
			if (!b_data(&h1c->ibuf))
				h1_release_buf(h1c, &h1c->ibuf);
		}
	}

	h1_send(h1c);

	h1_wake_stream(h1c);

	if (!conn->mux_ctx)
		return -1;

	if ((h1c->flags & H1C_F_CS_ERROR) || (conn->flags & CO_FL_ERROR) || conn_xprt_read0_pending(conn)) {
		if (!h1c->h1s || !h1c->h1s->cs) {
			h1_release(conn);
			return -1;
		}
	}

	/* If there is a stream attached to the mux, let it
	 * handle the timeout.
	 */
	if (h1c->h1s && h1c->h1s->cs)
		h1c->idle_exp = TICK_ETERNITY;
	else {
		int tout = (!conn_is_back(conn)
			    ? h1c->px->timeout.client
			    : h1c->px->timeout.server);
		h1c->idle_exp = tick_add_ifset(now_ms, tout);
	}
	h1c->task->expire = tick_first(h1c->http_exp, h1c->idle_exp);
	if (tick_isset(h1c->task->expire))
		task_queue(h1c->task);
	return 0;
}

static struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
{
	struct h1c *h1c = ctx;
	int ret = 0;

	if (!(h1c->wait_event.wait_reason & SUB_CAN_SEND))
		ret = h1_send(h1c);
	if (!(h1c->wait_event.wait_reason & SUB_CAN_RECV))
		ret |= h1_recv(h1c);
	if (ret || b_data(&h1c->ibuf))
		h1_process(h1c);
	return NULL;
}


static int h1_wake(struct connection *conn)
{
	struct h1c *h1c = conn->mux_ctx;

	return (h1_process(h1c));
}


/* Connection timeout management. The principle is that if there's no receipt
 * nor sending for a certain amount of time, the connection is closed.
 */
static struct task *h1_timeout_task(struct task *t, void *context, unsigned short state)
{
	struct h1c *h1c = context;
	int expired = tick_is_expired(t->expire, now_ms);

	if (!h1c)
		goto end;

	if (!expired) {
		t->expire = tick_first(t->expire, tick_first(h1c->idle_exp, h1c->http_exp));
		return t;
	}

	h1c->flags   |= H1C_F_CS_ERROR;
	h1c->idle_exp = TICK_ETERNITY;
	h1c->http_exp = TICK_ETERNITY;
	t->expire     = TICK_ETERNITY;

	/* Don't try to send an error message on the server-side */
	if (conn_is_back(h1c->conn))
		goto release;

	/* Don't send an error message if no input data is pending _AND_ if null
	 * requests are ignored or it's not the first request.
	 */
	if (!b_data(&h1c->ibuf) && (h1c->px->options & PR_O_IGNORE_PRB ||
				    h1c->flags & H1C_F_WAIT_NEXT_REQ))
		goto release;

	/* Try to allocate an output buffer to store the error message. If
	 * allocation fails, just go away.
	 */
	if (!h1_get_buf(h1c, &h1c->obuf))
		goto release;

	h1_cpy_error_message(h1c, &h1c->obuf, 408);
	tasklet_wakeup(h1c->wait_event.task);
	sess_log(h1c->conn->owner);
	return t;

  release:
	if (h1c->h1s) {
		tasklet_wakeup(h1c->wait_event.task);
		return t;
	}
	h1c->task = NULL;
	h1_release(h1c->conn);
  end:
	task_delete(t);
	task_free(t);
	return NULL;
}

/*******************************************/
/* functions below are used by the streams */
/*******************************************/
/*
 * Attach a new stream to a connection
 * (Used for outgoing connections)
 */
static struct conn_stream *h1_attach(struct connection *conn)
{
	struct h1c *h1c = conn->mux_ctx;
	struct conn_stream *cs = NULL;
	struct h1s *h1s;

	if (h1c->flags & H1C_F_CS_ERROR)
		goto end;

	cs = cs_new(h1c->conn);
	if (!cs)
		goto end;

	h1s = h1s_create(h1c, cs);
	if (h1s == NULL)
		goto end;

	return cs;
  end:
	cs_free(cs);
	return NULL;
}

/* Retrieves a valid conn_stream from this connection, or returns NULL. For
 * this mux, it's easy as we can only store a single conn_stream.
 */
static const struct conn_stream *h1_get_first_cs(const struct connection *conn)
{
	struct h1c *h1c = conn->mux_ctx;
	struct h1s *h1s = h1c->h1s;

	if (h1s)
		return h1s->cs;

	return NULL;
}

static void h1_destroy(struct connection *conn)
{
	struct h1c *h1c = conn->mux_ctx;

	if (!h1c->h1s)
		h1_release(conn);
}

/*
 * Detach the stream from the connection and possibly release the connection.
 */
static void h1_detach(struct conn_stream *cs)
{
	struct h1s *h1s = cs->ctx;
	struct h1c *h1c;

	cs->ctx = NULL;
	if (!h1s)
		return;

	h1c = h1s->h1c;
	h1s->cs = NULL;

	h1s_destroy(h1s);

	/* We don't want to close right now unless the connection is in error */
	if ((h1c->flags & (H1C_F_CS_ERROR|H1C_F_CS_SHUTW)) ||
	    (h1c->conn->flags & CO_FL_ERROR))
		h1_release(h1c->conn);
	else
		tasklet_wakeup(h1c->wait_event.task);
}


static void h1_shutr(struct conn_stream *cs, enum cs_shr_mode mode)
{
	struct h1s *h1s = cs->ctx;

	if (!h1s)
		return;

	if ((h1s->flags & H1S_F_WANT_KAL) && !(cs->flags & (CS_FL_REOS|CS_FL_EOS)))
		return;

	/* NOTE: Be sure to handle abort (cf. h2_shutr) */
	if (cs->flags & CS_FL_SHR)
		return;
	if (conn_xprt_ready(cs->conn) && cs->conn->xprt->shutr)
		cs->conn->xprt->shutr(cs->conn, (mode == CS_SHR_DRAIN));
	if (cs->flags & CS_FL_SHW) {
		h1s->h1c->flags = (h1s->h1c->flags & ~H1C_F_CS_SHUTW_NOW) | H1C_F_CS_SHUTW;
		conn_full_close(cs->conn);
	}
}

static void h1_shutw(struct conn_stream *cs, enum cs_shw_mode mode)
{
	struct h1s *h1s = cs->ctx;
	struct h1c *h1c;

	if (!h1s)
		return;
	h1c = h1s->h1c;

	if ((h1s->flags & H1S_F_WANT_KAL) &&
	    !(cs->flags & (CS_FL_REOS|CS_FL_EOS)) &&
	    h1s->req.state == H1_MSG_DONE && h1s->res.state == H1_MSG_DONE)
		return;

	h1c->flags |= H1C_F_CS_SHUTW_NOW;
	if ((cs->flags & CS_FL_SHW) || b_data(&h1c->obuf))
		return;

	h1_shutw_conn(cs->conn);
}

static void h1_shutw_conn(struct connection *conn)
{
	struct h1c *h1c = conn->mux_ctx;

	if (conn_xprt_ready(conn) && conn->xprt->shutw)
		conn->xprt->shutw(conn, 1);
	if (!(conn->flags & CO_FL_SOCK_RD_SH))
		conn_sock_shutw(conn, 1);
	else {
		h1c->flags = (h1c->flags & ~H1C_F_CS_SHUTW_NOW) | H1C_F_CS_SHUTW;
		conn_full_close(conn);
	}
}

/* Called from the upper layer, to unsubscribe from events */
static int h1_unsubscribe(struct conn_stream *cs, int event_type, void *param)
{
	struct wait_event *sw;
	struct h1s *h1s = cs->ctx;

	if (!h1s)
		return 0;

	if (event_type & SUB_CAN_RECV) {
		sw = param;
		if (h1s->recv_wait == sw) {
			sw->wait_reason &= ~SUB_CAN_RECV;
			h1s->recv_wait = NULL;
		}
	}
	if (event_type & SUB_CAN_SEND) {
		sw = param;
		if (h1s->send_wait == sw) {
			sw->wait_reason &= ~SUB_CAN_SEND;
			h1s->send_wait = NULL;
		}
	}
	return 0;
}

/* Called from the upper layer, to subscribe to events, such as being able to send */
static int h1_subscribe(struct conn_stream *cs, int event_type, void *param)
{
	struct wait_event *sw;
	struct h1s *h1s = cs->ctx;

	if (!h1s)
		return -1;

	switch (event_type) {
	case SUB_CAN_RECV:
		sw = param;
		if (!(sw->wait_reason & SUB_CAN_RECV)) {
			sw->wait_reason |= SUB_CAN_RECV;
			sw->handle = h1s;
			h1s->recv_wait = sw;
		}
		return 0;
	case SUB_CAN_SEND:
		sw = param;
		if (!(sw->wait_reason & SUB_CAN_SEND)) {
			sw->wait_reason |= SUB_CAN_SEND;
			sw->handle = h1s;
			h1s->send_wait = sw;
		}
		return 0;
	default:
		break;
	}
	return -1;
}

/* Called from the upper layer, to receive data */
static size_t h1_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t count, int flags)
{
	struct h1s *h1s = cs->ctx;
	size_t ret = 0;

	if (!h1s)
		return ret;

	if (!(h1s->h1c->flags & H1C_F_RX_ALLOC))
		ret = h1_xfer(h1s, buf, count);

	if (flags & CO_RFL_BUF_FLUSH)
		h1s->flags |= H1S_F_BUF_FLUSH;
	else if (ret > 0 || (h1s->flags & H1S_F_BUF_FLUSH)) {
		h1s->flags &= ~H1S_F_BUF_FLUSH;
		if (!(h1s->h1c->wait_event.wait_reason & SUB_CAN_RECV))
			tasklet_wakeup(h1s->h1c->wait_event.task);
	}
	return ret;
}


/* Called from the upper layer, to send data */
static size_t h1_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t count, int flags)
{
	struct h1s *h1s = cs->ctx;
	struct h1c *h1c;
	size_t ret = 0;

	if (!h1s)
		return ret;

	h1c = h1s->h1c;

	/* FIXME: There is a problem when the backend server is down. Channel
	 * data are consumed, so CF_WROTE_DATA is set by the stream
	 * interface. We should wait until the connection is established
	 * before sending, but to do so, we need a notification of the
	 * connection establishment.
	 */

	if (!(h1c->flags & (H1C_F_OUT_FULL|H1C_F_OUT_ALLOC)) && b_data(buf))
		ret = h1_process_output(h1c, buf, count);
	if (ret > 0) {
		h1_send(h1c);

		/* We need to do that because of the infinite forwarding. */
		if (!b_data(buf))
			ret = count;
	}
	return ret;
}

#if defined(CONFIG_HAP_LINUX_SPLICE)
/* Send and get, using splicing */
static int h1_rcv_pipe(struct conn_stream *cs, struct pipe *pipe, unsigned int count)
{
	struct h1s *h1s = cs->ctx;
	struct h1m *h1m = (!conn_is_back(cs->conn) ? &h1s->req : &h1s->res);
	int ret = 0;

	if (b_data(&h1s->rxbuf) || b_data(&h1s->h1c->ibuf))
		goto end;
	if (h1m->state == H1_MSG_DATA && count > h1m->curr_len)
		count = h1m->curr_len;
	ret = cs->conn->xprt->rcv_pipe(cs->conn, pipe, count);
	if (h1m->state == H1_MSG_DATA && ret > 0)
		h1m->curr_len -= ret;
  end:
	return ret;
}

static int h1_snd_pipe(struct conn_stream *cs, struct pipe *pipe)
{
	struct h1s *h1s = cs->ctx;
	struct h1m *h1m = (!conn_is_back(cs->conn) ? &h1s->res : &h1s->req);
	int ret = 0;

	if (b_data(&h1s->h1c->obuf))
		goto end;

	ret = cs->conn->xprt->snd_pipe(cs->conn, pipe);
	if (h1m->state == H1_MSG_DATA && ret > 0)
		h1m->curr_len -= ret;
  end:
	return ret;
}
#endif
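
/* (Note on the splicing path above: it bypasses the mux buffers entirely, so
 * it is only used once rxbuf/ibuf are empty on the receive side and obuf is
 * empty on the send side; <curr_len> is still decremented so the H1 state
 * machine stays in sync with the amount of body data that went through the
 * pipe.)
 */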

/****************************************/
/* MUX initialization and instantiation */
/****************************************/

/* The mux operations */
const struct mux_ops mux_h1_ops = {
	.init          = h1_init,
	.wake          = h1_wake,
	.attach        = h1_attach,
	.get_first_cs  = h1_get_first_cs,
	.detach        = h1_detach,
	.destroy       = h1_destroy,
	.avail_streams = h1_avail_streams,
	.rcv_buf       = h1_rcv_buf,
	.snd_buf       = h1_snd_buf,
#if defined(CONFIG_HAP_LINUX_SPLICE)
	.rcv_pipe      = h1_rcv_pipe,
	.snd_pipe      = h1_snd_pipe,
#endif
	.subscribe     = h1_subscribe,
	.unsubscribe   = h1_unsubscribe,
	.shutr         = h1_shutr,
	.shutw         = h1_shutw,
	.flags         = MX_FL_NONE,
	.name          = "h1",
};


/* this mux registers default HTX proto */
static struct mux_proto_list mux_proto_htx =
{ .token = IST(""), .mode = PROTO_MODE_HTX, .side = PROTO_SIDE_BOTH, .mux = &mux_h1_ops };
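
/* (The empty token above means this entry does not match any specific proto
 * name: it acts as the default mux for PROTO_MODE_HTX connections, on both
 * the frontend and the backend side.)
 */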

static void __h1_deinit(void)
{
	pool_destroy(pool_head_h1c);
	pool_destroy(pool_head_h1s);
}

__attribute__((constructor))
static void __h1_init(void)
{
	register_mux_proto(&mux_proto_htx);
	hap_register_post_deinit(__h1_deinit);
	pool_head_h1c = create_pool("h1c", sizeof(struct h1c), MEM_F_SHARED);
	pool_head_h1s = create_pool("h1s", sizeof(struct h1s), MEM_F_SHARED);
}
/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */