/*
 * HTTP protocol analyzer
 *
 * Copyright (C) 2018 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
12
#include <common/base64.h>
#include <common/config.h>
#include <common/debug.h>
#include <common/uri_auth.h>

#include <types/cache.h>
#include <types/capture.h>

#include <proto/acl.h>
#include <proto/channel.h>
#include <proto/checks.h>
#include <proto/connection.h>
#include <proto/filters.h>
#include <proto/hdr_idx.h>
#include <proto/http_htx.h>
#include <proto/htx.h>
#include <proto/log.h>
#include <proto/proto_http.h>
#include <proto/proxy.h>
#include <proto/stream.h>
#include <proto/stream_interface.h>
#include <proto/stats.h>
Christopher Fauletf2824e62018-10-01 12:12:37 +020036
37static void htx_end_request(struct stream *s);
38static void htx_end_response(struct stream *s);
39
Christopher Faulet0f226952018-10-22 09:29:56 +020040static void htx_capture_headers(struct htx *htx, char **cap, struct cap_hdr *cap_hdr);
41static size_t htx_fmt_req_line(const union h1_sl sl, char *str, size_t len);
42static void htx_debug_stline(const char *dir, struct stream *s, const union h1_sl sl);
43static void htx_debug_hdr(const char *dir, struct stream *s, const struct ist n, const struct ist v);
44
Christopher Faulete0768eb2018-10-03 16:38:02 +020045/* This stream analyser waits for a complete HTTP request. It returns 1 if the
46 * processing can continue on next analysers, or zero if it either needs more
47 * data or wants to immediately abort the request (eg: timeout, error, ...). It
48 * is tied to AN_REQ_WAIT_HTTP and may may remove itself from s->req.analysers
49 * when it has nothing left to do, and may remove any analyser when it wants to
50 * abort.
51 */
52int htx_wait_for_request(struct stream *s, struct channel *req, int an_bit)
53{
Christopher Faulet9768c262018-10-22 09:34:31 +020054
Christopher Faulete0768eb2018-10-03 16:38:02 +020055 /*
Christopher Faulet9768c262018-10-22 09:34:31 +020056 * We will analyze a complete HTTP request to check the its syntax.
Christopher Faulete0768eb2018-10-03 16:38:02 +020057 *
Christopher Faulet9768c262018-10-22 09:34:31 +020058 * Once the start line and all headers are received, we may perform a
59 * capture of the error (if any), and we will set a few fields. We also
60 * check for monitor-uri, logging and finally headers capture.
Christopher Faulete0768eb2018-10-03 16:38:02 +020061 */
Christopher Faulete0768eb2018-10-03 16:38:02 +020062 struct session *sess = s->sess;
63 struct http_txn *txn = s->txn;
64 struct http_msg *msg = &txn->req;
Christopher Faulet9768c262018-10-22 09:34:31 +020065 struct htx *htx;
66 union h1_sl sl;
Christopher Faulete0768eb2018-10-03 16:38:02 +020067
68 DPRINTF(stderr,"[%u] %s: stream=%p b=%p, exp(r,w)=%u,%u bf=%08x bh=%lu analysers=%02x\n",
69 now_ms, __FUNCTION__,
70 s,
71 req,
72 req->rex, req->wex,
73 req->flags,
74 ci_data(req),
75 req->analysers);
76
Christopher Faulet9768c262018-10-22 09:34:31 +020077 htx = htx_from_buf(&req->buf);
78
Christopher Faulete0768eb2018-10-03 16:38:02 +020079 /* we're speaking HTTP here, so let's speak HTTP to the client */
80 s->srv_error = http_return_srv_error;
81
82 /* If there is data available for analysis, log the end of the idle time. */
83 if (c_data(req) && s->logs.t_idle == -1)
84 s->logs.t_idle = tv_ms_elapsed(&s->logs.tv_accept, &now) - s->logs.t_handshake;
85
Christopher Faulete0768eb2018-10-03 16:38:02 +020086 /*
87 * Now we quickly check if we have found a full valid request.
88 * If not so, we check the FD and buffer states before leaving.
89 * A full request is indicated by the fact that we have seen
90 * the double LF/CRLF, so the state is >= HTTP_MSG_BODY. Invalid
91 * requests are checked first. When waiting for a second request
92 * on a keep-alive stream, if we encounter and error, close, t/o,
93 * we note the error in the stream flags but don't set any state.
94 * Since the error will be noted there, it will not be counted by
95 * process_stream() as a frontend error.
96 * Last, we may increase some tracked counters' http request errors on
97 * the cases that are deliberately the client's fault. For instance,
98 * a timeout or connection reset is not counted as an error. However
99 * a bad request is.
100 */
Christopher Faulet9768c262018-10-22 09:34:31 +0200101 if (unlikely(htx_is_empty(htx) || htx_get_tail_type(htx) < HTX_BLK_EOH)) {
102 /* 1: have we encountered a read error ? */
103 if (req->flags & CF_READ_ERROR) {
Christopher Faulete0768eb2018-10-03 16:38:02 +0200104 if (!(s->flags & SF_ERR_MASK))
105 s->flags |= SF_ERR_CLICL;
106
107 if (txn->flags & TX_WAIT_NEXT_RQ)
108 goto failed_keep_alive;
109
110 if (sess->fe->options & PR_O_IGNORE_PRB)
111 goto failed_keep_alive;
112
Christopher Faulet9768c262018-10-22 09:34:31 +0200113 stream_inc_http_err_ctr(s);
Christopher Faulete0768eb2018-10-03 16:38:02 +0200114 stream_inc_http_req_ctr(s);
115 proxy_inc_fe_req_ctr(sess->fe);
116 HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
117 if (sess->listener->counters)
118 HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
119
Christopher Faulet9768c262018-10-22 09:34:31 +0200120 txn->status = 400;
121 msg->err_state = msg->msg_state;
122 msg->msg_state = HTTP_MSG_ERROR;
123 htx_reply_and_close(s, txn->status, NULL);
124 req->analysers &= AN_REQ_FLT_END;
125
Christopher Faulete0768eb2018-10-03 16:38:02 +0200126 if (!(s->flags & SF_FINST_MASK))
127 s->flags |= SF_FINST_R;
128 return 0;
129 }
130
Christopher Faulet9768c262018-10-22 09:34:31 +0200131 /* 2: has the read timeout expired ? */
Christopher Faulete0768eb2018-10-03 16:38:02 +0200132 else if (req->flags & CF_READ_TIMEOUT || tick_is_expired(req->analyse_exp, now_ms)) {
133 if (!(s->flags & SF_ERR_MASK))
134 s->flags |= SF_ERR_CLITO;
135
136 if (txn->flags & TX_WAIT_NEXT_RQ)
137 goto failed_keep_alive;
138
139 if (sess->fe->options & PR_O_IGNORE_PRB)
140 goto failed_keep_alive;
141
Christopher Faulet9768c262018-10-22 09:34:31 +0200142 stream_inc_http_err_ctr(s);
Christopher Faulete0768eb2018-10-03 16:38:02 +0200143 stream_inc_http_req_ctr(s);
144 proxy_inc_fe_req_ctr(sess->fe);
145 HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
146 if (sess->listener->counters)
147 HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
148
Christopher Faulet9768c262018-10-22 09:34:31 +0200149 txn->status = 408;
150 msg->err_state = msg->msg_state;
151 msg->msg_state = HTTP_MSG_ERROR;
152 htx_reply_and_close(s, txn->status, http_error_message(s));
153 req->analysers &= AN_REQ_FLT_END;
154
Christopher Faulete0768eb2018-10-03 16:38:02 +0200155 if (!(s->flags & SF_FINST_MASK))
156 s->flags |= SF_FINST_R;
157 return 0;
158 }
159
Christopher Faulet9768c262018-10-22 09:34:31 +0200160 /* 3: have we encountered a close ? */
Christopher Faulete0768eb2018-10-03 16:38:02 +0200161 else if (req->flags & CF_SHUTR) {
162 if (!(s->flags & SF_ERR_MASK))
163 s->flags |= SF_ERR_CLICL;
164
165 if (txn->flags & TX_WAIT_NEXT_RQ)
166 goto failed_keep_alive;
167
168 if (sess->fe->options & PR_O_IGNORE_PRB)
169 goto failed_keep_alive;
170
Christopher Faulete0768eb2018-10-03 16:38:02 +0200171 stream_inc_http_err_ctr(s);
172 stream_inc_http_req_ctr(s);
173 proxy_inc_fe_req_ctr(sess->fe);
174 HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
175 if (sess->listener->counters)
176 HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
177
Christopher Faulet9768c262018-10-22 09:34:31 +0200178 txn->status = 400;
179 msg->err_state = msg->msg_state;
180 msg->msg_state = HTTP_MSG_ERROR;
181 htx_reply_and_close(s, txn->status, http_error_message(s));
182 req->analysers &= AN_REQ_FLT_END;
183
Christopher Faulete0768eb2018-10-03 16:38:02 +0200184 if (!(s->flags & SF_FINST_MASK))
185 s->flags |= SF_FINST_R;
186 return 0;
187 }
188
189 channel_dont_connect(req);
190 req->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
191 s->res.flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
192#ifdef TCP_QUICKACK
Christopher Faulet9768c262018-10-22 09:34:31 +0200193 if (sess->listener->options & LI_O_NOQUICKACK && htx_is_not_empty(htx) &&
Christopher Faulete0768eb2018-10-03 16:38:02 +0200194 objt_conn(sess->origin) && conn_ctrl_ready(__objt_conn(sess->origin))) {
195 /* We need more data, we have to re-enable quick-ack in case we
196 * previously disabled it, otherwise we might cause the client
197 * to delay next data.
198 */
199 setsockopt(__objt_conn(sess->origin)->handle.fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
200 }
201#endif
202
203 if ((msg->msg_state != HTTP_MSG_RQBEFORE) && (txn->flags & TX_WAIT_NEXT_RQ)) {
204 /* If the client starts to talk, let's fall back to
205 * request timeout processing.
206 */
207 txn->flags &= ~TX_WAIT_NEXT_RQ;
208 req->analyse_exp = TICK_ETERNITY;
209 }
210
211 /* just set the request timeout once at the beginning of the request */
212 if (!tick_isset(req->analyse_exp)) {
213 if ((msg->msg_state == HTTP_MSG_RQBEFORE) &&
214 (txn->flags & TX_WAIT_NEXT_RQ) &&
215 tick_isset(s->be->timeout.httpka))
216 req->analyse_exp = tick_add(now_ms, s->be->timeout.httpka);
217 else
218 req->analyse_exp = tick_add_ifset(now_ms, s->be->timeout.httpreq);
219 }
220
221 /* we're not ready yet */
222 return 0;
223
224 failed_keep_alive:
225 /* Here we process low-level errors for keep-alive requests. In
226 * short, if the request is not the first one and it experiences
227 * a timeout, read error or shutdown, we just silently close so
228 * that the client can try again.
229 */
230 txn->status = 0;
231 msg->msg_state = HTTP_MSG_RQBEFORE;
232 req->analysers &= AN_REQ_FLT_END;
233 s->logs.logwait = 0;
234 s->logs.level = 0;
235 s->res.flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
Christopher Faulet9768c262018-10-22 09:34:31 +0200236 htx_reply_and_close(s, txn->status, NULL);
Christopher Faulete0768eb2018-10-03 16:38:02 +0200237 return 0;
238 }
239
Christopher Faulet9768c262018-10-22 09:34:31 +0200240 msg->msg_state = HTTP_MSG_BODY;
Christopher Faulete0768eb2018-10-03 16:38:02 +0200241 stream_inc_http_req_ctr(s);
242 proxy_inc_fe_req_ctr(sess->fe); /* one more valid request for this FE */
243
Christopher Faulet9768c262018-10-22 09:34:31 +0200244 /* kill the pending keep-alive timeout */
245 txn->flags &= ~TX_WAIT_NEXT_RQ;
246 req->analyse_exp = TICK_ETERNITY;
Christopher Faulete0768eb2018-10-03 16:38:02 +0200247
Christopher Faulet9768c262018-10-22 09:34:31 +0200248 /* 0: we might have to print this header in debug mode */
249 if (unlikely((global.mode & MODE_DEBUG) &&
250 (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
251 int32_t pos;
Christopher Faulete0768eb2018-10-03 16:38:02 +0200252
Christopher Faulet9768c262018-10-22 09:34:31 +0200253 htx_debug_stline("clireq", s, http_find_stline(htx));
254
255 for (pos = htx_get_head(htx); pos != -1; pos = htx_get_next(htx, pos)) {
256 struct htx_blk *blk = htx_get_blk(htx, pos);
257 enum htx_blk_type type = htx_get_blk_type(blk);
258
259 if (type == HTX_BLK_EOH)
260 break;
261 if (type != HTX_BLK_HDR)
262 continue;
263
264 htx_debug_hdr("clihdr", s,
265 htx_get_blk_name(htx, blk),
266 htx_get_blk_value(htx, blk));
267 }
268 }
Christopher Faulete0768eb2018-10-03 16:38:02 +0200269
270 /*
271 * 1: identify the method
272 */
Christopher Faulet9768c262018-10-22 09:34:31 +0200273 sl = http_find_stline(htx);
274 txn->meth = sl.rq.meth;
275 msg->flags |= HTTP_MSGF_XFER_LEN;
276
277 /* ... and check if the request is HTTP/1.1 or above */
278 if ((sl.rq.v.len == 8) &&
279 ((*(sl.rq.v.ptr + 5) > '1') ||
280 ((*(sl.rq.v.ptr + 5) == '1') && (*(sl.rq.v.ptr + 7) >= '1'))))
281 msg->flags |= HTTP_MSGF_VER_11;
Christopher Faulete0768eb2018-10-03 16:38:02 +0200282
283 /* we can make use of server redirect on GET and HEAD */
284 if (txn->meth == HTTP_METH_GET || txn->meth == HTTP_METH_HEAD)
285 s->flags |= SF_REDIRECTABLE;
Christopher Faulet9768c262018-10-22 09:34:31 +0200286 else if (txn->meth == HTTP_METH_OTHER && isteqi(sl.rq.m, ist("PRI"))) {
Christopher Faulete0768eb2018-10-03 16:38:02 +0200287 /* PRI is reserved for the HTTP/2 preface */
Christopher Faulete0768eb2018-10-03 16:38:02 +0200288 goto return_bad_req;
289 }
290
291 /*
292 * 2: check if the URI matches the monitor_uri.
293 * We have to do this for every request which gets in, because
294 * the monitor-uri is defined by the frontend.
295 */
296 if (unlikely((sess->fe->monitor_uri_len != 0) &&
Christopher Faulet9768c262018-10-22 09:34:31 +0200297 isteqi(sl.rq.u, ist2(sess->fe->monitor_uri, sess->fe->monitor_uri_len)))) {
Christopher Faulete0768eb2018-10-03 16:38:02 +0200298 /*
299 * We have found the monitor URI
300 */
301 struct acl_cond *cond;
302
303 s->flags |= SF_MONITOR;
304 HA_ATOMIC_ADD(&sess->fe->fe_counters.intercepted_req, 1);
305
306 /* Check if we want to fail this monitor request or not */
307 list_for_each_entry(cond, &sess->fe->mon_fail_cond, list) {
308 int ret = acl_exec_cond(cond, sess->fe, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
309
310 ret = acl_pass(ret);
311 if (cond->pol == ACL_COND_UNLESS)
312 ret = !ret;
313
314 if (ret) {
315 /* we fail this request, let's return 503 service unavail */
316 txn->status = 503;
Christopher Faulet9768c262018-10-22 09:34:31 +0200317 htx_reply_and_close(s, txn->status, http_error_message(s));
Christopher Faulete0768eb2018-10-03 16:38:02 +0200318 if (!(s->flags & SF_ERR_MASK))
319 s->flags |= SF_ERR_LOCAL; /* we don't want a real error here */
320 goto return_prx_cond;
321 }
322 }
323
324 /* nothing to fail, let's reply normaly */
325 txn->status = 200;
Christopher Faulet9768c262018-10-22 09:34:31 +0200326 htx_reply_and_close(s, txn->status, http_error_message(s));
Christopher Faulete0768eb2018-10-03 16:38:02 +0200327 if (!(s->flags & SF_ERR_MASK))
328 s->flags |= SF_ERR_LOCAL; /* we don't want a real error here */
329 goto return_prx_cond;
330 }
331
332 /*
333 * 3: Maybe we have to copy the original REQURI for the logs ?
334 * Note: we cannot log anymore if the request has been
335 * classified as invalid.
336 */
337 if (unlikely(s->logs.logwait & LW_REQ)) {
338 /* we have a complete HTTP request that we must log */
339 if ((txn->uri = pool_alloc(pool_head_requri)) != NULL) {
Christopher Faulet9768c262018-10-22 09:34:31 +0200340 size_t len;
Christopher Faulete0768eb2018-10-03 16:38:02 +0200341
Christopher Faulet9768c262018-10-22 09:34:31 +0200342 len = htx_fmt_req_line(sl, txn->uri, global.tune.requri_len - 1);
343 txn->uri[len] = 0;
Christopher Faulete0768eb2018-10-03 16:38:02 +0200344
345 if (!(s->logs.logwait &= ~(LW_REQ|LW_INIT)))
346 s->do_log(s);
347 } else {
348 ha_alert("HTTP logging : out of memory.\n");
349 }
350 }
Christopher Faulete0768eb2018-10-03 16:38:02 +0200351
Christopher Faulete0768eb2018-10-03 16:38:02 +0200352 /* if the frontend has "option http-use-proxy-header", we'll check if
353 * we have what looks like a proxied connection instead of a connection,
354 * and in this case set the TX_USE_PX_CONN flag to use Proxy-connection.
355 * Note that this is *not* RFC-compliant, however browsers and proxies
356 * happen to do that despite being non-standard :-(
357 * We consider that a request not beginning with either '/' or '*' is
358 * a proxied connection, which covers both "scheme://location" and
359 * CONNECT ip:port.
360 */
361 if ((sess->fe->options2 & PR_O2_USE_PXHDR) &&
Christopher Faulet9768c262018-10-22 09:34:31 +0200362 *(sl.rq.u.ptr) != '/' && *(sl.rq.u.ptr) != '*')
Christopher Faulete0768eb2018-10-03 16:38:02 +0200363 txn->flags |= TX_USE_PX_CONN;
364
Christopher Faulete0768eb2018-10-03 16:38:02 +0200365 /* 5: we may need to capture headers */
366 if (unlikely((s->logs.logwait & LW_REQHDR) && s->req_cap))
Christopher Faulet9768c262018-10-22 09:34:31 +0200367 htx_capture_headers(htx, s->req_cap, sess->fe->req_cap);
Christopher Faulete0768eb2018-10-03 16:38:02 +0200368
369 /* Until set to anything else, the connection mode is set as Keep-Alive. It will
370 * only change if both the request and the config reference something else.
371 * Option httpclose by itself sets tunnel mode where headers are mangled.
372 * However, if another mode is set, it will affect it (eg: server-close/
373 * keep-alive + httpclose = close). Note that we avoid to redo the same work
374 * if FE and BE have the same settings (common). The method consists in
375 * checking if options changed between the two calls (implying that either
376 * one is non-null, or one of them is non-null and we are there for the first
377 * time.
378 */
Christopher Fauletf2824e62018-10-01 12:12:37 +0200379 if ((sess->fe->options & PR_O_HTTP_MODE) != (s->be->options & PR_O_HTTP_MODE))
Christopher Faulet0f226952018-10-22 09:29:56 +0200380 htx_adjust_conn_mode(s, txn);
Christopher Faulete0768eb2018-10-03 16:38:02 +0200381
382 /* we may have to wait for the request's body */
Christopher Faulet9768c262018-10-22 09:34:31 +0200383 if (s->be->options & PR_O_WREQ_BODY)
Christopher Faulete0768eb2018-10-03 16:38:02 +0200384 req->analysers |= AN_REQ_HTTP_BODY;
385
386 /*
387 * RFC7234#4:
388 * A cache MUST write through requests with methods
389 * that are unsafe (Section 4.2.1 of [RFC7231]) to
390 * the origin server; i.e., a cache is not allowed
391 * to generate a reply to such a request before
392 * having forwarded the request and having received
393 * a corresponding response.
394 *
395 * RFC7231#4.2.1:
396 * Of the request methods defined by this
397 * specification, the GET, HEAD, OPTIONS, and TRACE
398 * methods are defined to be safe.
399 */
400 if (likely(txn->meth == HTTP_METH_GET ||
401 txn->meth == HTTP_METH_HEAD ||
402 txn->meth == HTTP_METH_OPTIONS ||
403 txn->meth == HTTP_METH_TRACE))
404 txn->flags |= TX_CACHEABLE | TX_CACHE_COOK;
405
406 /* end of job, return OK */
407 req->analysers &= ~an_bit;
408 req->analyse_exp = TICK_ETERNITY;
Christopher Faulet9768c262018-10-22 09:34:31 +0200409
Christopher Faulete0768eb2018-10-03 16:38:02 +0200410 return 1;
411
412 return_bad_req:
Christopher Faulet9768c262018-10-22 09:34:31 +0200413 txn->status = 400;
Christopher Faulete0768eb2018-10-03 16:38:02 +0200414 txn->req.err_state = txn->req.msg_state;
415 txn->req.msg_state = HTTP_MSG_ERROR;
Christopher Faulet9768c262018-10-22 09:34:31 +0200416 htx_reply_and_close(s, txn->status, http_error_message(s));
Christopher Faulete0768eb2018-10-03 16:38:02 +0200417 HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
418 if (sess->listener->counters)
419 HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
420
421 return_prx_cond:
422 if (!(s->flags & SF_ERR_MASK))
423 s->flags |= SF_ERR_PRXCOND;
424 if (!(s->flags & SF_FINST_MASK))
425 s->flags |= SF_FINST_R;
426
427 req->analysers &= AN_REQ_FLT_END;
428 req->analyse_exp = TICK_ETERNITY;
429 return 0;
430}
431
432
433/* This stream analyser runs all HTTP request processing which is common to
434 * frontends and backends, which means blocking ACLs, filters, connection-close,
435 * reqadd, stats and redirects. This is performed for the designated proxy.
436 * It returns 1 if the processing can continue on next analysers, or zero if it
437 * either needs more data or wants to immediately abort the request (eg: deny,
438 * error, ...).
439 */
440int htx_process_req_common(struct stream *s, struct channel *req, int an_bit, struct proxy *px)
441{
442 struct session *sess = s->sess;
443 struct http_txn *txn = s->txn;
444 struct http_msg *msg = &txn->req;
445 struct redirect_rule *rule;
446 struct cond_wordlist *wl;
447 enum rule_result verdict;
448 int deny_status = HTTP_ERR_403;
449 struct connection *conn = objt_conn(sess->origin);
450
Christopher Faulet9768c262018-10-22 09:34:31 +0200451 // TODO: Disabled for now
452 req->analyse_exp = TICK_ETERNITY;
453 req->analysers &= ~an_bit;
454 return 1;
455
Christopher Faulete0768eb2018-10-03 16:38:02 +0200456 if (unlikely(msg->msg_state < HTTP_MSG_BODY)) {
457 /* we need more data */
458 goto return_prx_yield;
459 }
460
461 DPRINTF(stderr,"[%u] %s: stream=%p b=%p, exp(r,w)=%u,%u bf=%08x bh=%lu analysers=%02x\n",
462 now_ms, __FUNCTION__,
463 s,
464 req,
465 req->rex, req->wex,
466 req->flags,
467 ci_data(req),
468 req->analysers);
469
470 /* just in case we have some per-backend tracking */
471 stream_inc_be_http_req_ctr(s);
472
473 /* evaluate http-request rules */
474 if (!LIST_ISEMPTY(&px->http_req_rules)) {
475 verdict = http_req_get_intercept_rule(px, &px->http_req_rules, s, &deny_status);
476
477 switch (verdict) {
478 case HTTP_RULE_RES_YIELD: /* some data miss, call the function later. */
479 goto return_prx_yield;
480
481 case HTTP_RULE_RES_CONT:
482 case HTTP_RULE_RES_STOP: /* nothing to do */
483 break;
484
485 case HTTP_RULE_RES_DENY: /* deny or tarpit */
486 if (txn->flags & TX_CLTARPIT)
487 goto tarpit;
488 goto deny;
489
490 case HTTP_RULE_RES_ABRT: /* abort request, response already sent. Eg: auth */
491 goto return_prx_cond;
492
493 case HTTP_RULE_RES_DONE: /* OK, but terminate request processing (eg: redirect) */
494 goto done;
495
496 case HTTP_RULE_RES_BADREQ: /* failed with a bad request */
497 goto return_bad_req;
498 }
499 }
500
501 if (conn && (conn->flags & CO_FL_EARLY_DATA) &&
502 (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_HANDSHAKE))) {
503 struct hdr_ctx ctx;
504
505 ctx.idx = 0;
506 if (!http_find_header2("Early-Data", strlen("Early-Data"),
507 ci_head(&s->req), &txn->hdr_idx, &ctx)) {
508 if (unlikely(http_header_add_tail2(&txn->req,
509 &txn->hdr_idx, "Early-Data: 1",
510 strlen("Early-Data: 1")) < 0)) {
511 goto return_bad_req;
512 }
513 }
514
515 }
516
517 /* OK at this stage, we know that the request was accepted according to
518 * the http-request rules, we can check for the stats. Note that the
519 * URI is detected *before* the req* rules in order not to be affected
520 * by a possible reqrep, while they are processed *after* so that a
521 * reqdeny can still block them. This clearly needs to change in 1.6!
522 */
523 if (stats_check_uri(&s->si[1], txn, px)) {
524 s->target = &http_stats_applet.obj_type;
525 if (unlikely(!stream_int_register_handler(&s->si[1], objt_applet(s->target)))) {
526 txn->status = 500;
527 s->logs.tv_request = now;
528 http_reply_and_close(s, txn->status, http_error_message(s));
529
530 if (!(s->flags & SF_ERR_MASK))
531 s->flags |= SF_ERR_RESOURCE;
532 goto return_prx_cond;
533 }
534
535 /* parse the whole stats request and extract the relevant information */
536 http_handle_stats(s, req);
537 verdict = http_req_get_intercept_rule(px, &px->uri_auth->http_req_rules, s, &deny_status);
538 /* not all actions implemented: deny, allow, auth */
539
540 if (verdict == HTTP_RULE_RES_DENY) /* stats http-request deny */
541 goto deny;
542
543 if (verdict == HTTP_RULE_RES_ABRT) /* stats auth / stats http-request auth */
544 goto return_prx_cond;
545 }
546
547 /* evaluate the req* rules except reqadd */
548 if (px->req_exp != NULL) {
549 if (apply_filters_to_request(s, req, px) < 0)
550 goto return_bad_req;
551
552 if (txn->flags & TX_CLDENY)
553 goto deny;
554
555 if (txn->flags & TX_CLTARPIT) {
556 deny_status = HTTP_ERR_500;
557 goto tarpit;
558 }
559 }
560
561 /* add request headers from the rule sets in the same order */
562 list_for_each_entry(wl, &px->req_add, list) {
563 if (wl->cond) {
564 int ret = acl_exec_cond(wl->cond, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
565 ret = acl_pass(ret);
566 if (((struct acl_cond *)wl->cond)->pol == ACL_COND_UNLESS)
567 ret = !ret;
568 if (!ret)
569 continue;
570 }
571
572 if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, wl->s, strlen(wl->s)) < 0))
573 goto return_bad_req;
574 }
575
576
577 /* Proceed with the stats now. */
578 if (unlikely(objt_applet(s->target) == &http_stats_applet) ||
579 unlikely(objt_applet(s->target) == &http_cache_applet)) {
580 /* process the stats request now */
581 if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
582 HA_ATOMIC_ADD(&sess->fe->fe_counters.intercepted_req, 1);
583
584 if (!(s->flags & SF_ERR_MASK)) // this is not really an error but it is
585 s->flags |= SF_ERR_LOCAL; // to mark that it comes from the proxy
586 if (!(s->flags & SF_FINST_MASK))
587 s->flags |= SF_FINST_R;
588
589 /* enable the minimally required analyzers to handle keep-alive and compression on the HTTP response */
590 req->analysers &= (AN_REQ_HTTP_BODY | AN_REQ_FLT_HTTP_HDRS | AN_REQ_FLT_END);
591 req->analysers &= ~AN_REQ_FLT_XFER_DATA;
592 req->analysers |= AN_REQ_HTTP_XFER_BODY;
593 goto done;
594 }
595
596 /* check whether we have some ACLs set to redirect this request */
597 list_for_each_entry(rule, &px->redirect_rules, list) {
598 if (rule->cond) {
599 int ret;
600
601 ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
602 ret = acl_pass(ret);
603 if (rule->cond->pol == ACL_COND_UNLESS)
604 ret = !ret;
605 if (!ret)
606 continue;
607 }
Christopher Fauletf2824e62018-10-01 12:12:37 +0200608 if (!htx_apply_redirect_rule(rule, s, txn))
Christopher Faulete0768eb2018-10-03 16:38:02 +0200609 goto return_bad_req;
610 goto done;
611 }
612
613 /* POST requests may be accompanied with an "Expect: 100-Continue" header.
614 * If this happens, then the data will not come immediately, so we must
615 * send all what we have without waiting. Note that due to the small gain
616 * in waiting for the body of the request, it's easier to simply put the
617 * CF_SEND_DONTWAIT flag any time. It's a one-shot flag so it will remove
618 * itself once used.
619 */
620 req->flags |= CF_SEND_DONTWAIT;
621
622 done: /* done with this analyser, continue with next ones that the calling
623 * points will have set, if any.
624 */
625 req->analyse_exp = TICK_ETERNITY;
626 done_without_exp: /* done with this analyser, but dont reset the analyse_exp. */
627 req->analysers &= ~an_bit;
628 return 1;
629
630 tarpit:
631 /* Allow cookie logging
632 */
633 if (s->be->cookie_name || sess->fe->capture_name)
634 manage_client_side_cookies(s, req);
635
636 /* When a connection is tarpitted, we use the tarpit timeout,
637 * which may be the same as the connect timeout if unspecified.
638 * If unset, then set it to zero because we really want it to
639 * eventually expire. We build the tarpit as an analyser.
640 */
641 channel_erase(&s->req);
642
643 /* wipe the request out so that we can drop the connection early
644 * if the client closes first.
645 */
646 channel_dont_connect(req);
647
648 txn->status = http_err_codes[deny_status];
649
650 req->analysers &= AN_REQ_FLT_END; /* remove switching rules etc... */
651 req->analysers |= AN_REQ_HTTP_TARPIT;
652 req->analyse_exp = tick_add_ifset(now_ms, s->be->timeout.tarpit);
653 if (!req->analyse_exp)
654 req->analyse_exp = tick_add(now_ms, 0);
655 stream_inc_http_err_ctr(s);
656 HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
657 if (sess->fe != s->be)
658 HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
659 if (sess->listener->counters)
660 HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
661 goto done_without_exp;
662
663 deny: /* this request was blocked (denied) */
664
665 /* Allow cookie logging
666 */
667 if (s->be->cookie_name || sess->fe->capture_name)
668 manage_client_side_cookies(s, req);
669
670 txn->flags |= TX_CLDENY;
671 txn->status = http_err_codes[deny_status];
672 s->logs.tv_request = now;
673 http_reply_and_close(s, txn->status, http_error_message(s));
674 stream_inc_http_err_ctr(s);
675 HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
676 if (sess->fe != s->be)
677 HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
678 if (sess->listener->counters)
679 HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
680 goto return_prx_cond;
681
682 return_bad_req:
683 /* We centralize bad requests processing here */
684 if (unlikely(msg->msg_state == HTTP_MSG_ERROR) || msg->err_pos >= 0) {
685 /* we detected a parsing error. We want to archive this request
686 * in the dedicated proxy area for later troubleshooting.
687 */
688 http_capture_bad_message(sess->fe, s, msg, msg->err_state, sess->fe);
689 }
690
691 txn->req.err_state = txn->req.msg_state;
692 txn->req.msg_state = HTTP_MSG_ERROR;
693 txn->status = 400;
694 http_reply_and_close(s, txn->status, http_error_message(s));
695
696 HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
697 if (sess->listener->counters)
698 HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
699
700 return_prx_cond:
701 if (!(s->flags & SF_ERR_MASK))
702 s->flags |= SF_ERR_PRXCOND;
703 if (!(s->flags & SF_FINST_MASK))
704 s->flags |= SF_FINST_R;
705
706 req->analysers &= AN_REQ_FLT_END;
707 req->analyse_exp = TICK_ETERNITY;
708 return 0;
709
710 return_prx_yield:
711 channel_dont_connect(req);
712 return 0;
713}
714
715/* This function performs all the processing enabled for the current request.
716 * It returns 1 if the processing can continue on next analysers, or zero if it
717 * needs more data, encounters an error, or wants to immediately abort the
718 * request. It relies on buffers flags, and updates s->req.analysers.
719 */
720int htx_process_request(struct stream *s, struct channel *req, int an_bit)
721{
722 struct session *sess = s->sess;
723 struct http_txn *txn = s->txn;
724 struct http_msg *msg = &txn->req;
725 struct connection *cli_conn = objt_conn(strm_sess(s)->origin);
726
Christopher Faulet9768c262018-10-22 09:34:31 +0200727 // TODO: Disabled for now
728 req->analysers &= ~AN_REQ_FLT_XFER_DATA;
729 req->analysers |= AN_REQ_HTTP_XFER_BODY;
730 req->analyse_exp = TICK_ETERNITY;
731 req->analysers &= ~an_bit;
732 return 1;
733
Christopher Faulete0768eb2018-10-03 16:38:02 +0200734 if (unlikely(msg->msg_state < HTTP_MSG_BODY)) {
735 /* we need more data */
736 channel_dont_connect(req);
737 return 0;
738 }
739
740 DPRINTF(stderr,"[%u] %s: stream=%p b=%p, exp(r,w)=%u,%u bf=%08x bh=%lu analysers=%02x\n",
741 now_ms, __FUNCTION__,
742 s,
743 req,
744 req->rex, req->wex,
745 req->flags,
746 ci_data(req),
747 req->analysers);
748
749 /*
750 * Right now, we know that we have processed the entire headers
751 * and that unwanted requests have been filtered out. We can do
752 * whatever we want with the remaining request. Also, now we
753 * may have separate values for ->fe, ->be.
754 */
755
756 /*
757 * If HTTP PROXY is set we simply get remote server address parsing
758 * incoming request. Note that this requires that a connection is
759 * allocated on the server side.
760 */
761 if ((s->be->options & PR_O_HTTP_PROXY) && !(s->flags & SF_ADDR_SET)) {
762 struct connection *conn;
763 char *path;
764
765 /* Note that for now we don't reuse existing proxy connections */
766 if (unlikely((conn = cs_conn(si_alloc_cs(&s->si[1], NULL))) == NULL)) {
767 txn->req.err_state = txn->req.msg_state;
768 txn->req.msg_state = HTTP_MSG_ERROR;
769 txn->status = 500;
770 req->analysers &= AN_REQ_FLT_END;
771 http_reply_and_close(s, txn->status, http_error_message(s));
772
773 if (!(s->flags & SF_ERR_MASK))
774 s->flags |= SF_ERR_RESOURCE;
775 if (!(s->flags & SF_FINST_MASK))
776 s->flags |= SF_FINST_R;
777
778 return 0;
779 }
780
781 path = http_txn_get_path(txn);
782 if (url2sa(ci_head(req) + msg->sl.rq.u,
783 path ? path - (ci_head(req) + msg->sl.rq.u) : msg->sl.rq.u_l,
784 &conn->addr.to, NULL) == -1)
785 goto return_bad_req;
786
787 /* if the path was found, we have to remove everything between
788 * ci_head(req) + msg->sl.rq.u and path (excluded). If it was not
789 * found, we need to replace from ci_head(req) + msg->sl.rq.u for
790 * u_l characters by a single "/".
791 */
792 if (path) {
793 char *cur_ptr = ci_head(req);
794 char *cur_end = cur_ptr + txn->req.sl.rq.l;
795 int delta;
796
797 delta = b_rep_blk(&req->buf, cur_ptr + msg->sl.rq.u, path, NULL, 0);
798 http_msg_move_end(&txn->req, delta);
799 cur_end += delta;
800 if (http_parse_reqline(&txn->req, HTTP_MSG_RQMETH, cur_ptr, cur_end + 1, NULL, NULL) == NULL)
801 goto return_bad_req;
802 }
803 else {
804 char *cur_ptr = ci_head(req);
805 char *cur_end = cur_ptr + txn->req.sl.rq.l;
806 int delta;
807
808 delta = b_rep_blk(&req->buf, cur_ptr + msg->sl.rq.u,
809 cur_ptr + msg->sl.rq.u + msg->sl.rq.u_l, "/", 1);
810 http_msg_move_end(&txn->req, delta);
811 cur_end += delta;
812 if (http_parse_reqline(&txn->req, HTTP_MSG_RQMETH, cur_ptr, cur_end + 1, NULL, NULL) == NULL)
813 goto return_bad_req;
814 }
815 }
816
817 /*
818 * 7: Now we can work with the cookies.
819 * Note that doing so might move headers in the request, but
820 * the fields will stay coherent and the URI will not move.
821 * This should only be performed in the backend.
822 */
823 if (s->be->cookie_name || sess->fe->capture_name)
824 manage_client_side_cookies(s, req);
825
826 /* add unique-id if "header-unique-id" is specified */
827
828 if (!LIST_ISEMPTY(&sess->fe->format_unique_id) && !s->unique_id) {
829 if ((s->unique_id = pool_alloc(pool_head_uniqueid)) == NULL)
830 goto return_bad_req;
831 s->unique_id[0] = '\0';
832 build_logline(s, s->unique_id, UNIQUEID_LEN, &sess->fe->format_unique_id);
833 }
834
835 if (sess->fe->header_unique_id && s->unique_id) {
836 if (chunk_printf(&trash, "%s: %s", sess->fe->header_unique_id, s->unique_id) < 0)
837 goto return_bad_req;
838 if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.area, trash.data) < 0))
839 goto return_bad_req;
840 }
841
842 /*
843 * 9: add X-Forwarded-For if either the frontend or the backend
844 * asks for it.
845 */
846 if ((sess->fe->options | s->be->options) & PR_O_FWDFOR) {
847 struct hdr_ctx ctx = { .idx = 0 };
848 if (!((sess->fe->options | s->be->options) & PR_O_FF_ALWAYS) &&
849 http_find_header2(s->be->fwdfor_hdr_len ? s->be->fwdfor_hdr_name : sess->fe->fwdfor_hdr_name,
850 s->be->fwdfor_hdr_len ? s->be->fwdfor_hdr_len : sess->fe->fwdfor_hdr_len,
851 ci_head(req), &txn->hdr_idx, &ctx)) {
852 /* The header is set to be added only if none is present
853 * and we found it, so don't do anything.
854 */
855 }
856 else if (cli_conn && cli_conn->addr.from.ss_family == AF_INET) {
857 /* Add an X-Forwarded-For header unless the source IP is
858 * in the 'except' network range.
859 */
860 if ((!sess->fe->except_mask.s_addr ||
861 (((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr.s_addr & sess->fe->except_mask.s_addr)
862 != sess->fe->except_net.s_addr) &&
863 (!s->be->except_mask.s_addr ||
864 (((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr.s_addr & s->be->except_mask.s_addr)
865 != s->be->except_net.s_addr)) {
866 int len;
867 unsigned char *pn;
868 pn = (unsigned char *)&((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr;
869
870 /* Note: we rely on the backend to get the header name to be used for
871 * x-forwarded-for, because the header is really meant for the backends.
872 * However, if the backend did not specify any option, we have to rely
873 * on the frontend's header name.
874 */
875 if (s->be->fwdfor_hdr_len) {
876 len = s->be->fwdfor_hdr_len;
877 memcpy(trash.area,
878 s->be->fwdfor_hdr_name, len);
879 } else {
880 len = sess->fe->fwdfor_hdr_len;
881 memcpy(trash.area,
882 sess->fe->fwdfor_hdr_name, len);
883 }
884 len += snprintf(trash.area + len,
885 trash.size - len,
886 ": %d.%d.%d.%d", pn[0], pn[1],
887 pn[2], pn[3]);
888
889 if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.area, len) < 0))
890 goto return_bad_req;
891 }
892 }
893 else if (cli_conn && cli_conn->addr.from.ss_family == AF_INET6) {
894 /* FIXME: for the sake of completeness, we should also support
895 * 'except' here, although it is mostly useless in this case.
896 */
897 int len;
898 char pn[INET6_ADDRSTRLEN];
899 inet_ntop(AF_INET6,
900 (const void *)&((struct sockaddr_in6 *)(&cli_conn->addr.from))->sin6_addr,
901 pn, sizeof(pn));
902
903 /* Note: we rely on the backend to get the header name to be used for
904 * x-forwarded-for, because the header is really meant for the backends.
905 * However, if the backend did not specify any option, we have to rely
906 * on the frontend's header name.
907 */
908 if (s->be->fwdfor_hdr_len) {
909 len = s->be->fwdfor_hdr_len;
910 memcpy(trash.area, s->be->fwdfor_hdr_name,
911 len);
912 } else {
913 len = sess->fe->fwdfor_hdr_len;
914 memcpy(trash.area, sess->fe->fwdfor_hdr_name,
915 len);
916 }
917 len += snprintf(trash.area + len, trash.size - len,
918 ": %s", pn);
919
920 if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.area, len) < 0))
921 goto return_bad_req;
922 }
923 }
924
925 /*
926 * 10: add X-Original-To if either the frontend or the backend
927 * asks for it.
928 */
929 if ((sess->fe->options | s->be->options) & PR_O_ORGTO) {
930
931 /* FIXME: don't know if IPv6 can handle that case too. */
932 if (cli_conn && cli_conn->addr.from.ss_family == AF_INET) {
933 /* Add an X-Original-To header unless the destination IP is
934 * in the 'except' network range.
935 */
936 conn_get_to_addr(cli_conn);
937
938 if (cli_conn->addr.to.ss_family == AF_INET &&
939 ((!sess->fe->except_mask_to.s_addr ||
940 (((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr.s_addr & sess->fe->except_mask_to.s_addr)
941 != sess->fe->except_to.s_addr) &&
942 (!s->be->except_mask_to.s_addr ||
943 (((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr.s_addr & s->be->except_mask_to.s_addr)
944 != s->be->except_to.s_addr))) {
945 int len;
946 unsigned char *pn;
947 pn = (unsigned char *)&((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr;
948
949 /* Note: we rely on the backend to get the header name to be used for
950 * x-original-to, because the header is really meant for the backends.
951 * However, if the backend did not specify any option, we have to rely
952 * on the frontend's header name.
953 */
954 if (s->be->orgto_hdr_len) {
955 len = s->be->orgto_hdr_len;
956 memcpy(trash.area,
957 s->be->orgto_hdr_name, len);
958 } else {
959 len = sess->fe->orgto_hdr_len;
960 memcpy(trash.area,
961 sess->fe->orgto_hdr_name, len);
962 }
963 len += snprintf(trash.area + len,
964 trash.size - len,
965 ": %d.%d.%d.%d", pn[0], pn[1],
966 pn[2], pn[3]);
967
968 if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.area, len) < 0))
969 goto return_bad_req;
970 }
971 }
Christopher Faulete0768eb2018-10-03 16:38:02 +0200972 }
973
Christopher Faulete0768eb2018-10-03 16:38:02 +0200974 /* If we have no server assigned yet and we're balancing on url_param
975 * with a POST request, we may be interested in checking the body for
976 * that parameter. This will be done in another analyser.
977 */
978 if (!(s->flags & (SF_ASSIGNED|SF_DIRECT)) &&
979 s->txn->meth == HTTP_METH_POST && s->be->url_param_name != NULL &&
980 (msg->flags & (HTTP_MSGF_CNT_LEN|HTTP_MSGF_TE_CHNK))) {
981 channel_dont_connect(req);
982 req->analysers |= AN_REQ_HTTP_BODY;
983 }
984
985 req->analysers &= ~AN_REQ_FLT_XFER_DATA;
986 req->analysers |= AN_REQ_HTTP_XFER_BODY;
987#ifdef TCP_QUICKACK
988 /* We expect some data from the client. Unless we know for sure
989 * we already have a full request, we have to re-enable quick-ack
990 * in case we previously disabled it, otherwise we might cause
991 * the client to delay further data.
992 */
993 if ((sess->listener->options & LI_O_NOQUICKACK) &&
994 cli_conn && conn_ctrl_ready(cli_conn) &&
995 ((msg->flags & HTTP_MSGF_TE_CHNK) ||
996 (msg->body_len > ci_data(req) - txn->req.eoh - 2)))
997 setsockopt(cli_conn->handle.fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
998#endif
999
1000 /*************************************************************
1001 * OK, that's finished for the headers. We have done what we *
1002 * could. Let's switch to the DATA state. *
1003 ************************************************************/
1004 req->analyse_exp = TICK_ETERNITY;
1005 req->analysers &= ~an_bit;
1006
1007 s->logs.tv_request = now;
1008 /* OK let's go on with the BODY now */
1009 return 1;
1010
1011 return_bad_req: /* let's centralize all bad requests */
1012 if (unlikely(msg->msg_state == HTTP_MSG_ERROR) || msg->err_pos >= 0) {
1013 /* we detected a parsing error. We want to archive this request
1014 * in the dedicated proxy area for later troubleshooting.
1015 */
1016 http_capture_bad_message(sess->fe, s, msg, msg->err_state, sess->fe);
1017 }
1018
1019 txn->req.err_state = txn->req.msg_state;
1020 txn->req.msg_state = HTTP_MSG_ERROR;
1021 txn->status = 400;
1022 req->analysers &= AN_REQ_FLT_END;
1023 http_reply_and_close(s, txn->status, http_error_message(s));
1024
1025 HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
1026 if (sess->listener->counters)
1027 HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
1028
1029 if (!(s->flags & SF_ERR_MASK))
1030 s->flags |= SF_ERR_PRXCOND;
1031 if (!(s->flags & SF_FINST_MASK))
1032 s->flags |= SF_FINST_R;
1033 return 0;
1034}
1035
/* This function is an analyser which processes the HTTP tarpit. It always
 * returns zero, at the beginning because it prevents any other processing
 * from occurring, and at the end because it terminates the request.
 *
 * NOTE(review): in HTX mode this analyser is currently a stub (see the early
 * return below). The legacy HTTP/1 implementation is kept after the return as
 * a reference for the ongoing HTX conversion and is unreachable for now.
 */
int htx_process_tarpit(struct stream *s, struct channel *req, int an_bit)
{
	struct http_txn *txn = s->txn;

	// TODO: Disabled for now
	/* Pretend the analysis succeeded: clear our analyser bit and let the
	 * stream continue. Everything below this return is dead code until the
	 * tarpit logic is ported to HTX.
	 */
	req->analyse_exp = TICK_ETERNITY;
	req->analysers &= ~an_bit;
	return 1;

	/* This connection is being tarpitted. The CLIENT side has
	 * already set the connect expiration date to the right
	 * timeout. We just have to check that the client is still
	 * there and that the timeout has not expired.
	 */
	channel_dont_connect(req);
	if ((req->flags & (CF_SHUTR|CF_READ_ERROR)) == 0 &&
	    !tick_is_expired(req->analyse_exp, now_ms))
		return 0;

	/* We will set the queue timer to the time spent, just for
	 * logging purposes. We fake a 500 server error, so that the
	 * attacker will not suspect his connection has been tarpitted.
	 * It will not cause trouble to the logs because we can exclude
	 * the tarpitted connections by filtering on the 'PT' status flags.
	 */
	s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);

	/* on a read error the client is already gone, no point replying */
	if (!(req->flags & CF_READ_ERROR))
		http_reply_and_close(s, txn->status, http_error_message(s));

	req->analysers &= AN_REQ_FLT_END;
	req->analyse_exp = TICK_ETERNITY;

	if (!(s->flags & SF_ERR_MASK))
		s->flags |= SF_ERR_PRXCOND;
	if (!(s->flags & SF_FINST_MASK))
		s->flags |= SF_FINST_T;
	return 0;
}
1079
/* This function is an analyser which waits for the HTTP request body. It waits
 * for either the buffer to be full, or the full advertised contents to have
 * reached the buffer. It must only be called after the standard HTTP request
 * processing has occurred, because it expects the request to be parsed and will
 * look for the Expect header. It may send a 100-Continue interim response. It
 * takes in input any state starting from HTTP_MSG_BODY and leaves with one of
 * HTTP_MSG_CHK_SIZE, HTTP_MSG_DATA or HTTP_MSG_TRAILERS. It returns zero if it
 * needs to read more data, or 1 once it has completed its analysis.
 *
 * NOTE(review): in HTX mode this analyser is currently a stub (see the early
 * return below). The legacy HTTP/1 implementation is kept after the return as
 * a reference for the ongoing HTX conversion and is unreachable for now.
 */
int htx_wait_for_request_body(struct stream *s, struct channel *req, int an_bit)
{
	struct session *sess = s->sess;
	struct http_txn *txn = s->txn;
	struct http_msg *msg = &s->txn->req;

	// TODO: Disabled for now
	/* Pretend the analysis succeeded: clear our analyser bit and let the
	 * stream continue. Everything below this return is dead code until the
	 * body-waiting logic is ported to HTX.
	 */
	req->analyse_exp = TICK_ETERNITY;
	req->analysers &= ~an_bit;
	return 1;

	/* We have to parse the HTTP request body to find any required data.
	 * "balance url_param check_post" should have been the only way to get
	 * into this. We were brought here after HTTP header analysis, so all
	 * related structures are ready.
	 */

	if (msg->msg_state < HTTP_MSG_CHUNK_SIZE) {
		/* This is the first call */
		if (msg->msg_state < HTTP_MSG_BODY)
			goto missing_data;

		if (msg->msg_state < HTTP_MSG_100_SENT) {
			/* If we have HTTP/1.1 and Expect: 100-continue, then we must
			 * send an HTTP/1.1 100 Continue intermediate response.
			 */
			if (msg->flags & HTTP_MSGF_VER_11) {
				struct hdr_ctx ctx;
				ctx.idx = 0;
				/* Expect is allowed in 1.1, look for it */
				if (http_find_header2("Expect", 6, ci_head(req), &txn->hdr_idx, &ctx) &&
				    unlikely(ctx.vlen == 12 && strncasecmp(ctx.line+ctx.val, "100-continue", 12) == 0)) {
					/* reply 100-continue and strip the Expect
					 * header so it is not forwarded upstream.
					 */
					co_inject(&s->res, HTTP_100.ptr, HTTP_100.len);
					http_remove_header2(&txn->req, &txn->hdr_idx, &ctx);
				}
			}
			msg->msg_state = HTTP_MSG_100_SENT;
		}

		/* we have msg->sov which points to the first byte of message body.
		 * ci_head(req) still points to the beginning of the message. We
		 * must save the body in msg->next because it survives buffer
		 * re-alignments.
		 */
		msg->next = msg->sov;

		if (msg->flags & HTTP_MSGF_TE_CHNK)
			msg->msg_state = HTTP_MSG_CHUNK_SIZE;
		else
			msg->msg_state = HTTP_MSG_DATA;
	}

	if (!(msg->flags & HTTP_MSGF_TE_CHNK)) {
		/* We're in content-length mode, we just have to wait for enough data. */
		if (http_body_bytes(msg) < msg->body_len)
			goto missing_data;

		/* OK we have everything we need now */
		goto http_end;
	}

	/* OK here we're parsing a chunked-encoded message */

	if (msg->msg_state == HTTP_MSG_CHUNK_SIZE) {
		/* read the chunk size and assign it to ->chunk_len, then
		 * set ->sov and ->next to point to the body and switch to DATA or
		 * TRAILERS state.
		 */
		unsigned int chunk;
		int ret = h1_parse_chunk_size(&req->buf, co_data(req) + msg->next, c_data(req), &chunk);

		if (!ret)
			goto missing_data;
		else if (ret < 0) {
			/* negative return carries the relative error offset;
			 * wrap it into the buffer to record the error position.
			 */
			msg->err_pos = ci_data(req) + ret;
			if (msg->err_pos < 0)
				msg->err_pos += req->buf.size;
			stream_inc_http_err_ctr(s);
			goto return_bad_req;
		}

		msg->chunk_len = chunk;
		msg->body_len += chunk;

		msg->sol = ret;
		msg->next += ret;
		/* a zero-sized chunk means we reached the trailers */
		msg->msg_state = msg->chunk_len ? HTTP_MSG_DATA : HTTP_MSG_TRAILERS;
	}

	/* Now we're in HTTP_MSG_DATA or HTTP_MSG_TRAILERS state.
	 * The first data byte is in msg->sov + msg->sol. We're waiting
	 * for at least a whole chunk or the whole content length bytes after
	 * msg->sov + msg->sol.
	 */
	if (msg->msg_state == HTTP_MSG_TRAILERS)
		goto http_end;

	if (http_body_bytes(msg) >= msg->body_len)   /* we have enough bytes now */
		goto http_end;

 missing_data:
	/* we get here if we need to wait for more data. If the buffer is full,
	 * we have the maximum we can expect.
	 */
	if (channel_full(req, global.tune.maxrewrite))
		goto http_end;

	if ((req->flags & CF_READ_TIMEOUT) || tick_is_expired(req->analyse_exp, now_ms)) {
		/* client took too long to send the body: 408 Request Timeout */
		txn->status = 408;
		http_reply_and_close(s, txn->status, http_error_message(s));

		if (!(s->flags & SF_ERR_MASK))
			s->flags |= SF_ERR_CLITO;
		if (!(s->flags & SF_FINST_MASK))
			s->flags |= SF_FINST_D;
		goto return_err_msg;
	}

	/* we get here if we need to wait for more data */
	if (!(req->flags & (CF_SHUTR | CF_READ_ERROR))) {
		/* Not enough data. We'll re-use the http-request
		 * timeout here. Ideally, we should set the timeout
		 * relative to the accept() date. We just set the
		 * request timeout once at the beginning of the
		 * request.
		 */
		channel_dont_connect(req);
		if (!tick_isset(req->analyse_exp))
			req->analyse_exp = tick_add_ifset(now_ms, s->be->timeout.httpreq);
		return 0;
	}

 http_end:
	/* The situation will not evolve, so let's give up on the analysis. */
	s->logs.tv_request = now;  /* update the request timer to reflect full request */
	req->analysers &= ~an_bit;
	req->analyse_exp = TICK_ETERNITY;
	return 1;

 return_bad_req: /* let's centralize all bad requests */
	txn->req.err_state = txn->req.msg_state;
	txn->req.msg_state = HTTP_MSG_ERROR;
	txn->status = 400;
	http_reply_and_close(s, txn->status, http_error_message(s));

	if (!(s->flags & SF_ERR_MASK))
		s->flags |= SF_ERR_PRXCOND;
	if (!(s->flags & SF_FINST_MASK))
		s->flags |= SF_FINST_R;
	/* fall through to account the failed request */

 return_err_msg:
	req->analysers &= AN_REQ_FLT_END;
	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
	if (sess->listener->counters)
		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
	return 0;
}
1246
/* This function is an analyser which forwards request body (including chunk
 * sizes if any). It is called as soon as we must forward, even if we forward
 * zero byte. The only situation where it must not be called is when we're in
 * tunnel mode and we want to forward till the close. It's used both to forward
 * remaining data and to resync after end of body. It expects the msg_state to
 * be between MSG_BODY and MSG_DONE (inclusive). It returns zero if it needs to
 * read more data, or 1 once we can go on with next request or end the stream.
 * When in MSG_DATA or MSG_TRAILERS, it will automatically forward chunk_len
 * bytes of pending data + the headers if not already done.
 */
int htx_request_forward_body(struct stream *s, struct channel *req, int an_bit)
{
	struct session *sess = s->sess;
	struct http_txn *txn = s->txn;
	struct http_msg *msg = &txn->req;
	struct htx *htx;
	//int ret;

	DPRINTF(stderr,"[%u] %s: stream=%p b=%p, exp(r,w)=%u,%u bf=%08x bh=%lu analysers=%02x\n",
		now_ms, __FUNCTION__,
		s,
		req,
		req->rex, req->wex,
		req->flags,
		ci_data(req),
		req->analysers);

	/* the request channel's buffer holds an HTX message in this mode */
	htx = htx_from_buf(&req->buf);

	if ((req->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
	    ((req->flags & CF_SHUTW) && (req->to_forward || co_data(req)))) {
		/* Output closed while we were sending data. We must abort and
		 * wake the other side up.
		 */
		msg->err_state = msg->msg_state;
		msg->msg_state = HTTP_MSG_ERROR;
		htx_end_request(s);
		htx_end_response(s);
		return 1;
	}

	/* Note that we don't have to send 100-continue back because we don't
	 * need the data to complete our job, and it's up to the server to
	 * decide whether to return 100, 417 or anything else in return of
	 * an "Expect: 100-continue" header.
	 */
	if (msg->msg_state == HTTP_MSG_BODY)
		msg->msg_state = HTTP_MSG_DATA;

	/* Some post-connect processing might want us to refrain from starting to
	 * forward data. Currently, the only reason for this is "balance url_param"
	 * which needs to parse/process the request after we've enabled forwarding.
	 */
	if (unlikely(msg->flags & HTTP_MSGF_WAIT_CONN)) {
		if (!(s->res.flags & CF_READ_ATTACHED)) {
			channel_auto_connect(req);
			req->flags |= CF_WAKE_CONNECT;
			channel_dont_close(req); /* don't fail on early shutr */
			goto waiting;
		}
		msg->flags &= ~HTTP_MSGF_WAIT_CONN;
	}

	/* in most states, we should abort in case of early close */
	channel_auto_close(req);

	if (req->to_forward) {
		/* We can't process the buffer's contents yet */
		req->flags |= CF_WAKE_WRITE;
		goto missing_data_or_waiting;
	}

	if (msg->msg_state >= HTTP_MSG_DONE)
		goto done;

	/* Forward all input data. We get it by removing all outgoing data not
	 * forwarded yet from HTX data size.
	 */
	c_adv(req, htx->data - co_data(req));

	/* To let the function channel_forward work as expected we must update
	 * the channel's buffer to pretend there is no more input data. The
	 * right length is then restored. We must do that, because when an HTX
	 * message is stored into a buffer, it appears as full.
	 */
	b_set_data(&req->buf, co_data(req));
	/* htx->extra == ULLONG_MAX means the remaining body size is unknown */
	if (htx->extra != ULLONG_MAX)
		htx->extra -= channel_forward(req, htx->extra);
	b_set_data(&req->buf, b_size(&req->buf));

	/* Check if the end-of-message is reached and if so, switch the message
	 * in HTTP_MSG_DONE state.
	 */
	if (htx_get_tail_type(htx) != HTX_BLK_EOM)
		goto missing_data_or_waiting;

	msg->msg_state = HTTP_MSG_DONE;

  done:
	/* other states, DONE...TUNNEL */
	/* we don't want to forward closes on DONE except in tunnel mode. */
	if ((txn->flags & TX_CON_WANT_MSK) != TX_CON_WANT_TUN)
		channel_dont_close(req);

	htx_end_request(s);
	if (!(req->analysers & an_bit)) {
		/* our analyser bit was cleared by htx_end_request(): the
		 * request side is finished, sync the response side too.
		 */
		htx_end_response(s);
		if (unlikely(msg->msg_state == HTTP_MSG_ERROR)) {
			if (req->flags & CF_SHUTW) {
				/* request errors are most likely due to the
				 * server aborting the transfer. */
				goto aborted_xfer;
			}
			goto return_bad_req;
		}
		return 1;
	}

	/* If "option abortonclose" is set on the backend, we want to monitor
	 * the client's connection and forward any shutdown notification to the
	 * server, which will decide whether to close or to go on processing the
	 * request. We only do that in tunnel mode, and not in other modes since
	 * it can be abused to exhaust source ports. */
	if ((s->be->options & PR_O_ABRT_CLOSE) && !(s->si[0].flags & SI_FL_CLEAN_ABRT)) {
		channel_auto_read(req);
		if ((req->flags & (CF_SHUTR|CF_READ_NULL)) &&
		    ((txn->flags & TX_CON_WANT_MSK) != TX_CON_WANT_TUN))
			s->si[1].flags |= SI_FL_NOLINGER;
		channel_auto_close(req);
	}
	else if (s->txn->meth == HTTP_METH_POST) {
		/* POST requests may require to read extra CRLF sent by broken
		 * browsers and which could cause an RST to be sent upon close
		 * on some systems (eg: Linux). */
		channel_auto_read(req);
	}
	return 0;

 missing_data_or_waiting:
	/* stop waiting for data if the input is closed before the end */
	if (msg->msg_state < HTTP_MSG_DONE && req->flags & CF_SHUTR) {
		if (!(s->flags & SF_ERR_MASK))
			s->flags |= SF_ERR_CLICL;
		if (!(s->flags & SF_FINST_MASK)) {
			if (txn->rsp.msg_state < HTTP_MSG_ERROR)
				s->flags |= SF_FINST_H;
			else
				s->flags |= SF_FINST_D;
		}

		/* account the client abort on frontend, backend and server */
		HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
		HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
		if (objt_server(s->target))
			HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);

		goto return_bad_req_stats_ok;
	}

 waiting:
	/* waiting for the last bits to leave the buffer */
	if (req->flags & CF_SHUTW)
		goto aborted_xfer;


	/* When TE: chunked is used, we need to get there again to parse remaining
	 * chunks even if the client has closed, so we don't want to set CF_DONTCLOSE.
	 * And when content-length is used, we never want to let the possible
	 * shutdown be forwarded to the other side, as the state machine will
	 * take care of it once the client responds. It's also important to
	 * prevent TIME_WAITs from accumulating on the backend side, and for
	 * HTTP/2 where the last frame comes with a shutdown.
	 */
	if (msg->flags & HTTP_MSGF_XFER_LEN)
		channel_dont_close(req);

#if 0 // FIXME [Cf]: Probably not required now, but I need more time to think
      // about if

	/* We know that more data are expected, but we couldn't send more that
	 * what we did. So we always set the CF_EXPECT_MORE flag so that the
	 * system knows it must not set a PUSH on this first part. Interactive
	 * modes are already handled by the stream sock layer. We must not do
	 * this in content-length mode because it could present the MSG_MORE
	 * flag with the last block of forwarded data, which would cause an
	 * additional delay to be observed by the receiver.
	 */
	if (msg->flags & HTTP_MSGF_TE_CHNK)
		req->flags |= CF_EXPECT_MORE;
#endif

	return 0;

 return_bad_req: /* let's centralize all bad requests */
	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
	if (sess->listener->counters)
		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
	/* fall through: stats already accounted */

 return_bad_req_stats_ok:
	txn->req.err_state = txn->req.msg_state;
	txn->req.msg_state = HTTP_MSG_ERROR;
	if (txn->status > 0) {
		/* Note: we don't send any error if some data were already sent */
		htx_reply_and_close(s, txn->status, NULL);
	} else {
		txn->status = 400;
		htx_reply_and_close(s, txn->status, http_error_message(s));
	}
	req->analysers &= AN_REQ_FLT_END;
	s->res.analysers &= AN_RES_FLT_END; /* we're in data phase, we want to abort both directions */

	if (!(s->flags & SF_ERR_MASK))
		s->flags |= SF_ERR_PRXCOND;
	if (!(s->flags & SF_FINST_MASK)) {
		if (txn->rsp.msg_state < HTTP_MSG_ERROR)
			s->flags |= SF_FINST_H;
		else
			s->flags |= SF_FINST_D;
	}
	return 0;

 aborted_xfer:
	txn->req.err_state = txn->req.msg_state;
	txn->req.msg_state = HTTP_MSG_ERROR;
	if (txn->status > 0) {
		/* Note: we don't send any error if some data were already sent */
		htx_reply_and_close(s, txn->status, NULL);
	} else {
		txn->status = 502;
		htx_reply_and_close(s, txn->status, http_error_message(s));
	}
	req->analysers &= AN_REQ_FLT_END;
	s->res.analysers &= AN_RES_FLT_END; /* we're in data phase, we want to abort both directions */

	/* account the server abort on frontend, backend and server */
	HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
	HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
	if (objt_server(s->target))
		HA_ATOMIC_ADD(&objt_server(s->target)->counters.srv_aborts, 1);

	if (!(s->flags & SF_ERR_MASK))
		s->flags |= SF_ERR_SRVCL;
	if (!(s->flags & SF_FINST_MASK)) {
		if (txn->rsp.msg_state < HTTP_MSG_ERROR)
			s->flags |= SF_FINST_H;
		else
			s->flags |= SF_FINST_D;
	}
	return 0;
}
1495
1496/* This stream analyser waits for a complete HTTP response. It returns 1 if the
1497 * processing can continue on next analysers, or zero if it either needs more
1498 * data or wants to immediately abort the response (eg: timeout, error, ...). It
1499 * is tied to AN_RES_WAIT_HTTP and may may remove itself from s->res.analysers
1500 * when it has nothing left to do, and may remove any analyser when it wants to
1501 * abort.
1502 */
1503int htx_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
1504{
Christopher Faulet9768c262018-10-22 09:34:31 +02001505 /*
1506 * We will analyze a complete HTTP response to check the its syntax.
1507 *
1508 * Once the start line and all headers are received, we may perform a
1509 * capture of the error (if any), and we will set a few fields. We also
1510 * logging and finally headers capture.
1511 */
Christopher Faulete0768eb2018-10-03 16:38:02 +02001512 struct session *sess = s->sess;
1513 struct http_txn *txn = s->txn;
1514 struct http_msg *msg = &txn->rsp;
Christopher Faulet9768c262018-10-22 09:34:31 +02001515 struct htx *htx;
1516 union h1_sl sl;
Christopher Faulete0768eb2018-10-03 16:38:02 +02001517 int n;
1518
1519 DPRINTF(stderr,"[%u] %s: stream=%p b=%p, exp(r,w)=%u,%u bf=%08x bh=%lu analysers=%02x\n",
1520 now_ms, __FUNCTION__,
1521 s,
1522 rep,
1523 rep->rex, rep->wex,
1524 rep->flags,
1525 ci_data(rep),
1526 rep->analysers);
1527
Christopher Faulet9768c262018-10-22 09:34:31 +02001528 htx = htx_from_buf(&rep->buf);
Christopher Faulete0768eb2018-10-03 16:38:02 +02001529
1530 /*
1531 * Now we quickly check if we have found a full valid response.
1532 * If not so, we check the FD and buffer states before leaving.
1533 * A full response is indicated by the fact that we have seen
1534 * the double LF/CRLF, so the state is >= HTTP_MSG_BODY. Invalid
1535 * responses are checked first.
1536 *
1537 * Depending on whether the client is still there or not, we
1538 * may send an error response back or not. Note that normally
1539 * we should only check for HTTP status there, and check I/O
1540 * errors somewhere else.
1541 */
Christopher Faulet9768c262018-10-22 09:34:31 +02001542 if (unlikely(htx_is_empty(htx) || htx_get_tail_type(htx) < HTX_BLK_EOH)) {
1543 /* 1: have we encountered a read error ? */
1544 if (rep->flags & CF_READ_ERROR) {
1545 if (txn->flags & TX_NOT_FIRST)
Christopher Faulete0768eb2018-10-03 16:38:02 +02001546 goto abort_keep_alive;
1547
1548 HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
1549 if (objt_server(s->target)) {
1550 HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_resp, 1);
1551 health_adjust(objt_server(s->target), HANA_STATUS_HTTP_READ_ERROR);
1552 }
1553
Christopher Faulete0768eb2018-10-03 16:38:02 +02001554 rep->analysers &= AN_RES_FLT_END;
1555 txn->status = 502;
1556
1557 /* Check to see if the server refused the early data.
1558 * If so, just send a 425
1559 */
1560 if (objt_cs(s->si[1].end)) {
1561 struct connection *conn = objt_cs(s->si[1].end)->conn;
1562
1563 if (conn->err_code == CO_ER_SSL_EARLY_FAILED)
1564 txn->status = 425;
1565 }
1566
1567 s->si[1].flags |= SI_FL_NOLINGER;
Christopher Faulet9768c262018-10-22 09:34:31 +02001568 htx_reply_and_close(s, txn->status, http_error_message(s));
Christopher Faulete0768eb2018-10-03 16:38:02 +02001569
1570 if (!(s->flags & SF_ERR_MASK))
1571 s->flags |= SF_ERR_SRVCL;
1572 if (!(s->flags & SF_FINST_MASK))
1573 s->flags |= SF_FINST_H;
1574 return 0;
1575 }
1576
Christopher Faulet9768c262018-10-22 09:34:31 +02001577 /* 2: read timeout : return a 504 to the client. */
Christopher Faulete0768eb2018-10-03 16:38:02 +02001578 else if (rep->flags & CF_READ_TIMEOUT) {
Christopher Faulete0768eb2018-10-03 16:38:02 +02001579 HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
1580 if (objt_server(s->target)) {
1581 HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_resp, 1);
1582 health_adjust(objt_server(s->target), HANA_STATUS_HTTP_READ_TIMEOUT);
1583 }
1584
Christopher Faulete0768eb2018-10-03 16:38:02 +02001585 rep->analysers &= AN_RES_FLT_END;
1586 txn->status = 504;
1587 s->si[1].flags |= SI_FL_NOLINGER;
Christopher Faulet9768c262018-10-22 09:34:31 +02001588 htx_reply_and_close(s, txn->status, http_error_message(s));
Christopher Faulete0768eb2018-10-03 16:38:02 +02001589
1590 if (!(s->flags & SF_ERR_MASK))
1591 s->flags |= SF_ERR_SRVTO;
1592 if (!(s->flags & SF_FINST_MASK))
1593 s->flags |= SF_FINST_H;
1594 return 0;
1595 }
1596
Christopher Faulet9768c262018-10-22 09:34:31 +02001597 /* 3: client abort with an abortonclose */
Christopher Faulete0768eb2018-10-03 16:38:02 +02001598 else if ((rep->flags & CF_SHUTR) && ((s->req.flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))) {
1599 HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
1600 HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
1601 if (objt_server(s->target))
1602 HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
1603
1604 rep->analysers &= AN_RES_FLT_END;
Christopher Faulete0768eb2018-10-03 16:38:02 +02001605 txn->status = 400;
Christopher Faulet9768c262018-10-22 09:34:31 +02001606 htx_reply_and_close(s, txn->status, http_error_message(s));
Christopher Faulete0768eb2018-10-03 16:38:02 +02001607
1608 if (!(s->flags & SF_ERR_MASK))
1609 s->flags |= SF_ERR_CLICL;
1610 if (!(s->flags & SF_FINST_MASK))
1611 s->flags |= SF_FINST_H;
1612
1613 /* process_stream() will take care of the error */
1614 return 0;
1615 }
1616
Christopher Faulet9768c262018-10-22 09:34:31 +02001617 /* 4: close from server, capture the response if the server has started to respond */
Christopher Faulete0768eb2018-10-03 16:38:02 +02001618 else if (rep->flags & CF_SHUTR) {
Christopher Faulet9768c262018-10-22 09:34:31 +02001619 if (txn->flags & TX_NOT_FIRST)
Christopher Faulete0768eb2018-10-03 16:38:02 +02001620 goto abort_keep_alive;
1621
1622 HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
1623 if (objt_server(s->target)) {
1624 HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_resp, 1);
1625 health_adjust(objt_server(s->target), HANA_STATUS_HTTP_BROKEN_PIPE);
1626 }
1627
Christopher Faulete0768eb2018-10-03 16:38:02 +02001628 rep->analysers &= AN_RES_FLT_END;
1629 txn->status = 502;
1630 s->si[1].flags |= SI_FL_NOLINGER;
Christopher Faulet9768c262018-10-22 09:34:31 +02001631 htx_reply_and_close(s, txn->status, http_error_message(s));
Christopher Faulete0768eb2018-10-03 16:38:02 +02001632
1633 if (!(s->flags & SF_ERR_MASK))
1634 s->flags |= SF_ERR_SRVCL;
1635 if (!(s->flags & SF_FINST_MASK))
1636 s->flags |= SF_FINST_H;
1637 return 0;
1638 }
1639
Christopher Faulet9768c262018-10-22 09:34:31 +02001640 /* 5: write error to client (we don't send any message then) */
Christopher Faulete0768eb2018-10-03 16:38:02 +02001641 else if (rep->flags & CF_WRITE_ERROR) {
Christopher Faulet9768c262018-10-22 09:34:31 +02001642 if (txn->flags & TX_NOT_FIRST)
Christopher Faulete0768eb2018-10-03 16:38:02 +02001643 goto abort_keep_alive;
1644
1645 HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
1646 rep->analysers &= AN_RES_FLT_END;
Christopher Faulete0768eb2018-10-03 16:38:02 +02001647
1648 if (!(s->flags & SF_ERR_MASK))
1649 s->flags |= SF_ERR_CLICL;
1650 if (!(s->flags & SF_FINST_MASK))
1651 s->flags |= SF_FINST_H;
1652
1653 /* process_stream() will take care of the error */
1654 return 0;
1655 }
1656
1657 channel_dont_close(rep);
1658 rep->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
1659 return 0;
1660 }
1661
1662 /* More interesting part now : we know that we have a complete
1663 * response which at least looks like HTTP. We have an indicator
1664 * of each header's length, so we can parse them quickly.
1665 */
1666
Christopher Faulet9768c262018-10-22 09:34:31 +02001667 msg->msg_state = HTTP_MSG_BODY;
Christopher Faulete0768eb2018-10-03 16:38:02 +02001668
Christopher Faulet9768c262018-10-22 09:34:31 +02001669 /* 0: we might have to print this header in debug mode */
1670 if (unlikely((global.mode & MODE_DEBUG) &&
1671 (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
1672 int32_t pos;
1673
1674 htx_debug_stline("srvrep", s, http_find_stline(htx));
1675
1676 for (pos = htx_get_head(htx); pos != -1; pos = htx_get_next(htx, pos)) {
1677 struct htx_blk *blk = htx_get_blk(htx, pos);
1678 enum htx_blk_type type = htx_get_blk_type(blk);
1679
1680 if (type == HTX_BLK_EOH)
1681 break;
1682 if (type != HTX_BLK_HDR)
1683 continue;
1684
1685 htx_debug_hdr("srvhdr", s,
1686 htx_get_blk_name(htx, blk),
1687 htx_get_blk_value(htx, blk));
1688 }
1689 }
1690
1691 /* 1: get the status code */
1692 sl = http_find_stline(htx);
1693 txn->status = sl.st.status;
1694 if (htx->extra != ULLONG_MAX)
1695 msg->flags |= HTTP_MSGF_XFER_LEN;
1696
1697 /* ... and check if the request is HTTP/1.1 or above */
1698 if ((sl.st.v.len == 8) &&
1699 ((*(sl.st.v.ptr + 5) > '1') ||
1700 ((*(sl.st.v.ptr + 5) == '1') && (*(sl.st.v.ptr + 7) >= '1'))))
1701 msg->flags |= HTTP_MSGF_VER_11;
1702
1703 n = txn->status / 100;
Christopher Faulete0768eb2018-10-03 16:38:02 +02001704 if (n < 1 || n > 5)
1705 n = 0;
Christopher Faulet9768c262018-10-22 09:34:31 +02001706
Christopher Faulete0768eb2018-10-03 16:38:02 +02001707 /* when the client triggers a 4xx from the server, it's most often due
1708 * to a missing object or permission. These events should be tracked
1709 * because if they happen often, it may indicate a brute force or a
1710 * vulnerability scan.
1711 */
1712 if (n == 4)
1713 stream_inc_http_err_ctr(s);
1714
1715 if (objt_server(s->target))
1716 HA_ATOMIC_ADD(&objt_server(s->target)->counters.p.http.rsp[n], 1);
1717
Christopher Faulete0768eb2018-10-03 16:38:02 +02001718 /* Adjust server's health based on status code. Note: status codes 501
1719 * and 505 are triggered on demand by client request, so we must not
1720 * count them as server failures.
1721 */
1722 if (objt_server(s->target)) {
1723 if (txn->status >= 100 && (txn->status < 500 || txn->status == 501 || txn->status == 505))
1724 health_adjust(objt_server(s->target), HANA_STATUS_HTTP_OK);
1725 else
1726 health_adjust(objt_server(s->target), HANA_STATUS_HTTP_STS);
1727 }
1728
1729 /*
1730 * We may be facing a 100-continue response, or any other informational
1731 * 1xx response which is non-final, in which case this is not the right
1732 * response, and we're waiting for the next one. Let's allow this response
1733 * to go to the client and wait for the next one. There's an exception for
1734 * 101 which is used later in the code to switch protocols.
1735 */
1736 if (txn->status < 200 &&
1737 (txn->status == 100 || txn->status >= 102)) {
Christopher Faulet9768c262018-10-22 09:34:31 +02001738 //FLT_STRM_CB(s, flt_htx_reset(s, http, htx));
1739 c_adv(rep, htx->data);
Christopher Faulete0768eb2018-10-03 16:38:02 +02001740 msg->msg_state = HTTP_MSG_RPBEFORE;
1741 txn->status = 0;
1742 s->logs.t_data = -1; /* was not a response yet */
Christopher Faulet9768c262018-10-22 09:34:31 +02001743 return 0;
Christopher Faulete0768eb2018-10-03 16:38:02 +02001744 }
1745
1746 /*
1747 * 2: check for cacheability.
1748 */
1749
1750 switch (txn->status) {
1751 case 200:
1752 case 203:
1753 case 204:
1754 case 206:
1755 case 300:
1756 case 301:
1757 case 404:
1758 case 405:
1759 case 410:
1760 case 414:
1761 case 501:
1762 break;
1763 default:
1764 /* RFC7231#6.1:
1765 * Responses with status codes that are defined as
1766 * cacheable by default (e.g., 200, 203, 204, 206,
1767 * 300, 301, 404, 405, 410, 414, and 501 in this
1768 * specification) can be reused by a cache with
1769 * heuristic expiration unless otherwise indicated
1770 * by the method definition or explicit cache
1771 * controls [RFC7234]; all other status codes are
1772 * not cacheable by default.
1773 */
1774 txn->flags &= ~(TX_CACHEABLE | TX_CACHE_COOK);
1775 break;
1776 }
1777
1778 /*
1779 * 3: we may need to capture headers
1780 */
1781 s->logs.logwait &= ~LW_RESP;
1782 if (unlikely((s->logs.logwait & LW_RSPHDR) && s->res_cap))
Christopher Faulet9768c262018-10-22 09:34:31 +02001783 htx_capture_headers(htx, s->res_cap, sess->fe->rsp_cap);
Christopher Faulete0768eb2018-10-03 16:38:02 +02001784
Christopher Faulet9768c262018-10-22 09:34:31 +02001785 /* Skip parsing if no content length is possible. */
Christopher Faulete0768eb2018-10-03 16:38:02 +02001786 if (unlikely((txn->meth == HTTP_METH_CONNECT && txn->status == 200) ||
1787 txn->status == 101)) {
1788 /* Either we've established an explicit tunnel, or we're
1789 * switching the protocol. In both cases, we're very unlikely
1790 * to understand the next protocols. We have to switch to tunnel
1791 * mode, so that we transfer the request and responses then let
1792 * this protocol pass unmodified. When we later implement specific
1793 * parsers for such protocols, we'll want to check the Upgrade
1794 * header which contains information about that protocol for
1795 * responses with status 101 (eg: see RFC2817 about TLS).
1796 */
1797 txn->flags = (txn->flags & ~TX_CON_WANT_MSK) | TX_CON_WANT_TUN;
Christopher Faulete0768eb2018-10-03 16:38:02 +02001798 }
1799
Christopher Faulete0768eb2018-10-03 16:38:02 +02001800 /* we want to have the response time before we start processing it */
1801 s->logs.t_data = tv_ms_elapsed(&s->logs.tv_accept, &now);
1802
1803 /* end of job, return OK */
1804 rep->analysers &= ~an_bit;
1805 rep->analyse_exp = TICK_ETERNITY;
1806 channel_auto_close(rep);
1807 return 1;
1808
1809 abort_keep_alive:
1810 /* A keep-alive request to the server failed on a network error.
1811 * The client is required to retry. We need to close without returning
1812 * any other information so that the client retries.
1813 */
1814 txn->status = 0;
1815 rep->analysers &= AN_RES_FLT_END;
1816 s->req.analysers &= AN_REQ_FLT_END;
Christopher Faulete0768eb2018-10-03 16:38:02 +02001817 s->logs.logwait = 0;
1818 s->logs.level = 0;
1819 s->res.flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
Christopher Faulet9768c262018-10-22 09:34:31 +02001820 htx_reply_and_close(s, txn->status, NULL);
Christopher Faulete0768eb2018-10-03 16:38:02 +02001821 return 0;
1822}
1823
1824/* This function performs all the processing enabled for the current response.
1825 * It normally returns 1 unless it wants to break. It relies on buffers flags,
1826 * and updates s->res.analysers. It might make sense to explode it into several
1827 * other functions. It works like process_request (see indications above).
1828 */
1829int htx_process_res_common(struct stream *s, struct channel *rep, int an_bit, struct proxy *px)
1830{
1831 struct session *sess = s->sess;
1832 struct http_txn *txn = s->txn;
1833 struct http_msg *msg = &txn->rsp;
1834 struct proxy *cur_proxy;
1835 struct cond_wordlist *wl;
1836 enum rule_result ret = HTTP_RULE_RES_CONT;
1837
Christopher Faulet9768c262018-10-22 09:34:31 +02001838 // TODO: Disabled for now
1839 rep->analysers &= ~AN_RES_FLT_XFER_DATA;
1840 rep->analysers |= AN_RES_HTTP_XFER_BODY;
1841 rep->analyse_exp = TICK_ETERNITY;
1842 rep->analysers &= ~an_bit;
1843 return 1;
1844
Christopher Faulete0768eb2018-10-03 16:38:02 +02001845 DPRINTF(stderr,"[%u] %s: stream=%p b=%p, exp(r,w)=%u,%u bf=%08x bh=%lu analysers=%02x\n",
1846 now_ms, __FUNCTION__,
1847 s,
1848 rep,
1849 rep->rex, rep->wex,
1850 rep->flags,
1851 ci_data(rep),
1852 rep->analysers);
1853
1854 if (unlikely(msg->msg_state < HTTP_MSG_BODY)) /* we need more data */
1855 return 0;
1856
1857 /* The stats applet needs to adjust the Connection header but we don't
1858 * apply any filter there.
1859 */
1860 if (unlikely(objt_applet(s->target) == &http_stats_applet)) {
1861 rep->analysers &= ~an_bit;
1862 rep->analyse_exp = TICK_ETERNITY;
Christopher Fauletf2824e62018-10-01 12:12:37 +02001863 goto end;
Christopher Faulete0768eb2018-10-03 16:38:02 +02001864 }
1865
1866 /*
1867 * We will have to evaluate the filters.
1868 * As opposed to version 1.2, now they will be evaluated in the
1869 * filters order and not in the header order. This means that
1870 * each filter has to be validated among all headers.
1871 *
1872 * Filters are tried with ->be first, then with ->fe if it is
1873 * different from ->be.
1874 *
1875 * Maybe we are in resume condiion. In this case I choose the
1876 * "struct proxy" which contains the rule list matching the resume
1877 * pointer. If none of theses "struct proxy" match, I initialise
1878 * the process with the first one.
1879 *
1880 * In fact, I check only correspondance betwwen the current list
1881 * pointer and the ->fe rule list. If it doesn't match, I initialize
1882 * the loop with the ->be.
1883 */
1884 if (s->current_rule_list == &sess->fe->http_res_rules)
1885 cur_proxy = sess->fe;
1886 else
1887 cur_proxy = s->be;
1888 while (1) {
1889 struct proxy *rule_set = cur_proxy;
1890
1891 /* evaluate http-response rules */
1892 if (ret == HTTP_RULE_RES_CONT) {
1893 ret = http_res_get_intercept_rule(cur_proxy, &cur_proxy->http_res_rules, s);
1894
1895 if (ret == HTTP_RULE_RES_BADREQ)
1896 goto return_srv_prx_502;
1897
1898 if (ret == HTTP_RULE_RES_DONE) {
1899 rep->analysers &= ~an_bit;
1900 rep->analyse_exp = TICK_ETERNITY;
1901 return 1;
1902 }
1903 }
1904
1905 /* we need to be called again. */
1906 if (ret == HTTP_RULE_RES_YIELD) {
1907 channel_dont_close(rep);
1908 return 0;
1909 }
1910
1911 /* try headers filters */
1912 if (rule_set->rsp_exp != NULL) {
1913 if (apply_filters_to_response(s, rep, rule_set) < 0) {
1914 return_bad_resp:
1915 if (objt_server(s->target)) {
1916 HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_resp, 1);
1917 health_adjust(objt_server(s->target), HANA_STATUS_HTTP_RSP);
1918 }
1919 HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
1920 return_srv_prx_502:
1921 rep->analysers &= AN_RES_FLT_END;
1922 txn->status = 502;
1923 s->logs.t_data = -1; /* was not a valid response */
1924 s->si[1].flags |= SI_FL_NOLINGER;
1925 channel_truncate(rep);
1926 http_reply_and_close(s, txn->status, http_error_message(s));
1927 if (!(s->flags & SF_ERR_MASK))
1928 s->flags |= SF_ERR_PRXCOND;
1929 if (!(s->flags & SF_FINST_MASK))
1930 s->flags |= SF_FINST_H;
1931 return 0;
1932 }
1933 }
1934
1935 /* has the response been denied ? */
1936 if (txn->flags & TX_SVDENY) {
1937 if (objt_server(s->target))
1938 HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_secu, 1);
1939
1940 HA_ATOMIC_ADD(&s->be->be_counters.denied_resp, 1);
1941 HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_resp, 1);
1942 if (sess->listener->counters)
1943 HA_ATOMIC_ADD(&sess->listener->counters->denied_resp, 1);
1944
1945 goto return_srv_prx_502;
1946 }
1947
1948 /* add response headers from the rule sets in the same order */
1949 list_for_each_entry(wl, &rule_set->rsp_add, list) {
1950 if (txn->status < 200 && txn->status != 101)
1951 break;
1952 if (wl->cond) {
1953 int ret = acl_exec_cond(wl->cond, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL);
1954 ret = acl_pass(ret);
1955 if (((struct acl_cond *)wl->cond)->pol == ACL_COND_UNLESS)
1956 ret = !ret;
1957 if (!ret)
1958 continue;
1959 }
1960 if (unlikely(http_header_add_tail2(&txn->rsp, &txn->hdr_idx, wl->s, strlen(wl->s)) < 0))
1961 goto return_bad_resp;
1962 }
1963
1964 /* check whether we're already working on the frontend */
1965 if (cur_proxy == sess->fe)
1966 break;
1967 cur_proxy = sess->fe;
1968 }
1969
1970 /* After this point, this anayzer can't return yield, so we can
1971 * remove the bit corresponding to this analyzer from the list.
1972 *
1973 * Note that the intermediate returns and goto found previously
1974 * reset the analyzers.
1975 */
1976 rep->analysers &= ~an_bit;
1977 rep->analyse_exp = TICK_ETERNITY;
1978
1979 /* OK that's all we can do for 1xx responses */
1980 if (unlikely(txn->status < 200 && txn->status != 101))
Christopher Fauletf2824e62018-10-01 12:12:37 +02001981 goto end;
Christopher Faulete0768eb2018-10-03 16:38:02 +02001982
1983 /*
1984 * Now check for a server cookie.
1985 */
1986 if (s->be->cookie_name || sess->fe->capture_name || (s->be->options & PR_O_CHK_CACHE))
1987 manage_server_side_cookies(s, rep);
1988
1989 /*
1990 * Check for cache-control or pragma headers if required.
1991 */
1992 if ((s->be->options & PR_O_CHK_CACHE) || (s->be->ck_opts & PR_CK_NOC))
1993 check_response_for_cacheability(s, rep);
1994
1995 /*
1996 * Add server cookie in the response if needed
1997 */
1998 if (objt_server(s->target) && (s->be->ck_opts & PR_CK_INS) &&
1999 !((txn->flags & TX_SCK_FOUND) && (s->be->ck_opts & PR_CK_PSV)) &&
2000 (!(s->flags & SF_DIRECT) ||
2001 ((s->be->cookie_maxidle || txn->cookie_last_date) &&
2002 (!txn->cookie_last_date || (txn->cookie_last_date - date.tv_sec) < 0)) ||
2003 (s->be->cookie_maxlife && !txn->cookie_first_date) || // set the first_date
2004 (!s->be->cookie_maxlife && txn->cookie_first_date)) && // remove the first_date
2005 (!(s->be->ck_opts & PR_CK_POST) || (txn->meth == HTTP_METH_POST)) &&
2006 !(s->flags & SF_IGNORE_PRST)) {
2007 /* the server is known, it's not the one the client requested, or the
2008 * cookie's last seen date needs to be refreshed. We have to
2009 * insert a set-cookie here, except if we want to insert only on POST
2010 * requests and this one isn't. Note that servers which don't have cookies
2011 * (eg: some backup servers) will return a full cookie removal request.
2012 */
2013 if (!objt_server(s->target)->cookie) {
2014 chunk_printf(&trash,
2015 "Set-Cookie: %s=; Expires=Thu, 01-Jan-1970 00:00:01 GMT; path=/",
2016 s->be->cookie_name);
2017 }
2018 else {
2019 chunk_printf(&trash, "Set-Cookie: %s=%s", s->be->cookie_name, objt_server(s->target)->cookie);
2020
2021 if (s->be->cookie_maxidle || s->be->cookie_maxlife) {
2022 /* emit last_date, which is mandatory */
2023 trash.area[trash.data++] = COOKIE_DELIM_DATE;
2024 s30tob64((date.tv_sec+3) >> 2,
2025 trash.area + trash.data);
2026 trash.data += 5;
2027
2028 if (s->be->cookie_maxlife) {
2029 /* emit first_date, which is either the original one or
2030 * the current date.
2031 */
2032 trash.area[trash.data++] = COOKIE_DELIM_DATE;
2033 s30tob64(txn->cookie_first_date ?
2034 txn->cookie_first_date >> 2 :
2035 (date.tv_sec+3) >> 2,
2036 trash.area + trash.data);
2037 trash.data += 5;
2038 }
2039 }
2040 chunk_appendf(&trash, "; path=/");
2041 }
2042
2043 if (s->be->cookie_domain)
2044 chunk_appendf(&trash, "; domain=%s", s->be->cookie_domain);
2045
2046 if (s->be->ck_opts & PR_CK_HTTPONLY)
2047 chunk_appendf(&trash, "; HttpOnly");
2048
2049 if (s->be->ck_opts & PR_CK_SECURE)
2050 chunk_appendf(&trash, "; Secure");
2051
2052 if (unlikely(http_header_add_tail2(&txn->rsp, &txn->hdr_idx, trash.area, trash.data) < 0))
2053 goto return_bad_resp;
2054
2055 txn->flags &= ~TX_SCK_MASK;
2056 if (__objt_server(s->target)->cookie && (s->flags & SF_DIRECT))
2057 /* the server did not change, only the date was updated */
2058 txn->flags |= TX_SCK_UPDATED;
2059 else
2060 txn->flags |= TX_SCK_INSERTED;
2061
2062 /* Here, we will tell an eventual cache on the client side that we don't
2063 * want it to cache this reply because HTTP/1.0 caches also cache cookies !
2064 * Some caches understand the correct form: 'no-cache="set-cookie"', but
2065 * others don't (eg: apache <= 1.3.26). So we use 'private' instead.
2066 */
2067 if ((s->be->ck_opts & PR_CK_NOC) && (txn->flags & TX_CACHEABLE)) {
2068
2069 txn->flags &= ~TX_CACHEABLE & ~TX_CACHE_COOK;
2070
2071 if (unlikely(http_header_add_tail2(&txn->rsp, &txn->hdr_idx,
2072 "Cache-control: private", 22) < 0))
2073 goto return_bad_resp;
2074 }
2075 }
2076
2077 /*
2078 * Check if result will be cacheable with a cookie.
2079 * We'll block the response if security checks have caught
2080 * nasty things such as a cacheable cookie.
2081 */
2082 if (((txn->flags & (TX_CACHEABLE | TX_CACHE_COOK | TX_SCK_PRESENT)) ==
2083 (TX_CACHEABLE | TX_CACHE_COOK | TX_SCK_PRESENT)) &&
2084 (s->be->options & PR_O_CHK_CACHE)) {
2085 /* we're in presence of a cacheable response containing
2086 * a set-cookie header. We'll block it as requested by
2087 * the 'checkcache' option, and send an alert.
2088 */
2089 if (objt_server(s->target))
2090 HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_secu, 1);
2091
2092 HA_ATOMIC_ADD(&s->be->be_counters.denied_resp, 1);
2093 HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_resp, 1);
2094 if (sess->listener->counters)
2095 HA_ATOMIC_ADD(&sess->listener->counters->denied_resp, 1);
2096
2097 ha_alert("Blocking cacheable cookie in response from instance %s, server %s.\n",
2098 s->be->id, objt_server(s->target) ? objt_server(s->target)->id : "<dispatch>");
2099 send_log(s->be, LOG_ALERT,
2100 "Blocking cacheable cookie in response from instance %s, server %s.\n",
2101 s->be->id, objt_server(s->target) ? objt_server(s->target)->id : "<dispatch>");
2102 goto return_srv_prx_502;
2103 }
2104
Christopher Fauletf2824e62018-10-01 12:12:37 +02002105 end:
Christopher Faulete0768eb2018-10-03 16:38:02 +02002106 /* Always enter in the body analyzer */
2107 rep->analysers &= ~AN_RES_FLT_XFER_DATA;
2108 rep->analysers |= AN_RES_HTTP_XFER_BODY;
2109
2110 /* if the user wants to log as soon as possible, without counting
2111 * bytes from the server, then this is the right moment. We have
2112 * to temporarily assign bytes_out to log what we currently have.
2113 */
2114 if (!LIST_ISEMPTY(&sess->fe->logformat) && !(s->logs.logwait & LW_BYTES)) {
2115 s->logs.t_close = s->logs.t_data; /* to get a valid end date */
2116 s->logs.bytes_out = txn->rsp.eoh;
2117 s->do_log(s);
2118 s->logs.bytes_out = 0;
2119 }
2120 return 1;
2121}
2122
2123/* This function is an analyser which forwards response body (including chunk
2124 * sizes if any). It is called as soon as we must forward, even if we forward
2125 * zero byte. The only situation where it must not be called is when we're in
2126 * tunnel mode and we want to forward till the close. It's used both to forward
2127 * remaining data and to resync after end of body. It expects the msg_state to
2128 * be between MSG_BODY and MSG_DONE (inclusive). It returns zero if it needs to
2129 * read more data, or 1 once we can go on with next request or end the stream.
2130 *
2131 * It is capable of compressing response data both in content-length mode and
2132 * in chunked mode. The state machines follows different flows depending on
2133 * whether content-length and chunked modes are used, since there are no
2134 * trailers in content-length :
2135 *
2136 * chk-mode cl-mode
2137 * ,----- BODY -----.
2138 * / \
2139 * V size > 0 V chk-mode
2140 * .--> SIZE -------------> DATA -------------> CRLF
2141 * | | size == 0 | last byte |
2142 * | v final crlf v inspected |
2143 * | TRAILERS -----------> DONE |
2144 * | |
2145 * `----------------------------------------------'
2146 *
2147 * Compression only happens in the DATA state, and must be flushed in final
2148 * states (TRAILERS/DONE) or when leaving on missing data. Normal forwarding
2149 * is performed at once on final states for all bytes parsed, or when leaving
2150 * on missing data.
2151 */
int htx_response_forward_body(struct stream *s, struct channel *res, int an_bit)
{
	struct session *sess = s->sess;
	struct http_txn *txn = s->txn;
	struct http_msg *msg = &s->txn->rsp;
	struct htx *htx;
	//int ret;

	DPRINTF(stderr,"[%u] %s: stream=%p b=%p, exp(r,w)=%u,%u bf=%08x bh=%lu analysers=%02x\n",
		now_ms, __FUNCTION__,
		s,
		res,
		res->rex, res->wex,
		res->flags,
		ci_data(res),
		res->analysers);

	/* interpret the channel's buffer as an HTX message */
	htx = htx_from_buf(&res->buf);

	/* An I/O error on either side, or an output shutdown while data
	 * remained to be sent, means the transfer cannot complete: abort
	 * both directions.
	 */
	if ((res->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
	    ((res->flags & CF_SHUTW) && (res->to_forward || co_data(res)))) {
		/* Output closed while we were sending data. We must abort and
		 * wake the other side up.
		 */
		msg->err_state = msg->msg_state;
		msg->msg_state = HTTP_MSG_ERROR;
		htx_end_response(s);
		htx_end_request(s);
		return 1;
	}

	/* first pass through this analyser: enter the DATA state */
	if (msg->msg_state == HTTP_MSG_BODY)
		msg->msg_state = HTTP_MSG_DATA;

	/* in most states, we should abort in case of early close */
	channel_auto_close(res);

	if (res->to_forward) {
		/* We can't process the buffer's contents yet */
		res->flags |= CF_WAKE_WRITE;
		goto missing_data_or_waiting;
	}

	if (msg->msg_state >= HTTP_MSG_DONE)
		goto done;

	/* Forward all input data. We get it by removing all outgoing data not
	 * forwarded yet from HTX data size.
	 */
	c_adv(res, htx->data - co_data(res));

	/* To let the function channel_forward work as expected we must update
	 * the channel's buffer to pretend there is no more input data. The
	 * right length is then restored. We must do that, because when an HTX
	 * message is stored into a buffer, it appears as full.
	 */
	b_set_data(&res->buf, co_data(res));
	/* htx->extra == ULLONG_MAX means the remaining body size is unknown,
	 * so there is nothing to pre-schedule for forwarding.
	 */
	if (htx->extra != ULLONG_MAX)
		htx->extra -= channel_forward(res, htx->extra);
	b_set_data(&res->buf, b_size(&res->buf));

	if (!(msg->flags & HTTP_MSGF_XFER_LEN)) {
		/* The server still sending data that should be filtered */
		if (res->flags & CF_SHUTR || !HAS_DATA_FILTERS(s, res)) {
			msg->msg_state = HTTP_MSG_TUNNEL;
			goto done;
		}
	}

	/* Check if the end-of-message is reached and if so, switch the message
	 * in HTTP_MSG_DONE state.
	 */
	if (htx_get_tail_type(htx) != HTX_BLK_EOM)
		goto missing_data_or_waiting;

	msg->msg_state = HTTP_MSG_DONE;

  done:
	/* other states, DONE...TUNNEL */
	channel_dont_close(res);

	htx_end_response(s);
	if (!(res->analysers & an_bit)) {
		/* the response side is finished: also try to finish the
		 * request side, and report errors if any occurred meanwhile.
		 */
		htx_end_request(s);
		if (unlikely(msg->msg_state == HTTP_MSG_ERROR)) {
			if (res->flags & CF_SHUTW) {
				/* response errors are most likely due to the
				 * client aborting the transfer. */
				goto aborted_xfer;
			}
			goto return_bad_res;
		}
		return 1;
	}
	return 0;

  missing_data_or_waiting:
	if (res->flags & CF_SHUTW)
		goto aborted_xfer;

	/* stop waiting for data if the input is closed before the end. If the
	 * client side was already closed, it means that the client has aborted,
	 * so we don't want to count this as a server abort. Otherwise it's a
	 * server abort.
	 */
	if (msg->msg_state < HTTP_MSG_DONE && res->flags & CF_SHUTR) {
		if ((s->req.flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))
			goto aborted_xfer;
		/* If we have some pending data, we continue the processing */
		if (htx_is_empty(htx)) {
			if (!(s->flags & SF_ERR_MASK))
				s->flags |= SF_ERR_SRVCL;
			HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
			if (objt_server(s->target))
				HA_ATOMIC_ADD(&objt_server(s->target)->counters.srv_aborts, 1);
			goto return_bad_res_stats_ok;
		}
	}

	/* When TE: chunked is used, we need to get there again to parse
	 * remaining chunks even if the server has closed, so we don't want to
	 * set CF_DONTCLOSE. Similarly when there is a content-leng or if there
	 * are filters registered on the stream, we don't want to forward a
	 * close
	 */
	if ((msg->flags & HTTP_MSGF_XFER_LEN) || HAS_DATA_FILTERS(s, res))
		channel_dont_close(res);

#if 0 // FIXME [Cf]: Probably not required now, but I need more time to think
      // about if

	/* We know that more data are expected, but we couldn't send more that
	 * what we did. So we always set the CF_EXPECT_MORE flag so that the
	 * system knows it must not set a PUSH on this first part. Interactive
	 * modes are already handled by the stream sock layer. We must not do
	 * this in content-length mode because it could present the MSG_MORE
	 * flag with the last block of forwarded data, which would cause an
	 * additional delay to be observed by the receiver.
	 */
	if ((msg->flags & HTTP_MSGF_TE_CHNK) || (msg->flags & HTTP_MSGF_COMPRESSING))
		res->flags |= CF_EXPECT_MORE;
#endif

	/* the stream handler will take care of timeouts and errors */
	return 0;

  return_bad_res: /* let's centralize all bad responses */
	HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
	if (objt_server(s->target))
		HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_resp, 1);

  return_bad_res_stats_ok:
	txn->rsp.err_state = txn->rsp.msg_state;
	txn->rsp.msg_state = HTTP_MSG_ERROR;
	/* don't send any error message as we're in the body */
	htx_reply_and_close(s, txn->status, NULL);
	res->analysers &= AN_RES_FLT_END;
	s->req.analysers &= AN_REQ_FLT_END; /* we're in data phase, we want to abort both directions */
	if (objt_server(s->target))
		health_adjust(objt_server(s->target), HANA_STATUS_HTTP_HDRRSP);

	if (!(s->flags & SF_ERR_MASK))
		s->flags |= SF_ERR_PRXCOND;
	if (!(s->flags & SF_FINST_MASK))
		s->flags |= SF_FINST_D;
	return 0;

  aborted_xfer:
	txn->rsp.err_state = txn->rsp.msg_state;
	txn->rsp.msg_state = HTTP_MSG_ERROR;
	/* don't send any error message as we're in the body */
	htx_reply_and_close(s, txn->status, NULL);
	res->analysers &= AN_RES_FLT_END;
	s->req.analysers &= AN_REQ_FLT_END; /* we're in data phase, we want to abort both directions */

	HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
	HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
	if (objt_server(s->target))
		HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);

	if (!(s->flags & SF_ERR_MASK))
		s->flags |= SF_ERR_CLICL;
	if (!(s->flags & SF_FINST_MASK))
		s->flags |= SF_FINST_D;
	return 0;
}
2338
Christopher Faulet0f226952018-10-22 09:29:56 +02002339void htx_adjust_conn_mode(struct stream *s, struct http_txn *txn)
Christopher Fauletf2824e62018-10-01 12:12:37 +02002340{
2341 struct proxy *fe = strm_fe(s);
2342 int tmp = TX_CON_WANT_CLO;
2343
2344 if ((fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_TUN)
2345 tmp = TX_CON_WANT_TUN;
2346
2347 if ((txn->flags & TX_CON_WANT_MSK) < tmp)
Christopher Faulet0f226952018-10-22 09:29:56 +02002348 txn->flags = (txn->flags & ~TX_CON_WANT_MSK) | tmp;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002349}
2350
2351/* Perform an HTTP redirect based on the information in <rule>. The function
2352 * returns non-zero on success, or zero in case of a, irrecoverable error such
2353 * as too large a request to build a valid response.
2354 */
2355int htx_apply_redirect_rule(struct redirect_rule *rule, struct stream *s, struct http_txn *txn)
2356{
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002357 struct htx *htx = htx_from_buf(&s->req.buf);
2358 union h1_sl sl;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002359 const char *msg_fmt;
2360 struct buffer *chunk;
2361 int ret = 0;
2362
2363 chunk = alloc_trash_chunk();
2364 if (!chunk)
2365 goto leave;
2366
2367 /* build redirect message */
2368 switch(rule->code) {
2369 case 308:
2370 msg_fmt = HTTP_308;
2371 break;
2372 case 307:
2373 msg_fmt = HTTP_307;
2374 break;
2375 case 303:
2376 msg_fmt = HTTP_303;
2377 break;
2378 case 301:
2379 msg_fmt = HTTP_301;
2380 break;
2381 case 302:
2382 default:
2383 msg_fmt = HTTP_302;
2384 break;
2385 }
2386
2387 if (unlikely(!chunk_strcpy(chunk, msg_fmt)))
2388 goto leave;
2389
2390 switch(rule->type) {
2391 case REDIRECT_TYPE_SCHEME: {
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002392 struct http_hdr_ctx ctx;
2393 struct ist path, host;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002394
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002395 host = ist("");
2396 ctx.blk = NULL;
2397 if (http_find_header(htx, ist("Host"), &ctx, 0))
2398 host = ctx.value;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002399
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002400 sl = http_find_stline(htx);
2401 path = http_get_path(sl.rq.u);
Christopher Fauletf2824e62018-10-01 12:12:37 +02002402 /* build message using path */
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002403 if (path.ptr) {
Christopher Fauletf2824e62018-10-01 12:12:37 +02002404 if (rule->flags & REDIRECT_FLAG_DROP_QS) {
2405 int qs = 0;
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002406 while (qs < path.len) {
2407 if (*(path.ptr + qs) == '?') {
2408 path.len = qs;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002409 break;
2410 }
2411 qs++;
2412 }
2413 }
Christopher Fauletf2824e62018-10-01 12:12:37 +02002414 }
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002415 else
2416 path = ist("/");
Christopher Fauletf2824e62018-10-01 12:12:37 +02002417
2418 if (rule->rdr_str) { /* this is an old "redirect" rule */
Christopher Fauletf2824e62018-10-01 12:12:37 +02002419 /* add scheme */
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002420 if (!chunk_memcat(chunk, rule->rdr_str, rule->rdr_len))
2421 goto leave;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002422 }
2423 else {
2424 /* add scheme with executing log format */
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002425 chunk->data += build_logline(s, chunk->area + chunk->data,
2426 chunk->size - chunk->data,
2427 &rule->rdr_fmt);
Christopher Fauletf2824e62018-10-01 12:12:37 +02002428 }
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002429 /* add "://" + host + path */
2430 if (!chunk_memcat(chunk, "://", 3) ||
2431 !chunk_memcat(chunk, host.ptr, host.len) ||
2432 !chunk_memcat(chunk, path.ptr, path.len))
2433 goto leave;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002434
2435 /* append a slash at the end of the location if needed and missing */
2436 if (chunk->data && chunk->area[chunk->data - 1] != '/' &&
2437 (rule->flags & REDIRECT_FLAG_APPEND_SLASH)) {
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002438 if (chunk->data + 1 >= chunk->size)
Christopher Fauletf2824e62018-10-01 12:12:37 +02002439 goto leave;
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002440 chunk->area[chunk->data++] = '/';
Christopher Fauletf2824e62018-10-01 12:12:37 +02002441 }
Christopher Fauletf2824e62018-10-01 12:12:37 +02002442 break;
2443 }
2444 case REDIRECT_TYPE_PREFIX: {
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002445 struct ist path;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002446
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002447 sl = http_find_stline(htx);
2448 path = http_get_path(sl.rq.u);
Christopher Fauletf2824e62018-10-01 12:12:37 +02002449 /* build message using path */
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002450 if (path.ptr) {
Christopher Fauletf2824e62018-10-01 12:12:37 +02002451 if (rule->flags & REDIRECT_FLAG_DROP_QS) {
2452 int qs = 0;
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002453 while (qs < path.len) {
2454 if (*(path.ptr + qs) == '?') {
2455 path.len = qs;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002456 break;
2457 }
2458 qs++;
2459 }
2460 }
Christopher Fauletf2824e62018-10-01 12:12:37 +02002461 }
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002462 else
2463 path = ist("/");
Christopher Fauletf2824e62018-10-01 12:12:37 +02002464
2465 if (rule->rdr_str) { /* this is an old "redirect" rule */
Christopher Fauletf2824e62018-10-01 12:12:37 +02002466 /* add prefix. Note that if prefix == "/", we don't want to
2467 * add anything, otherwise it makes it hard for the user to
2468 * configure a self-redirection.
2469 */
2470 if (rule->rdr_len != 1 || *rule->rdr_str != '/') {
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002471 if (!chunk_memcat(chunk, rule->rdr_str, rule->rdr_len))
2472 goto leave;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002473 }
2474 }
2475 else {
2476 /* add prefix with executing log format */
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002477 chunk->data += build_logline(s, chunk->area + chunk->data,
2478 chunk->size - chunk->data,
2479 &rule->rdr_fmt);
Christopher Fauletf2824e62018-10-01 12:12:37 +02002480 }
2481
2482 /* add path */
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002483 if (!chunk_memcat(chunk, path.ptr, path.len))
2484 goto leave;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002485
2486 /* append a slash at the end of the location if needed and missing */
2487 if (chunk->data && chunk->area[chunk->data - 1] != '/' &&
2488 (rule->flags & REDIRECT_FLAG_APPEND_SLASH)) {
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002489 if (chunk->data + 1 >= chunk->size)
Christopher Fauletf2824e62018-10-01 12:12:37 +02002490 goto leave;
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002491 chunk->area[chunk->data++] = '/';
Christopher Fauletf2824e62018-10-01 12:12:37 +02002492 }
Christopher Fauletf2824e62018-10-01 12:12:37 +02002493 break;
2494 }
2495 case REDIRECT_TYPE_LOCATION:
2496 default:
2497 if (rule->rdr_str) { /* this is an old "redirect" rule */
Christopher Fauletf2824e62018-10-01 12:12:37 +02002498 /* add location */
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002499 if (!chunk_memcat(chunk, rule->rdr_str, rule->rdr_len))
2500 goto leave;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002501 }
2502 else {
2503 /* add location with executing log format */
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002504 chunk->data += build_logline(s, chunk->area + chunk->data,
2505 chunk->size - chunk->data,
2506 &rule->rdr_fmt);
Christopher Fauletf2824e62018-10-01 12:12:37 +02002507 }
2508 break;
2509 }
2510
2511 if (rule->cookie_len) {
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002512 if (!chunk_memcat(chunk, "\r\nSet-Cookie: ", 14) ||
2513 !chunk_memcat(chunk, rule->cookie_str, rule->cookie_len))
2514 goto leave;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002515 }
2516
2517 /* add end of headers and the keep-alive/close status. */
2518 txn->status = rule->code;
2519 /* let's log the request time */
2520 s->logs.tv_request = now;
2521
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002522 /* FIXME: close for now, but it could be cool to handle the keep-alive here */
2523 if (unlikely(txn->flags & TX_USE_PX_CONN)) {
2524 if (!chunk_memcat(chunk, "\r\nProxy-Connection: close\r\n\r\n", 29))
2525 goto leave;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002526 } else {
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002527 if (!chunk_memcat(chunk, "\r\nConnection: close\r\n\r\n", 23))
2528 goto leave;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002529 }
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002530 htx_reply_and_close(s, txn->status, chunk);
2531 s->req.analysers &= AN_REQ_FLT_END;
Christopher Fauletf2824e62018-10-01 12:12:37 +02002532
2533 if (!(s->flags & SF_ERR_MASK))
2534 s->flags |= SF_ERR_LOCAL;
2535 if (!(s->flags & SF_FINST_MASK))
2536 s->flags |= SF_FINST_R;
2537
2538 ret = 1;
Christopher Faulet80f14bf2018-10-24 11:02:25 +02002539 leave:
Christopher Fauletf2824e62018-10-01 12:12:37 +02002540 free_trash_chunk(chunk);
2541 return ret;
2542}
2543
/* This function terminates the request because it was completely analyzed or
 * because an error was triggered during the body forwarding. It drives the
 * request-side message state machine (DONE -> CLOSING -> CLOSED, or TUNNEL)
 * and adjusts the channel/stream-interface flags accordingly. The statement
 * order and the goto labels nested inside the state tests are deliberate:
 * each state can fall into the next one within a single call.
 */
static void htx_end_request(struct stream *s)
{
	struct channel *chn = &s->req;
	struct http_txn *txn = s->txn;

	DPRINTF(stderr,"[%u] %s: stream=%p states=%s,%s req->analysers=0x%08x res->analysers=0x%08x\n",
		now_ms, __FUNCTION__, s,
		h1_msg_state_str(txn->req.msg_state), h1_msg_state_str(txn->rsp.msg_state),
		s->req.analysers, s->res.analysers);

	/* on error, abort the request channel and drop any pending data */
	if (unlikely(txn->req.msg_state == HTTP_MSG_ERROR)) {
		channel_abort(chn);
		channel_truncate(chn);
		goto end;
	}

	/* request not fully received/forwarded yet: nothing to conclude */
	if (unlikely(txn->req.msg_state < HTTP_MSG_DONE))
		return;

	if (txn->req.msg_state == HTTP_MSG_DONE) {
		if (txn->rsp.msg_state < HTTP_MSG_DONE) {
			/* The server has not finished responding, so we
			 * don't want to move in order not to upset it.
			 */
			return;
		}

		/* No need to read anymore, the request was completely parsed.
		 * We can shut the read side unless we want to abort_on_close,
		 * or we have a POST request. The issue with POST requests is
		 * that some browsers still send a CRLF after the request, and
		 * this CRLF must be read so that it does not remain in the kernel
		 * buffers, otherwise a close could cause an RST on some systems
		 * (eg: Linux).
		 */
		if ((!(s->be->options & PR_O_ABRT_CLOSE) || (s->si[0].flags & SI_FL_CLEAN_ABRT)) &&
		    txn->meth != HTTP_METH_POST)
			channel_dont_read(chn);

		/* if the server closes the connection, we want to immediately react
		 * and close the socket to save packets and syscalls.
		 */
		s->si[1].flags |= SI_FL_NOHALF;

		/* In any case we've finished parsing the request so we must
		 * disable Nagle when sending data because 1) we're not going
		 * to shut this side, and 2) the server is waiting for us to
		 * send pending data.
		 */
		chn->flags |= CF_NEVER_WAIT;

		/* When we get here, it means that both the request and the
		 * response have finished receiving. Depending on the connection
		 * mode, we'll have to wait for the last bytes to leave in either
		 * direction, and sometimes for a close to be effective.
		 */
		if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_TUN) {
			/* Tunnel mode will not have any analyser so it needs to
			 * poll for reads.
			 */
			channel_auto_read(chn);
			/* wait for buffered data to be flushed before switching */
			if (b_data(&chn->buf))
				return;
			txn->req.msg_state = HTTP_MSG_TUNNEL;
		}
		else {
			/* we're not expecting any new data to come for this
			 * transaction, so we can close it.
			 *
			 * However, there is an exception if the response
			 * length is undefined. In this case, we need to wait
			 * the close from the server. The response will be
			 * switched in TUNNEL mode until the end.
			 */
			if (!(txn->rsp.flags & HTTP_MSGF_XFER_LEN) &&
			    txn->rsp.msg_state != HTTP_MSG_CLOSED)
				goto check_channel_flags;

			if (!(chn->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
				channel_shutr_now(chn);
				channel_shutw_now(chn);
			}
		}
		goto check_channel_flags;
	}

	if (txn->req.msg_state == HTTP_MSG_CLOSING) {
	http_msg_closing:
		/* nothing else to forward, just waiting for the output buffer
		 * to be empty and for the shutw_now to take effect.
		 */
		if (channel_is_empty(chn)) {
			txn->req.msg_state = HTTP_MSG_CLOSED;
			goto http_msg_closed;
		}
		else if (chn->flags & CF_SHUTW) {
			/* output was shut before we could flush: flag an error */
			txn->req.err_state = txn->req.msg_state;
			txn->req.msg_state = HTTP_MSG_ERROR;
			goto end;
		}
		return;
	}

	if (txn->req.msg_state == HTTP_MSG_CLOSED) {
	http_msg_closed:
		/* if we don't know whether the server will close, we need to hard close */
		if (txn->rsp.flags & HTTP_MSGF_XFER_LEN)
			s->si[1].flags |= SI_FL_NOLINGER;  /* we want to close ASAP */
		/* see above in MSG_DONE why we only do this in these states */
		if ((!(s->be->options & PR_O_ABRT_CLOSE) || (s->si[0].flags & SI_FL_CLEAN_ABRT)))
			channel_dont_read(chn);
		goto end;
	}

 check_channel_flags:
	/* Here, we are in HTTP_MSG_DONE or HTTP_MSG_TUNNEL */
	if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW)) {
		/* if we've just closed an output, let's switch */
		txn->req.msg_state = HTTP_MSG_CLOSING;
		goto http_msg_closing;
	}

 end:
	/* keep only the end-of-filtering analyser; re-enable the data filter
	 * when entering tunnel mode with registered request data filters
	 */
	chn->analysers &= AN_REQ_FLT_END;
	if (txn->req.msg_state == HTTP_MSG_TUNNEL && HAS_REQ_DATA_FILTERS(s))
		chn->analysers |= AN_REQ_FLT_XFER_DATA;
	channel_auto_close(chn);
	channel_auto_read(chn);
}
2676
2677
/* This function terminates the response because it was completely analyzed or
 * because an error was triggered during the body forwarding. Mirror of
 * htx_end_request() for the response channel: it drives the response-side
 * message state machine (DONE -> CLOSING -> CLOSED, or TUNNEL). Statement
 * order and the goto labels nested inside the state tests are deliberate.
 */
static void htx_end_response(struct stream *s)
{
	struct channel *chn = &s->res;
	struct http_txn *txn = s->txn;

	DPRINTF(stderr,"[%u] %s: stream=%p states=%s,%s req->analysers=0x%08x res->analysers=0x%08x\n",
		now_ms, __FUNCTION__, s,
		h1_msg_state_str(txn->req.msg_state), h1_msg_state_str(txn->rsp.msg_state),
		s->req.analysers, s->res.analysers);

	/* on error, drop pending response data and abort the request side too */
	if (unlikely(txn->rsp.msg_state == HTTP_MSG_ERROR)) {
		channel_truncate(chn);
		channel_abort(&s->req);
		goto end;
	}

	/* response not fully received/forwarded yet: nothing to conclude */
	if (unlikely(txn->rsp.msg_state < HTTP_MSG_DONE))
		return;

	if (txn->rsp.msg_state == HTTP_MSG_DONE) {
		/* In theory, we don't need to read anymore, but we must
		 * still monitor the server connection for a possible close
		 * while the request is being uploaded, so we don't disable
		 * reading.
		 */
		/* channel_dont_read(chn); */

		if (txn->req.msg_state < HTTP_MSG_DONE) {
			/* The client seems to still be sending data, probably
			 * because we got an error response during an upload.
			 * We have the choice of either breaking the connection
			 * or letting it pass through. Let's do the latter.
			 */
			return;
		}

		/* When we get here, it means that both the request and the
		 * response have finished receiving. Depending on the connection
		 * mode, we'll have to wait for the last bytes to leave in either
		 * direction, and sometimes for a close to be effective.
		 */
		if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_TUN) {
			channel_auto_read(chn);
			chn->flags |= CF_NEVER_WAIT;
			/* wait for buffered data to be flushed before switching */
			if (b_data(&chn->buf))
				return;
			txn->rsp.msg_state = HTTP_MSG_TUNNEL;
		}
		else {
			/* we're not expecting any new data to come for this
			 * transaction, so we can close it.
			 */
			if (!(chn->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
				channel_shutr_now(chn);
				channel_shutw_now(chn);
			}
		}
		goto check_channel_flags;
	}

	if (txn->rsp.msg_state == HTTP_MSG_CLOSING) {
	http_msg_closing:
		/* nothing else to forward, just waiting for the output buffer
		 * to be empty and for the shutw_now to take effect.
		 */
		if (channel_is_empty(chn)) {
			txn->rsp.msg_state = HTTP_MSG_CLOSED;
			goto http_msg_closed;
		}
		else if (chn->flags & CF_SHUTW) {
			/* output shut before the response could be flushed:
			 * account for a client abort and flag an error
			 */
			txn->rsp.err_state = txn->rsp.msg_state;
			txn->rsp.msg_state = HTTP_MSG_ERROR;
			HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
			if (objt_server(s->target))
				HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
			goto end;
		}
		return;
	}

	if (txn->rsp.msg_state == HTTP_MSG_CLOSED) {
	http_msg_closed:
		/* drop any pending data */
		channel_truncate(chn);
		channel_abort(&s->req);
		goto end;
	}

 check_channel_flags:
	/* Here, we are in HTTP_MSG_DONE or HTTP_MSG_TUNNEL */
	if (chn->flags & (CF_SHUTW|CF_SHUTW_NOW)) {
		/* if we've just closed an output, let's switch */
		txn->rsp.msg_state = HTTP_MSG_CLOSING;
		goto http_msg_closing;
	}

 end:
	/* keep only the end-of-filtering analyser; re-enable the data filter
	 * when entering tunnel mode with registered response data filters
	 */
	chn->analysers &= AN_RES_FLT_END;
	if (txn->rsp.msg_state == HTTP_MSG_TUNNEL && HAS_RSP_DATA_FILTERS(s))
		chn->analysers |= AN_RES_FLT_XFER_DATA;
	channel_auto_close(chn);
	channel_auto_read(chn);
}
2784
Christopher Faulet0f226952018-10-22 09:29:56 +02002785void htx_server_error(struct stream *s, struct stream_interface *si, int err,
2786 int finst, const struct buffer *msg)
2787{
2788 channel_auto_read(si_oc(si));
2789 channel_abort(si_oc(si));
2790 channel_auto_close(si_oc(si));
2791 channel_erase(si_oc(si));
2792 channel_auto_close(si_ic(si));
2793 channel_auto_read(si_ic(si));
2794 if (msg) {
2795 struct channel *chn = si_ic(si);
2796 struct htx *htx;
2797
2798 htx = htx_from_buf(&chn->buf);
2799 htx_add_oob(htx, ist2(msg->area, msg->data));
2800 //FLT_STRM_CB(s, flt_htx_reply(s, s->txn->status, htx));
2801 b_set_data(&chn->buf, b_size(&chn->buf));
2802 c_adv(chn, htx->data);
2803 chn->total += htx->data;
2804 }
2805 if (!(s->flags & SF_ERR_MASK))
2806 s->flags |= err;
2807 if (!(s->flags & SF_FINST_MASK))
2808 s->flags |= finst;
2809}
2810
2811void htx_reply_and_close(struct stream *s, short status, struct buffer *msg)
2812{
2813 channel_auto_read(&s->req);
2814 channel_abort(&s->req);
2815 channel_auto_close(&s->req);
2816 channel_erase(&s->req);
2817 channel_truncate(&s->res);
2818
2819 s->txn->flags &= ~TX_WAIT_NEXT_RQ;
2820 if (msg) {
2821 struct channel *chn = &s->res;
2822 struct htx *htx;
2823
2824 htx = htx_from_buf(&chn->buf);
2825 htx_add_oob(htx, ist2(msg->area, msg->data));
2826 //FLT_STRM_CB(s, flt_htx_reply(s, s->txn->status, htx));
2827 b_set_data(&chn->buf, b_size(&chn->buf));
2828 c_adv(chn, htx->data);
2829 chn->total += htx->data;
2830 }
2831
2832 s->res.wex = tick_add_ifset(now_ms, s->res.wto);
2833 channel_auto_read(&s->res);
2834 channel_auto_close(&s->res);
2835 channel_shutr_now(&s->res);
2836}
2837
2838/*
2839 * Capture headers from message <htx> according to header list <cap_hdr>, and
2840 * fill the <cap> pointers appropriately.
2841 */
2842static void htx_capture_headers(struct htx *htx, char **cap, struct cap_hdr *cap_hdr)
2843{
2844 struct cap_hdr *h;
2845 int32_t pos;
2846
2847 for (pos = htx_get_head(htx); pos != -1; pos = htx_get_next(htx, pos)) {
2848 struct htx_blk *blk = htx_get_blk(htx, pos);
2849 enum htx_blk_type type = htx_get_blk_type(blk);
2850 struct ist n, v;
2851
2852 if (type == HTX_BLK_EOH)
2853 break;
2854 if (type != HTX_BLK_HDR)
2855 continue;
2856
2857 n = htx_get_blk_name(htx, blk);
2858
2859 for (h = cap_hdr; h; h = h->next) {
2860 if (h->namelen && (h->namelen == n.len) &&
2861 (strncasecmp(n.ptr, h->name, h->namelen) == 0)) {
2862 if (cap[h->index] == NULL)
2863 cap[h->index] =
2864 pool_alloc(h->pool);
2865
2866 if (cap[h->index] == NULL) {
2867 ha_alert("HTTP capture : out of memory.\n");
2868 break;
2869 }
2870
2871 v = htx_get_blk_value(htx, blk);
2872 if (v.len > h->len)
2873 v.len = h->len;
2874
2875 memcpy(cap[h->index], v.ptr, v.len);
2876 cap[h->index][v.len]=0;
2877 }
2878 }
2879 }
2880}
2881
2882
2883/* Formats the start line of the request (without CRLF) and puts it in <str> and
2884 * return the written lenght. The line can be truncated if it exceeds <len>.
2885 */
2886static size_t htx_fmt_req_line(const union h1_sl sl, char *str, size_t len)
2887{
2888 struct ist dst = ist2(str, 0);
2889
2890 if (istcat(&dst, sl.rq.m, len) == -1)
2891 goto end;
2892 if (dst.len + 1 > len)
2893 goto end;
2894 dst.ptr[dst.len++] = ' ';
2895
2896 if (istcat(&dst, sl.rq.u, len) == -1)
2897 goto end;
2898 if (dst.len + 1 > len)
2899 goto end;
2900 dst.ptr[dst.len++] = ' ';
2901
2902 istcat(&dst, sl.rq.v, len);
2903 end:
2904 return dst.len;
2905}
2906
2907/*
2908 * Print a debug line with a start line.
2909 */
2910static void htx_debug_stline(const char *dir, struct stream *s, const union h1_sl sl)
2911{
2912 struct session *sess = strm_sess(s);
2913 int max;
2914
2915 chunk_printf(&trash, "%08x:%s.%s[%04x:%04x]: ", s->uniq_id, s->be->id,
2916 dir,
2917 objt_conn(sess->origin) ? (unsigned short)objt_conn(sess->origin)->handle.fd : -1,
2918 objt_cs(s->si[1].end) ? (unsigned short)objt_cs(s->si[1].end)->conn->handle.fd : -1);
2919
2920 max = sl.rq.m.len;
2921 UBOUND(max, trash.size - trash.data - 3);
2922 chunk_memcat(&trash, sl.rq.m.ptr, max);
2923 trash.area[trash.data++] = ' ';
2924
2925 max = sl.rq.u.len;
2926 UBOUND(max, trash.size - trash.data - 2);
2927 chunk_memcat(&trash, sl.rq.u.ptr, max);
2928 trash.area[trash.data++] = ' ';
2929
2930 max = sl.rq.v.len;
2931 UBOUND(max, trash.size - trash.data - 1);
2932 chunk_memcat(&trash, sl.rq.v.ptr, max);
2933 trash.area[trash.data++] = '\n';
2934
2935 shut_your_big_mouth_gcc(write(1, trash.area, trash.data));
2936}
2937
2938/*
2939 * Print a debug line with a header.
2940 */
2941static void htx_debug_hdr(const char *dir, struct stream *s, const struct ist n, const struct ist v)
2942{
2943 struct session *sess = strm_sess(s);
2944 int max;
2945
2946 chunk_printf(&trash, "%08x:%s.%s[%04x:%04x]: ", s->uniq_id, s->be->id,
2947 dir,
2948 objt_conn(sess->origin) ? (unsigned short)objt_conn(sess->origin)->handle.fd : -1,
2949 objt_cs(s->si[1].end) ? (unsigned short)objt_cs(s->si[1].end)->conn->handle.fd : -1);
2950
2951 max = n.len;
2952 UBOUND(max, trash.size - trash.data - 3);
2953 chunk_memcat(&trash, n.ptr, max);
2954 trash.area[trash.data++] = ':';
2955 trash.area[trash.data++] = ' ';
2956
2957 max = v.len;
2958 UBOUND(max, trash.size - trash.data - 1);
2959 chunk_memcat(&trash, v.ptr, max);
2960 trash.area[trash.data++] = '\n';
2961
2962 shut_your_big_mouth_gcc(write(1, trash.area, trash.data));
2963}
2964
2965
/* Run automatically at program startup. Currently a placeholder: the HTX
 * protocol analyzer has nothing to register yet, but the constructor hook is
 * kept so future initializations have an obvious home.
 */
__attribute__((constructor))
static void __htx_protocol_init(void)
{
}
2970
2971
2972/*
2973 * Local variables:
2974 * c-indent-level: 8
2975 * c-basic-offset: 8
2976 * End:
2977 */