/*
 * Stream filters related variables and functions.
 *
 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/buffer.h>
#include <common/debug.h>
#include <common/cfgparse.h>
#include <common/compat.h>
#include <common/config.h>
#include <common/errors.h>
#include <common/namespace.h>
#include <common/standard.h>

#include <types/filters.h>
#include <types/proto_http.h>

#include <proto/compression.h>
#include <proto/filters.h>
#include <proto/flt_http_comp.h>
#include <proto/proto_http.h>
#include <proto/stream.h>
#include <proto/stream_interface.h>

/* Pool used to allocate filters */
struct pool_head *pool2_filter = NULL;

static int handle_analyzer_result(struct stream *s, struct channel *chn, unsigned int an_bit, int ret);

/* - RESUME_FILTER_LOOP and RESUME_FILTER_END must always be used together.
 *   The first one begins a loop and the second one ends it.
 *
 * - BREAK_EXECUTION must be used to break the loop and set the filter from
 *   which to resume the next time.
 *
 * Here is an example:
 *
 *    RESUME_FILTER_LOOP(stream, channel) {
 *        ...
 *        if (cond)
 *             BREAK_EXECUTION(stream, channel, label);
 *        ...
 *    } RESUME_FILTER_END;
 *    ...
 *     label:
 *    ...
 *
 */
#define RESUME_FILTER_LOOP(strm, chn)					\
	do {								\
		struct filter *filter;					\
									\
		if (strm_flt(strm)->current[CHN_IDX(chn)]) {		\
			filter = strm_flt(strm)->current[CHN_IDX(chn)]; \
			strm_flt(strm)->current[CHN_IDX(chn)] = NULL;	\
			goto resume_execution;				\
		}							\
									\
		list_for_each_entry(filter, &strm_flt(strm)->filters, list) { \
		resume_execution:

#define RESUME_FILTER_END						\
		}							\
	} while(0)

#define BREAK_EXECUTION(strm, chn, label)				\
	do {								\
		strm_flt(strm)->current[CHN_IDX(chn)] = filter;		\
		goto label;						\
	} while (0)


/* List head of all known filter keywords */
static struct flt_kw_list flt_keywords = {
	.list = LIST_HEAD_INIT(flt_keywords.list)
};

/*
 * Registers the filter keyword list <kwl> as a list of valid keywords for next
 * parsing sessions.
 */
void
flt_register_keywords(struct flt_kw_list *kwl)
{
	LIST_ADDQ(&flt_keywords.list, &kwl->list);
}

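/* Illustrative sketch, not part of the build: a filter module would normally
 * declare its keywords in a flt_kw_list and register them from a constructor,
 * mirroring the cfg_kws registration at the bottom of this file. The "example"
 * keyword, parse_example_flt() and the exact initializer layout below are
 * assumptions made only for the sake of the example:
 *
 *     static struct flt_kw_list example_flt_kws = { "EXAMPLE", { }, {
 *             { "example", parse_example_flt, NULL },
 *             { NULL, NULL, NULL },
 *         }
 *     };
 *
 *     __attribute__((constructor))
 *     static void
 *     __example_flt_init(void)
 *     {
 *         flt_register_keywords(&example_flt_kws);
 *     }
 */
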
/*
 * Returns a pointer to the filter keyword <kw>, or NULL if not found. If the
 * keyword is found with a NULL ->parse() function, then an attempt is made to
 * find one with a valid ->parse() function. This way it is possible to declare
 * platform-dependent, known keywords as NULL, then only declare them as valid
 * if some options are met. Note that if the requested keyword contains an
 * opening parenthesis, everything from this point is ignored.
 */
struct flt_kw *
flt_find_kw(const char *kw)
{
	int index;
	const char *kwend;
	struct flt_kw_list *kwl;
	struct flt_kw *ret = NULL;

	kwend = strchr(kw, '(');
	if (!kwend)
		kwend = kw + strlen(kw);

	list_for_each_entry(kwl, &flt_keywords.list, list) {
		for (index = 0; kwl->kw[index].kw != NULL; index++) {
			if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
			    kwl->kw[index].kw[kwend-kw] == 0) {
				if (kwl->kw[index].parse)
					return &kwl->kw[index]; /* found it ! */
				else
					ret = &kwl->kw[index];  /* may be OK */
			}
		}
	}
	return ret;
}

/*
 * Dumps all registered "filter" keywords to the <out> string pointer. The
 * unsupported keywords are only dumped if their supported form was not found.
 */
void
flt_dump_kws(char **out)
{
	struct flt_kw_list *kwl;
	int index;

	*out = NULL;
	list_for_each_entry(kwl, &flt_keywords.list, list) {
		for (index = 0; kwl->kw[index].kw != NULL; index++) {
			if (kwl->kw[index].parse ||
			    flt_find_kw(kwl->kw[index].kw) == &kwl->kw[index]) {
				memprintf(out, "%s[%4s] %s%s\n", *out ? *out : "",
				          kwl->scope,
				          kwl->kw[index].kw,
				          kwl->kw[index].parse ? "" : " (not supported)");
			}
		}
	}
}

/*
 * Lists the known filters on <out>
 */
void
list_filters(FILE *out)
{
	char *filters, *p, *f;

	fprintf(out, "Available filters :\n");
	flt_dump_kws(&filters);
	for (p = filters; (f = strtok_r(p, "\n", &p));)
		fprintf(out, "\t%s\n", f);
	free(filters);
}

/*
 * Parses the "filter" keyword. All keywords must be handled by filters
 * themselves.
 */
static int
parse_filter(char **args, int section_type, struct proxy *curpx,
             struct proxy *defpx, const char *file, int line, char **err)
{
	struct flt_conf *fconf = NULL;

	/* Filter cannot be defined on a default proxy */
	if (curpx == defpx) {
		memprintf(err, "parsing [%s:%d] : %s is not allowed in a 'default' section.",
			  file, line, args[0]);
		return -1;
	}
	if (!strcmp(args[0], "filter")) {
		struct flt_kw *kw;
		int cur_arg;

		if (!*args[1]) {
			memprintf(err,
				  "parsing [%s:%d] : missing argument for '%s' in %s '%s'.",
				  file, line, args[0], proxy_type_str(curpx), curpx->id);
			goto error;
		}
		fconf = calloc(1, sizeof(*fconf));
		if (!fconf) {
			memprintf(err, "'%s' : out of memory", args[0]);
			goto error;
		}

		cur_arg = 1;
		kw = flt_find_kw(args[cur_arg]);
		if (kw) {
			if (!kw->parse) {
				memprintf(err, "parsing [%s:%d] : '%s' : "
					  "'%s' option is not implemented in this version (check build options).",
					  file, line, args[0], args[cur_arg]);
				goto error;
			}
			if (kw->parse(args, &cur_arg, curpx, fconf, err, kw->private) != 0) {
				if (err && *err)
					memprintf(err, "'%s' : '%s'",
						  args[0], *err);
				else
					memprintf(err, "'%s' : error encountered while processing '%s'",
						  args[0], args[cur_arg]);
				goto error;
			}
		}
		else {
			flt_dump_kws(err);
			indent_msg(err, 4);
			memprintf(err, "'%s' : unknown keyword '%s'.%s%s",
				  args[0], args[cur_arg],
				  err && *err ? " Registered keywords :" : "", err && *err ? *err : "");
			goto error;
		}
		if (*args[cur_arg]) {
			memprintf(err, "'%s %s' : unknown keyword '%s'.",
				  args[0], args[1], args[cur_arg]);
			goto error;
		}
		if (fconf->ops == NULL) {
			memprintf(err, "'%s %s' : no callbacks defined.",
				  args[0], args[1]);
			goto error;
		}

		LIST_ADDQ(&curpx->filter_configs, &fconf->list);
	}
	return 0;

 error:
	free(fconf);
	return -1;
}
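
/* Illustrative configuration sketch for the "filter" keyword parsed above.
 * Each filter registers its own keyword and parses its own arguments, so the
 * lines below are examples only and assume the trace and HTTP compression
 * filters are available in this build:
 *
 *     frontend www
 *         bind :8000
 *         filter trace name BEFORE-HTTP-COMP
 *         filter compression
 *         filter trace name AFTER-HTTP-COMP
 *         default_backend servers
 */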

/*
 * Calls 'init' callback for all filters attached to a proxy. This happens after
 * the configuration parsing. Filters can finish filling their config. Returns
 * (ERR_ALERT|ERR_FATAL) if an error occurs, 0 otherwise.
 */
static int
flt_init(struct proxy *proxy)
{
	struct flt_conf *fconf;

	list_for_each_entry(fconf, &proxy->filter_configs, list) {
		if (fconf->ops->init && fconf->ops->init(proxy, fconf) < 0)
			return ERR_ALERT|ERR_FATAL;
	}
	return 0;
}

/* Calls flt_init() for all proxies, see above */
static int
flt_init_all()
{
	struct proxy *px;
	int err_code = 0;

	for (px = proxy; px; px = px->next) {
		err_code |= flt_init(px);
		if (err_code & (ERR_ABORT|ERR_FATAL)) {
			Alert("Failed to initialize filters for proxy '%s'.\n",
			      px->id);
			return err_code;
		}
	}
	return 0;
}

/*
 * Calls 'check' callback for all filters attached to a proxy. This happens
 * after the configuration parsing but before filters initialization. Returns
 * the number of encountered errors.
 */
int
flt_check(struct proxy *proxy)
{
	struct flt_conf *fconf;
	int err = 0;

	list_for_each_entry(fconf, &proxy->filter_configs, list) {
		if (fconf->ops->check)
			err += fconf->ops->check(proxy, fconf);
	}
	err += check_legacy_http_comp_flt(proxy);
	return err;
}

/*
 * Calls 'deinit' callback for all filters attached to a proxy. This happens
 * when HAProxy is stopped.
 */
void
flt_deinit(struct proxy *proxy)
{
	struct flt_conf *fconf, *back;

	list_for_each_entry_safe(fconf, back, &proxy->filter_configs, list) {
		if (fconf->ops->deinit)
			fconf->ops->deinit(proxy, fconf);
		LIST_DEL(&fconf->list);
		free(fconf);
	}
}
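
/* Illustrative sketch, not part of the build: the callbacks used throughout
 * this file are reached through FLT_OPS(), i.e. the ops field of the filter's
 * flt_conf. Assuming that field points to the callback table declared in
 * <types/filters.h> (struct flt_ops), a minimal filter would only fill the
 * callbacks it needs; every name below except the field names themselves is
 * hypothetical:
 *
 *     static int
 *     example_stream_start(struct stream *s, struct filter *f)
 *     {
 *         return 0;   // a negative value would abort the stream setup
 *     }
 *
 *     static struct flt_ops example_flt_ops = {
 *         .init         = NULL,                 // per-proxy setup (optional)
 *         .check        = NULL,                 // config validation (optional)
 *         .deinit       = NULL,                 // per-proxy cleanup (optional)
 *         .stream_start = example_stream_start,
 *     };
 *
 *     // and, from the filter's keyword parser: fconf->ops = &example_flt_ops;
 */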

/* Attaches a filter to a stream. Returns -1 if an error occurs, 0 otherwise. */
static int
flt_stream_add_filter(struct stream *s, struct flt_conf *fconf, unsigned int flags)
{
	struct filter *f = pool_alloc2(pool2_filter);

	if (!f) /* not enough memory */
		return -1;
	memset(f, 0, sizeof(*f));
	f->config = fconf;
	f->flags |= flags;

	if (FLT_OPS(f)->attach) {
		int ret = FLT_OPS(f)->attach(s, f);
		if (ret <= 0) {
			pool_free2(pool2_filter, f);
			return ret;
		}
	}

	LIST_ADDQ(&strm_flt(s)->filters, &f->list);
	strm_flt(s)->flags |= STRM_FLT_FL_HAS_FILTERS;
	return 0;
}

/*
 * Called when a stream is created. It attaches all frontend filters to the
 * stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_init(struct stream *s)
{
	struct flt_conf *fconf;

	memset(strm_flt(s), 0, sizeof(*strm_flt(s)));
	LIST_INIT(&strm_flt(s)->filters);
	list_for_each_entry(fconf, &strm_fe(s)->filter_configs, list) {
		if (flt_stream_add_filter(s, fconf, 0) < 0)
			return -1;
	}
	return 0;
}

/*
 * Called when a stream is closed or when analysis ends (for an HTTP stream,
 * this happens after each request/response exchange). When analysis ends,
 * backend filters are removed. When the stream is closed, all filters attached
 * to the stream are removed.
 */
void
flt_stream_release(struct stream *s, int only_backend)
{
	struct filter *filter, *back;

	list_for_each_entry_safe(filter, back, &strm_flt(s)->filters, list) {
		if (!only_backend || (filter->flags & FLT_FL_IS_BACKEND_FILTER)) {
			if (FLT_OPS(filter)->detach)
				FLT_OPS(filter)->detach(s, filter);
			LIST_DEL(&filter->list);
			pool_free2(pool2_filter, filter);
		}
	}
	if (LIST_ISEMPTY(&strm_flt(s)->filters))
		strm_flt(s)->flags &= ~STRM_FLT_FL_HAS_FILTERS;
}

/*
 * Calls 'stream_start' for all filters attached to a stream. This happens when
 * the stream is created, just after calling the flt_stream_init() function.
 * Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_start(struct stream *s)
{
	struct filter *filter;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (FLT_OPS(filter)->stream_start && FLT_OPS(filter)->stream_start(s, filter) < 0)
			return -1;
	}
	return 0;
}

/*
 * Calls 'stream_stop' for all filters attached to a stream. This happens when
 * the stream is stopped, just before calling the flt_stream_release() function.
 */
void
flt_stream_stop(struct stream *s)
{
	struct filter *filter;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (FLT_OPS(filter)->stream_stop)
			FLT_OPS(filter)->stream_stop(s, filter);
	}
}

/*
 * Calls 'check_timeouts' for all filters attached to a stream. This happens
 * when the stream is woken up because of an expired timer.
 */
void
flt_stream_check_timeouts(struct stream *s)
{
	struct filter *filter;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (FLT_OPS(filter)->check_timeouts)
			FLT_OPS(filter)->check_timeouts(s, filter);
	}
}

/*
 * Called when a backend is set for a stream. If the frontend and the backend
 * are not the same, this function attaches all backend filters to the
 * stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_set_stream_backend(struct stream *s, struct proxy *be)
{
	struct flt_conf *fconf;
	struct filter   *filter;

	if (strm_fe(s) == be)
		goto end;

	list_for_each_entry(fconf, &be->filter_configs, list) {
		if (flt_stream_add_filter(s, fconf, FLT_FL_IS_BACKEND_FILTER) < 0)
			return -1;
	}

 end:
	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (FLT_OPS(filter)->stream_set_backend &&
		    FLT_OPS(filter)->stream_set_backend(s, filter, be) < 0)
			return -1;
	}

	return 0;
}

/*
 * Calls 'http_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available (excluding the chunk
 * envelope for chunked messages) in the AN_REQ_HTTP_XFER_BODY and
 * AN_RES_HTTP_XFER_BODY analyzers. It takes care to update the next offset of
 * filters and adjusts available data to be sure that a filter cannot parse more
 * data than its predecessors. A filter can choose to not consume all available
 * data. Returns -1 if an error occurs, the number of consumed bytes otherwise.
 */
int
flt_http_data(struct stream *s, struct http_msg *msg)
{
	struct filter *filter;
	unsigned int buf_i;
	int delta = 0, ret = 0;

	/* Save buffer state */
	buf_i = msg->chn->buf->i;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;

		/* If the HTTP parser is ahead, we update the next offset of the
		 * current filter. This happens for chunked messages, at the
		 * beginning of a new chunk. */
		nxt = &FLT_NXT(filter, msg->chn);
		if (msg->next > *nxt)
			*nxt = msg->next;

		if (FLT_OPS(filter)->http_data) {
			unsigned int i = msg->chn->buf->i;

			ret = FLT_OPS(filter)->http_data(s, filter, msg);
			if (ret < 0)
				break;
			delta += (int)(msg->chn->buf->i - i);

			/* Update the next offset of the current filter */
			*nxt += ret;

			/* And set this value as the bound for the next
			 * filter. It will not be able to parse more data than
			 * this one. */
			msg->chn->buf->i = *nxt;
		}
		else {
			/* Consume all available data and update the next offset
			 * of the current filter. buf->i is untouched here. */
			ret = MIN(msg->chunk_len + msg->next, msg->chn->buf->i) - *nxt;
			*nxt += ret;
		}
	}

	/* Restore the original buffer state */
	msg->chn->buf->i = buf_i + delta;

	return ret;
}
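
/* Worked example with made-up figures: assume two "data" filters F1 and F2,
 * a buffer where msg->chn->buf->i is 100 and both next offsets are 0. If
 * F1->http_data() consumes 60 bytes, FLT_NXT(F1) becomes 60 and buf->i is
 * temporarily lowered to 60, so F2 cannot parse beyond what F1 accepted. If
 * F2 then consumes 40 bytes, FLT_NXT(F2) becomes 40 and the function returns
 * 40, the amount accepted by the most restrictive filter. buf->i is restored
 * to 100 (plus any delta a filter added or removed) before returning.
 */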

/*
 * Calls 'http_chunk_trailers' callback for all "data" filters attached to a
 * stream. This function is called for chunked messages only, when a part of
 * the trailers was parsed in the AN_REQ_HTTP_XFER_BODY and
 * AN_RES_HTTP_XFER_BODY analyzers. Filters can know how much data was parsed
 * by the HTTP parser since the last call thanks to the msg->sol value. Returns
 * a negative value if an error occurs, any other value otherwise.
 */
int
flt_http_chunk_trailers(struct stream *s, struct http_msg *msg)
{
	struct filter *filter;
	int            ret = 1;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;

		/* Be sure to set the next offset of the filter at the right
		 * place. This is really useful when the first part of the
		 * trailers was parsed. */
		nxt = &FLT_NXT(filter, msg->chn);
		*nxt = msg->next;

		if (FLT_OPS(filter)->http_chunk_trailers) {
			ret = FLT_OPS(filter)->http_chunk_trailers(s, filter, msg);
			if (ret < 0)
				break;
		}
		/* Update the next offset of the current filter. Here all data
		 * are always consumed. */
		*nxt += msg->sol;
	}
	return ret;
}

/*
 * Calls 'http_end' callback for all filters attached to a stream. All filters
 * are called here, but only if there is at least one "data" filter. This
 * function is called when all data have been parsed and forwarded. 'http_end'
 * callback is resumable, so this function returns a negative value if an error
 * occurs, 0 if it needs to wait for some reason, any other value otherwise.
 */
int
flt_http_end(struct stream *s, struct http_msg *msg)
{
	int ret = 1;

	RESUME_FILTER_LOOP(s, msg->chn) {
		if (FLT_OPS(filter)->http_end) {
			ret = FLT_OPS(filter)->http_end(s, filter, msg);
			if (ret <= 0)
				BREAK_EXECUTION(s, msg->chn, end);
		}
	} RESUME_FILTER_END;
end:
	return ret;
}

/*
 * Calls 'http_reset' callback for all filters attached to a stream. This
 * happens when a 100-continue response is received.
 */
void
flt_http_reset(struct stream *s, struct http_msg *msg)
{
	struct filter *filter;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (FLT_OPS(filter)->http_reset)
			FLT_OPS(filter)->http_reset(s, filter, msg);
	}
}

/*
 * Calls 'http_reply' callback for all filters attached to a stream when
 * HAProxy decides to stop the HTTP message processing.
 */
void
flt_http_reply(struct stream *s, short status, const struct chunk *msg)
{
	struct filter *filter;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (FLT_OPS(filter)->http_reply)
			FLT_OPS(filter)->http_reply(s, filter, status, msg);
	}
}

/*
 * Calls 'http_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded in the
 * AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY analyzers. It takes care to
 * update the forward offset of filters and adjusts "forwardable" data to be
 * sure that a filter cannot forward more data than its predecessors. A filter
 * can choose to not forward all parsed data. Returns a negative value if an
 * error occurs, else the number of forwarded bytes.
 */
int
flt_http_forward_data(struct stream *s, struct http_msg *msg, unsigned int len)
{
	struct filter *filter;
	int            ret = len;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt, *fwd;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;

		/* If the HTTP parser is ahead, we update the next offset of the
		 * current filter. This happens for chunked messages, when the
		 * chunk envelope is parsed. */
		nxt = &FLT_NXT(filter, msg->chn);
		fwd = &FLT_FWD(filter, msg->chn);
		if (msg->next > *nxt)
			*nxt = msg->next;

		if (FLT_OPS(filter)->http_forward_data) {
			/* Remove bytes that the current filter considered as
			 * forwarded */
			ret = FLT_OPS(filter)->http_forward_data(s, filter, msg, ret - *fwd);
			if (ret < 0)
				goto end;
		}

		/* Adjust bytes that the current filter considers as
		 * forwarded */
		*fwd += ret;

		/* And set this value as the bound for the next filter. It will
		 * not be able to forward more data than the current one. */
		ret = *fwd;
	}

	if (!ret)
		goto end;

	/* Finally, adjust filters offsets by removing data that HAProxy will
	 * forward. */
	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;
		FLT_NXT(filter, msg->chn) -= ret;
		FLT_FWD(filter, msg->chn) -= ret;
	}
 end:
	return ret;
}
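
/* Worked example with made-up figures, continuing the one above: with the
 * same two "data" filters F1 and F2, assume <len> is 40 and both forward
 * offsets are 0. F1 is offered 40 - 0 = 40 bytes and accepts 30, so
 * FLT_FWD(F1) becomes 30 and the bound passed to F2 is 30. If F2 accepts all
 * 30, FLT_FWD(F2) becomes 30 and 30 is returned: that is what HAProxy will
 * actually forward. The final loop then subtracts those 30 bytes from every
 * data filter's next and forward offsets, since they now refer to data that
 * is about to leave the buffer.
 */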

/*
 * Calls 'channel_start_analyze' callback for all filters attached to a
 * stream. This function is called when we start to analyze a request or a
 * response. For frontend filters, it is called before all other analyzers. For
 * backend ones, it is called before all backend
 * analyzers. 'channel_start_analyze' callback is resumable, so this function
 * returns 0 if an error occurs or if it needs to wait, any other value
 * otherwise.
 */
int
flt_start_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	/* If this function is called, this means there is at least one filter,
	 * so we do not need to check the filter list's emptiness. */

	/* Set flag on channel to tell that the channel is filtered */
	chn->flags |= CF_FLT_ANALYZE;

	RESUME_FILTER_LOOP(s, chn) {
		if (!(chn->flags & CF_ISRESP)) {
			if (an_bit == AN_REQ_FLT_START_BE &&
			    !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
				continue;
		}
		else {
			if (an_bit == AN_RES_FLT_START_BE &&
			    !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
				continue;
		}

		FLT_NXT(filter, chn) = 0;
		FLT_FWD(filter, chn) = 0;

		if (FLT_OPS(filter)->channel_start_analyze) {
			ret = FLT_OPS(filter)->channel_start_analyze(s, filter, chn);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, end);
		}
	} RESUME_FILTER_END;

 end:
	return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Calls 'channel_pre_analyze' callback for all filters attached to a
 * stream. This function is called BEFORE each analyzer attached to a channel,
 * except analyzers responsible for data sending. 'channel_pre_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 *
 * Note this function can be called many times for the same analyzer. In fact,
 * it is called until the analyzer finishes its processing.
 */
int
flt_pre_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	RESUME_FILTER_LOOP(s, chn) {
		if (FLT_OPS(filter)->channel_pre_analyze && (filter->pre_analyzers & an_bit)) {
			ret = FLT_OPS(filter)->channel_pre_analyze(s, filter, chn, an_bit);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, check_result);
		}
	} RESUME_FILTER_END;

 check_result:
	return handle_analyzer_result(s, chn, 0, ret);
}

/*
 * Calls 'channel_post_analyze' callback for all filters attached to a
 * stream. This function is called AFTER each analyzer attached to a channel,
 * except analyzers responsible for data sending. 'channel_post_analyze'
 * callback is NOT resumable, so this function returns 0 if an error occurs,
 * any other value otherwise.
 *
 * Here, AFTER means when the analyzer finishes its processing.
 */
int
flt_post_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	struct filter *filter;
	int            ret = 1;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (FLT_OPS(filter)->channel_post_analyze && (filter->post_analyzers & an_bit)) {
			ret = FLT_OPS(filter)->channel_post_analyze(s, filter, chn, an_bit);
			if (ret < 0)
				break;
		}
	}
	return handle_analyzer_result(s, chn, 0, ret);
}

/*
 * This function is the AN_REQ/RES_FLT_HTTP_HDRS analyzer, used to filter HTTP
 * headers of a request or a response. Returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_analyze_http_headers(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	struct filter   *filter;
	struct http_msg *msg;
	int              ret = 1;

	msg = ((chn->flags & CF_ISRESP) ? &s->txn->rsp : &s->txn->req);
	RESUME_FILTER_LOOP(s, chn) {
		if (FLT_OPS(filter)->http_headers) {
			ret = FLT_OPS(filter)->http_headers(s, filter, msg);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, check_result);
		}
	} RESUME_FILTER_END;

	/* We increase next offset of all "data" filters after all processing on
	 * headers because any filter can alter them. So the definitive size of
	 * headers (msg->sov) is only known when all filters have been
	 * called. */
	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		/* Handle "data" filters only */
		if (!IS_DATA_FILTER(filter, chn))
			continue;
		FLT_NXT(filter, chn) = msg->sov;
	}

 check_result:
	return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Calls 'channel_end_analyze' callback for all filters attached to a
 * stream. This function is called when we stop analyzing a request or a
 * response. It is called after all other analyzers. 'channel_end_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_end_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	/* Check if all filters attached to the stream have finished their
	 * processing on this channel. */
	if (!(chn->flags & CF_FLT_ANALYZE))
		goto sync;

	RESUME_FILTER_LOOP(s, chn) {
		FLT_NXT(filter, chn) = 0;
		FLT_FWD(filter, chn) = 0;
		unregister_data_filter(s, chn, filter);

		if (FLT_OPS(filter)->channel_end_analyze) {
			ret = FLT_OPS(filter)->channel_end_analyze(s, filter, chn);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, end);
		}
	} RESUME_FILTER_END;

 end:
	/* We don't remove this analyzer yet because we need to synchronize
	 * both channels. So here, we just remove the CF_FLT_ANALYZE flag. */
	ret = handle_analyzer_result(s, chn, 0, ret);
	if (ret)
		chn->flags &= ~CF_FLT_ANALYZE;

 sync:
	/* Now we can check if filters have finished their work on both
	 * channels */
	if (!(s->req.flags & CF_FLT_ANALYZE) && !(s->res.flags & CF_FLT_ANALYZE)) {
		/* Sync channels by removing this analyzer for both channels */
		s->req.analysers &= ~AN_REQ_FLT_END;
		s->res.analysers &= ~AN_RES_FLT_END;

		/* Clean up the HTTP transaction if needed */
		if (s->txn && (s->txn->flags & TX_WAIT_CLEANUP))
			http_end_txn_clean_session(s);

		/* Remove backend filters from the list */
		flt_stream_release(s, 1);
	}

	return ret;
}


/*
 * Calls 'tcp_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available. It takes care to update
 * the next offset of filters and adjusts available data to be sure that a
 * filter cannot parse more data than its predecessors. A filter can choose to
 * not consume all available data. Returns -1 if an error occurs, the number of
 * consumed bytes otherwise.
 */
static int
flt_data(struct stream *s, struct channel *chn)
{
	struct filter *filter;
	unsigned int buf_i;
	int delta = 0, ret = 0;

	/* Save buffer state */
	buf_i = chn->buf->i;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, chn))
			continue;

		nxt = &FLT_NXT(filter, chn);
		if (FLT_OPS(filter)->tcp_data) {
			unsigned int i = chn->buf->i;

			ret = FLT_OPS(filter)->tcp_data(s, filter, chn);
			if (ret < 0)
				break;
			delta += (int)(chn->buf->i - i);

			/* Increase next offset of the current filter */
			*nxt += ret;

			/* And set this value as the bound for the next
			 * filter. It will not be able to parse more data than
			 * the current one. */
			chn->buf->i = *nxt;
		}
		else {
			/* Consume all available data */
			*nxt = chn->buf->i;
		}

		/* Update <ret> value to be sure to have the last one when we
		 * exit from the loop. This value will be used to know how much
		 * data are "forwardable" */
		ret = *nxt;
	}

	/* Restore the original buffer state */
	chn->buf->i = buf_i + delta;

	return ret;
}

/*
 * Calls 'tcp_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded. It takes
 * care to update the forward offset of filters and adjusts "forwardable" data
 * to be sure that a filter cannot forward more data than its predecessors. A
 * filter can choose to not forward all parsed data. Returns a negative value if
 * an error occurs, else the number of forwarded bytes.
 */
static int
flt_forward_data(struct stream *s, struct channel *chn, unsigned int len)
{
	struct filter *filter;
	int            ret = len;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *fwd;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, chn))
			continue;

		fwd = &FLT_FWD(filter, chn);
		if (FLT_OPS(filter)->tcp_forward_data) {
			/* Remove bytes that the current filter considered as
			 * forwarded */
			ret = FLT_OPS(filter)->tcp_forward_data(s, filter, chn, ret - *fwd);
			if (ret < 0)
				goto end;
		}

		/* Adjust bytes that the current filter considers as
		 * forwarded */
		*fwd += ret;

		/* And set this value as the bound for the next filter. It will
		 * not be able to forward more data than the current one. */
Christopher Fauletda02e172015-12-04 09:25:05 +0100964 ret = *fwd;
Christopher Fauletd7c91962015-04-30 11:48:27 +0200965 }
966
967 if (!ret)
968 goto end;
969
Christopher Fauletda02e172015-12-04 09:25:05 +0100970 /* Finally, adjust filters offsets by removing data that HAProxy will
971 * forward. */
Christopher Fauletfcf035c2015-12-03 11:48:03 +0100972 list_for_each_entry(filter, &strm_flt(s)->filters, list) {
Christopher Fauletda02e172015-12-04 09:25:05 +0100973 if (!IS_DATA_FILTER(filter, chn))
974 continue;
Christopher Fauletd7c91962015-04-30 11:48:27 +0200975 FLT_NXT(filter, chn) -= ret;
976 FLT_FWD(filter, chn) -= ret;
977 }
978
Christopher Fauletd7c91962015-04-30 11:48:27 +0200979 end:
980 return ret;
981}
982
/*
 * Called when TCP data must be filtered on a channel. This function is the
 * AN_REQ/RES_FLT_XFER_DATA analyzer. When called, it is responsible for
 * forwarding data when the proxy is not in http mode. Behind the scenes, it
 * calls consecutively 'tcp_data' and 'tcp_forward_data' callbacks for all
 * "data" filters attached to a stream. Returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_xfer_data(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	/* If there are no "data" filters, we do nothing */
	if (!HAS_DATA_FILTERS(s, chn))
		goto end;

	/* Be sure that the output is still open. Else we stop the data
	 * filtering. */
	if ((chn->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
	    ((chn->flags & CF_SHUTW) && (chn->to_forward || chn->buf->o)))
		goto end;

	/* Let all "data" filters parse the incoming data */
	ret = flt_data(s, chn);
	if (ret < 0)
		goto end;

	/* And forward them */
	ret = flt_forward_data(s, chn, ret);
	if (ret < 0)
		goto end;

	/* Consume data that all filters consider as forwarded. */
	b_adv(chn->buf, ret);

	/* Stop waiting for data if the input is closed and no data is pending,
	 * or if the output is closed. */
	if ((chn->flags & CF_SHUTW) ||
	    ((chn->flags & CF_SHUTR) && !buffer_pending(chn->buf))) {
		ret = 1;
		goto end;
	}

	/* Wait for data */
	return 0;
 end:
	/* Terminate the data filtering. If <ret> is negative, an error was
	 * encountered during the filtering. */
	return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Handles the result of the filters' analyzers. It returns 0 if an error
 * occurs or if it needs to wait, any other value otherwise.
 */
static int
handle_analyzer_result(struct stream *s, struct channel *chn,
		       unsigned int an_bit, int ret)
{
	int finst;

	if (ret < 0)
		goto return_bad_req;
	else if (!ret)
		goto wait;

	/* End of job, return OK */
	if (an_bit) {
		chn->analysers  &= ~an_bit;
		chn->analyse_exp = TICK_ETERNITY;
	}
	return 1;

 return_bad_req:
	/* An error occurred */
	channel_abort(&s->req);
	channel_abort(&s->res);

	if (!(chn->flags & CF_ISRESP)) {
		s->req.analysers &= AN_REQ_FLT_END;
		finst = SF_FINST_R;
		/* FIXME: incr counters */
	}
	else {
		s->res.analysers &= AN_RES_FLT_END;
		finst = SF_FINST_H;
		/* FIXME: incr counters */
	}

	if (s->txn) {
		/* Do not do that when we are waiting for the next request */
		if (s->txn->status)
			http_reply_and_close(s, s->txn->status, NULL);
		else {
			s->txn->status = 400;
			http_reply_and_close(s, 400, http_error_message(s));
		}
	}

	if (!(s->flags & SF_ERR_MASK))
		s->flags |= SF_ERR_PRXCOND;
	if (!(s->flags & SF_FINST_MASK))
		s->flags |= finst;
	return 0;

 wait:
	if (!(chn->flags & CF_ISRESP))
		channel_dont_connect(chn);
	return 0;
}


/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted, doing so helps
 * all code contributors.
 * Optional keywords are also declared with a NULL ->parse() function so that
 * the config parser can report an appropriate error when a known keyword was
 * not enabled. */
static struct cfg_kw_list cfg_kws = {ILH, {
		{ CFG_LISTEN, "filter", parse_filter },
		{ 0, NULL, NULL },
	}
};

__attribute__((constructor))
static void
__filters_init(void)
{
	pool2_filter = create_pool("filter", sizeof(struct filter), MEM_F_SHARED);
	cfg_register_keywords(&cfg_kws);
	hap_register_post_check(flt_init_all);
}

__attribute__((destructor))
static void
__filters_deinit(void)
{
	pool_destroy2(pool2_filter);
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */