/*
 * Stream filters related variables and functions.
 *
 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/buffer.h>
#include <common/debug.h>
#include <common/cfgparse.h>
#include <common/compat.h>
#include <common/config.h>
#include <common/errors.h>
#include <common/namespace.h>
#include <common/standard.h>
#include <common/hathreads.h>

#include <types/filters.h>
#include <types/proto_http.h>

#include <proto/compression.h>
#include <proto/filters.h>
#include <proto/flt_http_comp.h>
#include <proto/proto_http.h>
#include <proto/stream.h>
#include <proto/stream_interface.h>

/* Pool used to allocate filters */
struct pool_head *pool_head_filter = NULL;

static int handle_analyzer_result(struct stream *s, struct channel *chn, unsigned int an_bit, int ret);

/* - RESUME_FILTER_LOOP and RESUME_FILTER_END must always be used together.
 *   The first one begins a loop and the second one ends it.
 *
 * - BREAK_EXECUTION must be used to break the loop and set the filter from
 *   which to resume the next time.
 *
 *   Here is an example:
 *
 *     RESUME_FILTER_LOOP(stream, channel) {
 *         ...
 *         if (cond)
 *              BREAK_EXECUTION(stream, channel, label);
 *         ...
 *     } RESUME_FILTER_END;
 *     ...
 *      label:
 *     ...
 *
 */
#define RESUME_FILTER_LOOP(strm, chn)                                   \
        do {                                                            \
                struct filter *filter;                                  \
                                                                        \
                if (strm_flt(strm)->current[CHN_IDX(chn)]) {            \
                        filter = strm_flt(strm)->current[CHN_IDX(chn)]; \
                        strm_flt(strm)->current[CHN_IDX(chn)] = NULL;   \
                        goto resume_execution;                          \
                }                                                       \
                                                                        \
                list_for_each_entry(filter, &strm_flt(strm)->filters, list) { \
                resume_execution:

#define RESUME_FILTER_END                                               \
                }                                                       \
        } while(0)

#define BREAK_EXECUTION(strm, chn, label)                               \
        do {                                                            \
                strm_flt(strm)->current[CHN_IDX(chn)] = filter;         \
                goto label;                                             \
        } while (0)

/* List head of all known filter keywords */
static struct flt_kw_list flt_keywords = {
        .list = LIST_HEAD_INIT(flt_keywords.list)
};

/*
 * Registers the filter keyword list <kwl> as a list of valid keywords for next
 * parsing sessions.
 */
void
flt_register_keywords(struct flt_kw_list *kwl)
{
        LIST_ADDQ(&flt_keywords.list, &kwl->list);
}

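/* Illustrative sketch (not part of the original file): a filter module
 * typically declares its configuration keywords in a flt_kw_list and
 * registers them from a constructor, in the spirit of flt_http_comp. The
 * "my-filter"/my_flt_* names below are hypothetical.
 *
 *     static struct flt_kw_list my_flt_kws = { "MYFLT", { }, {
 *             { "my-filter", my_flt_parse, NULL },
 *             { NULL, NULL, NULL },
 *         }
 *     };
 *
 *     __attribute__((constructor))
 *     static void __my_flt_init(void)
 *     {
 *             flt_register_keywords(&my_flt_kws);
 *     }
 */
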
/*
 * Returns a pointer to the filter keyword <kw>, or NULL if not found. If the
 * keyword is found with a NULL ->parse() function, then an attempt is made to
 * find one with a valid ->parse() function. This way it is possible to declare
 * platform-dependent, known keywords as NULL, then only declare them as valid
 * if some options are met. Note that if the requested keyword contains an
 * opening parenthesis, everything from this point is ignored.
 */
struct flt_kw *
flt_find_kw(const char *kw)
{
        int index;
        const char *kwend;
        struct flt_kw_list *kwl;
        struct flt_kw *ret = NULL;

        kwend = strchr(kw, '(');
        if (!kwend)
                kwend = kw + strlen(kw);

        list_for_each_entry(kwl, &flt_keywords.list, list) {
                for (index = 0; kwl->kw[index].kw != NULL; index++) {
                        if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
                            kwl->kw[index].kw[kwend-kw] == 0) {
                                if (kwl->kw[index].parse)
                                        return &kwl->kw[index]; /* found it ! */
                                else
                                        ret = &kwl->kw[index];  /* may be OK */
                        }
                }
        }
        return ret;
}

/*
 * Dumps all registered "filter" keywords to the <out> string pointer. The
 * unsupported keywords are only dumped if their supported form was not found.
 */
void
flt_dump_kws(char **out)
{
        struct flt_kw_list *kwl;
        int index;

        *out = NULL;
        list_for_each_entry(kwl, &flt_keywords.list, list) {
                for (index = 0; kwl->kw[index].kw != NULL; index++) {
                        if (kwl->kw[index].parse ||
                            flt_find_kw(kwl->kw[index].kw) == &kwl->kw[index]) {
                                memprintf(out, "%s[%4s] %s%s\n", *out ? *out : "",
                                          kwl->scope,
                                          kwl->kw[index].kw,
                                          kwl->kw[index].parse ? "" : " (not supported)");
                        }
                }
        }
}

/*
 * Lists the known filters on <out>
 */
void
list_filters(FILE *out)
{
        char *filters, *p, *f;

        fprintf(out, "Available filters :\n");
        flt_dump_kws(&filters);
        for (p = filters; (f = strtok_r(p,"\n",&p));)
                fprintf(out, "\t%s\n", f);
        free(filters);
}

/*
 * Parses the "filter" keyword. All keywords must be handled by filters
 * themselves
 */
static int
parse_filter(char **args, int section_type, struct proxy *curpx,
             struct proxy *defpx, const char *file, int line, char **err)
{
        struct flt_conf *fconf = NULL;

        /* Filter cannot be defined on a default proxy */
        if (curpx == defpx) {
                memprintf(err, "parsing [%s:%d] : %s is not allowed in a 'default' section.",
                          file, line, args[0]);
                return -1;
        }
        if (!strcmp(args[0], "filter")) {
                struct flt_kw *kw;
                int cur_arg;

                if (!*args[1]) {
                        memprintf(err,
                                  "parsing [%s:%d] : missing argument for '%s' in %s '%s'.",
                                  file, line, args[0], proxy_type_str(curpx), curpx->id);
                        goto error;
                }
                fconf = calloc(1, sizeof(*fconf));
                if (!fconf) {
                        memprintf(err, "'%s' : out of memory", args[0]);
                        goto error;
                }

                cur_arg = 1;
                kw = flt_find_kw(args[cur_arg]);
                if (kw) {
                        if (!kw->parse) {
                                memprintf(err, "parsing [%s:%d] : '%s' : "
                                          "'%s' option is not implemented in this version (check build options).",
                                          file, line, args[0], args[cur_arg]);
                                goto error;
                        }
                        if (kw->parse(args, &cur_arg, curpx, fconf, err, kw->private) != 0) {
                                if (err && *err)
                                        memprintf(err, "'%s' : '%s'",
                                                  args[0], *err);
                                else
                                        memprintf(err, "'%s' : error encountered while processing '%s'",
                                                  args[0], args[cur_arg]);
                                goto error;
                        }
                }
                else {
                        flt_dump_kws(err);
                        indent_msg(err, 4);
                        memprintf(err, "'%s' : unknown keyword '%s'.%s%s",
                                  args[0], args[cur_arg],
                                  err && *err ? " Registered keywords :" : "", err && *err ? *err : "");
                        goto error;
                }
                if (*args[cur_arg]) {
                        memprintf(err, "'%s %s' : unknown keyword '%s'.",
                                  args[0], args[1], args[cur_arg]);
                        goto error;
                }
                if (fconf->ops == NULL) {
                        memprintf(err, "'%s %s' : no callbacks defined.",
                                  args[0], args[1]);
                        goto error;
                }

                LIST_ADDQ(&curpx->filter_configs, &fconf->list);
        }
        return 0;

  error:
        free(fconf);
        return -1;
}
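
/* Illustrative sketch (not part of the original file): the "filter" keyword
 * parsed above appears inside a proxy section of the configuration, e.g.
 *
 *     frontend www
 *         bind :8080
 *         filter trace name MY-TRACE
 *
 * and the keyword's ->parse() callback is expected to fill the <fconf>
 * allocated by parse_filter(), in particular fconf->ops, otherwise the
 * "no callbacks defined" check above fails. The my_flt_* names below are
 * hypothetical:
 *
 *     static struct flt_ops my_flt_ops = {
 *             .init   = my_flt_init,
 *             .deinit = my_flt_deinit,
 *     };
 *
 *     static int
 *     my_flt_parse(char **args, int *cur_arg, struct proxy *px,
 *                  struct flt_conf *fconf, char **err, void *private)
 *     {
 *             fconf->ops  = &my_flt_ops;
 *             fconf->conf = NULL;
 *             (*cur_arg)++;
 *             return 0;
 *     }
 */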

/*
 * Calls 'init' callback for all filters attached to a proxy. This happens after
 * the configuration parsing. Filters can finish filling their config. Returns
 * (ERR_ALERT|ERR_FATAL) if an error occurs, 0 otherwise.
 */
static int
flt_init(struct proxy *proxy)
{
        struct flt_conf *fconf;

        list_for_each_entry(fconf, &proxy->filter_configs, list) {
                if (fconf->ops->init && fconf->ops->init(proxy, fconf) < 0)
                        return ERR_ALERT|ERR_FATAL;
        }
        return 0;
}

/*
 * Calls 'init_per_thread' callback for all filters attached to a proxy for each
 * thread. This happens after the thread creation. Filters can finish filling
 * their config. Returns (ERR_ALERT|ERR_FATAL) if an error occurs, 0 otherwise.
 */
static int
flt_init_per_thread(struct proxy *proxy)
{
        struct flt_conf *fconf;

        list_for_each_entry(fconf, &proxy->filter_configs, list) {
                if (fconf->ops->init_per_thread && fconf->ops->init_per_thread(proxy, fconf) < 0)
                        return ERR_ALERT|ERR_FATAL;
        }
        return 0;
}

/* Calls flt_init() for all proxies, see above */
static int
flt_init_all()
{
        struct proxy *px;
        int err_code = 0;

        for (px = proxies_list; px; px = px->next) {
                err_code |= flt_init(px);
                if (err_code & (ERR_ABORT|ERR_FATAL)) {
                        ha_alert("Failed to initialize filters for proxy '%s'.\n",
                                 px->id);
                        return err_code;
                }
        }
        return 0;
}

/* Calls flt_init_per_thread() for all proxies, see above. Be careful here: it
 * returns 0 if an error occurred. This is the opposite of flt_init_all(). */
static int
flt_init_all_per_thread()
{
        struct proxy *px;
        int err_code = 0;

        for (px = proxies_list; px; px = px->next) {
                err_code = flt_init_per_thread(px);
                if (err_code & (ERR_ABORT|ERR_FATAL)) {
                        ha_alert("Failed to initialize filters for proxy '%s' for thread %u.\n",
                                 px->id, tid);
                        return 0;
                }
        }
        return 1;
}

/*
 * Calls 'check' callback for all filters attached to a proxy. This happens
 * after the configuration parsing but before filters initialization. Returns
 * the number of encountered errors.
 */
int
flt_check(struct proxy *proxy)
{
        struct flt_conf *fconf;
        int err = 0;

        list_for_each_entry(fconf, &proxy->filter_configs, list) {
                if (fconf->ops->check)
                        err += fconf->ops->check(proxy, fconf);
        }
        err += check_legacy_http_comp_flt(proxy);

        if (!LIST_ISEMPTY(&proxy->filter_configs) &&
            (proxy->options2 & PR_O2_USE_HTX)) {
                ha_alert("config: %s '%s' : filters cannot be used when "
                         "the HTX internal representation is enabled.\n",
                         proxy_type_str(proxy), proxy->id);
                err++;
        }

        return err;
}

/*
 * Calls 'deinit' callback for all filters attached to a proxy. This happens
 * when HAProxy is stopped.
 */
void
flt_deinit(struct proxy *proxy)
{
        struct flt_conf *fconf, *back;

        list_for_each_entry_safe(fconf, back, &proxy->filter_configs, list) {
                if (fconf->ops->deinit)
                        fconf->ops->deinit(proxy, fconf);
                LIST_DEL(&fconf->list);
                free(fconf);
        }
}

/*
 * Calls 'deinit_per_thread' callback for all filters attached to a proxy for
 * each thread. This happens before exiting a thread.
 */
void
flt_deinit_per_thread(struct proxy *proxy)
{
        struct flt_conf *fconf, *back;

        list_for_each_entry_safe(fconf, back, &proxy->filter_configs, list) {
                if (fconf->ops->deinit_per_thread)
                        fconf->ops->deinit_per_thread(proxy, fconf);
        }
}


/* Calls flt_deinit_per_thread() for all proxies, see above */
static void
flt_deinit_all_per_thread()
{
        struct proxy *px;

        for (px = proxies_list; px; px = px->next)
                flt_deinit_per_thread(px);
}

/* Attaches a filter to a stream. Returns -1 if an error occurs, 0 otherwise. */
static int
flt_stream_add_filter(struct stream *s, struct flt_conf *fconf, unsigned int flags)
{
        struct filter *f = pool_alloc(pool_head_filter);

        if (!f) /* not enough memory */
                return -1;
        memset(f, 0, sizeof(*f));
        f->config = fconf;
        f->flags |= flags;

        if (FLT_OPS(f)->attach) {
                int ret = FLT_OPS(f)->attach(s, f);
                if (ret <= 0) {
                        pool_free(pool_head_filter, f);
                        return ret;
                }
        }

        LIST_ADDQ(&strm_flt(s)->filters, &f->list);
        strm_flt(s)->flags |= STRM_FLT_FL_HAS_FILTERS;
        return 0;
}

/*
 * Called when a stream is created. It attaches all frontend filters to the
 * stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_init(struct stream *s)
{
        struct flt_conf *fconf;

        memset(strm_flt(s), 0, sizeof(*strm_flt(s)));
        LIST_INIT(&strm_flt(s)->filters);
        list_for_each_entry(fconf, &strm_fe(s)->filter_configs, list) {
                if (flt_stream_add_filter(s, fconf, 0) < 0)
                        return -1;
        }
        return 0;
}

/*
 * Called when a stream is closed or when analysis ends (for an HTTP stream,
 * this happens after each request/response exchange). When analysis ends,
 * backend filters are removed. When the stream is closed, all filters attached
 * to the stream are removed.
 */
void
flt_stream_release(struct stream *s, int only_backend)
{
        struct filter *filter, *back;

        list_for_each_entry_safe(filter, back, &strm_flt(s)->filters, list) {
                if (!only_backend || (filter->flags & FLT_FL_IS_BACKEND_FILTER)) {
                        if (FLT_OPS(filter)->detach)
                                FLT_OPS(filter)->detach(s, filter);
                        LIST_DEL(&filter->list);
                        pool_free(pool_head_filter, filter);
                }
        }
        if (LIST_ISEMPTY(&strm_flt(s)->filters))
                strm_flt(s)->flags &= ~STRM_FLT_FL_HAS_FILTERS;
}

/*
 * Calls 'stream_start' for all filters attached to a stream. This happens when
 * the stream is created, just after calling the flt_stream_init
 * function. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_start(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->stream_start && FLT_OPS(filter)->stream_start(s, filter) < 0)
                        return -1;
        }
        return 0;
}

/*
 * Calls 'stream_stop' for all filters attached to a stream. This happens when
 * the stream is stopped, just before calling the flt_stream_release function.
 */
void
flt_stream_stop(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->stream_stop)
                        FLT_OPS(filter)->stream_stop(s, filter);
        }
}

/*
 * Calls 'check_timeouts' for all filters attached to a stream. This happens
 * when the stream is woken up because of an expired timer.
 */
void
flt_stream_check_timeouts(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->check_timeouts)
                        FLT_OPS(filter)->check_timeouts(s, filter);
        }
}

/*
 * Called when a backend is set for a stream. If the frontend and the backend
 * are not the same, this function attaches all backend filters to the
 * stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_set_stream_backend(struct stream *s, struct proxy *be)
{
        struct flt_conf *fconf;
        struct filter *filter;

        if (strm_fe(s) == be)
                goto end;

        list_for_each_entry(fconf, &be->filter_configs, list) {
                if (flt_stream_add_filter(s, fconf, FLT_FL_IS_BACKEND_FILTER) < 0)
                        return -1;
        }

  end:
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->stream_set_backend &&
                    FLT_OPS(filter)->stream_set_backend(s, filter, be) < 0)
                        return -1;
        }

        return 0;
}

/*
 * Calls 'http_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available (excluding chunks
 * envelope for chunked messages) in the AN_REQ_HTTP_XFER_BODY and
 * AN_RES_HTTP_XFER_BODY analyzers. It takes care to update the next offset of
 * filters and adjusts available data to be sure that a filter cannot parse more
 * data than its predecessors. A filter can choose to not consume all available
 * data. Returns -1 if an error occurs, the number of consumed bytes otherwise.
 */
int
flt_http_data(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;
        unsigned int buf_i;
        int delta = 0, ret = 0;

        /* Save buffer state */
        buf_i = ci_data(msg->chn);

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;

                /* If the HTTP parser is ahead, we update the next offset of the
                 * current filter. This happens for chunked messages, at the
                 * beginning of a new chunk. */
                nxt = &FLT_NXT(filter, msg->chn);
                if (msg->next > *nxt)
                        *nxt = msg->next;

                if (FLT_OPS(filter)->http_data) {
                        unsigned int i = ci_data(msg->chn);

                        ret = FLT_OPS(filter)->http_data(s, filter, msg);
                        if (ret < 0)
                                break;
                        delta += (int)(ci_data(msg->chn) - i);

                        /* Update the next offset of the current filter */
                        *nxt += ret;

                        /* And set this value as the bound for the next
                         * filter. It will not be able to parse more data than
                         * this one. */
                        b_set_data(&msg->chn->buf, co_data(msg->chn) + *nxt);
                }
                else {
                        /* Consume all available data and update the next offset
                         * of the current filter. buf->i is untouched here. */
                        ret = MIN(msg->chunk_len + msg->next, ci_data(msg->chn)) - *nxt;
                        *nxt += ret;
                }
        }

        /* Restore the original buffer state */
        b_set_data(&msg->chn->buf, co_data(msg->chn) + buf_i + delta);

        return ret;
}

/*
 * Calls 'http_chunk_trailers' callback for all "data" filters attached to a
 * stream. This function is called for chunked messages only, when a part of the
 * trailers was parsed in the AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY
 * analyzers. Filters can know how much data was parsed by the HTTP parser up to
 * the last call, using the msg->sol value. Returns a negative value if an
 * error occurs, any other value otherwise.
 */
int
flt_http_chunk_trailers(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;
        int ret = 1;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;

                /* Be sure to set the next offset of the filter at the right
                 * place. This is really useful when the first part of the
                 * trailers was parsed. */
                nxt = &FLT_NXT(filter, msg->chn);
                *nxt = msg->next;

                if (FLT_OPS(filter)->http_chunk_trailers) {
                        ret = FLT_OPS(filter)->http_chunk_trailers(s, filter, msg);
                        if (ret < 0)
                                break;
                }
                /* Update the next offset of the current filter. Here all data
                 * are always consumed. */
                *nxt += msg->sol;
        }
        return ret;
}

/*
 * Calls 'http_end' callback for all filters attached to a stream. All filters
 * are called here, but only if there is at least one "data" filter. This
 * function is called when all data were parsed and forwarded. 'http_end'
 * callback is resumable, so this function returns a negative value if an error
 * occurs, 0 if it needs to wait for some reason, any other value otherwise.
 */
int
flt_http_end(struct stream *s, struct http_msg *msg)
{
        int ret = 1;

        RESUME_FILTER_LOOP(s, msg->chn) {
                if (FLT_OPS(filter)->http_end) {
                        ret = FLT_OPS(filter)->http_end(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, msg->chn, end);
                }
        } RESUME_FILTER_END;
  end:
        return ret;
}

/*
 * Calls 'http_reset' callback for all filters attached to a stream. This
 * happens when a 100-continue response is received.
 */
void
flt_http_reset(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->http_reset)
                        FLT_OPS(filter)->http_reset(s, filter, msg);
        }
}

/*
 * Calls 'http_reply' callback for all filters attached to a stream when HAProxy
 * decides to stop the HTTP message processing.
 */
void
flt_http_reply(struct stream *s, short status, const struct buffer *msg)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->http_reply)
                        FLT_OPS(filter)->http_reply(s, filter, status, msg);
        }
}

/*
 * Calls 'http_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded in the
 * AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY analyzers. It takes care to
 * update the forward offset of filters and adjusts "forwardable" data to be
 * sure that a filter cannot forward more data than its predecessors. A filter
 * can choose to not forward all parsed data. Returns a negative value if an
 * error occurs, else the number of forwarded bytes.
 */
int
flt_http_forward_data(struct stream *s, struct http_msg *msg, unsigned int len)
{
        struct filter *filter;
        int ret = len;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt, *fwd;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;

                /* If the HTTP parser is ahead, we update the next offset of the
                 * current filter. This happens for chunked messages, when the
                 * chunk envelope is parsed. */
                nxt = &FLT_NXT(filter, msg->chn);
                fwd = &FLT_FWD(filter, msg->chn);
                if (msg->next > *nxt)
                        *nxt = msg->next;

                if (FLT_OPS(filter)->http_forward_data) {
                        /* Remove bytes that the current filter considered as
                         * forwarded */
                        ret = FLT_OPS(filter)->http_forward_data(s, filter, msg, ret - *fwd);
                        if (ret < 0)
                                goto end;
                }

                /* Adjust bytes that the current filter considers as
                 * forwarded */
                *fwd += ret;

                /* And set this value as the bound for the next filter. It will
                 * not be able to forward more data than the current one. */
                ret = *fwd;
        }

        if (!ret)
                goto end;

        /* Finally, adjust filters offsets by removing data that HAProxy will
         * forward. */
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;
                FLT_NXT(filter, msg->chn) -= ret;
                FLT_FWD(filter, msg->chn) -= ret;
        }
  end:
        return ret;
}

/*
 * Calls 'channel_start_analyze' callback for all filters attached to a
 * stream. This function is called when we start to analyze a request or a
 * response. For frontend filters, it is called before all other analyzers. For
 * backend ones, it is called before all backend
 * analyzers. 'channel_start_analyze' callback is resumable, so this function
 * returns 0 if an error occurs or if it needs to wait, any other value
 * otherwise.
 */
int
flt_start_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* If this function is called, this means there is at least one filter,
         * so we do not need to check the filter list's emptiness. */

        /* Set flag on channel to tell that the channel is filtered */
        chn->flags |= CF_FLT_ANALYZE;

        RESUME_FILTER_LOOP(s, chn) {
                if (!(chn->flags & CF_ISRESP)) {
                        if (an_bit == AN_REQ_FLT_START_BE &&
                            !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
                                continue;
                }
                else {
                        if (an_bit == AN_RES_FLT_START_BE &&
                            !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
                                continue;
                }

                FLT_NXT(filter, chn) = 0;
                FLT_FWD(filter, chn) = 0;

                if (FLT_OPS(filter)->channel_start_analyze) {
                        ret = FLT_OPS(filter)->channel_start_analyze(s, filter, chn);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, end);
                }
        } RESUME_FILTER_END;

  end:
        return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Calls 'channel_pre_analyze' callback for all filters attached to a
 * stream. This function is called BEFORE each analyzer attached to a channel,
 * except analyzers responsible for data sending. 'channel_pre_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 *
 * Note this function can be called many times for the same analyzer. In fact,
 * it is called until the analyzer finishes its processing.
 */
int
flt_pre_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        RESUME_FILTER_LOOP(s, chn) {
                if (FLT_OPS(filter)->channel_pre_analyze && (filter->pre_analyzers & an_bit)) {
                        ret = FLT_OPS(filter)->channel_pre_analyze(s, filter, chn, an_bit);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, check_result);
                }
        } RESUME_FILTER_END;

  check_result:
        return handle_analyzer_result(s, chn, 0, ret);
}

/*
 * Calls 'channel_post_analyze' callback for all filters attached to a
 * stream. This function is called AFTER each analyzer attached to a channel,
 * except analyzers responsible for data sending. 'channel_post_analyze'
 * callback is NOT resumable, so this function returns 0 if an error occurs,
 * any other value otherwise.
 *
 * Here, AFTER means when the analyzer finishes its processing.
 */
int
flt_post_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        struct filter *filter;
        int ret = 1;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->channel_post_analyze && (filter->post_analyzers & an_bit)) {
                        ret = FLT_OPS(filter)->channel_post_analyze(s, filter, chn, an_bit);
                        if (ret < 0)
                                break;
                }
        }
        return handle_analyzer_result(s, chn, 0, ret);
}

/*
 * This function is the AN_REQ/RES_FLT_HTTP_HDRS analyzer, used to filter HTTP
 * headers of a request or a response. Returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_analyze_http_headers(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        struct filter *filter;
        struct http_msg *msg;
        int ret = 1;

        msg = ((chn->flags & CF_ISRESP) ? &s->txn->rsp : &s->txn->req);
        RESUME_FILTER_LOOP(s, chn) {
                if (FLT_OPS(filter)->http_headers) {
                        ret = FLT_OPS(filter)->http_headers(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, check_result);
                }
        } RESUME_FILTER_END;

        /* We increase the next offset of all "data" filters after all processing
         * on headers because any filter can alter them. So the definitive size
         * of headers (msg->sov) is only known when all filters have been
         * called. */
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                /* Handle "data" filters only */
                if (!IS_DATA_FILTER(filter, chn))
                        continue;
                FLT_NXT(filter, chn) = msg->sov;
        }

  check_result:
        return handle_analyzer_result(s, chn, an_bit, ret);
}

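/* Illustrative note (not part of the original file): a filter only receives
 * the http_data/tcp_data and forward callbacks once it is registered as a
 * "data" filter on the channel, which it typically does from its http_headers
 * or channel_start_analyze callback. A hypothetical my_flt_http_headers()
 * could look like:
 *
 *     static int
 *     my_flt_http_headers(struct stream *s, struct filter *filter,
 *                         struct http_msg *msg)
 *     {
 *             register_data_filter(s, msg->chn, filter);
 *             return 1;
 *     }
 */
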
/*
 * Calls 'channel_end_analyze' callback for all filters attached to a
 * stream. This function is called when we stop analyzing a request or a
 * response. It is called after all other analyzers. 'channel_end_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_end_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* Check if all filters attached to the stream have finished their
         * processing on this channel. */
        if (!(chn->flags & CF_FLT_ANALYZE))
                goto sync;

        RESUME_FILTER_LOOP(s, chn) {
                FLT_NXT(filter, chn) = 0;
                FLT_FWD(filter, chn) = 0;
                unregister_data_filter(s, chn, filter);

                if (FLT_OPS(filter)->channel_end_analyze) {
                        ret = FLT_OPS(filter)->channel_end_analyze(s, filter, chn);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, end);
                }
        } RESUME_FILTER_END;

  end:
        /* We don't remove this analyzer yet because we need to synchronize both
         * channels. So here, we just remove the flag CF_FLT_ANALYZE. */
        ret = handle_analyzer_result(s, chn, 0, ret);
        if (ret) {
                chn->flags &= ~CF_FLT_ANALYZE;

                /* Pretend there is activity on both channels. The flag on the
                 * current one will be automatically removed, so only the other
                 * one will remain. This is a way to be sure that the
                 * 'channel_end_analyze' callback will have a chance to be
                 * called at least once for the other side to finish the current
                 * processing. Of course, it is the filter's responsibility to
                 * wake up the stream if it chooses to loop on this callback. */
                s->req.flags |= CF_WAKE_ONCE;
                s->res.flags |= CF_WAKE_ONCE;
        }

  sync:
        /* Now we can check if filters have finished their work on both
         * channels */
        if (!(s->req.flags & CF_FLT_ANALYZE) && !(s->res.flags & CF_FLT_ANALYZE)) {
                /* Sync channels by removing this analyzer for both channels */
                s->req.analysers &= ~AN_REQ_FLT_END;
                s->res.analysers &= ~AN_RES_FLT_END;

                /* Clean up the HTTP transaction if needed */
                if (s->txn && (s->txn->flags & TX_WAIT_CLEANUP))
                        http_end_txn_clean_session(s);

                /* Remove backend filters from the list */
                flt_stream_release(s, 1);
        }

        return ret;
}

/*
 * Calls 'tcp_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available. It takes care to update
 * the next offset of filters and adjusts available data to be sure that a
 * filter cannot parse more data than its predecessors. A filter can choose to
 * not consume all available data. Returns -1 if an error occurs, the number of
 * consumed bytes otherwise.
 */
static int
flt_data(struct stream *s, struct channel *chn)
{
        struct filter *filter;
        unsigned int buf_i;
        int delta = 0, ret = 0;

        /* Save buffer state */
        buf_i = ci_data(chn);

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, chn))
                        continue;

                nxt = &FLT_NXT(filter, chn);
                if (FLT_OPS(filter)->tcp_data) {
                        unsigned int i = ci_data(chn);

                        ret = FLT_OPS(filter)->tcp_data(s, filter, chn);
                        if (ret < 0)
                                break;
                        delta += (int)(ci_data(chn) - i);

                        /* Increase next offset of the current filter */
                        *nxt += ret;

                        /* And set this value as the bound for the next
                         * filter. It will not be able to parse more data than
                         * the current one. */
                        b_set_data(&chn->buf, co_data(chn) + *nxt);
                }
                else {
                        /* Consume all available data */
                        *nxt = ci_data(chn);
                }

                /* Update <ret> value to be sure to have the last one when we
                 * exit from the loop. This value will be used to know how much
                 * data are "forwardable" */
                ret = *nxt;
        }

        /* Restore the original buffer state */
        b_set_data(&chn->buf, co_data(chn) + buf_i + delta);

        return ret;
}

/*
 * Calls 'tcp_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded. It takes
 * care to update the forward offset of filters and adjusts "forwardable" data
 * to be sure that a filter cannot forward more data than its predecessors. A
 * filter can choose to not forward all parsed data. Returns a negative value if
 * an error occurs, else the number of forwarded bytes.
 */
static int
flt_forward_data(struct stream *s, struct channel *chn, unsigned int len)
{
        struct filter *filter;
        int ret = len;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *fwd;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, chn))
                        continue;

                fwd = &FLT_FWD(filter, chn);
                if (FLT_OPS(filter)->tcp_forward_data) {
                        /* Remove bytes that the current filter considered as
                         * forwarded */
                        ret = FLT_OPS(filter)->tcp_forward_data(s, filter, chn, ret - *fwd);
                        if (ret < 0)
                                goto end;
                }

                /* Adjust bytes that the current filter considers as
                 * forwarded */
                *fwd += ret;

                /* And set this value as the bound for the next filter. It will
                 * not be able to forward more data than the current one. */
                ret = *fwd;
        }

        if (!ret)
                goto end;

        /* Finally, adjust filters offsets by removing data that HAProxy will
         * forward. */
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (!IS_DATA_FILTER(filter, chn))
                        continue;
                FLT_NXT(filter, chn) -= ret;
                FLT_FWD(filter, chn) -= ret;
        }

  end:
        return ret;
}

/*
 * Called when TCP data must be filtered on a channel. This function is the
 * AN_REQ/RES_FLT_XFER_DATA analyzer. When called, it is responsible for
 * forwarding data when the proxy is not in http mode. Behind the scenes, it
 * calls consecutively the 'tcp_data' and 'tcp_forward_data' callbacks for all
 * "data" filters attached to a stream. Returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_xfer_data(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* If there are no "data" filters, we do nothing */
        if (!HAS_DATA_FILTERS(s, chn))
                goto end;

        /* Be sure that the output is still open. Else we stop the data
         * filtering. */
        if ((chn->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
            ((chn->flags & CF_SHUTW) && (chn->to_forward || co_data(chn))))
                goto end;

        /* Let all "data" filters parse incoming data */
        ret = flt_data(s, chn);
        if (ret < 0)
                goto end;

        /* And forward them */
        ret = flt_forward_data(s, chn, ret);
        if (ret < 0)
                goto end;

        /* Consume data that all filters consider as forwarded. */
        c_adv(chn, ret);

        /* Stop waiting for data if the input is closed and no data is pending,
         * or if the output is closed. */
        if ((chn->flags & CF_SHUTW) ||
            ((chn->flags & CF_SHUTR) && !ci_data(chn))) {
                ret = 1;
                goto end;
        }

        /* Wait for data */
        return 0;
  end:
        /* Terminate the data filtering. If <ret> is negative, an error was
         * encountered during the filtering. */
        return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Handles result of filter's analyzers. It returns 0 if an error occurs or if
 * it needs to wait, any other value otherwise.
 */
static int
handle_analyzer_result(struct stream *s, struct channel *chn,
                       unsigned int an_bit, int ret)
{
        int finst;

        if (ret < 0)
                goto return_bad_req;
        else if (!ret)
                goto wait;

        /* End of job, return OK */
        if (an_bit) {
                chn->analysers &= ~an_bit;
                chn->analyse_exp = TICK_ETERNITY;
        }
        return 1;

  return_bad_req:
        /* An error occurs */
        channel_abort(&s->req);
        channel_abort(&s->res);

        if (!(chn->flags & CF_ISRESP)) {
                s->req.analysers &= AN_REQ_FLT_END;
                finst = SF_FINST_R;
                /* FIXME: incr counters */
        }
        else {
                s->res.analysers &= AN_RES_FLT_END;
                finst = SF_FINST_H;
                /* FIXME: incr counters */
        }

        if (s->txn) {
                /* Do not do that when we are waiting for the next request */
                if (s->txn->status)
                        http_reply_and_close(s, s->txn->status, NULL);
                else {
                        s->txn->status = 400;
                        http_reply_and_close(s, 400, http_error_message(s));
                }
        }

        if (!(s->flags & SF_ERR_MASK))
                s->flags |= SF_ERR_PRXCOND;
        if (!(s->flags & SF_FINST_MASK))
                s->flags |= finst;
        return 0;

  wait:
        if (!(chn->flags & CF_ISRESP))
                channel_dont_connect(chn);
        return 0;
}


/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted, doing so helps
 * all code contributors.
 * Optional keywords are also declared with a NULL ->parse() function so that
 * the config parser can report an appropriate error when a known keyword was
 * not enabled. */
static struct cfg_kw_list cfg_kws = {ILH, {
                { CFG_LISTEN, "filter", parse_filter },
                { 0, NULL, NULL },
        }
};

__attribute__((constructor))
static void
__filters_init(void)
{
        pool_head_filter = create_pool("filter", sizeof(struct filter), MEM_F_SHARED);
        cfg_register_keywords(&cfg_kws);
        hap_register_post_check(flt_init_all);
        hap_register_per_thread_init(flt_init_all_per_thread);
        hap_register_per_thread_deinit(flt_deinit_all_per_thread);
}

__attribute__((destructor))
static void
__filters_deinit(void)
{
        pool_destroy(pool_head_filter);
}

/*
 * Local variables:
 * c-indent-level: 8
 * c-basic-offset: 8
 * End:
 */