/*
 * Stream filters related variables and functions.
 *
 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/buffer.h>
#include <common/debug.h>
#include <common/cfgparse.h>
#include <common/compat.h>
#include <common/config.h>
#include <common/errors.h>
#include <common/htx.h>
#include <common/initcall.h>
#include <common/namespace.h>
#include <common/standard.h>
#include <common/hathreads.h>

#include <types/filters.h>
#include <types/proto_http.h>

#include <proto/compression.h>
#include <proto/filters.h>
#include <proto/flt_http_comp.h>
#include <proto/http_htx.h>
#include <proto/proto_http.h>
#include <proto/stream.h>
#include <proto/stream_interface.h>

/* Pool used to allocate filters */
DECLARE_STATIC_POOL(pool_head_filter, "filter", sizeof(struct filter));

static int handle_analyzer_result(struct stream *s, struct channel *chn, unsigned int an_bit, int ret);

/* - RESUME_FILTER_LOOP and RESUME_FILTER_END must always be used together.
 *   The first one begins a loop and the second one ends it.
 *
 * - BREAK_EXECUTION must be used to break the loop and set the filter from
 *   which to resume the next time.
 *
 *   Here is an example:
 *
 *    RESUME_FILTER_LOOP(stream, channel) {
 *        ...
 *        if (cond)
 *             BREAK_EXECUTION(stream, channel, label);
 *        ...
 *    } RESUME_FILTER_END;
 *    ...
 *     label:
 *    ...
 *
 */
#define RESUME_FILTER_LOOP(strm, chn)                                   \
        do {                                                            \
                struct filter *filter;                                  \
                                                                        \
                if (strm_flt(strm)->current[CHN_IDX(chn)]) {            \
                        filter = strm_flt(strm)->current[CHN_IDX(chn)]; \
                        strm_flt(strm)->current[CHN_IDX(chn)] = NULL;   \
                        goto resume_execution;                          \
                }                                                       \
                                                                        \
                list_for_each_entry(filter, &strm_flt(s)->filters, list) { \
                resume_execution:

#define RESUME_FILTER_END                                       \
                }                                               \
        } while(0)

#define BREAK_EXECUTION(strm, chn, label)                       \
        do {                                                    \
                strm_flt(strm)->current[CHN_IDX(chn)] = filter; \
                goto label;                                     \
        } while (0)


/* List head of all known filter keywords */
static struct flt_kw_list flt_keywords = {
        .list = LIST_HEAD_INIT(flt_keywords.list)
};

/*
 * Registers the filter keyword list <kwl> as a list of valid keywords for next
 * parsing sessions.
 */
void
flt_register_keywords(struct flt_kw_list *kwl)
{
        LIST_ADDQ(&flt_keywords.list, &kwl->list);
}
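
/* For reference, a minimal sketch of how a filter module would typically hook
 * into the keyword list above, assuming the usual layout of struct flt_kw_list
 * and struct flt_kw (scope string, embedded list, then a NULL-terminated array
 * of { kw, parse, private }) as inferred from flt_find_kw(), flt_dump_kws()
 * and parse_filter() below. Names like parse_myflt/"myflt" are hypothetical:
 *
 *    static int parse_myflt(char **args, int *cur_arg, struct proxy *px,
 *                           struct flt_conf *fconf, char **err, void *private)
 *    {
 *            // consume args, fill fconf and set fconf->ops, then return 0
 *            return 0;
 *    }
 *
 *    static struct flt_kw_list flt_kws = { "MYFLT", { }, {
 *                    { "myflt", parse_myflt, NULL },
 *                    { NULL, NULL, NULL },
 *            }
 *    };
 *
 *    INITCALL1(STG_REGISTER, flt_register_keywords, &flt_kws);
 */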

/*
 * Returns a pointer to the filter keyword <kw>, or NULL if not found. If the
 * keyword is found with a NULL ->parse() function, then an attempt is made to
 * find one with a valid ->parse() function. This way it is possible to declare
 * platform-dependent, known keywords as NULL, then only declare them as valid
 * if some options are met. Note that if the requested keyword contains an
 * opening parenthesis, everything from this point is ignored.
 */
struct flt_kw *
flt_find_kw(const char *kw)
{
        int index;
        const char *kwend;
        struct flt_kw_list *kwl;
        struct flt_kw *ret = NULL;

        kwend = strchr(kw, '(');
        if (!kwend)
                kwend = kw + strlen(kw);

        list_for_each_entry(kwl, &flt_keywords.list, list) {
                for (index = 0; kwl->kw[index].kw != NULL; index++) {
                        if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
                            kwl->kw[index].kw[kwend-kw] == 0) {
                                if (kwl->kw[index].parse)
                                        return &kwl->kw[index]; /* found it! */
                                else
                                        ret = &kwl->kw[index];  /* may be OK */
                        }
                }
        }
        return ret;
}

/*
 * Dumps all registered "filter" keywords to the <out> string pointer. The
 * unsupported keywords are only dumped if their supported form was not found.
 */
void
flt_dump_kws(char **out)
{
        struct flt_kw_list *kwl;
        int index;

        *out = NULL;
        list_for_each_entry(kwl, &flt_keywords.list, list) {
                for (index = 0; kwl->kw[index].kw != NULL; index++) {
                        if (kwl->kw[index].parse ||
                            flt_find_kw(kwl->kw[index].kw) == &kwl->kw[index]) {
                                memprintf(out, "%s[%4s] %s%s\n", *out ? *out : "",
                                          kwl->scope,
                                          kwl->kw[index].kw,
                                          kwl->kw[index].parse ? "" : " (not supported)");
                        }
                }
        }
}

/*
 * Lists the known filters on <out>
 */
void
list_filters(FILE *out)
{
        char *filters, *p, *f;

        fprintf(out, "Available filters :\n");
        flt_dump_kws(&filters);
        for (p = filters; (f = strtok_r(p,"\n",&p));)
                fprintf(out, "\t%s\n", f);
        free(filters);
}

/*
 * Parses the "filter" keyword. All keywords must be handled by filters
 * themselves
 */
static int
parse_filter(char **args, int section_type, struct proxy *curpx,
             struct proxy *defpx, const char *file, int line, char **err)
{
        struct flt_conf *fconf = NULL;

        /* Filter cannot be defined on a default proxy */
        if (curpx == defpx) {
                memprintf(err, "parsing [%s:%d] : %s is not allowed in a 'default' section.",
                          file, line, args[0]);
                return -1;
        }
        if (!strcmp(args[0], "filter")) {
                struct flt_kw *kw;
                int cur_arg;

                if (!*args[1]) {
                        memprintf(err,
                                  "parsing [%s:%d] : missing argument for '%s' in %s '%s'.",
                                  file, line, args[0], proxy_type_str(curpx), curpx->id);
                        goto error;
                }
                fconf = calloc(1, sizeof(*fconf));
                if (!fconf) {
                        memprintf(err, "'%s' : out of memory", args[0]);
                        goto error;
                }

                cur_arg = 1;
                kw = flt_find_kw(args[cur_arg]);
                if (kw) {
                        if (!kw->parse) {
                                memprintf(err, "parsing [%s:%d] : '%s' : "
                                          "'%s' option is not implemented in this version (check build options).",
                                          file, line, args[0], args[cur_arg]);
                                goto error;
                        }
                        if (kw->parse(args, &cur_arg, curpx, fconf, err, kw->private) != 0) {
                                if (err && *err)
                                        memprintf(err, "'%s' : '%s'",
                                                  args[0], *err);
                                else
                                        memprintf(err, "'%s' : error encountered while processing '%s'",
                                                  args[0], args[cur_arg]);
                                goto error;
                        }
                }
                else {
                        flt_dump_kws(err);
                        indent_msg(err, 4);
                        memprintf(err, "'%s' : unknown keyword '%s'.%s%s",
                                  args[0], args[cur_arg],
                                  err && *err ? " Registered keywords :" : "", err && *err ? *err : "");
                        goto error;
                }
                if (*args[cur_arg]) {
                        memprintf(err, "'%s %s' : unknown keyword '%s'.",
                                  args[0], args[1], args[cur_arg]);
                        goto error;
                }
                if (fconf->ops == NULL) {
                        memprintf(err, "'%s %s' : no callbacks defined.",
                                  args[0], args[1]);
                        goto error;
                }

                LIST_ADDQ(&curpx->filter_configs, &fconf->list);
        }
        return 0;

  error:
        free(fconf);
        return -1;
}
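
/* For reference, the "filter" lines handled above live in proxy sections of
 * the configuration. A hedged example, assuming the trace and compression
 * filters are compiled in (keyword parameters belong to each filter, not to
 * this parser):
 *
 *    frontend www
 *        bind :8000
 *        filter trace name MY-TRACE
 *        filter compression
 *
 * Whatever the keyword, its ->parse() callback must set fconf->ops to the
 * filter's callback table, otherwise the "no callbacks defined" error above
 * is triggered.
 */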

/*
 * Calls 'init' callback for all filters attached to a proxy. This happens after
 * the configuration parsing. Filters can finish filling their config. Returns
 * (ERR_ALERT|ERR_FATAL) if an error occurs, 0 otherwise.
 */
static int
flt_init(struct proxy *proxy)
{
        struct flt_conf *fconf;

        list_for_each_entry(fconf, &proxy->filter_configs, list) {
                if (fconf->ops->init && fconf->ops->init(proxy, fconf) < 0)
                        return ERR_ALERT|ERR_FATAL;
        }
        return 0;
}

/*
 * Calls 'init_per_thread' callback for all filters attached to a proxy for each
 * thread. This happens after the thread creation. Filters can finish filling
 * their config. Returns (ERR_ALERT|ERR_FATAL) if an error occurs, 0 otherwise.
 */
static int
flt_init_per_thread(struct proxy *proxy)
{
        struct flt_conf *fconf;

        list_for_each_entry(fconf, &proxy->filter_configs, list) {
                if (fconf->ops->init_per_thread && fconf->ops->init_per_thread(proxy, fconf) < 0)
                        return ERR_ALERT|ERR_FATAL;
        }
        return 0;
}

/* Calls flt_init() for all proxies, see above */
static int
flt_init_all()
{
        struct proxy *px;
        int err_code = 0;

        for (px = proxies_list; px; px = px->next) {
                if (px->state == PR_STSTOPPED) {
                        flt_deinit(px);
                        continue;
                }
                err_code |= flt_init(px);
                if (err_code & (ERR_ABORT|ERR_FATAL)) {
                        ha_alert("Failed to initialize filters for proxy '%s'.\n",
                                 px->id);
                        return err_code;
                }
        }
        return 0;
}

/* Calls flt_init_per_thread() for all proxies, see above. Be careful here, it
 * returns 0 if an error occurred. This is the opposite of flt_init_all. */
static int
flt_init_all_per_thread()
{
        struct proxy *px;
        int err_code = 0;

        for (px = proxies_list; px; px = px->next) {
                if (px->state == PR_STSTOPPED)
                        continue;

                err_code = flt_init_per_thread(px);
                if (err_code & (ERR_ABORT|ERR_FATAL)) {
                        ha_alert("Failed to initialize filters for proxy '%s' for thread %u.\n",
                                 px->id, tid);
                        return 0;
                }
        }
        return 1;
}

/*
 * Calls 'check' callback for all filters attached to a proxy. This happens
 * after the configuration parsing but before filters initialization. Returns
 * the number of encountered errors.
 */
int
flt_check(struct proxy *proxy)
{
        struct flt_conf *fconf;
        int err = 0;

        err += check_implicit_http_comp_flt(proxy);
        list_for_each_entry(fconf, &proxy->filter_configs, list) {
                if (fconf->ops->check)
                        err += fconf->ops->check(proxy, fconf);
        }
        return err;
}

/*
 * Calls 'deinit' callback for all filters attached to a proxy. This happens
 * when HAProxy is stopped.
 */
void
flt_deinit(struct proxy *proxy)
{
        struct flt_conf *fconf, *back;

        list_for_each_entry_safe(fconf, back, &proxy->filter_configs, list) {
                if (fconf->ops->deinit)
                        fconf->ops->deinit(proxy, fconf);
                LIST_DEL(&fconf->list);
                free(fconf);
        }
}

/*
 * Calls 'deinit_per_thread' callback for all filters attached to a proxy for
 * each thread. This happens before exiting a thread.
 */
void
flt_deinit_per_thread(struct proxy *proxy)
{
        struct flt_conf *fconf, *back;

        list_for_each_entry_safe(fconf, back, &proxy->filter_configs, list) {
                if (fconf->ops->deinit_per_thread)
                        fconf->ops->deinit_per_thread(proxy, fconf);
        }
}


/* Calls flt_deinit_per_thread() for all proxies, see above */
static void
flt_deinit_all_per_thread()
{
        struct proxy *px;

        for (px = proxies_list; px; px = px->next)
                flt_deinit_per_thread(px);
}

/* Attaches a filter to a stream. Returns -1 if an error occurs, 0 otherwise. */
static int
flt_stream_add_filter(struct stream *s, struct flt_conf *fconf, unsigned int flags)
{
        struct filter *f;

        if (IS_HTX_STRM(s) && !(fconf->flags & FLT_CFG_FL_HTX))
                return 0;

        f = pool_alloc(pool_head_filter);
        if (!f) /* not enough memory */
                return -1;
        memset(f, 0, sizeof(*f));
        f->config = fconf;
        f->flags |= flags;

        if (FLT_OPS(f)->attach) {
                int ret = FLT_OPS(f)->attach(s, f);
                if (ret <= 0) {
                        pool_free(pool_head_filter, f);
                        return ret;
                }
        }

        LIST_ADDQ(&strm_flt(s)->filters, &f->list);
        strm_flt(s)->flags |= STRM_FLT_FL_HAS_FILTERS;
        return 0;
}

/*
 * Called when a stream is created. It attaches all frontend filters to the
 * stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_init(struct stream *s)
{
        struct flt_conf *fconf;

        memset(strm_flt(s), 0, sizeof(*strm_flt(s)));
        LIST_INIT(&strm_flt(s)->filters);
        list_for_each_entry(fconf, &strm_fe(s)->filter_configs, list) {
                if (flt_stream_add_filter(s, fconf, 0) < 0)
                        return -1;
        }
        return 0;
}

/*
 * Called when a stream is closed or when the analysis ends (for an HTTP stream,
 * this happens after each request/response exchange). When the analysis ends,
 * backend filters are removed. When the stream is closed, all filters attached
 * to the stream are removed.
 */
void
flt_stream_release(struct stream *s, int only_backend)
{
        struct filter *filter, *back;

        list_for_each_entry_safe(filter, back, &strm_flt(s)->filters, list) {
                if (!only_backend || (filter->flags & FLT_FL_IS_BACKEND_FILTER)) {
                        if (FLT_OPS(filter)->detach)
                                FLT_OPS(filter)->detach(s, filter);
                        LIST_DEL(&filter->list);
                        pool_free(pool_head_filter, filter);
                }
        }
        if (LIST_ISEMPTY(&strm_flt(s)->filters))
                strm_flt(s)->flags &= ~STRM_FLT_FL_HAS_FILTERS;
}

/*
 * Calls 'stream_start' for all filters attached to a stream. This happens when
 * the stream is created, just after calling the flt_stream_init()
 * function. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_start(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->stream_start && FLT_OPS(filter)->stream_start(s, filter) < 0)
                        return -1;
        }
        if (strm_li(s) && (strm_li(s)->analysers & AN_REQ_FLT_START_FE))
                s->req.flags |= CF_FLT_ANALYZE;
        return 0;
}

/*
 * Calls 'stream_stop' for all filters attached to a stream. This happens when
 * the stream is stopped, just before calling flt_stream_release function.
 */
void
flt_stream_stop(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->stream_stop)
                        FLT_OPS(filter)->stream_stop(s, filter);
        }
}

/*
 * Calls 'check_timeouts' for all filters attached to a stream. This happens
 * when the stream is woken up because of an expired timer.
 */
void
flt_stream_check_timeouts(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->check_timeouts)
                        FLT_OPS(filter)->check_timeouts(s, filter);
        }
}

/*
 * Called when a backend is set for a stream. If the frontend and the backend
 * are not the same, this function attaches all backend filters to the
 * stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_set_stream_backend(struct stream *s, struct proxy *be)
{
        struct flt_conf *fconf;
        struct filter *filter;

        if (strm_fe(s) == be)
                goto end;

        list_for_each_entry(fconf, &be->filter_configs, list) {
                if (flt_stream_add_filter(s, fconf, FLT_FL_IS_BACKEND_FILTER) < 0)
                        return -1;
        }

  end:
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->stream_set_backend &&
                    FLT_OPS(filter)->stream_set_backend(s, filter, be) < 0)
                        return -1;
        }
        if (be->be_req_ana & AN_REQ_FLT_START_BE)
                s->req.flags |= CF_FLT_ANALYZE;
        if ((strm_fe(s)->fe_rsp_ana | be->be_rsp_ana) & (AN_RES_FLT_START_FE|AN_RES_FLT_START_BE))
                s->res.flags |= CF_FLT_ANALYZE;

        return 0;
}

/*
 * Calls 'http_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available (excluding chunks
 * envelope for chunked messages) in the AN_REQ_HTTP_XFER_BODY and
 * AN_RES_HTTP_XFER_BODY analyzers. It takes care to update the next offset of
 * filters and adjusts available data to be sure that a filter cannot parse more
 * data than its predecessors. A filter can choose to not consume all available
 * data. Returns -1 if an error occurs, the number of consumed bytes otherwise.
 *
 * DEPRECATED FUNCTION - CALLED FROM LEGACY HTTP ANALYZERS
 */
int
flt_http_data(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;
        unsigned int buf_i;
        int delta = 0, ret = 0;

        /* Save buffer state */
        buf_i = ci_data(msg->chn);

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;

                /* If the HTTP parser is ahead, we update the next offset of the
                 * current filter. This happens for chunked messages, at the
                 * beginning of a new chunk. */
                nxt = &FLT_NXT(filter, msg->chn);
                if (msg->next > *nxt)
                        *nxt = msg->next;

                if (FLT_OPS(filter)->http_data) {
                        unsigned int i = ci_data(msg->chn);

                        ret = FLT_OPS(filter)->http_data(s, filter, msg);
                        if (ret < 0)
                                break;
                        delta += (int)(ci_data(msg->chn) - i);

                        /* Update the next offset of the current filter */
                        *nxt += ret;

                        /* And set this value as the bound for the next
                         * filter. It will not be able to parse more data than
                         * this one. */
                        b_set_data(&msg->chn->buf, co_data(msg->chn) + *nxt);
                }
                else {
                        /* Consume all available data and update the next offset
                         * of the current filter. buf->i is untouched here. */
                        ret = MIN(msg->chunk_len + msg->next, ci_data(msg->chn)) - *nxt;
                        *nxt += ret;
                }
        }

        /* Restore the original buffer state */
        b_set_data(&msg->chn->buf, co_data(msg->chn) + buf_i + delta);

        return ret;
}
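
/* Illustrative walk-through of the bounding logic above, with made-up numbers:
 * assume ci_data(msg->chn) == 100 pending input bytes and two "data" filters.
 * If the first filter's http_data() callback returns 60, its FLT_NXT becomes
 * 60 and b_set_data() temporarily shrinks the buffer so the second filter only
 * sees those 60 bytes and can thus consume at most 60. After the loop, the
 * original buffer state (plus any <delta> added by the callbacks) is restored;
 * the untouched 40 bytes will be offered again on a later call.
 */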

/*
 * Calls 'http_chunk_trailers' callback for all "data" filters attached to a
 * stream. This function is called for chunked messages only when a part of the
 * trailers was parsed in the AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY
 * analyzers. Filters can know how much data was parsed by the HTTP parser since
 * the last call, thanks to the msg->sol value. Returns a negative value if an
 * error occurs, any other value otherwise.
 *
 * DEPRECATED FUNCTION - CALLED FROM LEGACY HTTP ANALYZERS
 */
int
flt_http_chunk_trailers(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;
        int ret = 1;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;

                /* Be sure to set the next offset of the filter at the right
                 * place. This is really useful when the first part of the
                 * trailers was parsed. */
                nxt = &FLT_NXT(filter, msg->chn);
                *nxt = msg->next;

                if (FLT_OPS(filter)->http_chunk_trailers) {
                        ret = FLT_OPS(filter)->http_chunk_trailers(s, filter, msg);
                        if (ret < 0)
                                break;
                }
                /* Update the next offset of the current filter. Here all data
                 * are always consumed. */
                *nxt += msg->sol;
        }
        return ret;
}

/*
 * Calls 'http_end' callback for all filters attached to a stream. All filters
 * are called here, but only if there is at least one "data" filter. This
 * function is called when all data were parsed and forwarded. 'http_end'
 * callback is resumable, so this function returns a negative value if an error
 * occurs, 0 if it needs to wait for some reason, any other value otherwise.
 *
 * Be careful, this function can be called from the HTTP legacy analyzers or
 * from HTX analyzers. If your filter is compatible with the two modes, use
 * IS_HTX_STRM macro on the stream.
 */
int
flt_http_end(struct stream *s, struct http_msg *msg)
{
        unsigned long long *strm_off = &FLT_STRM_OFF(s, msg->chn);
        unsigned int offset = 0;
        int ret = 1;

        RESUME_FILTER_LOOP(s, msg->chn) {
                unsigned long long flt_off = FLT_OFF(filter, msg->chn);
                offset = flt_off - *strm_off;

                /* Call http_end for data filters only. But the filter offset is
                 * still valid for all filters. */
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;

                if (FLT_OPS(filter)->http_end) {
                        ret = FLT_OPS(filter)->http_end(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, msg->chn, end);
                }
        } RESUME_FILTER_END;

        c_adv(msg->chn, offset);
        *strm_off += offset;

 end:
        return ret;
}

/*
 * Calls 'http_reset' callback for all filters attached to a stream. This
 * happens when a 100-continue response is received.
 *
 * Be careful, this function can be called from the HTTP legacy analyzers or
 * from HTX analyzers. If your filter is compatible with the two modes, use
 * IS_HTX_STRM macro on the stream.
 */
void
flt_http_reset(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->http_reset)
                        FLT_OPS(filter)->http_reset(s, filter, msg);
        }
}

/*
 * Calls 'http_reply' callback for all filters attached to a stream when HAProxy
 * decides to stop the HTTP message processing.
 *
 * Be careful, this function can be called from the HTTP legacy analyzers or
 * from HTX analyzers. If your filter is compatible with the two modes, use
 * IS_HTX_STRM macro on the stream.
 */
void
flt_http_reply(struct stream *s, short status, const struct buffer *msg)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->http_reply)
                        FLT_OPS(filter)->http_reply(s, filter, status, msg);
        }
}

/*
 * Calls 'http_forward_data' callback for all "data" filters attached to an HTTP
 * legacy stream. This function is called when some data can be forwarded in the
 * AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY analyzers. It takes care to
 * update the forward offset of filters and adjusts "forwardable" data to be
 * sure that a filter cannot forward more data than its predecessors. A filter
 * can choose to not forward all parsed data. Returns a negative value if an
 * error occurs, else the number of forwarded bytes.
 *
 * DEPRECATED FUNCTION - CALLED FROM LEGACY HTTP ANALYZERS
 */
int
flt_http_forward_data(struct stream *s, struct http_msg *msg, unsigned int len)
{
        struct filter *filter;
        int ret = len;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt, *fwd;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;

                /* If the HTTP parser is ahead, we update the next offset of the
                 * current filter. This happens for chunked messages, when the
                 * chunk envelope is parsed. */
                nxt = &FLT_NXT(filter, msg->chn);
                fwd = &FLT_FWD(filter, msg->chn);
                if (msg->next > *nxt)
                        *nxt = msg->next;

                if (FLT_OPS(filter)->http_forward_data) {
                        /* Remove bytes that the current filter considered as
                         * forwarded */
                        ret = FLT_OPS(filter)->http_forward_data(s, filter, msg, ret - *fwd);
                        if (ret < 0)
                                goto end;
                }

                /* Adjust bytes that the current filter considers as
                 * forwarded */
                *fwd += ret;

                /* And set this value as the bound for the next filter. It will
                 * not be able to forward more data than the current one. */
                ret = *fwd;
        }

        if (!ret)
                goto end;

        /* Finally, adjust filters offsets by removing data that HAProxy will
         * forward. */
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;
                FLT_NXT(filter, msg->chn) -= ret;
                FLT_FWD(filter, msg->chn) -= ret;
        }
 end:
        return ret;
}

/*
 * Calls 'http_payload' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded in the
 * AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY analyzers. It takes care to
 * update the filters and the stream offset to be sure that a filter cannot
 * forward more data than its predecessors. A filter can choose to not forward
 * all data. Returns a negative value if an error occurs, else the number of
 * forwarded bytes.
 *
 * Be careful, this callback is only called from HTX analyzers. So the
 * channel's buffer must be considered as an HTX structure. Of course, your
 * filter must support HTX streams.
 */
int
flt_http_payload(struct stream *s, struct http_msg *msg, unsigned int len)
{
        struct filter *filter;
        unsigned long long *strm_off = &FLT_STRM_OFF(s, msg->chn);
        unsigned int out = co_data(msg->chn);
        int ret, data;

        strm_flt(s)->flags &= ~STRM_FLT_FL_HOLD_HTTP_HDRS;

        ret = data = len - out;
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned long long *flt_off = &FLT_OFF(filter, msg->chn);
                unsigned int offset = *flt_off - *strm_off;

                /* Call http_payload for "data" filters only. Forward all data
                 * for the others and update the filter offset.
                 */
                if (!IS_DATA_FILTER(filter, msg->chn)) {
                        *flt_off += data - offset;
                        continue;
                }

                if (FLT_OPS(filter)->http_payload) {
                        ret = FLT_OPS(filter)->http_payload(s, filter, msg, out + offset, data - offset);
                        if (ret < 0)
                                goto end;
                        data = ret + *flt_off - *strm_off;
                        *flt_off += ret;
                }
        }

        /* If nothing was forwarded yet, we take care to hold the headers if
         * the following conditions are met :
         *
         *  - *strm_off == 0 (nothing forwarded yet)
         *  - ret == 0 (no data forwarded at all on this turn)
         *  - STRM_FLT_FL_HOLD_HTTP_HDRS flag set (at least one filter wants to hold the headers)
         *
         * Be careful, STRM_FLT_FL_HOLD_HTTP_HDRS is removed before each http_payload loop.
         * Thus, it must explicitly be set when necessary. We must do that to hold the headers
         * when there is no payload.
         */
        if (!ret && !*strm_off && (strm_flt(s)->flags & STRM_FLT_FL_HOLD_HTTP_HDRS))
                goto end;

        ret = data;
        *strm_off += ret;
 end:
        return ret;
}
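
/* Illustrative walk-through of the offset bookkeeping above, with made-up
 * numbers: say out == co_data() == 0, len == 100, so <data> starts at 100, and
 * *strm_off == 0. A first "data" filter whose http_payload() returns 70 gets
 * its FLT_OFF bumped to 70 and lowers <data> to 70, so the next "data" filter
 * is only offered those 70 bytes. A non-data filter simply gets its FLT_OFF
 * aligned on <data>. At the end, *strm_off advances by the final <data> value
 * (70 here) and the remaining 30 bytes stay pending for the next call.
 */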

/*
 * Calls 'channel_start_analyze' callback for all filters attached to a
 * stream. This function is called when we start to analyze a request or a
 * response. For frontend filters, it is called before all other analyzers. For
 * backend ones, it is called before all backend
 * analyzers. 'channel_start_analyze' callback is resumable, so this function
 * returns 0 if an error occurs or if it needs to wait, any other value
 * otherwise.
 */
int
flt_start_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* If this function is called, this means there is at least one filter,
         * so we do not need to check the filter list's emptiness. */

        /* Set flag on channel to tell that the channel is filtered */
        chn->flags |= CF_FLT_ANALYZE;

        RESUME_FILTER_LOOP(s, chn) {
                if (!(chn->flags & CF_ISRESP)) {
                        if (an_bit == AN_REQ_FLT_START_BE &&
                            !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
                                continue;
                }
                else {
                        if (an_bit == AN_RES_FLT_START_BE &&
                            !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
                                continue;
                }

                FLT_NXT(filter, chn) = 0;
                FLT_FWD(filter, chn) = 0;

                if (FLT_OPS(filter)->channel_start_analyze) {
                        ret = FLT_OPS(filter)->channel_start_analyze(s, filter, chn);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, end);
                }
        } RESUME_FILTER_END;

 end:
        return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Calls 'channel_pre_analyze' callback for all filters attached to a
 * stream. This function is called BEFORE each analyzer attached to a channel,
 * except analyzers responsible for data sending. 'channel_pre_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 *
 * Note this function can be called many times for the same analyzer. In fact,
 * it is called until the analyzer finishes its processing.
 */
int
flt_pre_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        RESUME_FILTER_LOOP(s, chn) {
                if (FLT_OPS(filter)->channel_pre_analyze && (filter->pre_analyzers & an_bit)) {
                        ret = FLT_OPS(filter)->channel_pre_analyze(s, filter, chn, an_bit);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, check_result);
                        filter->pre_analyzers &= ~an_bit;
                }
        } RESUME_FILTER_END;

 check_result:
        return handle_analyzer_result(s, chn, 0, ret);
}

/*
 * Calls 'channel_post_analyze' callback for all filters attached to a
 * stream. This function is called AFTER each analyzer attached to a channel,
 * except analyzers responsible for data sending. 'channel_post_analyze'
 * callback is NOT resumable, so this function returns 0 if an error occurs,
 * any other value otherwise.
 *
 * Here, AFTER means when the analyzer finishes its processing.
 */
int
flt_post_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        struct filter *filter;
        int ret = 1;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->channel_post_analyze && (filter->post_analyzers & an_bit)) {
                        ret = FLT_OPS(filter)->channel_post_analyze(s, filter, chn, an_bit);
                        if (ret < 0)
                                break;
                        filter->post_analyzers &= ~an_bit;
                }
        }
        return handle_analyzer_result(s, chn, 0, ret);
}

/*
 * This function is the AN_REQ/RES_FLT_HTTP_HDRS analyzer, used to filter HTTP
 * headers of a request or a response. Returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 *
 * Be careful, this function can be called from the HTTP legacy analyzers or
 * from HTX analyzers. If your filter is compatible with the two modes, use
 * IS_HTX_STRM macro on the stream.
 */
int
flt_analyze_http_headers(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        struct filter *filter;
        struct http_msg *msg;
        int ret = 1;

        msg = ((chn->flags & CF_ISRESP) ? &s->txn->rsp : &s->txn->req);
        RESUME_FILTER_LOOP(s, chn) {
                if (FLT_OPS(filter)->http_headers) {
                        ret = FLT_OPS(filter)->http_headers(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, check_result);
                }
        } RESUME_FILTER_END;

        if (IS_HTX_STRM(s)) {
                if (HAS_DATA_FILTERS(s, chn)) {
                        size_t data = http_get_hdrs_size(htxbuf(&chn->buf));
                        struct filter *f;

                        list_for_each_entry(f, &strm_flt(s)->filters, list)
                                FLT_OFF(f, chn) = data;
                }
        }
        else {
                /* We increase next offset of all "data" filters after all processing on
                 * headers because any filter can alter them. So the definitive size of
                 * headers (msg->sov) is only known when all filters have been
                 * called. */
                list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                        /* Handle "data" filters only */
                        if (!IS_DATA_FILTER(filter, chn))
                                continue;
                        FLT_NXT(filter, chn) = msg->sov;
                }
        }

 check_result:
        return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Calls 'channel_end_analyze' callback for all filters attached to a
 * stream. This function is called when we stop to analyze a request or a
 * response. It is called after all other analyzers. 'channel_end_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_end_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* Check if all filters attached on the stream have finished their
         * processing on this channel. */
        if (!(chn->flags & CF_FLT_ANALYZE))
                goto sync;

        RESUME_FILTER_LOOP(s, chn) {
                FLT_NXT(filter, chn) = 0;
                FLT_FWD(filter, chn) = 0;
                unregister_data_filter(s, chn, filter);

                if (FLT_OPS(filter)->channel_end_analyze) {
                        ret = FLT_OPS(filter)->channel_end_analyze(s, filter, chn);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, end);
                }
        } RESUME_FILTER_END;

 end:
        /* We don't remove this analyzer yet because we need to synchronize
         * both channels. So here, we just remove the flag CF_FLT_ANALYZE. */
        ret = handle_analyzer_result(s, chn, 0, ret);
        if (ret) {
                chn->flags &= ~CF_FLT_ANALYZE;

                /* Pretend there is an activity on both channels. Flag on the
                 * current one will be automatically removed, so only the other
                 * one will remain. This is a way to be sure that
                 * 'channel_end_analyze' callback will have a chance to be
                 * called at least once for the other side to finish the current
                 * processing. Of course, it is the filter's responsibility to
                 * wake up the stream if it chooses to loop on this callback. */
                s->req.flags |= CF_WAKE_ONCE;
                s->res.flags |= CF_WAKE_ONCE;
        }

 sync:
        /* Now we can check if filters have finished their work on both
         * channels */
        if (!(s->req.flags & CF_FLT_ANALYZE) && !(s->res.flags & CF_FLT_ANALYZE)) {
                /* Sync channels by removing this analyzer for both channels */
                s->req.analysers &= ~AN_REQ_FLT_END;
                s->res.analysers &= ~AN_RES_FLT_END;

                /* Clean up the HTTP transaction if needed */
                if (s->txn && (s->txn->flags & TX_WAIT_CLEANUP))
                        http_end_txn_clean_session(s);

                /* Remove backend filters from the list */
                flt_stream_release(s, 1);
        }

        return ret;
}


/*
 * Calls 'tcp_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available. It takes care to update
 * the next offset of filters and adjusts available data to be sure that a
 * filter cannot parse more data than its predecessors. A filter can choose to
 * not consume all available data. Returns -1 if an error occurs, the number of
 * consumed bytes otherwise.
 */
static int
flt_data(struct stream *s, struct channel *chn)
{
        struct filter *filter;
        unsigned int buf_i;
        int delta = 0, ret = 0;

        /* Save buffer state */
        buf_i = ci_data(chn);

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, chn))
                        continue;

                nxt = &FLT_NXT(filter, chn);
                if (FLT_OPS(filter)->tcp_data) {
                        unsigned int i = ci_data(chn);

                        ret = FLT_OPS(filter)->tcp_data(s, filter, chn);
                        if (ret < 0)
                                break;
                        delta += (int)(ci_data(chn) - i);

                        /* Increase next offset of the current filter */
                        *nxt += ret;

                        /* And set this value as the bound for the next
                         * filter. It will not be able to parse more data than
                         * the current one. */
                        b_set_data(&chn->buf, co_data(chn) + *nxt);
                }
                else {
                        /* Consume all available data */
                        *nxt = ci_data(chn);
                }

                /* Update <ret> value to be sure to have the last one when we
                 * exit from the loop. This value will be used to know how much
                 * data are "forwardable" */
                ret = *nxt;
        }

        /* Restore the original buffer state */
        b_set_data(&chn->buf, co_data(chn) + buf_i + delta);

        return ret;
}

/*
 * Calls 'tcp_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded. It takes
 * care to update the forward offset of filters and adjusts "forwardable" data
 * to be sure that a filter cannot forward more data than its predecessors. A
 * filter can choose to not forward all parsed data. Returns a negative value if
 * an error occurs, else the number of forwarded bytes.
 */
static int
flt_forward_data(struct stream *s, struct channel *chn, unsigned int len)
{
        struct filter *filter;
        int ret = len;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *fwd;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, chn))
                        continue;

                fwd = &FLT_FWD(filter, chn);
                if (FLT_OPS(filter)->tcp_forward_data) {
                        /* Remove bytes that the current filter considered as
                         * forwarded */
                        ret = FLT_OPS(filter)->tcp_forward_data(s, filter, chn, ret - *fwd);
                        if (ret < 0)
                                goto end;
                }

                /* Adjust bytes that the current filter considers as
                 * forwarded */
                *fwd += ret;

                /* And set this value as the bound for the next filter. It will
                 * not be able to forward more data than the current one. */
                ret = *fwd;
        }

        if (!ret)
                goto end;

        /* Finally, adjust filters offsets by removing data that HAProxy will
         * forward. */
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (!IS_DATA_FILTER(filter, chn))
                        continue;
                FLT_NXT(filter, chn) -= ret;
                FLT_FWD(filter, chn) -= ret;
        }

 end:
        return ret;
}
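
/* Illustrative walk-through of the forwarding bound above, with made-up
 * numbers: with len == 100 parsed bytes and two "data" filters, the first one
 * is offered "ret - *fwd" == 100 bytes; if its tcp_forward_data() callback
 * only releases 50, its FLT_FWD becomes 50 and <ret> is capped to 50, so the
 * second filter can forward at most 50 bytes. The final <ret> is then
 * subtracted from every "data" filter's FLT_NXT/FLT_FWD since those bytes
 * leave the buffer once HAProxy forwards them.
 */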

/*
 * Called when TCP data must be filtered on a channel. This function is the
 * AN_REQ/RES_FLT_XFER_DATA analyzer. When called, it is responsible for
 * forwarding data when the proxy is not in http mode. Behind the scene, it
 * calls consecutively 'tcp_data' and 'tcp_forward_data' callbacks for all
 * "data" filters attached to a stream. Returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_xfer_data(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* If there are no "data" filters, we do nothing */
        if (!HAS_DATA_FILTERS(s, chn) || (s->flags & SF_HTX))
                goto end;

        /* Be sure that the output is still opened. Else we stop the data
         * filtering. */
        if ((chn->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
            ((chn->flags & CF_SHUTW) && (chn->to_forward || co_data(chn))))
                goto end;

        /* Let all "data" filters parse incoming data */
        ret = flt_data(s, chn);
        if (ret < 0)
                goto end;

        /* And forward them */
        ret = flt_forward_data(s, chn, ret);
        if (ret < 0)
                goto end;

        /* Consume data that all filters consider as forwarded. */
        c_adv(chn, ret);

        /* Stop waiting for data if the input is closed and no data is pending,
         * or if the output is closed. */
        if ((chn->flags & CF_SHUTW) ||
            ((chn->flags & CF_SHUTR) && !ci_data(chn))) {
                ret = 1;
                goto end;
        }

        /* Wait for data */
        return 0;
 end:
        /* Terminate the data filtering. If <ret> is negative, an error was
         * encountered during the filtering. */
        return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Handles the result of the filter analyzers. It returns 0 if an error occurs
 * or if it needs to wait, any other value otherwise.
 */
static int
handle_analyzer_result(struct stream *s, struct channel *chn,
                       unsigned int an_bit, int ret)
{
        int finst;
        int status = 0;

        if (ret < 0)
                goto return_bad_req;
        else if (!ret)
                goto wait;

        /* End of job, return OK */
        if (an_bit) {
                chn->analysers &= ~an_bit;
                chn->analyse_exp = TICK_ETERNITY;
        }
        return 1;

 return_bad_req:
        /* An error occurred */
        channel_abort(&s->req);
        channel_abort(&s->res);

        if (!(chn->flags & CF_ISRESP)) {
                s->req.analysers &= AN_REQ_FLT_END;
                finst = SF_FINST_R;
                status = 400;
                /* FIXME: incr counters */
        }
        else {
                s->res.analysers &= AN_RES_FLT_END;
                finst = SF_FINST_H;
                status = 502;
                /* FIXME: incr counters */
        }

        if (s->txn) {
                /* Do not do that when we are waiting for the next request */
                if (s->txn->status > 0)
                        http_reply_and_close(s, s->txn->status, NULL);
                else {
                        s->txn->status = status;
                        http_reply_and_close(s, status, http_error_message(s));
                }
        }

        if (!(s->flags & SF_ERR_MASK))
                s->flags |= SF_ERR_PRXCOND;
        if (!(s->flags & SF_FINST_MASK))
                s->flags |= finst;
        return 0;

 wait:
        if (!(chn->flags & CF_ISRESP))
                channel_dont_connect(chn);
        return 0;
}


/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted, doing so helps
 * all code contributors.
 * Optional keywords are also declared with a NULL ->parse() function so that
 * the config parser can report an appropriate error when a known keyword was
 * not enabled. */
static struct cfg_kw_list cfg_kws = {ILH, {
                { CFG_LISTEN, "filter", parse_filter },
                { 0, NULL, NULL },
        }
};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);

REGISTER_POST_CHECK(flt_init_all);
REGISTER_PER_THREAD_INIT(flt_init_all_per_thread);
REGISTER_PER_THREAD_DEINIT(flt_deinit_all_per_thread);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */