/*
 * Stream filters related variables and functions.
 *
 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/buffer.h>
#include <common/debug.h>
#include <common/cfgparse.h>
#include <common/compat.h>
#include <common/config.h>
#include <common/errors.h>
#include <common/initcall.h>
#include <common/namespace.h>
#include <common/standard.h>
#include <common/hathreads.h>

#include <types/filters.h>
#include <types/proto_http.h>

#include <proto/compression.h>
#include <proto/filters.h>
#include <proto/flt_http_comp.h>
#include <proto/proto_http.h>
#include <proto/stream.h>
#include <proto/stream_interface.h>

/* Pool used to allocate filters */
DECLARE_STATIC_POOL(pool_head_filter, "filter", sizeof(struct filter));

static int handle_analyzer_result(struct stream *s, struct channel *chn, unsigned int an_bit, int ret);

/* - RESUME_FILTER_LOOP and RESUME_FILTER_END must always be used together.
 *   The first one begins a loop and the second one ends it.
 *
 * - BREAK_EXECUTION must be used to break the loop and set the filter from
 *   which to resume the next time.
 *
 *   Here is an example:
 *
 *    RESUME_FILTER_LOOP(stream, channel) {
 *        ...
 *        if (cond)
 *             BREAK_EXECUTION(stream, channel, label);
 *        ...
 *    } RESUME_FILTER_END;
 *    ...
 *    label:
 *    ...
 *
 */
#define RESUME_FILTER_LOOP(strm, chn)                                   \
        do {                                                            \
                struct filter *filter;                                  \
                                                                        \
                if (strm_flt(strm)->current[CHN_IDX(chn)]) {            \
                        filter = strm_flt(strm)->current[CHN_IDX(chn)]; \
                        strm_flt(strm)->current[CHN_IDX(chn)] = NULL;   \
                        goto resume_execution;                          \
                }                                                       \
                                                                        \
                list_for_each_entry(filter, &strm_flt(s)->filters, list) { \
                resume_execution:

#define RESUME_FILTER_END                                               \
                }                                                       \
        } while(0)

#define BREAK_EXECUTION(strm, chn, label)                               \
        do {                                                            \
                strm_flt(strm)->current[CHN_IDX(chn)] = filter;         \
                goto label;                                             \
        } while (0)


/* List head of all known filter keywords */
static struct flt_kw_list flt_keywords = {
        .list = LIST_HEAD_INIT(flt_keywords.list)
};

/*
 * Registers the filter keyword list <kwl> as a list of valid keywords for next
 * parsing sessions.
 */
void
flt_register_keywords(struct flt_kw_list *kwl)
{
        LIST_ADDQ(&flt_keywords.list, &kwl->list);
}
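
/* Example: a filter module typically declares its keyword list and registers
 * it at startup. Illustrative sketch only: "my-filter", parse_my_filter_cfg
 * and the "EXAMPLE" scope are hypothetical names, and the initializer layout
 * is assumed from the way <struct flt_kw_list> is walked in this file:
 *
 *     static struct flt_kw_list my_flt_kws = { "EXAMPLE", { }, {
 *             { "my-filter", parse_my_filter_cfg, NULL },
 *             { NULL, NULL, NULL },
 *         }};
 *
 *     INITCALL1(STG_REGISTER, flt_register_keywords, &my_flt_kws);
 *
 * Once registered, the keyword is resolvable by flt_find_kw() below and is
 * accepted after "filter" in a proxy section.
 */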

/*
 * Returns a pointer to the filter keyword <kw>, or NULL if not found. If the
 * keyword is found with a NULL ->parse() function, then an attempt is made to
 * find one with a valid ->parse() function. This way it is possible to declare
 * platform-dependent, known keywords as NULL, then only declare them as valid
 * if some options are met. Note that if the requested keyword contains an
 * opening parenthesis, everything from this point is ignored.
 */
struct flt_kw *
flt_find_kw(const char *kw)
{
        int index;
        const char *kwend;
        struct flt_kw_list *kwl;
        struct flt_kw *ret = NULL;

        kwend = strchr(kw, '(');
        if (!kwend)
                kwend = kw + strlen(kw);

        list_for_each_entry(kwl, &flt_keywords.list, list) {
                for (index = 0; kwl->kw[index].kw != NULL; index++) {
                        if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
                            kwl->kw[index].kw[kwend-kw] == 0) {
                                if (kwl->kw[index].parse)
                                        return &kwl->kw[index]; /* found it! */
                                else
                                        ret = &kwl->kw[index];  /* may be OK */
                        }
                }
        }
        return ret;
}

/*
 * Dumps all registered "filter" keywords to the <out> string pointer. The
 * unsupported keywords are only dumped if their supported form was not found.
 */
void
flt_dump_kws(char **out)
{
        struct flt_kw_list *kwl;
        int index;

        *out = NULL;
        list_for_each_entry(kwl, &flt_keywords.list, list) {
                for (index = 0; kwl->kw[index].kw != NULL; index++) {
                        if (kwl->kw[index].parse ||
                            flt_find_kw(kwl->kw[index].kw) == &kwl->kw[index]) {
                                memprintf(out, "%s[%4s] %s%s\n", *out ? *out : "",
                                          kwl->scope,
                                          kwl->kw[index].kw,
                                          kwl->kw[index].parse ? "" : " (not supported)");
                        }
                }
        }
}

/*
 * Lists the known filters on <out>
 */
void
list_filters(FILE *out)
{
        char *filters, *p, *f;

        fprintf(out, "Available filters :\n");
        flt_dump_kws(&filters);
        for (p = filters; (f = strtok_r(p,"\n",&p));)
                fprintf(out, "\t%s\n", f);
        free(filters);
}

/*
 * Parses the "filter" keyword. All keywords must be handled by filters
 * themselves
 */
static int
parse_filter(char **args, int section_type, struct proxy *curpx,
             struct proxy *defpx, const char *file, int line, char **err)
{
        struct flt_conf *fconf = NULL;

        /* Filter cannot be defined on a default proxy */
        if (curpx == defpx) {
                memprintf(err, "parsing [%s:%d] : %s is not allowed in a 'default' section.",
                          file, line, args[0]);
                return -1;
        }
        if (!strcmp(args[0], "filter")) {
                struct flt_kw *kw;
                int cur_arg;

                if (!*args[1]) {
                        memprintf(err,
                                  "parsing [%s:%d] : missing argument for '%s' in %s '%s'.",
                                  file, line, args[0], proxy_type_str(curpx), curpx->id);
                        goto error;
                }
                fconf = calloc(1, sizeof(*fconf));
                if (!fconf) {
                        memprintf(err, "'%s' : out of memory", args[0]);
                        goto error;
                }

                cur_arg = 1;
                kw = flt_find_kw(args[cur_arg]);
                if (kw) {
                        if (!kw->parse) {
                                memprintf(err, "parsing [%s:%d] : '%s' : "
                                          "'%s' option is not implemented in this version (check build options).",
                                          file, line, args[0], args[cur_arg]);
                                goto error;
                        }
                        if (kw->parse(args, &cur_arg, curpx, fconf, err, kw->private) != 0) {
                                if (err && *err)
                                        memprintf(err, "'%s' : '%s'",
                                                  args[0], *err);
                                else
                                        memprintf(err, "'%s' : error encountered while processing '%s'",
                                                  args[0], args[cur_arg]);
                                goto error;
                        }
                }
                else {
                        flt_dump_kws(err);
                        indent_msg(err, 4);
                        memprintf(err, "'%s' : unknown keyword '%s'.%s%s",
                                  args[0], args[cur_arg],
                                  err && *err ? " Registered keywords :" : "", err && *err ? *err : "");
                        goto error;
                }
                if (*args[cur_arg]) {
                        memprintf(err, "'%s %s' : unknown keyword '%s'.",
                                  args[0], args[1], args[cur_arg]);
                        goto error;
                }
                if (fconf->ops == NULL) {
                        memprintf(err, "'%s %s' : no callbacks defined.",
                                  args[0], args[1]);
                        goto error;
                }

                LIST_ADDQ(&curpx->filter_configs, &fconf->list);
        }
        return 0;

  error:
        free(fconf);
        return -1;
}
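
/* Example of the corresponding configuration syntax (illustrative only; the
 * exact set of available filter keywords depends on the build, "trace" and
 * "compression" being the ones bundled with this source tree):
 *
 *     frontend www
 *         bind :8000
 *         filter trace name BEFORE-HTTP-COMP
 *         filter compression
 *         default_backend app
 *
 * Each "filter" line goes through parse_filter() above: the first argument is
 * resolved with flt_find_kw() and the remaining arguments are delegated to
 * the keyword's ->parse() callback.
 */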

/*
 * Calls 'init' callback for all filters attached to a proxy. This happens after
 * the configuration parsing. Filters can finish filling their configuration.
 * Returns (ERR_ALERT|ERR_FATAL) if an error occurs, 0 otherwise.
 */
static int
flt_init(struct proxy *proxy)
{
        struct flt_conf *fconf;

        list_for_each_entry(fconf, &proxy->filter_configs, list) {
                if (fconf->ops->init && fconf->ops->init(proxy, fconf) < 0)
                        return ERR_ALERT|ERR_FATAL;
        }
        return 0;
}
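
/* flt_init() above invokes the 'init' callback from the table the filter
 * installed at parse time. A minimal callback table and ->parse() callback
 * could look roughly as follows (illustrative sketch with hypothetical names;
 * the parse prototype is deduced from the kw->parse() call in parse_filter()
 * and the field names from their use through FLT_OPS() in this file):
 *
 *     static struct flt_ops my_filter_ops = {
 *             .init         = my_filter_init,
 *             .deinit       = my_filter_deinit,
 *             .http_headers = my_filter_http_headers,
 *     };
 *
 *     static int
 *     parse_my_filter_cfg(char **args, int *cur_arg, struct proxy *px,
 *                         struct flt_conf *fconf, char **err, void *private)
 *     {
 *             fconf->ops = &my_filter_ops;
 *             (*cur_arg)++;
 *             return 0;
 *     }
 *
 * Leaving <fconf->ops> NULL makes parse_filter() reject the line with "no
 * callbacks defined".
 */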
267
Christopher Faulet71a6a8e2017-07-27 16:33:28 +0200268/*
269 * Calls 'init_per_thread' callback for all filters attached to a proxy for each
270 * threads. This happens after the thread creation. Filters can finish to fill
271 * their config. Returns (ERR_ALERT|ERR_FATAL) if an error occurs, 0 otherwise.
272 */
273static int
274flt_init_per_thread(struct proxy *proxy)
275{
276 struct flt_conf *fconf;
277
278 list_for_each_entry(fconf, &proxy->filter_configs, list) {
279 if (fconf->ops->init_per_thread && fconf->ops->init_per_thread(proxy, fconf) < 0)
280 return ERR_ALERT|ERR_FATAL;
281 }
282 return 0;
283}

/* Calls flt_init() for all proxies, see above */
static int
flt_init_all()
{
        struct proxy *px;
        int err_code = 0;

        for (px = proxies_list; px; px = px->next) {
                err_code |= flt_init(px);
                if (err_code & (ERR_ABORT|ERR_FATAL)) {
                        ha_alert("Failed to initialize filters for proxy '%s'.\n",
                                 px->id);
                        return err_code;
                }
        }
        return 0;
}

/* Calls flt_init_per_thread() for all proxies, see above. Be careful here, it
 * returns 0 if an error occurred. This is the opposite of flt_init_all. */
static int
flt_init_all_per_thread()
{
        struct proxy *px;
        int err_code = 0;

        for (px = proxies_list; px; px = px->next) {
                err_code = flt_init_per_thread(px);
                if (err_code & (ERR_ABORT|ERR_FATAL)) {
                        ha_alert("Failed to initialize filters for proxy '%s' for thread %u.\n",
                                 px->id, tid);
                        return 0;
                }
        }
        return 1;
}

/*
 * Calls 'check' callback for all filters attached to a proxy. This happens
 * after the configuration parsing but before filters initialization. Returns
 * the number of encountered errors.
 */
int
flt_check(struct proxy *proxy)
{
        struct flt_conf *fconf;
        int err = 0;

        list_for_each_entry(fconf, &proxy->filter_configs, list) {
                if (fconf->ops->check)
                        err += fconf->ops->check(proxy, fconf);
        }
        err += check_legacy_http_comp_flt(proxy);

        if (!LIST_ISEMPTY(&proxy->filter_configs) &&
            (proxy->options2 & PR_O2_USE_HTX)) {
                ha_alert("config: %s '%s' : filters cannot be used when "
                         "the HTX internal representation is enabled.\n",
                         proxy_type_str(proxy), proxy->id);
                err++;
        }

        return err;
}

/*
 * Calls 'deinit' callback for all filters attached to a proxy. This happens
 * when HAProxy is stopped.
 */
void
flt_deinit(struct proxy *proxy)
{
        struct flt_conf *fconf, *back;

        list_for_each_entry_safe(fconf, back, &proxy->filter_configs, list) {
                if (fconf->ops->deinit)
                        fconf->ops->deinit(proxy, fconf);
                LIST_DEL(&fconf->list);
                free(fconf);
        }
}

/*
 * Calls 'deinit_per_thread' callback for all filters attached to a proxy, for
 * each thread. This happens before exiting a thread.
 */
void
flt_deinit_per_thread(struct proxy *proxy)
{
        struct flt_conf *fconf, *back;

        list_for_each_entry_safe(fconf, back, &proxy->filter_configs, list) {
                if (fconf->ops->deinit_per_thread)
                        fconf->ops->deinit_per_thread(proxy, fconf);
        }
}


/* Calls flt_deinit_per_thread() for all proxies, see above */
static void
flt_deinit_all_per_thread()
{
        struct proxy *px;

        for (px = proxies_list; px; px = px->next)
                flt_deinit_per_thread(px);
}

/* Attaches a filter to a stream. Returns -1 if an error occurs, 0 otherwise. */
static int
flt_stream_add_filter(struct stream *s, struct flt_conf *fconf, unsigned int flags)
{
        struct filter *f = pool_alloc(pool_head_filter);

        if (!f) /* not enough memory */
                return -1;
        memset(f, 0, sizeof(*f));
        f->config = fconf;
        f->flags |= flags;

        if (FLT_OPS(f)->attach) {
                int ret = FLT_OPS(f)->attach(s, f);
                if (ret <= 0) {
                        pool_free(pool_head_filter, f);
                        return ret;
                }
        }

        LIST_ADDQ(&strm_flt(s)->filters, &f->list);
        strm_flt(s)->flags |= STRM_FLT_FL_HAS_FILTERS;
        return 0;
}

/*
 * Called when a stream is created. It attaches all frontend filters to the
 * stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_init(struct stream *s)
{
        struct flt_conf *fconf;

        memset(strm_flt(s), 0, sizeof(*strm_flt(s)));
        LIST_INIT(&strm_flt(s)->filters);
        list_for_each_entry(fconf, &strm_fe(s)->filter_configs, list) {
                if (flt_stream_add_filter(s, fconf, 0) < 0)
                        return -1;
        }
        return 0;
}

/*
 * Called when a stream is closed or when analysis ends (for an HTTP stream,
 * this happens after each request/response exchange). When analysis ends,
 * backend filters are removed. When the stream is closed, all filters attached
 * to the stream are removed.
 */
void
flt_stream_release(struct stream *s, int only_backend)
{
        struct filter *filter, *back;

        list_for_each_entry_safe(filter, back, &strm_flt(s)->filters, list) {
                if (!only_backend || (filter->flags & FLT_FL_IS_BACKEND_FILTER)) {
                        if (FLT_OPS(filter)->detach)
                                FLT_OPS(filter)->detach(s, filter);
                        LIST_DEL(&filter->list);
                        pool_free(pool_head_filter, filter);
                }
        }
        if (LIST_ISEMPTY(&strm_flt(s)->filters))
                strm_flt(s)->flags &= ~STRM_FLT_FL_HAS_FILTERS;
}

/*
 * Calls 'stream_start' for all filters attached to a stream. This happens when
 * the stream is created, just after calling flt_stream_init
 * function. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_start(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->stream_start && FLT_OPS(filter)->stream_start(s, filter) < 0)
                        return -1;
        }
        return 0;
}

/*
 * Calls 'stream_stop' for all filters attached to a stream. This happens when
 * the stream is stopped, just before calling flt_stream_release function.
 */
void
flt_stream_stop(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->stream_stop)
                        FLT_OPS(filter)->stream_stop(s, filter);
        }
}

/*
 * Calls 'check_timeouts' for all filters attached to a stream. This happens
 * when the stream is woken up because of an expired timer.
 */
void
flt_stream_check_timeouts(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->check_timeouts)
                        FLT_OPS(filter)->check_timeouts(s, filter);
        }
}

/*
 * Called when a backend is set for a stream. If the frontend and the backend
 * are not the same, this function attaches all backend filters to the
 * stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_set_stream_backend(struct stream *s, struct proxy *be)
{
        struct flt_conf *fconf;
        struct filter *filter;

        if (strm_fe(s) == be)
                goto end;

        list_for_each_entry(fconf, &be->filter_configs, list) {
                if (flt_stream_add_filter(s, fconf, FLT_FL_IS_BACKEND_FILTER) < 0)
                        return -1;
        }

  end:
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->stream_set_backend &&
                    FLT_OPS(filter)->stream_set_backend(s, filter, be) < 0)
                        return -1;
        }

        return 0;
}

/*
 * Calls 'http_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available (excluding the chunk
 * envelope for chunked messages) in the AN_REQ_HTTP_XFER_BODY and
 * AN_RES_HTTP_XFER_BODY analyzers. It takes care to update the next offset of
 * filters and adjusts available data to be sure that a filter cannot parse more
 * data than its predecessors. A filter can choose to not consume all available
 * data. Returns -1 if an error occurs, the number of consumed bytes otherwise.
 */
int
flt_http_data(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;
        unsigned int buf_i;
        int delta = 0, ret = 0;

        /* Save buffer state */
        buf_i = ci_data(msg->chn);

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;

                /* If the HTTP parser is ahead, we update the next offset of the
                 * current filter. This happens for chunked messages, at the
                 * beginning of a new chunk. */
                nxt = &FLT_NXT(filter, msg->chn);
                if (msg->next > *nxt)
                        *nxt = msg->next;

                if (FLT_OPS(filter)->http_data) {
                        unsigned int i = ci_data(msg->chn);

                        ret = FLT_OPS(filter)->http_data(s, filter, msg);
                        if (ret < 0)
                                break;
                        delta += (int)(ci_data(msg->chn) - i);

                        /* Update the next offset of the current filter */
                        *nxt += ret;

                        /* And set this value as the bound for the next
                         * filter. It will not be able to parse more data than
                         * this one. */
                        b_set_data(&msg->chn->buf, co_data(msg->chn) + *nxt);
                }
                else {
                        /* Consume all available data and update the next offset
                         * of the current filter. buf->i is untouched here. */
                        ret = MIN(msg->chunk_len + msg->next, ci_data(msg->chn)) - *nxt;
                        *nxt += ret;
                }
        }

        /* Restore the original buffer state */
        b_set_data(&msg->chn->buf, co_data(msg->chn) + buf_i + delta);

        return ret;
}
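
/* Worked example for the bookkeeping above (figures are made up): with two
 * "data" filters F1 and F2 and 100 input bytes pending, if F1->http_data()
 * consumes only 60 bytes, FLT_NXT(F1) grows by 60 and the buffer is
 * temporarily bounded so that F2 is offered at most those 60 bytes and can
 * never get ahead of F1. If F2 then consumes 40, FLT_NXT(F2) grows by 40 and
 * 40 is the value returned to the caller, i.e. the amount usable downstream.
 * The original buffer length is restored at the end, adjusted by <delta> for
 * any bytes a filter may have added or removed.
 */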

/*
 * Calls 'http_chunk_trailers' callback for all "data" filters attached to a
 * stream. This function is called for chunked messages only when a part of the
 * trailers was parsed in the AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY
 * analyzers. Filters can know how much data was parsed by the HTTP parser
 * until the last call thanks to the msg->sol value. Returns a negative value
 * if an error occurs, any other value otherwise.
 */
int
flt_http_chunk_trailers(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;
        int ret = 1;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;

                /* Be sure to set the next offset of the filter at the right
                 * place. This is really useful when the first part of the
                 * trailers was parsed. */
                nxt = &FLT_NXT(filter, msg->chn);
                *nxt = msg->next;

                if (FLT_OPS(filter)->http_chunk_trailers) {
                        ret = FLT_OPS(filter)->http_chunk_trailers(s, filter, msg);
                        if (ret < 0)
                                break;
                }
                /* Update the next offset of the current filter. Here all data
                 * are always consumed. */
                *nxt += msg->sol;
        }
        return ret;
}

/*
 * Calls 'http_end' callback for all filters attached to a stream. All filters
 * are called here, but only if there is at least one "data" filter. This
 * function is called when all data were parsed and forwarded. 'http_end'
 * callback is resumable, so this function returns a negative value if an error
 * occurs, 0 if it needs to wait for some reason, any other value otherwise.
 */
int
flt_http_end(struct stream *s, struct http_msg *msg)
{
        int ret = 1;

        RESUME_FILTER_LOOP(s, msg->chn) {
                if (FLT_OPS(filter)->http_end) {
                        ret = FLT_OPS(filter)->http_end(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, msg->chn, end);
                }
        } RESUME_FILTER_END;
 end:
        return ret;
}

/*
 * Calls 'http_reset' callback for all filters attached to a stream. This
 * happens when a 100-continue response is received.
 */
void
flt_http_reset(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->http_reset)
                        FLT_OPS(filter)->http_reset(s, filter, msg);
        }
}

/*
 * Calls 'http_reply' callback for all filters attached to a stream when HAProxy
 * decides to stop the HTTP message processing.
 */
void
flt_http_reply(struct stream *s, short status, const struct buffer *msg)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->http_reply)
                        FLT_OPS(filter)->http_reply(s, filter, status, msg);
        }
}

/*
 * Calls 'http_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded in the
 * AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY analyzers. It takes care to
 * update the forward offset of filters and adjusts "forwardable" data to be
 * sure that a filter cannot forward more data than its predecessors. A filter
 * can choose to not forward all parsed data. Returns a negative value if an
 * error occurs, else the number of forwarded bytes.
 */
int
flt_http_forward_data(struct stream *s, struct http_msg *msg, unsigned int len)
{
        struct filter *filter;
        int ret = len;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt, *fwd;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;

                /* If the HTTP parser is ahead, we update the next offset of the
                 * current filter. This happens for chunked messages, when the
                 * chunk envelope is parsed. */
                nxt = &FLT_NXT(filter, msg->chn);
                fwd = &FLT_FWD(filter, msg->chn);
                if (msg->next > *nxt)
                        *nxt = msg->next;

                if (FLT_OPS(filter)->http_forward_data) {
                        /* Remove bytes that the current filter considered as
                         * forwarded */
                        ret = FLT_OPS(filter)->http_forward_data(s, filter, msg, ret - *fwd);
                        if (ret < 0)
                                goto end;
                }

                /* Adjust bytes that the current filter considers as
                 * forwarded */
                *fwd += ret;

                /* And set this value as the bound for the next filter. It will
                 * not be able to forward more data than the current one. */
                ret = *fwd;
        }

        if (!ret)
                goto end;

        /* Finally, adjust filters offsets by removing data that HAProxy will
         * forward. */
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;
                FLT_NXT(filter, msg->chn) -= ret;
                FLT_FWD(filter, msg->chn) -= ret;
        }
 end:
        return ret;
}

/*
 * Calls 'channel_start_analyze' callback for all filters attached to a
 * stream. This function is called when we start to analyze a request or a
 * response. For frontend filters, it is called before all other analyzers. For
 * backend ones, it is called before all backend
 * analyzers. 'channel_start_analyze' callback is resumable, so this function
 * returns 0 if an error occurs or if it needs to wait, any other value
 * otherwise.
 */
int
flt_start_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* If this function is called, this means there is at least one filter,
         * so we do not need to check the filter list's emptiness. */

        /* Set flag on channel to tell that the channel is filtered */
        chn->flags |= CF_FLT_ANALYZE;

        RESUME_FILTER_LOOP(s, chn) {
                if (!(chn->flags & CF_ISRESP)) {
                        if (an_bit == AN_REQ_FLT_START_BE &&
                            !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
                                continue;
                }
                else {
                        if (an_bit == AN_RES_FLT_START_BE &&
                            !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
                                continue;
                }

                FLT_NXT(filter, chn) = 0;
                FLT_FWD(filter, chn) = 0;

                if (FLT_OPS(filter)->channel_start_analyze) {
                        ret = FLT_OPS(filter)->channel_start_analyze(s, filter, chn);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, end);
                }
        } RESUME_FILTER_END;

 end:
        return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Calls 'channel_pre_analyze' callback for all filters attached to a
 * stream. This function is called BEFORE each analyzer attached to a channel,
 * except analyzers responsible for data sending. 'channel_pre_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 *
 * Note this function can be called many times for the same analyzer. In fact,
 * it is called until the analyzer finishes its processing.
 */
int
flt_pre_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        RESUME_FILTER_LOOP(s, chn) {
                if (FLT_OPS(filter)->channel_pre_analyze && (filter->pre_analyzers & an_bit)) {
                        ret = FLT_OPS(filter)->channel_pre_analyze(s, filter, chn, an_bit);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, check_result);
                }
        } RESUME_FILTER_END;

 check_result:
        return handle_analyzer_result(s, chn, 0, ret);
}

/*
 * Calls 'channel_post_analyze' callback for all filters attached to a
 * stream. This function is called AFTER each analyzer attached to a channel,
 * except analyzers responsible for data sending. 'channel_post_analyze'
 * callback is NOT resumable, so this function returns 0 if an error occurs,
 * any other value otherwise.
 *
 * Here, AFTER means when the analyzer finishes its processing.
 */
int
flt_post_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        struct filter *filter;
        int ret = 1;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->channel_post_analyze && (filter->post_analyzers & an_bit)) {
                        ret = FLT_OPS(filter)->channel_post_analyze(s, filter, chn, an_bit);
                        if (ret < 0)
                                break;
                }
        }
        return handle_analyzer_result(s, chn, 0, ret);
}

/*
 * This function is the AN_REQ/RES_FLT_HTTP_HDRS analyzer, used to filter HTTP
 * headers of a request or a response. Returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_analyze_http_headers(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        struct filter *filter;
        struct http_msg *msg;
        int ret = 1;

        msg = ((chn->flags & CF_ISRESP) ? &s->txn->rsp : &s->txn->req);
        RESUME_FILTER_LOOP(s, chn) {
                if (FLT_OPS(filter)->http_headers) {
                        ret = FLT_OPS(filter)->http_headers(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, check_result);
                }
        } RESUME_FILTER_END;

        /* We increase next offset of all "data" filters after all processing on
         * headers because any filter can alter them. So the definitive size of
         * headers (msg->sov) is only known when all filters have been
         * called. */
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                /* Handle "data" filters only */
                if (!IS_DATA_FILTER(filter, chn))
                        continue;
                FLT_NXT(filter, chn) = msg->sov;
        }

 check_result:
        return handle_analyzer_result(s, chn, an_bit, ret);
}
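
/* Example: a minimal 'http_headers' callback that lets the message through
 * untouched simply returns a positive value (sketch, hypothetical name):
 *
 *     static int
 *     my_filter_http_headers(struct stream *s, struct filter *filter,
 *                            struct http_msg *msg)
 *     {
 *             return 1;
 *     }
 *
 * Returning 0 makes the analyzer wait and call it again later, and a negative
 * value reports an error, as handled by handle_analyzer_result() below.
 */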

/*
 * Calls 'channel_end_analyze' callback for all filters attached to a
 * stream. This function is called when we stop analyzing a request or a
 * response. It is called after all other analyzers. 'channel_end_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_end_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* Check if all filters attached on the stream have finished their
         * processing on this channel. */
        if (!(chn->flags & CF_FLT_ANALYZE))
                goto sync;

        RESUME_FILTER_LOOP(s, chn) {
                FLT_NXT(filter, chn) = 0;
                FLT_FWD(filter, chn) = 0;
                unregister_data_filter(s, chn, filter);

                if (FLT_OPS(filter)->channel_end_analyze) {
                        ret = FLT_OPS(filter)->channel_end_analyze(s, filter, chn);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, end);
                }
        } RESUME_FILTER_END;

 end:
        /* We don't remove this analyzer yet because we need to synchronize both
         * channels. So here, we just remove the flag CF_FLT_ANALYZE. */
        ret = handle_analyzer_result(s, chn, 0, ret);
        if (ret) {
                chn->flags &= ~CF_FLT_ANALYZE;

                /* Pretend there is an activity on both channels. The flag on the
                 * current one will be automatically removed, so only the other
                 * one will remain. This is a way to be sure that the
                 * 'channel_end_analyze' callback will have a chance to be
                 * called at least once for the other side to finish the current
                 * processing. Of course, it is the filter's responsibility to
                 * wake up the stream if it chooses to loop on this callback. */
                s->req.flags |= CF_WAKE_ONCE;
                s->res.flags |= CF_WAKE_ONCE;
        }


 sync:
        /* Now we can check if filters have finished their work on both
         * channels */
        if (!(s->req.flags & CF_FLT_ANALYZE) && !(s->res.flags & CF_FLT_ANALYZE)) {
                /* Sync channels by removing this analyzer for both channels */
                s->req.analysers &= ~AN_REQ_FLT_END;
                s->res.analysers &= ~AN_RES_FLT_END;

                /* Clean up the HTTP transaction if needed */
                if (s->txn && (s->txn->flags & TX_WAIT_CLEANUP))
                        http_end_txn_clean_session(s);

                /* Remove backend filters from the list */
                flt_stream_release(s, 1);
        }

        return ret;
}


/*
 * Calls 'tcp_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available. It takes care to update
 * the next offset of filters and adjusts available data to be sure that a
 * filter cannot parse more data than its predecessors. A filter can choose to
 * not consume all available data. Returns -1 if an error occurs, the number of
 * consumed bytes otherwise.
 */
static int
flt_data(struct stream *s, struct channel *chn)
{
        struct filter *filter;
        unsigned int buf_i;
        int delta = 0, ret = 0;

        /* Save buffer state */
        buf_i = ci_data(chn);

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, chn))
                        continue;

                nxt = &FLT_NXT(filter, chn);
                if (FLT_OPS(filter)->tcp_data) {
                        unsigned int i = ci_data(chn);

                        ret = FLT_OPS(filter)->tcp_data(s, filter, chn);
                        if (ret < 0)
                                break;
                        delta += (int)(ci_data(chn) - i);

                        /* Increase next offset of the current filter */
                        *nxt += ret;

                        /* And set this value as the bound for the next
                         * filter. It will not be able to parse more data than
                         * the current one. */
                        b_set_data(&chn->buf, co_data(chn) + *nxt);
                }
                else {
                        /* Consume all available data */
                        *nxt = ci_data(chn);
                }

                /* Update <ret> value to be sure to have the last one when we
                 * exit from the loop. This value will be used to know how much
                 * data are "forwardable" */
                ret = *nxt;
        }

        /* Restore the original buffer state */
        b_set_data(&chn->buf, co_data(chn) + buf_i + delta);

        return ret;
}

/*
 * Calls 'tcp_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded. It takes
 * care to update the forward offset of filters and adjusts "forwardable" data
 * to be sure that a filter cannot forward more data than its predecessors. A
 * filter can choose to not forward all parsed data. Returns a negative value if
 * an error occurs, else the number of forwarded bytes.
 */
static int
flt_forward_data(struct stream *s, struct channel *chn, unsigned int len)
{
        struct filter *filter;
        int ret = len;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *fwd;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, chn))
                        continue;

                fwd = &FLT_FWD(filter, chn);
                if (FLT_OPS(filter)->tcp_forward_data) {
                        /* Remove bytes that the current filter considered as
                         * forwarded */
                        ret = FLT_OPS(filter)->tcp_forward_data(s, filter, chn, ret - *fwd);
                        if (ret < 0)
                                goto end;
                }

                /* Adjust bytes that the current filter considers as
                 * forwarded */
                *fwd += ret;

                /* And set this value as the bound for the next filter. It will
                 * not be able to forward more data than the current one. */
                ret = *fwd;
        }

        if (!ret)
                goto end;

        /* Finally, adjust filters offsets by removing data that HAProxy will
         * forward. */
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (!IS_DATA_FILTER(filter, chn))
                        continue;
                FLT_NXT(filter, chn) -= ret;
                FLT_FWD(filter, chn) -= ret;
        }

 end:
        return ret;
}

/*
 * Called when TCP data must be filtered on a channel. This function is the
 * AN_REQ/RES_FLT_XFER_DATA analyzer. When called, it is responsible for
 * forwarding data when the proxy is not in http mode. Behind the scenes, it
 * calls consecutively 'tcp_data' and 'tcp_forward_data' callbacks for all
 * "data" filters attached to a stream. Returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_xfer_data(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* If there are no "data" filters, we do nothing */
        if (!HAS_DATA_FILTERS(s, chn))
                goto end;

        /* Be sure that the output is still opened. Else we stop the data
         * filtering. */
        if ((chn->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
            ((chn->flags & CF_SHUTW) && (chn->to_forward || co_data(chn))))
                goto end;

        /* Let all "data" filters parse incoming data */
        ret = flt_data(s, chn);
        if (ret < 0)
                goto end;

        /* And forward them */
        ret = flt_forward_data(s, chn, ret);
        if (ret < 0)
                goto end;

        /* Consume data that all filters consider as forwarded. */
        c_adv(chn, ret);

        /* Stop waiting for data if the input is closed and no data is pending,
         * or if the output is closed. */
        if ((chn->flags & CF_SHUTW) ||
            ((chn->flags & CF_SHUTR) && !ci_data(chn))) {
                ret = 1;
                goto end;
        }

        /* Wait for data */
        return 0;
 end:
        /* Terminate the data filtering. If <ret> is negative, an error was
         * encountered during the filtering. */
        return handle_analyzer_result(s, chn, an_bit, ret);
}
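
/* Example: a pass-through TCP "data" filter would accept everything it is
 * offered from its 'tcp_data' callback (sketch with a hypothetical name; the
 * available length is computed the same way flt_data() above bounds it):
 *
 *     static int
 *     my_filter_tcp_data(struct stream *s, struct filter *filter,
 *                        struct channel *chn)
 *     {
 *             return ci_data(chn) - FLT_NXT(filter, chn);
 *     }
 *
 * Returning less than that keeps the remaining bytes buffered until a later
 * call, which is how a filter can inspect or throttle data progressively.
 */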

/*
 * Handles result of filter's analyzers. It returns 0 if an error occurs or if
 * it needs to wait, any other value otherwise.
 */
static int
handle_analyzer_result(struct stream *s, struct channel *chn,
                       unsigned int an_bit, int ret)
{
        int finst;

        if (ret < 0)
                goto return_bad_req;
        else if (!ret)
                goto wait;

        /* End of job, return OK */
        if (an_bit) {
                chn->analysers &= ~an_bit;
                chn->analyse_exp = TICK_ETERNITY;
        }
        return 1;

 return_bad_req:
        /* An error occurs */
        channel_abort(&s->req);
        channel_abort(&s->res);

        if (!(chn->flags & CF_ISRESP)) {
                s->req.analysers &= AN_REQ_FLT_END;
                finst = SF_FINST_R;
                /* FIXME: incr counters */
        }
        else {
                s->res.analysers &= AN_RES_FLT_END;
                finst = SF_FINST_H;
                /* FIXME: incr counters */
        }

        if (s->txn) {
                /* Do not do that when we are waiting for the next request */
                if (s->txn->status)
                        http_reply_and_close(s, s->txn->status, NULL);
                else {
                        s->txn->status = 400;
                        http_reply_and_close(s, 400, http_error_message(s));
                }
        }

        if (!(s->flags & SF_ERR_MASK))
                s->flags |= SF_ERR_PRXCOND;
        if (!(s->flags & SF_FINST_MASK))
                s->flags |= finst;
        return 0;

 wait:
        if (!(chn->flags & CF_ISRESP))
                channel_dont_connect(chn);
        return 0;
}


/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted, doing so helps
 * all code contributors.
 * Optional keywords are also declared with a NULL ->parse() function so that
 * the config parser can report an appropriate error when a known keyword was
 * not enabled. */
static struct cfg_kw_list cfg_kws = {ILH, {
                { CFG_LISTEN, "filter", parse_filter },
                { 0, NULL, NULL },
        }
};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);

__attribute__((destructor))
static void
__filters_deinit(void)
{
        pool_destroy(pool_head_filter);
}

REGISTER_POST_CHECK(flt_init_all);
REGISTER_PER_THREAD_INIT(flt_init_all_per_thread);
REGISTER_PER_THREAD_DEINIT(flt_deinit_all_per_thread);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */