/*
 * Stream filters related variables and functions.
 *
 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/buffer.h>
#include <common/debug.h>
#include <common/cfgparse.h>
#include <common/compat.h>
#include <common/config.h>
#include <common/errors.h>
#include <common/namespace.h>
#include <common/standard.h>
#include <common/hathreads.h>

#include <types/filters.h>
#include <types/proto_http.h>

#include <proto/compression.h>
#include <proto/filters.h>
#include <proto/flt_http_comp.h>
#include <proto/proto_http.h>
#include <proto/stream.h>
#include <proto/stream_interface.h>

/* Pool used to allocate filters */
struct pool_head *pool2_filter = NULL;

static int handle_analyzer_result(struct stream *s, struct channel *chn, unsigned int an_bit, int ret);

/* - RESUME_FILTER_LOOP and RESUME_FILTER_END must always be used together.
 *   The first one begins a loop and the second one ends it.
 *
 * - BREAK_EXECUTION must be used to break the loop and set the filter from
 *   which to resume the next time.
 *
 *   Here is an example:
 *
 *    RESUME_FILTER_LOOP(stream, channel) {
 *        ...
 *        if (cond)
 *             BREAK_EXECUTION(stream, channel, label);
 *        ...
 *    } RESUME_FILTER_END;
 *    ...
 *     label:
 *    ...
 *
 */
#define RESUME_FILTER_LOOP(strm, chn)					\
	do {								\
		struct filter *filter;					\
									\
		if (strm_flt(strm)->current[CHN_IDX(chn)]) {		\
			filter = strm_flt(strm)->current[CHN_IDX(chn)]; \
			strm_flt(strm)->current[CHN_IDX(chn)] = NULL;	\
			goto resume_execution;				\
		}							\
									\
		list_for_each_entry(filter, &strm_flt(strm)->filters, list) { \
		resume_execution:

#define RESUME_FILTER_END					\
		}						\
	} while(0)

#define BREAK_EXECUTION(strm, chn, label)			\
	do {							\
		strm_flt(strm)->current[CHN_IDX(chn)] = filter;	\
		goto label;					\
	} while (0)

/* List head of all known filter keywords */
static struct flt_kw_list flt_keywords = {
	.list = LIST_HEAD_INIT(flt_keywords.list)
};

/*
 * Registers the filter keyword list <kwl> as a list of valid keywords for next
 * parsing sessions.
 */
void
flt_register_keywords(struct flt_kw_list *kwl)
{
	LIST_ADDQ(&flt_keywords.list, &kwl->list);
}
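
/*
 * Note (illustrative sketch, not part of the original file): a filter module
 * usually declares its keywords in a flt_kw_list and registers them from a
 * constructor. The field names below are assumed from their use in this file
 * (->scope, ->list, ->kw[]); see <types/filters.h> for the exact definition:
 *
 *     static struct flt_kw_list my_flt_kws = {
 *             .scope = "MYFLT",
 *             .kw    = {
 *                     { "my-filter", parse_my_filter_cfg, NULL },
 *                     { NULL, NULL, NULL },
 *             }
 *     };
 *
 *     __attribute__((constructor))
 *     static void __my_filter_init(void)
 *     {
 *             flt_register_keywords(&my_flt_kws);
 *     }
 */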

/*
 * Returns a pointer to the filter keyword <kw>, or NULL if not found. If the
 * keyword is found with a NULL ->parse() function, then an attempt is made to
 * find one with a valid ->parse() function. This way it is possible to declare
 * platform-dependent, known keywords as NULL, then only declare them as valid
 * if some options are met. Note that if the requested keyword contains an
 * opening parenthesis, everything from this point is ignored.
 */
struct flt_kw *
flt_find_kw(const char *kw)
{
	int index;
	const char *kwend;
	struct flt_kw_list *kwl;
	struct flt_kw *ret = NULL;

	kwend = strchr(kw, '(');
	if (!kwend)
		kwend = kw + strlen(kw);

	list_for_each_entry(kwl, &flt_keywords.list, list) {
		for (index = 0; kwl->kw[index].kw != NULL; index++) {
			if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
			    kwl->kw[index].kw[kwend-kw] == 0) {
				if (kwl->kw[index].parse)
					return &kwl->kw[index]; /* found it !*/
				else
					ret = &kwl->kw[index];  /* may be OK */
			}
		}
	}
	return ret;
}

/*
 * Dumps all registered "filter" keywords to the <out> string pointer. The
 * unsupported keywords are only dumped if their supported form was not found.
 */
void
flt_dump_kws(char **out)
{
	struct flt_kw_list *kwl;
	int index;

	*out = NULL;
	list_for_each_entry(kwl, &flt_keywords.list, list) {
		for (index = 0; kwl->kw[index].kw != NULL; index++) {
			if (kwl->kw[index].parse ||
			    flt_find_kw(kwl->kw[index].kw) == &kwl->kw[index]) {
				memprintf(out, "%s[%4s] %s%s\n", *out ? *out : "",
				          kwl->scope,
				          kwl->kw[index].kw,
				          kwl->kw[index].parse ? "" : " (not supported)");
			}
		}
	}
}

/*
 * Lists the known filters on <out>
 */
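/*
 * (Usage note, added for clarity: this listing is typically what backs the
 * "Available filters :" section printed when dumping the build options,
 * e.g. with "haproxy -vv".)
 */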
void
list_filters(FILE *out)
{
	char *filters, *p, *f;

	fprintf(out, "Available filters :\n");
	flt_dump_kws(&filters);
	for (p = filters; (f = strtok_r(p,"\n",&p));)
		fprintf(out, "\t%s\n", f);
	free(filters);
}
169/*
Christopher Fauletd7c91962015-04-30 11:48:27 +0200170 * Parses the "filter" keyword. All keywords must be handled by filters
171 * themselves
172 */
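/*
 * For instance (illustrative only, not taken from this file), a proxy section
 * may contain lines such as:
 *
 *     frontend fe
 *         filter trace name MY-TRACE
 *         filter compression
 *
 * The first word after "filter" is looked up with flt_find_kw() and the rest
 * of the line is handed to that keyword's ->parse() callback.
 */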
static int
parse_filter(char **args, int section_type, struct proxy *curpx,
             struct proxy *defpx, const char *file, int line, char **err)
{
	struct flt_conf *fconf = NULL;

	/* Filter cannot be defined on a default proxy */
	if (curpx == defpx) {
		memprintf(err, "parsing [%s:%d] : %s is not allowed in a 'default' section.",
			  file, line, args[0]);
		return -1;
	}
	if (!strcmp(args[0], "filter")) {
		struct flt_kw *kw;
		int cur_arg;

		if (!*args[1]) {
			memprintf(err,
				  "parsing [%s:%d] : missing argument for '%s' in %s '%s'.",
				  file, line, args[0], proxy_type_str(curpx), curpx->id);
			goto error;
		}
		fconf = calloc(1, sizeof(*fconf));
		if (!fconf) {
			memprintf(err, "'%s' : out of memory", args[0]);
			goto error;
		}

		cur_arg = 1;
		kw = flt_find_kw(args[cur_arg]);
		if (kw) {
			if (!kw->parse) {
				memprintf(err, "parsing [%s:%d] : '%s' : "
					  "'%s' option is not implemented in this version (check build options).",
					  file, line, args[0], args[cur_arg]);
				goto error;
			}
			if (kw->parse(args, &cur_arg, curpx, fconf, err, kw->private) != 0) {
				if (err && *err)
					memprintf(err, "'%s' : '%s'",
						  args[0], *err);
				else
					memprintf(err, "'%s' : error encountered while processing '%s'",
						  args[0], args[cur_arg]);
				goto error;
			}
		}
		else {
			flt_dump_kws(err);
			indent_msg(err, 4);
			memprintf(err, "'%s' : unknown keyword '%s'.%s%s",
				  args[0], args[cur_arg],
				  err && *err ? " Registered keywords :" : "", err && *err ? *err : "");
			goto error;
		}
		if (*args[cur_arg]) {
			memprintf(err, "'%s %s' : unknown keyword '%s'.",
				  args[0], args[1], args[cur_arg]);
			goto error;
		}
		if (fconf->ops == NULL) {
			memprintf(err, "'%s %s' : no callbacks defined.",
				  args[0], args[1]);
			goto error;
		}

		LIST_ADDQ(&curpx->filter_configs, &fconf->list);
	}
	return 0;

  error:
	free(fconf);
	return -1;
}

/*
 * Calls the 'init' callback for all filters attached to a proxy. This happens
 * after the configuration parsing. Filters can finish filling their config.
 * Returns (ERR_ALERT|ERR_FATAL) if an error occurs, 0 otherwise.
 */
static int
flt_init(struct proxy *proxy)
{
	struct flt_conf *fconf;

	list_for_each_entry(fconf, &proxy->filter_configs, list) {
		if (fconf->ops->init && fconf->ops->init(proxy, fconf) < 0)
			return ERR_ALERT|ERR_FATAL;
	}
	return 0;
}

/*
 * Calls the 'init_per_thread' callback for all filters attached to a proxy for
 * each thread. This happens after the thread creation. Filters can finish
 * filling their config. Returns (ERR_ALERT|ERR_FATAL) if an error occurs,
 * 0 otherwise.
 */
static int
flt_init_per_thread(struct proxy *proxy)
{
	struct flt_conf *fconf;

	list_for_each_entry(fconf, &proxy->filter_configs, list) {
		if (fconf->ops->init_per_thread && fconf->ops->init_per_thread(proxy, fconf) < 0)
			return ERR_ALERT|ERR_FATAL;
	}
	return 0;
}

/* Calls flt_init() for all proxies, see above */
static int
flt_init_all()
{
	struct proxy *px;
	int err_code = 0;

	for (px = proxies_list; px; px = px->next) {
		err_code |= flt_init(px);
		if (err_code & (ERR_ABORT|ERR_FATAL)) {
			ha_alert("Failed to initialize filters for proxy '%s'.\n",
				 px->id);
			return err_code;
		}
	}
	return 0;
}

/* Calls flt_init_per_thread() for all proxies, see above. Be careful here: it
 * returns 0 if an error occurred, which is the opposite convention of
 * flt_init_all(). */
static int
flt_init_all_per_thread()
{
	struct proxy *px;
	int err_code = 0;

	for (px = proxies_list; px; px = px->next) {
		err_code = flt_init_per_thread(px);
		if (err_code & (ERR_ABORT|ERR_FATAL)) {
			ha_alert("Failed to initialize filters for proxy '%s' for thread %u.\n",
				 px->id, tid);
			return 0;
		}
	}
	return 1;
}

/*
 * Calls 'check' callback for all filters attached to a proxy. This happens
 * after the configuration parsing but before filters initialization. Returns
 * the number of encountered errors.
 */
int
flt_check(struct proxy *proxy)
{
	struct flt_conf *fconf;
	int err = 0;

	list_for_each_entry(fconf, &proxy->filter_configs, list) {
		if (fconf->ops->check)
			err += fconf->ops->check(proxy, fconf);
	}
	err += check_legacy_http_comp_flt(proxy);
	return err;
}

/*
 * Calls the 'deinit' callback for all filters attached to a proxy. This
 * happens when HAProxy is stopped.
 */
void
flt_deinit(struct proxy *proxy)
{
	struct flt_conf *fconf, *back;

	list_for_each_entry_safe(fconf, back, &proxy->filter_configs, list) {
		if (fconf->ops->deinit)
			fconf->ops->deinit(proxy, fconf);
		LIST_DEL(&fconf->list);
		free(fconf);
	}
}

/*
 * Calls the 'deinit_per_thread' callback for all filters attached to a proxy
 * for each thread. This happens before exiting a thread.
 */
void
flt_deinit_per_thread(struct proxy *proxy)
{
	struct flt_conf *fconf, *back;

	list_for_each_entry_safe(fconf, back, &proxy->filter_configs, list) {
		if (fconf->ops->deinit_per_thread)
			fconf->ops->deinit_per_thread(proxy, fconf);
	}
}

/* Calls flt_deinit_per_thread() for all proxies, see above */
static void
flt_deinit_all_per_thread()
{
	struct proxy *px;

	for (px = proxies_list; px; px = px->next)
		flt_deinit_per_thread(px);
}

/* Attaches a filter to a stream. Returns -1 if an error occurs, 0 otherwise. */
static int
flt_stream_add_filter(struct stream *s, struct flt_conf *fconf, unsigned int flags)
{
	struct filter *f = pool_alloc2(pool2_filter);

	if (!f) /* not enough memory */
		return -1;
	memset(f, 0, sizeof(*f));
	f->config = fconf;
	f->flags |= flags;

	if (FLT_OPS(f)->attach) {
		int ret = FLT_OPS(f)->attach(s, f);
		if (ret <= 0) {
			pool_free2(pool2_filter, f);
			return ret;
		}
	}

	LIST_ADDQ(&strm_flt(s)->filters, &f->list);
	strm_flt(s)->flags |= STRM_FLT_FL_HAS_FILTERS;
	return 0;
}

/*
 * Called when a stream is created. It attaches all frontend filters to the
 * stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_init(struct stream *s)
{
	struct flt_conf *fconf;

	memset(strm_flt(s), 0, sizeof(*strm_flt(s)));
	LIST_INIT(&strm_flt(s)->filters);
	list_for_each_entry(fconf, &strm_fe(s)->filter_configs, list) {
		if (flt_stream_add_filter(s, fconf, 0) < 0)
			return -1;
	}
	return 0;
}

/*
 * Called when a stream is closed or when analysis ends (for an HTTP stream,
 * this happens after each request/response exchange). When analysis ends,
 * backend filters are removed. When the stream is closed, all filters attached
 * to the stream are removed.
 */
void
flt_stream_release(struct stream *s, int only_backend)
{
	struct filter *filter, *back;

	list_for_each_entry_safe(filter, back, &strm_flt(s)->filters, list) {
		if (!only_backend || (filter->flags & FLT_FL_IS_BACKEND_FILTER)) {
			if (FLT_OPS(filter)->detach)
				FLT_OPS(filter)->detach(s, filter);
			LIST_DEL(&filter->list);
			pool_free2(pool2_filter, filter);
		}
	}
	if (LIST_ISEMPTY(&strm_flt(s)->filters))
		strm_flt(s)->flags &= ~STRM_FLT_FL_HAS_FILTERS;
}

/*
 * Calls 'stream_start' for all filters attached to a stream. This happens when
 * the stream is created, just after calling the flt_stream_init()
 * function. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_start(struct stream *s)
{
	struct filter *filter;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (FLT_OPS(filter)->stream_start && FLT_OPS(filter)->stream_start(s, filter) < 0)
			return -1;
	}
	return 0;
}

/*
 * Calls 'stream_stop' for all filters attached to a stream. This happens when
 * the stream is stopped, just before calling the flt_stream_release() function.
 */
void
flt_stream_stop(struct stream *s)
{
	struct filter *filter;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (FLT_OPS(filter)->stream_stop)
			FLT_OPS(filter)->stream_stop(s, filter);
	}
}

/*
 * Calls 'check_timeouts' for all filters attached to a stream. This happens
 * when the stream is woken up because of an expired timer.
 */
void
flt_stream_check_timeouts(struct stream *s)
{
	struct filter *filter;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (FLT_OPS(filter)->check_timeouts)
			FLT_OPS(filter)->check_timeouts(s, filter);
	}
}

/*
 * Called when a backend is set for a stream. If the frontend and the backend
 * are not the same, this function attaches all backend filters to the
 * stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_set_stream_backend(struct stream *s, struct proxy *be)
{
	struct flt_conf *fconf;
	struct filter   *filter;

	if (strm_fe(s) == be)
		goto end;

	list_for_each_entry(fconf, &be->filter_configs, list) {
		if (flt_stream_add_filter(s, fconf, FLT_FL_IS_BACKEND_FILTER) < 0)
			return -1;
	}

  end:
	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (FLT_OPS(filter)->stream_set_backend &&
		    FLT_OPS(filter)->stream_set_backend(s, filter, be) < 0)
			return -1;
	}

	return 0;
}

/*
 * Calls the 'http_data' callback for all "data" filters attached to a
 * stream. This function is called when incoming data are available (excluding
 * the chunk envelope for chunked messages) in the AN_REQ_HTTP_XFER_BODY and
 * AN_RES_HTTP_XFER_BODY analyzers. It takes care of updating the next offset
 * of filters and adjusts the available data so that a filter cannot parse more
 * data than its predecessors. A filter can choose not to consume all available
 * data. Returns -1 if an error occurs, the number of consumed bytes otherwise.
 */
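/*
 * Illustrative walk-through (not taken from the code): with two data filters
 * F1 then F2 and 100 new bytes in the buffer, if F1's http_data() callback
 * consumes only 60 of them, FLT_NXT(F1) advances by 60 and buf->i is
 * temporarily clamped to F1's next offset, so F2 cannot parse beyond what F1
 * has already parsed. The original buffer state is restored before returning,
 * adjusted by whatever the callbacks added or removed (<delta>).
 */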
int
flt_http_data(struct stream *s, struct http_msg *msg)
{
	struct filter *filter;
	unsigned int   buf_i;
	int            delta = 0, ret = 0;

	/* Save buffer state */
	buf_i = msg->chn->buf->i;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;

		/* If the HTTP parser is ahead, we update the next offset of the
		 * current filter. This happens for chunked messages, at the
		 * beginning of a new chunk. */
		nxt = &FLT_NXT(filter, msg->chn);
		if (msg->next > *nxt)
			*nxt = msg->next;

		if (FLT_OPS(filter)->http_data) {
			unsigned int i = msg->chn->buf->i;

			ret = FLT_OPS(filter)->http_data(s, filter, msg);
			if (ret < 0)
				break;
			delta += (int)(msg->chn->buf->i - i);

			/* Update the next offset of the current filter */
			*nxt += ret;

			/* And set this value as the bound for the next
			 * filter. It will not be able to parse more data than
			 * this one. */
			msg->chn->buf->i = *nxt;
		}
		else {
			/* Consume all available data and update the next offset
			 * of the current filter. buf->i is untouched here. */
			ret = MIN(msg->chunk_len + msg->next, msg->chn->buf->i) - *nxt;
			*nxt += ret;
		}
	}

	/* Restore the original buffer state */
	msg->chn->buf->i = buf_i + delta;

	return ret;
}

/*
 * Calls the 'http_chunk_trailers' callback for all "data" filters attached to
 * a stream. This function is called for chunked messages only when a part of
 * the trailers was parsed in the AN_REQ_HTTP_XFER_BODY and
 * AN_RES_HTTP_XFER_BODY analyzers. Filters can know how much data was parsed
 * by the HTTP parsing until the last call with the msg->sol value. Returns a
 * negative value if an error occurs, any other value otherwise.
 */
int
flt_http_chunk_trailers(struct stream *s, struct http_msg *msg)
{
	struct filter *filter;
	int            ret = 1;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;

		/* Be sure to set the next offset of the filter at the right
		 * place. This is really useful when the first part of the
		 * trailers was parsed. */
		nxt = &FLT_NXT(filter, msg->chn);
		*nxt = msg->next;

		if (FLT_OPS(filter)->http_chunk_trailers) {
			ret = FLT_OPS(filter)->http_chunk_trailers(s, filter, msg);
			if (ret < 0)
				break;
		}
		/* Update the next offset of the current filter. Here all data
		 * are always consumed. */
		*nxt += msg->sol;
	}
	return ret;
}

/*
 * Calls the 'http_end' callback for all filters attached to a stream. All
 * filters are called here, but only if there is at least one "data" filter.
 * This function is called when all data were parsed and forwarded. The
 * 'http_end' callback is resumable, so this function returns a negative value
 * if an error occurs, 0 if it needs to wait for some reason, any other value
 * otherwise.
 */
int
flt_http_end(struct stream *s, struct http_msg *msg)
{
	int ret = 1;

	RESUME_FILTER_LOOP(s, msg->chn) {
		if (FLT_OPS(filter)->http_end) {
			ret = FLT_OPS(filter)->http_end(s, filter, msg);
			if (ret <= 0)
				BREAK_EXECUTION(s, msg->chn, end);
		}
	} RESUME_FILTER_END;
 end:
	return ret;
}

/*
 * Calls 'http_reset' callback for all filters attached to a stream. This
 * happens when a 100-continue response is received.
 */
void
flt_http_reset(struct stream *s, struct http_msg *msg)
{
	struct filter *filter;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (FLT_OPS(filter)->http_reset)
			FLT_OPS(filter)->http_reset(s, filter, msg);
	}
}

/*
 * Calls 'http_reply' callback for all filters attached to a stream when
 * HAProxy decides to stop the HTTP message processing.
 */
void
flt_http_reply(struct stream *s, short status, const struct chunk *msg)
{
	struct filter *filter;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (FLT_OPS(filter)->http_reply)
			FLT_OPS(filter)->http_reply(s, filter, status, msg);
	}
}

/*
 * Calls the 'http_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded in the
 * AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY analyzers. It takes care of
 * updating the forward offset of filters and adjusts "forwardable" data so
 * that a filter cannot forward more data than its predecessors. A filter can
 * choose not to forward all parsed data. Returns a negative value if an error
 * occurs, else the number of forwarded bytes.
 */
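/*
 * Illustrative walk-through (not taken from the code): with <len> = 100 and
 * two data filters F1 then F2, both starting with FLT_FWD() = 0, if F1
 * forwards only 60 bytes then F2 is offered at most 60 and, if it forwards
 * them all, the function returns 60. Every data filter's FLT_NXT()/FLT_FWD()
 * offsets are then rewound by 60, so the 40 bytes held back by F1 will be
 * offered again on a later call.
 */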
int
flt_http_forward_data(struct stream *s, struct http_msg *msg, unsigned int len)
{
	struct filter *filter;
	int            ret = len;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt, *fwd;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;

		/* If the HTTP parser is ahead, we update the next offset of the
		 * current filter. This happens for chunked messages, when the
		 * chunk envelope is parsed. */
		nxt = &FLT_NXT(filter, msg->chn);
		fwd = &FLT_FWD(filter, msg->chn);
		if (msg->next > *nxt)
			*nxt = msg->next;

		if (FLT_OPS(filter)->http_forward_data) {
			/* Remove bytes that the current filter considered as
			 * forwarded */
			ret = FLT_OPS(filter)->http_forward_data(s, filter, msg, ret - *fwd);
			if (ret < 0)
				goto end;
		}

		/* Adjust bytes that the current filter considers as
		 * forwarded */
		*fwd += ret;

		/* And set this value as the bound for the next filter. It will
		 * not be able to forward more data than the current one. */
		ret = *fwd;
	}

	if (!ret)
		goto end;

	/* Finally, adjust filters offsets by removing data that HAProxy will
	 * forward. */
	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;
		FLT_NXT(filter, msg->chn) -= ret;
		FLT_FWD(filter, msg->chn) -= ret;
	}
 end:
	return ret;
}

/*
 * Calls 'channel_start_analyze' callback for all filters attached to a
 * stream. This function is called when we start to analyze a request or a
 * response. For frontend filters, it is called before all other analyzers. For
 * backend ones, it is called before all backend analyzers. The
 * 'channel_start_analyze' callback is resumable, so this function returns 0 if
 * an error occurs or if it needs to wait, any other value otherwise.
 */
int
flt_start_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	/* If this function is called, this means there is at least one filter,
	 * so we do not need to check the filter list's emptiness. */

	/* Set flag on channel to tell that the channel is filtered */
	chn->flags |= CF_FLT_ANALYZE;

	RESUME_FILTER_LOOP(s, chn) {
		if (!(chn->flags & CF_ISRESP)) {
			if (an_bit == AN_REQ_FLT_START_BE &&
			    !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
				continue;
		}
		else {
			if (an_bit == AN_RES_FLT_START_BE &&
			    !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
				continue;
		}

		FLT_NXT(filter, chn) = 0;
		FLT_FWD(filter, chn) = 0;

		if (FLT_OPS(filter)->channel_start_analyze) {
			ret = FLT_OPS(filter)->channel_start_analyze(s, filter, chn);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, end);
		}
	} RESUME_FILTER_END;

 end:
	return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Calls the 'channel_pre_analyze' callback for all filters attached to a
 * stream. This function is called BEFORE each analyzer attached to a channel,
 * except analyzers responsible for data sending. The 'channel_pre_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if
 * it needs to wait, any other value otherwise.
 *
 * Note this function can be called many times for the same analyzer. In fact,
 * it is called until the analyzer finishes its processing.
 */
int
flt_pre_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	RESUME_FILTER_LOOP(s, chn) {
		if (FLT_OPS(filter)->channel_pre_analyze && (filter->pre_analyzers & an_bit)) {
			ret = FLT_OPS(filter)->channel_pre_analyze(s, filter, chn, an_bit);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, check_result);
		}
	} RESUME_FILTER_END;

 check_result:
	return handle_analyzer_result(s, chn, 0, ret);
}

/*
 * Calls the 'channel_post_analyze' callback for all filters attached to a
 * stream. This function is called AFTER each analyzer attached to a channel,
 * except analyzers responsible for data sending. The 'channel_post_analyze'
 * callback is NOT resumable, so this function returns 0 if an error occurs,
 * any other value otherwise.
 *
 * Here, AFTER means when the analyzer finishes its processing.
 */
int
flt_post_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	struct filter *filter;
	int            ret = 1;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (FLT_OPS(filter)->channel_post_analyze && (filter->post_analyzers & an_bit)) {
			ret = FLT_OPS(filter)->channel_post_analyze(s, filter, chn, an_bit);
			if (ret < 0)
				break;
		}
	}
	return handle_analyzer_result(s, chn, 0, ret);
}

/*
 * This function is the AN_REQ/RES_FLT_HTTP_HDRS analyzer, used to filter HTTP
 * headers of a request or a response. Returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_analyze_http_headers(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	struct filter   *filter;
	struct http_msg *msg;
	int              ret = 1;

	msg = ((chn->flags & CF_ISRESP) ? &s->txn->rsp : &s->txn->req);
	RESUME_FILTER_LOOP(s, chn) {
		if (FLT_OPS(filter)->http_headers) {
			ret = FLT_OPS(filter)->http_headers(s, filter, msg);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, check_result);
		}
	} RESUME_FILTER_END;

	/* We increase the next offset of all "data" filters after all
	 * processing on headers because any filter can alter them. So the
	 * definitive size of headers (msg->sov) is only known when all filters
	 * have been called. */
	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		/* Handle "data" filters only */
		if (!IS_DATA_FILTER(filter, chn))
			continue;
		FLT_NXT(filter, chn) = msg->sov;
	}

 check_result:
	return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Calls 'channel_end_analyze' callback for all filters attached to a
 * stream. This function is called when we stop analyzing a request or a
 * response. It is called after all other analyzers. The 'channel_end_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if
 * it needs to wait, any other value otherwise.
 */
int
flt_end_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	/* Check if all filters attached on the stream have finished their
	 * processing on this channel. */
	if (!(chn->flags & CF_FLT_ANALYZE))
		goto sync;

	RESUME_FILTER_LOOP(s, chn) {
		FLT_NXT(filter, chn) = 0;
		FLT_FWD(filter, chn) = 0;
		unregister_data_filter(s, chn, filter);

		if (FLT_OPS(filter)->channel_end_analyze) {
			ret = FLT_OPS(filter)->channel_end_analyze(s, filter, chn);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, end);
		}
	} RESUME_FILTER_END;

 end:
	/* We don't remove this analyzer yet because we need to synchronize both
	 * channels. So here, we just remove the CF_FLT_ANALYZE flag. */
	ret = handle_analyzer_result(s, chn, 0, ret);
	if (ret) {
		chn->flags &= ~CF_FLT_ANALYZE;

		/* Pretend there is an activity on both channels. The flag on
		 * the current one will be automatically removed, so only the
		 * other one will remain. This is a way to be sure that the
		 * 'channel_end_analyze' callback will have a chance to be
		 * called at least once for the other side to finish the
		 * current processing. Of course, it is the filter's
		 * responsibility to wake up the stream if it chooses to loop
		 * on this callback. */
		s->req.flags |= CF_WAKE_ONCE;
		s->res.flags |= CF_WAKE_ONCE;
	}

 sync:
	/* Now we can check if filters have finished their work on both
	 * channels */
	if (!(s->req.flags & CF_FLT_ANALYZE) && !(s->res.flags & CF_FLT_ANALYZE)) {
		/* Sync channels by removing this analyzer for both channels */
		s->req.analysers &= ~AN_REQ_FLT_END;
		s->res.analysers &= ~AN_RES_FLT_END;

		/* Clean up the HTTP transaction if needed */
		if (s->txn && (s->txn->flags & TX_WAIT_CLEANUP))
			http_end_txn_clean_session(s);

		/* Remove backend filters from the list */
		flt_stream_release(s, 1);
	}

	return ret;
}


/*
 * Calls the 'tcp_data' callback for all "data" filters attached to a
 * stream. This function is called when incoming data are available. It takes
 * care of updating the next offset of filters and adjusts the available data
 * so that a filter cannot parse more data than its predecessors. A filter can
 * choose not to consume all available data. Returns -1 if an error occurs, the
 * number of consumed bytes otherwise.
 */
static int
flt_data(struct stream *s, struct channel *chn)
{
	struct filter *filter;
	unsigned int   buf_i;
	int            delta = 0, ret = 0;

	/* Save buffer state */
	buf_i = chn->buf->i;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, chn))
			continue;

		nxt = &FLT_NXT(filter, chn);
		if (FLT_OPS(filter)->tcp_data) {
			unsigned int i = chn->buf->i;

			ret = FLT_OPS(filter)->tcp_data(s, filter, chn);
			if (ret < 0)
				break;
			delta += (int)(chn->buf->i - i);

			/* Increase next offset of the current filter */
			*nxt += ret;

			/* And set this value as the bound for the next
			 * filter. It will not be able to parse more data than
			 * the current one. */
			chn->buf->i = *nxt;
		}
		else {
			/* Consume all available data */
			*nxt = chn->buf->i;
		}

		/* Update <ret> value to be sure to have the last one when we
		 * exit from the loop. This value will be used to know how much
		 * data are "forwardable" */
		ret = *nxt;
	}

	/* Restore the original buffer state */
	chn->buf->i = buf_i + delta;

	return ret;
}

/*
 * Calls the 'tcp_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded. It takes
 * care of updating the forward offset of filters and adjusts "forwardable"
 * data so that a filter cannot forward more data than its predecessors. A
 * filter can choose not to forward all parsed data. Returns a negative value
 * if an error occurs, else the number of forwarded bytes.
 */
static int
flt_forward_data(struct stream *s, struct channel *chn, unsigned int len)
{
	struct filter *filter;
	int            ret = len;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *fwd;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, chn))
			continue;

		fwd = &FLT_FWD(filter, chn);
		if (FLT_OPS(filter)->tcp_forward_data) {
			/* Remove bytes that the current filter considered as
			 * forwarded */
			ret = FLT_OPS(filter)->tcp_forward_data(s, filter, chn, ret - *fwd);
			if (ret < 0)
				goto end;
		}

		/* Adjust bytes that the current filter considers as
		 * forwarded */
		*fwd += ret;

		/* And set this value as the bound for the next filter. It will
		 * not be able to forward more data than the current one. */
		ret = *fwd;
	}

	if (!ret)
		goto end;

	/* Finally, adjust filters offsets by removing data that HAProxy will
	 * forward. */
	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (!IS_DATA_FILTER(filter, chn))
			continue;
		FLT_NXT(filter, chn) -= ret;
		FLT_FWD(filter, chn) -= ret;
	}

 end:
	return ret;
}

/*
 * Called when TCP data must be filtered on a channel. This function is the
 * AN_REQ/RES_FLT_XFER_DATA analyzer. When called, it is responsible for
 * forwarding data when the proxy is not in http mode. Behind the scene, it
 * calls consecutively 'tcp_data' and 'tcp_forward_data' callbacks for all
 * "data" filters attached to a stream. Returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_xfer_data(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	/* If there are no "data" filters, we do nothing */
	if (!HAS_DATA_FILTERS(s, chn))
		goto end;

	/* Be sure that the output is still opened. Else we stop the data
	 * filtering. */
	if ((chn->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
	    ((chn->flags & CF_SHUTW) && (chn->to_forward || chn->buf->o)))
		goto end;

	/* Let all "data" filters parse incoming data */
	ret = flt_data(s, chn);
	if (ret < 0)
		goto end;

	/* And forward them */
	ret = flt_forward_data(s, chn, ret);
	if (ret < 0)
		goto end;

	/* Consume data that all filters consider as forwarded. */
	b_adv(chn->buf, ret);

	/* Stop waiting for data if the input is closed and no data is pending,
	 * or if the output is closed. */
	if ((chn->flags & CF_SHUTW) ||
	    ((chn->flags & CF_SHUTR) && !buffer_pending(chn->buf))) {
		ret = 1;
		goto end;
	}

	/* Wait for data */
	return 0;
 end:
	/* Terminate the data filtering. If <ret> is negative, an error was
	 * encountered during the filtering. */
	return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Handles result of filter's analyzers. It returns 0 if an error occurs or if
 * it needs to wait, any other value otherwise.
 */
static int
handle_analyzer_result(struct stream *s, struct channel *chn,
		       unsigned int an_bit, int ret)
{
	int finst;

	if (ret < 0)
		goto return_bad_req;
	else if (!ret)
		goto wait;

	/* End of job, return OK */
	if (an_bit) {
		chn->analysers  &= ~an_bit;
		chn->analyse_exp = TICK_ETERNITY;
	}
	return 1;

 return_bad_req:
	/* An error occurs */
	channel_abort(&s->req);
	channel_abort(&s->res);

	if (!(chn->flags & CF_ISRESP)) {
		s->req.analysers &= AN_REQ_FLT_END;
		finst = SF_FINST_R;
		/* FIXME: incr counters */
	}
	else {
		s->res.analysers &= AN_RES_FLT_END;
		finst = SF_FINST_H;
		/* FIXME: incr counters */
	}

	if (s->txn) {
		/* Do not do that when we are waiting for the next request */
		if (s->txn->status)
			http_reply_and_close(s, s->txn->status, NULL);
		else {
			s->txn->status = 400;
			http_reply_and_close(s, 400, http_error_message(s));
		}
	}

	if (!(s->flags & SF_ERR_MASK))
		s->flags |= SF_ERR_PRXCOND;
	if (!(s->flags & SF_FINST_MASK))
		s->flags |= finst;
	return 0;

 wait:
	if (!(chn->flags & CF_ISRESP))
		channel_dont_connect(chn);
	return 0;
}

/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted, doing so helps
 * all code contributors.
 * Optional keywords are also declared with a NULL ->parse() function so that
 * the config parser can report an appropriate error when a known keyword was
 * not enabled. */
static struct cfg_kw_list cfg_kws = {ILH, {
		{ CFG_LISTEN, "filter", parse_filter },
		{ 0, NULL, NULL },
	}
};

__attribute__((constructor))
static void
__filters_init(void)
{
	pool2_filter = create_pool("filter", sizeof(struct filter), MEM_F_SHARED);
	cfg_register_keywords(&cfg_kws);
	hap_register_post_check(flt_init_all);
	hap_register_per_thread_init(flt_init_all_per_thread);
	hap_register_per_thread_deinit(flt_deinit_all_per_thread);
}

__attribute__((destructor))
static void
__filters_deinit(void)
{
	pool_destroy2(pool2_filter);
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */