/*
 * Stream filters related variables and functions.
 *
 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/buffer.h>
#include <common/debug.h>
#include <common/cfgparse.h>
#include <common/compat.h>
#include <common/config.h>
#include <common/errors.h>
#include <common/namespace.h>
#include <common/standard.h>

#include <types/filters.h>
#include <types/proto_http.h>

#include <proto/compression.h>
#include <proto/filters.h>
#include <proto/flt_http_comp.h>
#include <proto/proto_http.h>
#include <proto/stream.h>
#include <proto/stream_interface.h>

/* Pool used to allocate filters */
struct pool_head *pool2_filter = NULL;

static int handle_analyzer_result(struct stream *s, struct channel *chn, unsigned int an_bit, int ret);

/* - RESUME_FILTER_LOOP and RESUME_FILTER_END must always be used together.
 *   The first one begins a loop and the second one ends it.
 *
 * - BREAK_EXECUTION must be used to break the loop and set the filter from
 *   which to resume the next time.
 *
 *  Here is an example:
 *
 *    RESUME_FILTER_LOOP(stream, channel) {
 *        ...
 *        if (cond)
 *             BREAK_EXECUTION(stream, channel, label);
 *        ...
 *    } RESUME_FILTER_END;
 *    ...
 *     label:
 *    ...
 *
 */
#define RESUME_FILTER_LOOP(strm, chn)					\
	do {								\
		struct filter *filter;					\
									\
		if (strm_flt(strm)->current[CHN_IDX(chn)]) {		\
			filter = strm_flt(strm)->current[CHN_IDX(chn)]; \
			strm_flt(strm)->current[CHN_IDX(chn)] = NULL;	\
			goto resume_execution;				\
		}							\
									\
		list_for_each_entry(filter, &strm_flt(s)->filters, list) { \
		resume_execution:

#define RESUME_FILTER_END					\
		}						\
	} while(0)

#define BREAK_EXECUTION(strm, chn, label)			\
	do {							\
		strm_flt(strm)->current[CHN_IDX(chn)] = filter;	\
		goto label;					\
	} while (0)


/* List head of all known filter keywords */
static struct flt_kw_list flt_keywords = {
	.list = LIST_HEAD_INIT(flt_keywords.list)
};

/*
 * Registers the filter keyword list <kwl> as a list of valid keywords for next
 * parsing sessions.
 */
void
flt_register_keywords(struct flt_kw_list *kwl)
{
	LIST_ADDQ(&flt_keywords.list, &kwl->list);
}

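/*
 * Illustrative registration sketch only: a filter module typically declares
 * its keywords in a static flt_kw_list and registers them from a constructor.
 * The "trace" keyword, its parse function and the exact field layout of
 * struct flt_kw_list / struct flt_kw shown below are assumptions; see
 * types/filters.h for the real definitions.
 *
 *   static struct flt_kw_list flt_kws = { "TRACE", { }, {
 *           { "trace", parse_trace_flt },
 *           { NULL, NULL },
 *       }
 *   };
 *
 *   __attribute__((constructor))
 *   static void __flt_trace_init(void)
 *   {
 *           flt_register_keywords(&flt_kws);
 *   }
 */
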
/*
 * Returns a pointer to the filter keyword <kw>, or NULL if not found. If the
 * keyword is found with a NULL ->parse() function, then an attempt is made to
 * find one with a valid ->parse() function. This way it is possible to declare
 * platform-dependent, known keywords as NULL, then only declare them as valid
 * if some options are met. Note that if the requested keyword contains an
 * opening parenthesis, everything from this point is ignored.
 */
struct flt_kw *
flt_find_kw(const char *kw)
{
	int index;
	const char *kwend;
	struct flt_kw_list *kwl;
	struct flt_kw *ret = NULL;

	kwend = strchr(kw, '(');
	if (!kwend)
		kwend = kw + strlen(kw);

	list_for_each_entry(kwl, &flt_keywords.list, list) {
		for (index = 0; kwl->kw[index].kw != NULL; index++) {
			if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
			    kwl->kw[index].kw[kwend-kw] == 0) {
				if (kwl->kw[index].parse)
					return &kwl->kw[index]; /* found it! */
				else
					ret = &kwl->kw[index];  /* may be OK */
			}
		}
	}
	return ret;
}

/*
 * Dumps all registered "filter" keywords to the <out> string pointer. The
 * unsupported keywords are only dumped if their supported form was not found.
 */
void
flt_dump_kws(char **out)
{
	struct flt_kw_list *kwl;
	int index;

	*out = NULL;
	list_for_each_entry(kwl, &flt_keywords.list, list) {
		for (index = 0; kwl->kw[index].kw != NULL; index++) {
			if (kwl->kw[index].parse ||
			    flt_find_kw(kwl->kw[index].kw) == &kwl->kw[index]) {
				memprintf(out, "%s[%4s] %s%s\n", *out ? *out : "",
				          kwl->scope,
				          kwl->kw[index].kw,
				          kwl->kw[index].parse ? "" : " (not supported)");
			}
		}
	}
}

/*
 * Parses the "filter" keyword. All keywords must be handled by filters
 * themselves.
 */
static int
parse_filter(char **args, int section_type, struct proxy *curpx,
	     struct proxy *defpx, const char *file, int line, char **err)
{
	struct filter *filter = NULL;

	/* Filter cannot be defined on a default proxy */
	if (curpx == defpx) {
		memprintf(err, "parsing [%s:%d] : %s is not allowed in a 'default' section.",
			  file, line, args[0]);
		return -1;
	}
	if (!strcmp(args[0], "filter")) {
		struct flt_kw *kw;
		int cur_arg;

		if (!*args[1]) {
			memprintf(err,
				  "parsing [%s:%d] : missing argument for '%s' in %s '%s'.",
				  file, line, args[0], proxy_type_str(curpx), curpx->id);
			goto error;
		}
		filter = pool_alloc2(pool2_filter);
		if (!filter) {
			memprintf(err, "'%s' : out of memory", args[0]);
			goto error;
		}
		memset(filter, 0, sizeof(*filter));

		cur_arg = 1;
		kw = flt_find_kw(args[cur_arg]);
		if (kw) {
			if (!kw->parse) {
				memprintf(err, "parsing [%s:%d] : '%s' : "
					  "'%s' option is not implemented in this version (check build options).",
					  file, line, args[0], args[cur_arg]);
				goto error;
			}
			if (kw->parse(args, &cur_arg, curpx, filter, err) != 0) {
				if (err && *err)
					memprintf(err, "'%s' : '%s'",
						  args[0], *err);
				else
					memprintf(err, "'%s' : error encountered while processing '%s'",
						  args[0], args[cur_arg]);
				goto error;
			}
		}
		else {
			flt_dump_kws(err);
			indent_msg(err, 4);
			memprintf(err, "'%s' : unknown keyword '%s'.%s%s",
			          args[0], args[cur_arg],
			          err && *err ? " Registered keywords :" : "", err && *err ? *err : "");
			goto error;
		}
		if (*args[cur_arg]) {
			memprintf(err, "'%s %s' : unknown keyword '%s'.",
			          args[0], args[1], args[cur_arg]);
			goto error;
		}

		LIST_ADDQ(&curpx->filters, &filter->list);
	}
	return 0;

 error:
	if (filter)
		pool_free2(pool2_filter, filter);
	return -1;
}

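/*
 * For reference, a hedged example of the configuration form accepted by the
 * parser above. Which keywords actually exist depends on the filters compiled
 * in; "compression" is shown purely as an illustration since flt_http_comp is
 * referenced in this file.
 *
 *   frontend www
 *       bind :8000
 *       filter <name> [param*]     # e.g. "filter compression"
 *       default_backend servers
 */
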
/*
 * Calls the 'init' callback for all filters attached to a proxy. This happens
 * after the configuration parsing, so filters can complete their
 * configuration. Returns (ERR_ALERT|ERR_FATAL) if an error occurs, 0
 * otherwise.
 */
int
flt_init(struct proxy *proxy)
{
	struct filter *filter;

	list_for_each_entry(filter, &proxy->filters, list) {
		if (filter->ops->init && filter->ops->init(proxy, filter) < 0)
			return ERR_ALERT|ERR_FATAL;
	}
	return 0;
}

/*
 * Calls 'check' callback for all filters attached to a proxy. This happens
 * after the configuration parsing but before filters initialization. Returns
 * the number of encountered errors.
 */
int
flt_check(struct proxy *proxy)
{
	struct filter *filter;
	int err = 0;

	list_for_each_entry(filter, &proxy->filters, list) {
		if (filter->ops->check)
			err += filter->ops->check(proxy, filter);
	}
	err += check_legacy_http_comp_flt(proxy);
	return err;
}

/*
 * Calls the 'deinit' callback for all filters attached to a proxy. This
 * happens when HAProxy is stopped.
 */
void
flt_deinit(struct proxy *proxy)
{
	struct filter *filter, *back;

	list_for_each_entry_safe(filter, back, &proxy->filters, list) {
		if (filter->ops->deinit)
			filter->ops->deinit(proxy, filter);
		LIST_DEL(&filter->list);
		pool_free2(pool2_filter, filter);
	}
}

/* Attaches a filter to a stream. Returns -1 if an error occurs, 0 otherwise. */
static int
flt_stream_add_filter(struct stream *s, struct filter *filter, unsigned int flags)
{
	struct filter *f = pool_alloc2(pool2_filter);
	if (!f) /* not enough memory */
		return -1;
	memset(f, 0, sizeof(*f));
	f->id    = filter->id;
	f->ops   = filter->ops;
	f->conf  = filter->conf;
	f->flags |= flags;
	LIST_ADDQ(&strm_flt(s)->filters, &f->list);
	strm_flt(s)->flags |= STRM_FLT_FL_HAS_FILTERS;
	return 0;
}

/*
 * Called when a stream is created. It attaches all frontend filters to the
 * stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_init(struct stream *s)
{
	struct filter *filter;

	memset(strm_flt(s), 0, sizeof(*strm_flt(s)));
	LIST_INIT(&strm_flt(s)->filters);
	list_for_each_entry(filter, &strm_fe(s)->filters, list) {
		if (flt_stream_add_filter(s, filter, 0) < 0)
			return -1;
	}
	return 0;
}

/*
 * Called when a stream is closed or when analysis ends (for an HTTP stream,
 * this happens after each request/response exchange). When analysis ends,
 * backend filters are removed. When the stream is closed, all filters attached
 * to the stream are removed.
 */
void
flt_stream_release(struct stream *s, int only_backend)
{
	struct filter *filter, *back;

	list_for_each_entry_safe(filter, back, &strm_flt(s)->filters, list) {
		if (!only_backend || (filter->flags & FLT_FL_IS_BACKEND_FILTER)) {
			LIST_DEL(&filter->list);
			pool_free2(pool2_filter, filter);
		}
	}
	if (LIST_ISEMPTY(&strm_flt(s)->filters))
		strm_flt(s)->flags &= ~STRM_FLT_FL_HAS_FILTERS;
}

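/*
 * Lifecycle recap, derived from the callbacks defined in this file (purely
 * informational, no additional code): per-stream filter instances are created
 * by flt_stream_init(), notified through flt_stream_start(), possibly extended
 * with backend filters by flt_set_stream_backend(), then notified through
 * flt_stream_stop() and finally destroyed by flt_stream_release().
 */
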
/*
 * Calls 'stream_start' for all filters attached to a stream. This happens when
 * the stream is created, just after calling the flt_stream_init()
 * function. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_start(struct stream *s)
{
	struct filter *filter;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (filter->ops->stream_start && filter->ops->stream_start(s, filter) < 0)
			return -1;
	}
	return 0;
}

/*
 * Calls 'stream_stop' for all filters attached to a stream. This happens when
 * the stream is stopped, just before calling the flt_stream_release() function.
 */
void
flt_stream_stop(struct stream *s)
{
	struct filter *filter;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (filter->ops->stream_stop)
			filter->ops->stream_stop(s, filter);
	}
}

/*
 * Called when a backend is set for a stream. If the frontend and the backend
 * are the same, this function does nothing. Else it attaches all backend
 * filters to the stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_set_stream_backend(struct stream *s, struct proxy *be)
{
	struct filter *filter;

	if (strm_fe(s) == be)
		return 0;

	list_for_each_entry(filter, &be->filters, list) {
		if (flt_stream_add_filter(s, filter, FLT_FL_IS_BACKEND_FILTER) < 0)
			return -1;
	}
	return 0;
}

/*
 * Calls 'http_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available (excluding chunks
 * envelope for chunked messages) in the AN_REQ_HTTP_XFER_BODY and
 * AN_RES_HTTP_XFER_BODY analyzers. It takes care to update the next offset of
 * filters and adjusts available data to be sure that a filter cannot parse more
 * data than its predecessors. A filter can choose to not consume all available
 * data. Returns -1 if an error occurs, the number of consumed bytes otherwise.
 */
int
flt_http_data(struct stream *s, struct http_msg *msg)
{
	struct filter *filter;
	struct buffer *buf = msg->chn->buf;
	unsigned int buf_i;
	int ret = 0;

	/* Save buffer state */
	buf_i = buf->i;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;

		/* If the HTTP parser is ahead, we update the next offset of the
		 * current filter. This happens for chunked messages, at the
		 * beginning of a new chunk. */
		nxt = &FLT_NXT(filter, msg->chn);
		if (msg->next > *nxt)
			*nxt = msg->next;

		if (filter->ops->http_data) {
			ret = filter->ops->http_data(s, filter, msg);
			if (ret < 0)
				break;

			/* Update the next offset of the current filter */
			*nxt += ret;

			/* And set this value as the bound for the next
			 * filter. It will not be able to parse more data than
			 * this one. */
			buf->i = *nxt;
		}
		else {
			/* Consume all available data and update the next offset
			 * of the current filter. buf->i is untouched here. */
			ret = MIN(msg->chunk_len + msg->next, buf->i) - *nxt;
			*nxt += ret;
		}
	}

	/* Restore the original buffer state */
	buf->i = buf_i;

	return ret;
}

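/*
 * Offset bookkeeping sketch for flt_http_data() above (illustrative numbers
 * only): with two "data" filters F1 and F2, FLT_NXT(F1) = FLT_NXT(F2) = 0 and
 * buf->i = 100 parsable bytes, if F1->http_data() consumes 60 bytes then
 * FLT_NXT(F1) becomes 60 and buf->i is temporarily lowered to 60 before F2 is
 * called, so F2 can never parse further than F1. Once the loop ends, buf->i is
 * restored to 100 and the function returns what the last "data" filter
 * consumed during this call.
 */
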
/*
 * Calls 'http_chunk_trailers' callback for all "data" filters attached to a
 * stream. This function is called for chunked messages only when a part of the
 * trailers was parsed in the AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY
 * analyzers. Filters can know how much data was parsed by the HTTP parsing
 * until the last call with the msg->sol value. Returns a negative value if an
 * error occurs, any other value otherwise.
 */
int
flt_http_chunk_trailers(struct stream *s, struct http_msg *msg)
{
	struct filter *filter;
	int ret = 1;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;

		/* Be sure to set the next offset of the filter at the right
		 * place. This is really useful when the first part of the
		 * trailers was parsed. */
		nxt = &FLT_NXT(filter, msg->chn);
		*nxt = msg->next;

		if (filter->ops->http_chunk_trailers) {
			ret = filter->ops->http_chunk_trailers(s, filter, msg);
			if (ret < 0)
				break;
		}
		/* Update the next offset of the current filter. Here all data
		 * are always consumed. */
		*nxt += msg->sol;
	}
	return ret;
}

/*
 * Calls 'http_end' callback for all filters attached to a stream. All filters
 * are called here, but only if there is at least one "data" filter. This
 * function is called when all data were parsed and forwarded. 'http_end'
 * callback is resumable, so this function returns a negative value if an error
 * occurs, 0 if it needs to wait for some reason, any other value otherwise.
 */
int
flt_http_end(struct stream *s, struct http_msg *msg)
{
	int ret = 1;

	RESUME_FILTER_LOOP(s, msg->chn) {
		if (filter->ops->http_end) {
			ret = filter->ops->http_end(s, filter, msg);
			if (ret <= 0)
				BREAK_EXECUTION(s, msg->chn, end);
		}
	} RESUME_FILTER_END;
end:
	return ret;
}

/*
 * Calls 'http_reset' callback for all filters attached to a stream. This
 * happens when a 100-continue response is received.
 */
void
flt_http_reset(struct stream *s, struct http_msg *msg)
{
	struct filter *filter;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (filter->ops->http_reset)
			filter->ops->http_reset(s, filter, msg);
	}
}

/*
 * Calls 'http_reply' callback for all filters attached to a stream when
 * HAProxy decides to stop the HTTP message processing.
 */
void
flt_http_reply(struct stream *s, short status, const struct chunk *msg)
{
	struct filter *filter;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (filter->ops->http_reply)
			filter->ops->http_reply(s, filter, status, msg);
	}
}

/*
 * Calls 'http_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded in the
 * AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY analyzers. It takes care to
 * update the forward offset of filters and adjusts "forwardable" data to be
 * sure that a filter cannot forward more data than its predecessors. A filter
 * can choose to not forward all parsed data. Returns a negative value if an
 * error occurs, else the number of forwarded bytes.
 */
int
flt_http_forward_data(struct stream *s, struct http_msg *msg, unsigned int len)
{
	struct filter *filter;
	int ret = len;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt, *fwd;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;

		/* If the HTTP parser is ahead, we update the next offset of the
		 * current filter. This happens for chunked messages, when the
		 * chunk envelope is parsed. */
		nxt = &FLT_NXT(filter, msg->chn);
		fwd = &FLT_FWD(filter, msg->chn);
		if (msg->next > *nxt)
			*nxt = msg->next;

		if (filter->ops->http_forward_data) {
			/* Remove bytes that the current filter considered as
			 * forwarded */
			ret = filter->ops->http_forward_data(s, filter, msg, ret - *fwd);
			if (ret < 0)
				goto end;
		}

		/* Adjust bytes that the current filter considers as
		 * forwarded */
		*fwd += ret;

		/* And set this value as the bound for the next filter. It will
		 * not be able to forward more data than the current one. */
		ret = *fwd;
	}

	if (!ret)
		goto end;

	/* Finally, adjust filters offsets by removing data that HAProxy will
	 * forward. */
	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;
		FLT_NXT(filter, msg->chn) -= ret;
		FLT_FWD(filter, msg->chn) -= ret;
	}
 end:
	return ret;
}

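/*
 * Forwarding sketch for flt_http_forward_data() above (illustrative numbers
 * only): if 80 bytes are forwardable and the first "data" filter only lets 50
 * of them pass, its FLT_FWD offset becomes 50 and the next filter is offered
 * at most 50 bytes. The amount finally forwarded by HAProxy is the one reached
 * by the last "data" filter, and every filter's next/forward offsets are then
 * shifted down by that amount since the corresponding bytes leave the buffer.
 */
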
/*
 * Calls 'channel_start_analyze' callback for all filters attached to a
 * stream. This function is called when we start to analyze a request or a
 * response. For frontend filters, it is called before all other analyzers. For
 * backend ones, it is called before all backend
 * analyzers. 'channel_start_analyze' callback is resumable, so this function
 * returns 0 if an error occurs or if it needs to wait, any other value
 * otherwise.
 */
int
flt_start_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	/* If this function is called, this means there is at least one filter,
	 * so we do not need to check the filter list's emptiness. */

	RESUME_FILTER_LOOP(s, chn) {
		if (an_bit == AN_FLT_START_BE && !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
			continue;

		FLT_NXT(filter, chn) = 0;
		FLT_FWD(filter, chn) = 0;

		if (filter->ops->channel_start_analyze) {
			ret = filter->ops->channel_start_analyze(s, filter, chn);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, end);
		}
	} RESUME_FILTER_END;

 end:
	return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Calls 'channel_analyze' callback for all filters attached to a stream. This
 * function is called before each analyzer attached to a channel, except
 * analyzers responsible for data sending. 'channel_analyze' callback is
 * resumable, so this function returns 0 if an error occurs or if it needs to
 * wait, any other value otherwise.
 */
int
flt_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	RESUME_FILTER_LOOP(s, chn) {
		if (filter->ops->channel_analyze) {
			ret = filter->ops->channel_analyze(s, filter, chn, an_bit);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, check_result);
		}
	} RESUME_FILTER_END;

 check_result:
	return handle_analyzer_result(s, chn, 0, ret);
}

/*
 * This function does the same as the previous one, but for the
 * AN_FLT_HTTP_HDRS analyzer. The difference is what is done when all filters
 * have been called. Returns 0 if an error occurs or if it needs to wait, any
 * other value otherwise.
 */
int
flt_analyze_http_headers(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	struct filter *filter;
	int ret = 1;

	RESUME_FILTER_LOOP(s, chn) {
		if (filter->ops->channel_analyze) {
			ret = filter->ops->channel_analyze(s, filter, chn, an_bit);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, check_result);
		}
	} RESUME_FILTER_END;

	/* We increase the next offset of all "data" filters after all
	 * processing on headers because any filter can alter them. So the
	 * definitive size of headers (msg->sov) is only known when all filters
	 * have been called. */
	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		/* Handle "data" filters only */
		if (!IS_DATA_FILTER(filter, chn))
			continue;

		FLT_NXT(filter, chn) = ((chn->flags & CF_ISRESP)
					? s->txn->rsp.sov : s->txn->req.sov);
	}

 check_result:
	return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Calls 'channel_end_analyze' callback for all filters attached to a
 * stream. This function is called when we stop to analyze a request or a
 * response. It is called after all other analyzers. 'channel_end_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_end_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	RESUME_FILTER_LOOP(s, chn) {
		FLT_NXT(filter, chn) = 0;
		FLT_FWD(filter, chn) = 0;
		unregister_data_filter(s, chn, filter);

		if (filter->ops->channel_end_analyze) {
			ret = filter->ops->channel_end_analyze(s, filter, chn);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, end);
		}
	} RESUME_FILTER_END;

end:
	ret = handle_analyzer_result(s, chn, an_bit, ret);

	/* Check if the 'channel_end_analyze' callback has been called for both
	 * the request and the response. */
	if (!(s->req.analysers & AN_FLT_END) && !(s->res.analysers & AN_FLT_END)) {
		/* When we are waiting for a new request, we must reset stream
		 * analyzers. The input of the request channel must not be
		 * closed, else it is useless to wait. */
		if (s->txn && (s->txn->flags & TX_WAIT_NEXT_RQ) && !channel_input_closed(&s->req)) {
			s->req.analysers = strm_li(s) ? strm_li(s)->analysers : 0;
			s->res.analysers = 0;
		}

		/* Remove backend filters from the list */
		flt_stream_release(s, 1);
	}
	else if (ret) {
		/* The analyzer ends only for one channel. So wake up the
		 * stream to be sure to process it for the other side as soon
		 * as possible. */
		task_wakeup(s->task, TASK_WOKEN_MSG);
	}
	return ret;
}


/*
 * Calls 'tcp_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available. It takes care to update
 * the next offset of filters and adjusts available data to be sure that a
 * filter cannot parse more data than its predecessors. A filter can choose to
 * not consume all available data. Returns -1 if an error occurs, the number of
 * consumed bytes otherwise.
 */
static int
flt_data(struct stream *s, struct channel *chn)
{
	struct filter *filter;
	struct buffer *buf = chn->buf;
	unsigned int buf_i;
	int ret = 0;

	/* Save buffer state */
	buf_i = buf->i;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, chn))
			continue;

		nxt = &FLT_NXT(filter, chn);
		if (filter->ops->tcp_data) {
			ret = filter->ops->tcp_data(s, filter, chn);
			if (ret < 0)
				break;

			/* Increase the next offset of the current filter */
			*nxt += ret;

			/* And set this value as the bound for the next
			 * filter. It will not be able to parse more data than
			 * the current one. */
			buf->i = *nxt;
		}
		else {
			/* Consume all available data */
			*nxt = buf->i;
		}

		/* Update <ret> value to be sure to have the last one when we
		 * exit from the loop. This value will be used to know how much
		 * data are "forwardable" */
		ret = *nxt;
	}

	/* Restore the original buffer state */
	chn->buf->i = buf_i;

	return ret;
}

/*
 * Calls 'tcp_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded. It takes
 * care to update the forward offset of filters and adjusts "forwardable" data
 * to be sure that a filter cannot forward more data than its predecessors. A
 * filter can choose to not forward all parsed data. Returns a negative value if
 * an error occurs, else the number of forwarded bytes.
 */
static int
flt_forward_data(struct stream *s, struct channel *chn, unsigned int len)
{
	struct filter *filter;
	int ret = len;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *fwd;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, chn))
			continue;

		fwd = &FLT_FWD(filter, chn);
		if (filter->ops->tcp_forward_data) {
			/* Remove bytes that the current filter considered as
			 * forwarded */
			ret = filter->ops->tcp_forward_data(s, filter, chn, ret - *fwd);
			if (ret < 0)
				goto end;
		}

		/* Adjust bytes that the current filter considers as
		 * forwarded */
		*fwd += ret;

		/* And set this value as the bound for the next filter. It will
		 * not be able to forward more data than the current one. */
		ret = *fwd;
	}

	if (!ret)
		goto end;

	/* Finally, adjust filters offsets by removing data that HAProxy will
	 * forward. */
	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (!IS_DATA_FILTER(filter, chn))
			continue;
		FLT_NXT(filter, chn) -= ret;
		FLT_FWD(filter, chn) -= ret;
	}

 end:
	return ret;
}

/*
 * Called when TCP data must be filtered on a channel. This function is the
 * AN_FLT_XFER_DATA analyzer. When called, it is responsible for forwarding
 * data when the proxy is not in http mode. Behind the scene, it calls
 * consecutively 'tcp_data' and 'tcp_forward_data' callbacks for all "data"
 * filters attached to a stream. Returns 0 if an error occurs or if it needs to
 * wait, any other value otherwise.
 */
int
flt_xfer_data(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	/* If there are no "data" filters, we do nothing */
	if (!HAS_DATA_FILTERS(s, chn))
		goto end;

	/* Be sure that the output is still opened. Else we stop the data
	 * filtering. */
	if ((chn->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
	    ((chn->flags & CF_SHUTW) && (chn->to_forward || chn->buf->o)))
		goto end;

	/* Let all "data" filters parse incoming data */
	ret = flt_data(s, chn);
	if (ret < 0)
		goto end;

	/* And forward them */
	ret = flt_forward_data(s, chn, ret);
	if (ret < 0)
		goto end;

	/* Consume data that all filters consider as forwarded. */
	b_adv(chn->buf, ret);

	/* Stop waiting for data if the input is closed and no data is pending
	 * or if the output is closed. */
	if ((chn->flags & CF_SHUTW) ||
	    ((chn->flags & CF_SHUTR) && !buffer_pending(chn->buf))) {
		ret = 1;
		goto end;
	}

	/* Wait for data */
	return 0;
 end:
	/* Terminate the data filtering. If <ret> is negative, an error was
	 * encountered during the filtering. */
	return handle_analyzer_result(s, chn, an_bit, ret);
}

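/*
 * Flow recap for flt_xfer_data() above (informational only): flt_data() lets
 * each "data" filter parse newly received bytes and returns how many bytes the
 * last filter made parsable, flt_forward_data() then lets the filters decide
 * how many of those bytes may actually leave, and b_adv() consumes that agreed
 * amount from the channel buffer.
 */
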
/*
 * Handles the result of filters' analyzers. It returns 0 if an error occurs or
 * if it needs to wait, any other value otherwise.
 */
static int
handle_analyzer_result(struct stream *s, struct channel *chn,
		       unsigned int an_bit, int ret)
{
	int finst;

	if (ret < 0)
		goto return_bad_req;
	else if (!ret)
		goto wait;

	/* End of job, return OK */
	if (an_bit) {
		chn->analysers  &= ~an_bit;
		chn->analyse_exp = TICK_ETERNITY;
	}
	return 1;

 return_bad_req:
	/* An error occurred */
	channel_abort(&s->req);
	channel_abort(&s->res);

	if (!(chn->flags & CF_ISRESP)) {
		s->req.analysers &= AN_FLT_END;
		finst = SF_FINST_R;
		/* FIXME: incr counters */
	}
	else {
		s->res.analysers &= AN_FLT_END;
		finst = SF_FINST_H;
		/* FIXME: incr counters */
	}

	if (s->txn) {
		/* Do not do that when we are waiting for the next request */
		if (s->txn->status)
			http_reply_and_close(s, s->txn->status, NULL);
		else {
			s->txn->status = 400;
			http_reply_and_close(s, 400, http_error_message(s, HTTP_ERR_400));
		}
	}

	if (!(s->flags & SF_ERR_MASK))
		s->flags |= SF_ERR_PRXCOND;
	if (!(s->flags & SF_FINST_MASK))
		s->flags |= finst;
	return 0;

 wait:
	if (!(chn->flags & CF_ISRESP))
		channel_dont_connect(chn);
	return 0;
}

/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted, doing so helps
 * all code contributors.
 * Optional keywords are also declared with a NULL ->parse() function so that
 * the config parser can report an appropriate error when a known keyword was
 * not enabled. */
static struct cfg_kw_list cfg_kws = {ILH, {
		{ CFG_LISTEN, "filter", parse_filter },
		{ 0, NULL, NULL },
	}
};

__attribute__((constructor))
static void
__filters_init(void)
{
	pool2_filter = create_pool("filter", sizeof(struct filter), MEM_F_SHARED);
	cfg_register_keywords(&cfg_kws);
}

__attribute__((destructor))
static void
__filters_deinit(void)
{
	pool_destroy2(pool2_filter);
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */