/*
 * Stream filters related variables and functions.
 *
 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/buffer.h>
#include <common/debug.h>
#include <common/cfgparse.h>
#include <common/compat.h>
#include <common/config.h>
#include <common/errors.h>
#include <common/namespace.h>
#include <common/standard.h>

#include <types/filters.h>
#include <types/proto_http.h>

#include <proto/compression.h>
#include <proto/filters.h>
#include <proto/proto_http.h>
#include <proto/stream.h>
#include <proto/stream_interface.h>

/* Pool used to allocate filters */
struct pool_head *pool2_filter = NULL;

static int handle_analyzer_result(struct stream *s, struct channel *chn, unsigned int an_bit, int ret);

/* - RESUME_FILTER_LOOP and RESUME_FILTER_END must always be used together.
 *   The first one begins a loop and the second one ends it.
 *
 * - BREAK_EXECUTION must be used to break the loop and set the filter from
 *   which to resume the next time.
 *
 *   Here is an example:
 *
 *    RESUME_FILTER_LOOP(stream, channel) {
 *        ...
 *        if (cond)
 *             BREAK_EXECUTION(stream, channel, label);
 *        ...
 *    } RESUME_FILTER_END;
 *    ...
 *     label:
 *    ...
 *
 */
#define RESUME_FILTER_LOOP(strm, chn)                                   \
        do {                                                            \
                struct filter *filter;                                  \
                                                                        \
                if ((strm)->strm_flt.current[CHN_IDX(chn)]) {           \
                        filter = (strm)->strm_flt.current[CHN_IDX(chn)]; \
                        (strm)->strm_flt.current[CHN_IDX(chn)] = NULL;  \
                        goto resume_execution;                          \
                }                                                       \
                                                                        \
                list_for_each_entry(filter, &(strm)->strm_flt.filters, list) { \
                resume_execution:

#define RESUME_FILTER_END                                               \
                }                                                       \
        } while(0)

#define BREAK_EXECUTION(strm, chn, label)                               \
        do {                                                            \
                (strm)->strm_flt.current[CHN_IDX(chn)] = filter;        \
                goto label;                                             \
        } while (0)


/* List head of all known filter keywords */
static struct flt_kw_list flt_keywords = {
        .list = LIST_HEAD_INIT(flt_keywords.list)
};

/*
 * Registers the filter keyword list <kwl> as a list of valid keywords for next
 * parsing sessions.
 */
void
flt_register_keywords(struct flt_kw_list *kwl)
{
        LIST_ADDQ(&flt_keywords.list, &kwl->list);
}

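/* Registration sketch: a filter module would typically declare its keywords
 * and register them from a constructor. Only the fields actually used in this
 * file (scope, kw, parse) are shown; the exact layout of struct flt_kw_list
 * lives in types/filters.h, and the scope, keyword and parser names below are
 * hypothetical:
 *
 *     static struct flt_kw_list my_flt_kws = {
 *             .scope = "MYFLT",
 *             .kw    = {
 *                     { .kw = "my-filter", .parse = parse_my_filter },
 *                     { .kw = NULL },
 *             }
 *     };
 *
 *     __attribute__((constructor))
 *     static void __my_filter_register(void)
 *     {
 *             flt_register_keywords(&my_flt_kws);
 *     }
 */
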
/*
 * Returns a pointer to the filter keyword <kw>, or NULL if not found. If the
 * keyword is found with a NULL ->parse() function, then an attempt is made to
 * find one with a valid ->parse() function. This way it is possible to declare
 * platform-dependent, known keywords as NULL, then only declare them as valid
 * if some options are met. Note that if the requested keyword contains an
 * opening parenthesis, everything from this point is ignored.
 */
struct flt_kw *
flt_find_kw(const char *kw)
{
        int index;
        const char *kwend;
        struct flt_kw_list *kwl;
        struct flt_kw *ret = NULL;

        kwend = strchr(kw, '(');
        if (!kwend)
                kwend = kw + strlen(kw);

        list_for_each_entry(kwl, &flt_keywords.list, list) {
                for (index = 0; kwl->kw[index].kw != NULL; index++) {
                        if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
                            kwl->kw[index].kw[kwend-kw] == 0) {
                                if (kwl->kw[index].parse)
                                        return &kwl->kw[index]; /* found it! */
                                else
                                        ret = &kwl->kw[index];  /* may be OK */
                        }
                }
        }
        return ret;
}

/*
 * Dumps all registered "filter" keywords to the <out> string pointer. The
 * unsupported keywords are only dumped if their supported form was not found.
 */
void
flt_dump_kws(char **out)
{
        struct flt_kw_list *kwl;
        int index;

        *out = NULL;
        list_for_each_entry(kwl, &flt_keywords.list, list) {
                for (index = 0; kwl->kw[index].kw != NULL; index++) {
                        if (kwl->kw[index].parse ||
                            flt_find_kw(kwl->kw[index].kw) == &kwl->kw[index]) {
                                memprintf(out, "%s[%4s] %s%s\n", *out ? *out : "",
                                          kwl->scope,
                                          kwl->kw[index].kw,
                                          kwl->kw[index].parse ? "" : " (not supported)");
                        }
                }
        }
}

/*
 * Parses the "filter" keyword. All keywords must be handled by filters
 * themselves.
 */
static int
parse_filter(char **args, int section_type, struct proxy *curpx,
             struct proxy *defpx, const char *file, int line, char **err)
{
        struct filter *filter = NULL;

        /* Filter cannot be defined on a default proxy */
        if (curpx == defpx) {
                memprintf(err, "parsing [%s:%d] : %s is not allowed in a 'default' section.",
                          file, line, args[0]);
                return -1;
        }
        if (!strcmp(args[0], "filter")) {
                struct flt_kw *kw;
                int cur_arg;

                if (!*args[1]) {
                        memprintf(err,
                                  "parsing [%s:%d] : missing argument for '%s' in %s '%s'.",
                                  file, line, args[0], proxy_type_str(curpx), curpx->id);
                        goto error;
                }
                filter = pool_alloc2(pool2_filter);
                if (!filter) {
                        memprintf(err, "'%s' : out of memory", args[0]);
                        goto error;
                }
                memset(filter, 0, sizeof(*filter));

                cur_arg = 1;
                kw = flt_find_kw(args[cur_arg]);
                if (kw) {
                        if (!kw->parse) {
                                memprintf(err, "parsing [%s:%d] : '%s' : "
                                          "'%s' option is not implemented in this version (check build options).",
                                          file, line, args[0], args[cur_arg]);
                                goto error;
                        }
                        if (kw->parse(args, &cur_arg, curpx, filter, err) != 0) {
                                if (err && *err)
                                        memprintf(err, "'%s' : '%s'",
                                                  args[0], *err);
                                else
                                        memprintf(err, "'%s' : error encountered while processing '%s'",
                                                  args[0], args[cur_arg]);
                                goto error;
                        }
                }
                else {
                        flt_dump_kws(err);
                        indent_msg(err, 4);
                        memprintf(err, "'%s' : unknown keyword '%s'.%s%s",
                                  args[0], args[cur_arg],
                                  err && *err ? " Registered keywords :" : "", err && *err ? *err : "");
                        goto error;
                }
                if (*args[cur_arg]) {
                        memprintf(err, "'%s %s' : unknown keyword '%s'.",
                                  args[0], args[1], args[cur_arg]);
                        goto error;
                }

                LIST_ADDQ(&curpx->filters, &filter->list);
        }
        return 0;

 error:
        if (filter)
                pool_free2(pool2_filter, filter);
        return -1;
}

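/* Configuration sketch: the "filter" keyword is expected in a listen, frontend
 * or backend section (CFG_LISTEN). The filter name below, "my-filter", is
 * hypothetical and must match a registered filter keyword; everything after it
 * is handed to that filter's ->parse() callback:
 *
 *     frontend www
 *         bind :8080
 *         filter my-filter arg1 arg2
 */
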
/*
 * Calls 'init' callback for all filters attached to a proxy. This happens after
 * the configuration parsing. Filters can finish filling their configuration.
 * Returns (ERR_ALERT|ERR_FATAL) if an error occurs, 0 otherwise.
 */
int
flt_init(struct proxy *proxy)
{
        struct filter *filter;

        list_for_each_entry(filter, &proxy->filters, list) {
                if (filter->ops->init && filter->ops->init(proxy, filter) < 0)
                        return ERR_ALERT|ERR_FATAL;
        }
        return 0;
}

/*
 * Calls 'check' callback for all filters attached to a proxy. This happens
 * after the configuration parsing but before the filters' initialization.
 * Returns the number of encountered errors.
 */
int
flt_check(struct proxy *proxy)
{
        struct filter *filter;
        int err = 0;

        list_for_each_entry(filter, &proxy->filters, list) {
                if (filter->ops->check)
                        err += filter->ops->check(proxy, filter);
        }
        return err;
}

/*
 * Calls 'deinit' callback for all filters attached to a proxy. This happens
 * when HAProxy is stopped.
 */
void
flt_deinit(struct proxy *proxy)
{
        struct filter *filter, *back;

        list_for_each_entry_safe(filter, back, &proxy->filters, list) {
                if (filter->ops->deinit)
                        filter->ops->deinit(proxy, filter);
                LIST_DEL(&filter->list);
                pool_free2(pool2_filter, filter);
        }
}

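/* The proxy-level callbacks above and the stream/HTTP/TCP callbacks below are
 * provided by each filter through its <ops> structure. A minimal declaration
 * sketch (assuming the ops structure is the one declared in types/filters.h;
 * the callback functions named here are hypothetical, and unset callbacks are
 * simply skipped by the flt_* wrappers in this file):
 *
 *     static struct flt_ops my_filter_ops = {
 *             .init         = my_filter_init,
 *             .deinit       = my_filter_deinit,
 *             .stream_start = my_filter_stream_start,
 *             .http_headers = my_filter_http_headers,
 *             .tcp_data     = my_filter_tcp_data,
 *     };
 */
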
/*
 * Calls 'stream_start' for all filters attached to a stream. This happens when
 * the stream is created, just after calling the flt_stream_init() function.
 * Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_start(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                if (filter->ops->stream_start && filter->ops->stream_start(s, filter) < 0)
                        return -1;
        }
        return 0;
}

/*
 * Calls 'stream_stop' for all filters attached to a stream. This happens when
 * the stream is stopped, just before calling the flt_stream_release() function.
 */
void
flt_stream_stop(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                if (filter->ops->stream_stop)
                        filter->ops->stream_stop(s, filter);
        }
}

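/*
 * Calls 'http_headers' callback for all filters attached to a stream once the
 * headers of an HTTP message have been parsed. Because filters may still alter
 * the headers at this point, the filters' "next" offsets are only set to the
 * definitive headers size (msg->sov) after all of them have been called. This
 * callback is resumable: a return value <= 0 interrupts the loop and execution
 * restarts from the same filter on the next call.
 */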
int
flt_http_headers(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;
        int ret = 1;

        if (LIST_ISEMPTY(&s->strm_flt.filters))
                goto end;

        RESUME_FILTER_LOOP(s, msg->chn) {
                if (filter->ops && filter->ops->http_headers) {
                        ret = filter->ops->http_headers(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, msg->chn, end);
                }
        } RESUME_FILTER_END;

        /* We increase FLT_NXT offset after all processing on headers because
         * any filter can alter them. So the definitive size of headers
         * (msg->sov) is only known when all filters have been called. */
        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                FLT_NXT(filter, msg->chn) = msg->sov;
        }
 end:
        return ret;
}

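/*
 * Calls 'http_start_chunk' callback for all filters attached to a stream when
 * the beginning of a new chunk is parsed in a chunk-encoded message. Each
 * filter's "next" offset is then increased by msg->sol, the amount of data
 * parsed by the HTTP parser for the chunk envelope. This callback is
 * resumable: a return value <= 0 interrupts the loop and execution restarts
 * from the same filter on the next call.
 */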
int
flt_http_start_chunk(struct stream *s, struct http_msg *msg)
{
        int ret = 1;

        if (LIST_ISEMPTY(&s->strm_flt.filters))
                goto end;

        RESUME_FILTER_LOOP(s, msg->chn) {
                if (filter->ops->http_start_chunk) {
                        ret = filter->ops->http_start_chunk(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, msg->chn, end);
                }
                FLT_NXT(filter, msg->chn) += msg->sol;
        } RESUME_FILTER_END;
 end:
        return ret;
}

/*
 * Calls 'http_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available (excluding the chunk
 * envelope for chunked messages) in the AN_REQ_HTTP_XFER_BODY and
 * AN_RES_HTTP_XFER_BODY analyzers. It takes care to update the next offset of
 * filters and adjusts available data to be sure that a filter cannot parse more
 * data than its predecessors. A filter can choose to not consume all available
 * data. Returns -1 if an error occurs, the number of consumed bytes otherwise.
 */
int
flt_http_data(struct stream *s, struct http_msg *msg)
{
        struct filter *filter = NULL;
        unsigned int buf_i;
        int ret = 0;

        /* No filter, consume all available data */
        if (LIST_ISEMPTY(&s->strm_flt.filters)) {
                ret = MIN(msg->chunk_len, msg->chn->buf->i - msg->next);
                goto end;
        }

        /* Save buffer state */
        buf_i = msg->chn->buf->i;
        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                if (filter->ops->http_data && !flt_want_forward_data(filter, msg->chn)) {
                        ret = filter->ops->http_data(s, filter, msg);
                        if (ret < 0)
                                break;
                }
                else {
                        /* msg->chunk_len is the remaining size of data to parse
                         * in the body (or in the current chunk for
                         * chunk-encoded messages) from the HTTP parser point of
                         * view (relatively to msg->next). To have it from the
                         * filter point of view, we need to add
                         * (msg->next - FLT_NXT) to it. */
                        ret = MIN(msg->chunk_len + msg->next, msg->chn->buf->i) - FLT_NXT(filter, msg->chn);
                }

                /* Increase FLT_NXT offset of the current filter */
                FLT_NXT(filter, msg->chn) += ret;

                /* And set this value as the bound for the next filter. It will
                 * not be able to parse more data than the current one. */
                msg->chn->buf->i = FLT_NXT(filter, msg->chn);
        }
        /* Restore the original buffer state */
        msg->chn->buf->i = buf_i;
 end:
        return ret;
}

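/*
 * Calls 'http_end_chunk' callback for all filters attached to a stream when
 * the end of the current chunk is parsed in a chunk-encoded message. The
 * filters' "forward data" state is reset and their "next" offsets are
 * increased by msg->sol, the amount of data parsed by the HTTP parser for the
 * chunk envelope. This callback is resumable: a return value <= 0 interrupts
 * the loop and execution restarts from the same filter on the next call.
 */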
int
flt_http_end_chunk(struct stream *s, struct http_msg *msg)
{
        int ret = 1;

        if (LIST_ISEMPTY(&s->strm_flt.filters))
                goto end;

        RESUME_FILTER_LOOP(s, msg->chn) {
                if (filter->ops->http_end_chunk) {
                        ret = filter->ops->http_end_chunk(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, msg->chn, end);
                }
                flt_reset_forward_data(filter, msg->chn);
                FLT_NXT(filter, msg->chn) += msg->sol;
        } RESUME_FILTER_END;
 end:
        return ret;
}

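/*
 * Calls 'http_last_chunk' callback for all filters attached to a stream when
 * the last chunk of a chunk-encoded message is parsed. As for the other chunk
 * callbacks, the filters' "forward data" state is reset and their "next"
 * offsets are increased by msg->sol, the amount of data parsed by the HTTP
 * parser for the chunk envelope. This callback is resumable: a return value
 * <= 0 interrupts the loop and execution restarts from the same filter on the
 * next call.
 */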
int
flt_http_last_chunk(struct stream *s, struct http_msg *msg)
{
        int ret = 1;

        if (LIST_ISEMPTY(&s->strm_flt.filters))
                goto end;

        RESUME_FILTER_LOOP(s, msg->chn) {
                if (filter->ops->http_last_chunk) {
                        ret = filter->ops->http_last_chunk(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, msg->chn, end);
                }
                flt_reset_forward_data(filter, msg->chn);
                FLT_NXT(filter, msg->chn) += msg->sol;
        } RESUME_FILTER_END;
 end:
        return ret;
}


/*
 * Calls 'http_chunk_trailers' callback for all "data" filters attached to a
 * stream. This function is called for chunked messages only when a part of the
 * trailers was parsed in the AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY
 * analyzers. Filters can know how much data was parsed by the HTTP parser
 * since the last call thanks to the msg->sol value. Returns a negative value
 * if an error occurs, any other value otherwise.
 */
int
flt_http_chunk_trailers(struct stream *s, struct http_msg *msg)
{
        int ret = 1;

        if (LIST_ISEMPTY(&s->strm_flt.filters))
                goto end;

        RESUME_FILTER_LOOP(s, msg->chn) {
                if (filter->ops->http_chunk_trailers) {
                        ret = filter->ops->http_chunk_trailers(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, msg->chn, end);
                }
                FLT_NXT(filter, msg->chn) += msg->sol;
        } RESUME_FILTER_END;
 end:
        return ret;
}

/*
 * Calls 'http_end' callback for all filters attached to a stream. All filters
 * are called here, but only if there is at least one "data" filter. This
 * function is called when all data were parsed and forwarded. 'http_end'
 * callback is resumable, so this function returns a negative value if an error
 * occurs, 0 if it needs to wait for some reason, any other value otherwise.
 */
int
flt_http_end(struct stream *s, struct http_msg *msg)
{
        int ret = 1;

        if (LIST_ISEMPTY(&s->strm_flt.filters))
                goto end;

        RESUME_FILTER_LOOP(s, msg->chn) {
                if (filter->ops->http_end) {
                        ret = filter->ops->http_end(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, msg->chn, end);
                }
                flt_reset_forward_data(filter, msg->chn);
        } RESUME_FILTER_END;
 end:
        return ret;
}

/*
 * Calls 'http_reset' callback for all filters attached to a stream. This
 * happens when a 100-continue response is received.
 */
void
flt_http_reset(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;

        if (LIST_ISEMPTY(&s->strm_flt.filters))
                return;

        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                if (filter->ops->http_reset)
                        filter->ops->http_reset(s, filter, msg);
        }
}

/*
 * Calls 'http_reply' callback for all filters attached to a stream when
 * HAProxy decides to stop the HTTP message processing.
 */
void
flt_http_reply(struct stream *s, short status, const struct chunk *msg)
{
        struct filter *filter;

        if (LIST_ISEMPTY(&s->strm_flt.filters))
                return;

        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                if (filter->ops->http_reply)
                        filter->ops->http_reply(s, filter, status, msg);
        }
}

/*
 * Calls 'http_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded in the
 * AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY analyzers. It takes care to
 * update the forward offset of filters and adjusts "forwardable" data to be
 * sure that a filter cannot forward more data than its predecessors. A filter
 * can choose to not forward all parsed data. Returns a negative value if an
 * error occurs, else the number of forwarded bytes.
 */
int
flt_http_forward_data(struct stream *s, struct http_msg *msg, unsigned int len)
{
        struct filter *filter = NULL;
        int ret = len;

        /* No filter, forward all data */
        if (LIST_ISEMPTY(&s->strm_flt.filters))
                goto end;

        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                if (filter->ops->http_forward_data) {
                        /* Remove bytes that the current filter considered as
                         * forwarded */
                        ret = filter->ops->http_forward_data(s, filter, msg,
                                                             ret - FLT_FWD(filter, msg->chn));
                        if (ret < 0)
                                goto end;
                }

                /* Adjust bytes that the current filter considers as
                 * forwarded */
                FLT_FWD(filter, msg->chn) += ret;

                /* And set this value as the bound for the next filter. It will
                 * not be able to forward more data than the current one. */
                ret = FLT_FWD(filter, msg->chn);
        }

        if (!ret)
                goto end;

        /* Finally, adjust filters offsets by removing data that HAProxy will
         * forward. */
        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                FLT_NXT(filter, msg->chn) -= ret;
                FLT_FWD(filter, msg->chn) -= ret;
        }
 end:
        return ret;
}

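/* Worked example of the bookkeeping above, with hypothetical numbers: with two
 * filters F1 and F2 and len = 100, if F1's callback returns 60, F2 is only
 * offered 60 bytes (minus what it already counted as forwarded). If F2 also
 * returns 60, the function returns 60 and both filters' FLT_NXT/FLT_FWD
 * offsets are reduced by these 60 bytes, which HAProxy will actually forward.
 */
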
/*
 * Calls 'channel_start_analyze' callback for all filters attached to a
 * stream. This function is called when we start to analyze a request or a
 * response. For frontend filters, it is called before all other analyzers. For
 * backend ones, it is called before all backend analyzers.
 * 'channel_start_analyze' callback is resumable, so this function returns 0 if
 * an error occurs or if it needs to wait, any other value otherwise.
 */
int
flt_start_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* If this function is called, this means there is at least one filter,
         * so we do not need to check the filter list's emptiness. */

        RESUME_FILTER_LOOP(s, chn) {
                if (an_bit == AN_FLT_START_BE && !filter->is_backend_filter)
                        continue;

                filter->next[CHN_IDX(chn)] = 0;
                filter->fwd[CHN_IDX(chn)]  = 0;

                if (filter->ops->channel_start_analyze) {
                        ret = filter->ops->channel_start_analyze(s, filter, chn);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, end);
                }
        } RESUME_FILTER_END;

 end:
        return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Calls 'channel_analyze' callback for all filters attached to a stream. This
 * function is called before each analyzer attached to a channel, except
 * analyzers responsible for data sending. 'channel_analyze' callback is
 * resumable, so this function returns 0 if an error occurs or if it needs to
 * wait, any other value otherwise.
 */
int
flt_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        if (LIST_ISEMPTY(&s->strm_flt.filters))
                goto end;

        RESUME_FILTER_LOOP(s, chn) {
                if (filter->ops->channel_analyze) {
                        ret = filter->ops->channel_analyze(s, filter, chn, an_bit);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, check_result);
                }
        } RESUME_FILTER_END;

 check_result:
        ret = handle_analyzer_result(s, chn, 0, ret);
 end:
        return ret;
}

/*
 * Calls 'channel_end_analyze' callback for all filters attached to a
 * stream. This function is called when we stop analyzing a request or a
 * response. It is called after all other analyzers. 'channel_end_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_end_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* If this function is called, this means there is at least one filter,
         * so we do not need to check the filter list's emptiness. */

        RESUME_FILTER_LOOP(s, chn) {
                filter->next[CHN_IDX(chn)] = 0;

                if (filter->ops->channel_end_analyze) {
                        ret = filter->ops->channel_end_analyze(s, filter, chn);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, end);
                }
        } RESUME_FILTER_END;

 end:
        ret = handle_analyzer_result(s, chn, an_bit, ret);

        /* Check if 'channel_end_analyze' callback has been called for the
         * request and the response. */
        if (!(s->req.analysers & AN_FLT_END) && !(s->res.analysers & AN_FLT_END)) {
                struct filter *filter, *back;

                /* When we are waiting for a new request, we must reset the
                 * stream analyzers. The input of the request channel must not
                 * be closed, else it is useless to wait. */
                if (s->txn && (s->txn->flags & TX_WAIT_NEXT_RQ) && !channel_input_closed(&s->req)) {
                        s->req.analysers = strm_li(s) ? strm_li(s)->analysers : 0;
                        s->res.analysers = 0;
                }

                list_for_each_entry_safe(filter, back, &s->strm_flt.filters, list) {
                        if (filter->is_backend_filter) {
                                LIST_DEL(&filter->list);
                                pool_free2(pool2_filter, filter);
                        }
                }
        }
        else if (ret) {
                /* Analyzer ends only for one channel. So wake up the stream to
                 * be sure to process it for the other side as soon as
                 * possible. */
                task_wakeup(s->task, TASK_WOKEN_MSG);
        }
        return ret;
}


/*
 * Calls 'tcp_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available. It takes care to update
 * the next offset of filters and adjusts available data to be sure that a
 * filter cannot parse more data than its predecessors. A filter can choose to
 * not consume all available data. Returns -1 if an error occurs, the number of
 * consumed bytes otherwise.
 */
static int
flt_data(struct stream *s, struct channel *chn)
{
        struct filter *filter = NULL;
        unsigned int buf_i;
        int ret = chn->buf->i;

        /* Save buffer state */
        buf_i = chn->buf->i;
        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                if (filter->ops->tcp_data && !flt_want_forward_data(filter, chn)) {
                        ret = filter->ops->tcp_data(s, filter, chn);
                        if (ret < 0)
                                break;
                }
                else
                        ret = chn->buf->i - FLT_NXT(filter, chn);

                /* Increase next offset of the current filter */
                FLT_NXT(filter, chn) += ret;

                /* Update <ret> value to be sure to have the last one when we
                 * exit from the loop. */
                ret = FLT_NXT(filter, chn);

                /* And set this value as the bound for the next filter. It will
                 * not be able to parse more data than the current one. */
                chn->buf->i = FLT_NXT(filter, chn);
        }
        /* Restore the original buffer state */
        chn->buf->i = buf_i;
        return ret;
}

/*
 * Calls 'tcp_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded. It takes
 * care to update the forward offset of filters and adjusts "forwardable" data
 * to be sure that a filter cannot forward more data than its predecessors. A
 * filter can choose to not forward all parsed data. Returns a negative value if
 * an error occurs, else the number of forwarded bytes.
 */
static int
flt_forward_data(struct stream *s, struct channel *chn, unsigned int len)
{
        struct filter *filter = NULL;
        int ret = len;

        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                if (filter->ops->tcp_forward_data) {
                        /* Remove bytes that the current filter considered as
                         * forwarded */
                        ret = filter->ops->tcp_forward_data(s, filter, chn, ret - FLT_FWD(filter, chn));
                        if (ret < 0)
                                goto end;
                }

                /* Adjust bytes that the current filter considers as
                 * forwarded */
                FLT_FWD(filter, chn) += ret;

                /* And set this value as the bound for the next filter. It will
                 * not be able to forward more data than the current one. */
                ret = FLT_FWD(filter, chn);
        }

        if (!ret)
                goto end;

        /* Adjust forward counter and next offset of filters by removing data
         * that HAProxy will consider as forwarded. */
        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                FLT_NXT(filter, chn) -= ret;
                FLT_FWD(filter, chn) -= ret;
        }

        /* Consume data that all filters consider as forwarded. */
        b_adv(chn->buf, ret);
 end:
        return ret;
}

/*
 * Called when TCP data must be filtered on a channel. This function is the
 * AN_FLT_XFER_DATA analyzer. When called, it is responsible for forwarding
 * data when the proxy is not in HTTP mode. Behind the scenes, it calls
 * consecutively 'tcp_data' and 'tcp_forward_data' callbacks for all "data"
 * filters attached to a stream. Returns 0 if an error occurs or if it needs to
 * wait, any other value otherwise.
 */
int
flt_xfer_data(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* If this function is called, this means there is at least one filter,
         * so we do not need to check the filter list's emptiness. */

        /* Be sure that the output is still opened. Else we stop the data
         * filtering. */
        if ((chn->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
            ((chn->flags & CF_SHUTW) && (chn->to_forward || chn->buf->o)))
                goto end;

        /* Let all "data" filters parse incoming data */
        ret = flt_data(s, chn);
        if (ret < 0)
                goto end;

        /* And forward them */
        ret = flt_forward_data(s, chn, ret);
        if (ret < 0)
                goto end;

        /* Stop waiting for data if the input is closed and no data is pending,
         * or if the output is closed. */
        if ((chn->flags & CF_SHUTW) ||
            ((chn->flags & CF_SHUTR) && !buffer_pending(chn->buf))) {
                ret = 1;
                goto end;
        }

        /* Wait for data */
        return 0;
 end:
        /* Terminate the data filtering. If <ret> is negative, an error was
         * encountered during the filtering. */
        return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Handles the result of the filters' analyzers. It returns 0 if an error occurs
 * or if it needs to wait, any other value otherwise.
 */
static int
handle_analyzer_result(struct stream *s, struct channel *chn,
                       unsigned int an_bit, int ret)
{
        int finst;

        if (ret < 0)
                goto return_bad_req;
        else if (!ret)
                goto wait;

        /* End of job, return OK */
        if (an_bit) {
                chn->analysers &= ~an_bit;
                chn->analyse_exp = TICK_ETERNITY;
        }
        return 1;

 return_bad_req:
        /* An error occurred */
        channel_abort(&s->req);
        channel_abort(&s->res);

        if (!(chn->flags & CF_ISRESP)) {
                s->req.analysers &= AN_FLT_END;
                finst = SF_FINST_R;
                /* FIXME: incr counters */
        }
        else {
                s->res.analysers &= AN_FLT_END;
                finst = SF_FINST_H;
                /* FIXME: incr counters */
        }

        if (s->txn) {
                /* Do not do that when we are waiting for the next request */
                if (s->txn->status)
                        http_reply_and_close(s, s->txn->status, NULL);
                else {
                        s->txn->status = 400;
                        http_reply_and_close(s, 400, http_error_message(s, HTTP_ERR_400));
                }
        }

        if (!(s->flags & SF_ERR_MASK))
                s->flags |= SF_ERR_PRXCOND;
        if (!(s->flags & SF_FINST_MASK))
                s->flags |= finst;
        return 0;

 wait:
        if (!(chn->flags & CF_ISRESP))
                channel_dont_connect(chn);
        return 0;
}


/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted, doing so helps
 * all code contributors.
 * Optional keywords are also declared with a NULL ->parse() function so that
 * the config parser can report an appropriate error when a known keyword was
 * not enabled. */
static struct cfg_kw_list cfg_kws = {ILH, {
                { CFG_LISTEN, "filter", parse_filter },
                { 0, NULL, NULL },
        }
};

__attribute__((constructor))
static void
__filters_init(void)
{
        pool2_filter = create_pool("filter", sizeof(struct filter), MEM_F_SHARED);
        cfg_register_keywords(&cfg_kws);
}

__attribute__((destructor))
static void
__filters_deinit(void)
{
        pool_destroy2(pool2_filter);
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */