/*
 * Stream filters related variables and functions.
 *
 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/buffer.h>
#include <common/debug.h>
#include <common/cfgparse.h>
#include <common/compat.h>
#include <common/config.h>
#include <common/errors.h>
#include <common/namespace.h>
#include <common/standard.h>

#include <types/filters.h>
#include <types/proto_http.h>

#include <proto/compression.h>
#include <proto/filters.h>
#include <proto/flt_http_comp.h>
#include <proto/proto_http.h>
#include <proto/stream.h>
#include <proto/stream_interface.h>

/* Pool used to allocate filters */
struct pool_head *pool2_filter = NULL;

static int handle_analyzer_result(struct stream *s, struct channel *chn, unsigned int an_bit, int ret);

/* - RESUME_FILTER_LOOP and RESUME_FILTER_END must always be used together.
 *   The first one begins a loop and the second one ends it.
 *
 * - BREAK_EXECUTION must be used to break the loop and set the filter from
 *   which to resume the next time.
 *
 *   Here is an example:
 *
 *     RESUME_FILTER_LOOP(stream, channel) {
 *         ...
 *         if (cond)
 *             BREAK_EXECUTION(stream, channel, label);
 *         ...
 *     } RESUME_FILTER_END;
 *     ...
 *   label:
 *     ...
 *
 */
#define RESUME_FILTER_LOOP(strm, chn)                                     \
        do {                                                              \
                struct filter *filter;                                    \
                                                                          \
                if ((strm)->strm_flt.current[CHN_IDX(chn)]) {             \
                        filter = (strm)->strm_flt.current[CHN_IDX(chn)];  \
                        (strm)->strm_flt.current[CHN_IDX(chn)] = NULL;    \
                        goto resume_execution;                            \
                }                                                         \
                                                                          \
                list_for_each_entry(filter, &(strm)->strm_flt.filters, list) { \
                resume_execution:

#define RESUME_FILTER_END                                                 \
                }                                                         \
        } while(0)

#define BREAK_EXECUTION(strm, chn, label)                                 \
        do {                                                              \
                (strm)->strm_flt.current[CHN_IDX(chn)] = filter;          \
                goto label;                                               \
        } while (0)

/* List head of all known filter keywords */
static struct flt_kw_list flt_keywords = {
        .list = LIST_HEAD_INIT(flt_keywords.list)
};

/*
 * Registers the filter keyword list <kwl> as a list of valid keywords for next
 * parsing sessions.
 */
void
flt_register_keywords(struct flt_kw_list *kwl)
{
        LIST_ADDQ(&flt_keywords.list, &kwl->list);
}

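/*
 * Example (sketch, not part of the original code): a filter module would
 * typically declare its own keyword list and register it from a constructor.
 * The "trace" keyword, the parse_trace_flt() parser and the "TRACE" scope
 * below are hypothetical names used only to illustrate the registration
 * pattern, and the exact layout of struct flt_kw_list may differ:
 *
 *     static struct flt_kw_list trace_kws = { "TRACE", { }, {
 *             { "trace", parse_trace_flt },
 *             { NULL, NULL },
 *         }
 *     };
 *
 *     __attribute__((constructor))
 *     static void __flt_trace_init(void)
 *     {
 *             flt_register_keywords(&trace_kws);
 *     }
 */
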
/*
 * Returns a pointer to the filter keyword <kw>, or NULL if not found. If the
 * keyword is found with a NULL ->parse() function, then an attempt is made to
 * find one with a valid ->parse() function. This way it is possible to declare
 * platform-dependent, known keywords as NULL, then only declare them as valid
 * if some options are met. Note that if the requested keyword contains an
 * opening parenthesis, everything from this point is ignored.
 */
struct flt_kw *
flt_find_kw(const char *kw)
{
        int index;
        const char *kwend;
        struct flt_kw_list *kwl;
        struct flt_kw *ret = NULL;

        kwend = strchr(kw, '(');
        if (!kwend)
                kwend = kw + strlen(kw);

        list_for_each_entry(kwl, &flt_keywords.list, list) {
                for (index = 0; kwl->kw[index].kw != NULL; index++) {
                        if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
                            kwl->kw[index].kw[kwend-kw] == 0) {
                                if (kwl->kw[index].parse)
                                        return &kwl->kw[index]; /* found it! */
                                else
                                        ret = &kwl->kw[index];  /* may be OK */
                        }
                }
        }
        return ret;
}

/*
 * Dumps all registered "filter" keywords to the <out> string pointer. The
 * unsupported keywords are only dumped if their supported form was not found.
 */
void
flt_dump_kws(char **out)
{
        struct flt_kw_list *kwl;
        int index;

        *out = NULL;
        list_for_each_entry(kwl, &flt_keywords.list, list) {
                for (index = 0; kwl->kw[index].kw != NULL; index++) {
                        if (kwl->kw[index].parse ||
                            flt_find_kw(kwl->kw[index].kw) == &kwl->kw[index]) {
                                memprintf(out, "%s[%4s] %s%s\n", *out ? *out : "",
                                          kwl->scope,
                                          kwl->kw[index].kw,
                                          kwl->kw[index].parse ? "" : " (not supported)");
                        }
                }
        }
}

/*
 * Parses the "filter" keyword. All keywords must be handled by the filters
 * themselves.
 */
static int
parse_filter(char **args, int section_type, struct proxy *curpx,
             struct proxy *defpx, const char *file, int line, char **err)
{
        struct filter *filter = NULL;

        /* Filter cannot be defined on a default proxy */
        if (curpx == defpx) {
                memprintf(err, "parsing [%s:%d] : %s is not allowed in a 'defaults' section.",
                          file, line, args[0]);
                return -1;
        }
        if (!strcmp(args[0], "filter")) {
                struct flt_kw *kw;
                int cur_arg;

                if (!*args[1]) {
                        memprintf(err,
                                  "parsing [%s:%d] : missing argument for '%s' in %s '%s'.",
                                  file, line, args[0], proxy_type_str(curpx), curpx->id);
                        goto error;
                }
                filter = pool_alloc2(pool2_filter);
                if (!filter) {
                        memprintf(err, "'%s' : out of memory", args[0]);
                        goto error;
                }
                memset(filter, 0, sizeof(*filter));

                cur_arg = 1;
                kw = flt_find_kw(args[cur_arg]);
                if (kw) {
                        if (!kw->parse) {
                                memprintf(err, "parsing [%s:%d] : '%s' : "
                                          "'%s' option is not implemented in this version (check build options).",
                                          file, line, args[0], args[cur_arg]);
                                goto error;
                        }
                        if (kw->parse(args, &cur_arg, curpx, filter, err) != 0) {
                                if (err && *err)
                                        memprintf(err, "'%s' : '%s'",
                                                  args[0], *err);
                                else
                                        memprintf(err, "'%s' : error encountered while processing '%s'",
                                                  args[0], args[cur_arg]);
                                goto error;
                        }
                }
                else {
                        flt_dump_kws(err);
                        indent_msg(err, 4);
                        memprintf(err, "'%s' : unknown keyword '%s'.%s%s",
                                  args[0], args[cur_arg],
                                  err && *err ? " Registered keywords :" : "", err && *err ? *err : "");
                        goto error;
                }
                if (*args[cur_arg]) {
                        memprintf(err, "'%s %s' : unknown keyword '%s'.",
                                  args[0], args[1], args[cur_arg]);
                        goto error;
                }

                LIST_ADDQ(&curpx->filters, &filter->list);
        }
        return 0;

  error:
        if (filter)
                pool_free2(pool2_filter, filter);
        return -1;
}

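/*
 * Configuration sketch (not part of the original code): the "filter"
 * directive is used inside a proxy section and takes a registered filter
 * keyword followed by that filter's own arguments. The "trace" keyword below
 * is the hypothetical one from the registration sketch above; which keywords
 * actually exist depends on the filters built in:
 *
 *     frontend www
 *         bind :8080
 *         filter trace
 *         default_backend servers
 */
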
/*
 * Calls 'init' callback for all filters attached to a proxy. This happens
 * after the configuration parsing. Filters can finish filling their
 * configuration. Returns (ERR_ALERT|ERR_FATAL) if an error occurs, 0
 * otherwise.
 */
int
flt_init(struct proxy *proxy)
{
        struct filter *filter;

        list_for_each_entry(filter, &proxy->filters, list) {
                if (filter->ops->init && filter->ops->init(proxy, filter) < 0)
                        return ERR_ALERT|ERR_FATAL;
        }
        return 0;
}

/*
 * Calls 'check' callback for all filters attached to a proxy. This happens
 * after the configuration parsing but before filters initialization. Returns
 * the number of encountered errors.
 */
int
flt_check(struct proxy *proxy)
{
        struct filter *filter;
        int err = 0;

        list_for_each_entry(filter, &proxy->filters, list) {
                if (filter->ops->check)
                        err += filter->ops->check(proxy, filter);
        }
        err += check_legacy_http_comp_flt(proxy);
        return err;
}

/*
 * Calls 'deinit' callback for all filters attached to a proxy. This happens
 * when HAProxy is stopped.
 */
void
flt_deinit(struct proxy *proxy)
{
        struct filter *filter, *back;

        list_for_each_entry_safe(filter, back, &proxy->filters, list) {
                if (filter->ops->deinit)
                        filter->ops->deinit(proxy, filter);
                LIST_DEL(&filter->list);
                pool_free2(pool2_filter, filter);
        }
}

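/*
 * Example (sketch, not part of the original code): a filter implementation
 * provides a struct flt_ops holding the callbacks it is interested in and
 * leaves the others NULL. Only callback names actually used in this file are
 * shown; the trace_* functions are hypothetical:
 *
 *     static struct flt_ops trace_ops = {
 *             .init         = trace_init,
 *             .check        = trace_check,
 *             .deinit       = trace_deinit,
 *             .stream_start = trace_stream_start,
 *             .stream_stop  = trace_stream_stop,
 *             .http_headers = trace_http_headers,
 *             .http_data    = trace_http_data,
 *     };
 *
 * The filter keyword's ->parse() callback (invoked from parse_filter() above)
 * is then expected to set filter->ops to such a structure.
 */
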
/* Attaches a filter to a stream. Returns -1 if an error occurs, 0 otherwise. */
static int
flt_stream_add_filter(struct stream *s, struct filter *filter,
                      int is_backend)
{
        struct filter *f = pool_alloc2(pool2_filter);
        if (!f) /* not enough memory */
                return -1;
        memset(f, 0, sizeof(*f));
        f->id   = filter->id;
        f->ops  = filter->ops;
        f->conf = filter->conf;
        f->is_backend_filter = is_backend;
        LIST_ADDQ(&s->strm_flt.filters, &f->list);
        s->strm_flt.has_filters = 1;
        return 0;
}

/*
 * Called when a stream is created. It attaches all frontend filters to the
 * stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_init(struct stream *s)
{
        struct filter *filter;

        LIST_INIT(&s->strm_flt.filters);
        memset(s->strm_flt.current, 0, sizeof(s->strm_flt.current));
        s->strm_flt.has_filters = 0;
        list_for_each_entry(filter, &strm_fe(s)->filters, list) {
                if (flt_stream_add_filter(s, filter, 0) < 0)
                        return -1;
        }
        return 0;
}

/*
 * Called when a stream is closed or when the analysis ends (for an HTTP
 * stream, this happens after each request/response exchange). When the
 * analysis ends, backend filters are removed. When the stream is closed, all
 * filters attached to the stream are removed.
 */
void
flt_stream_release(struct stream *s, int only_backend)
{
        struct filter *filter, *back;

        list_for_each_entry_safe(filter, back, &s->strm_flt.filters, list) {
                if (!only_backend || filter->is_backend_filter) {
                        LIST_DEL(&filter->list);
                        pool_free2(pool2_filter, filter);
                }
        }
        if (LIST_ISEMPTY(&s->strm_flt.filters))
                s->strm_flt.has_filters = 0;
}

/*
 * Calls 'stream_start' for all filters attached to a stream. This happens when
 * the stream is created, just after calling the flt_stream_init
 * function. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_start(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                if (filter->ops->stream_start && filter->ops->stream_start(s, filter) < 0)
                        return -1;
        }
        return 0;
}

/*
 * Calls 'stream_stop' for all filters attached to a stream. This happens when
 * the stream is stopped, just before calling the flt_stream_release function.
 */
void
flt_stream_stop(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                if (filter->ops->stream_stop)
                        filter->ops->stream_stop(s, filter);
        }
}

/*
 * Called when a backend is set for a stream. If the frontend and the backend
 * are the same, this function does nothing. Otherwise, it attaches all backend
 * filters to the stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_set_stream_backend(struct stream *s, struct proxy *be)
{
        struct filter *filter;

        if (strm_fe(s) == be)
                return 0;

        list_for_each_entry(filter, &be->filters, list) {
                if (flt_stream_add_filter(s, filter, 1) < 0)
                        return -1;
        }
        return 0;
}

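/*
 * Calls 'http_headers' callback for all filters attached to a stream. This
 * callback is resumable, so this function returns a negative value if an
 * error occurs, 0 if it needs to wait, any other value otherwise. When all
 * filters have processed the headers, their next offset is set to msg->sov,
 * the definitive size of the headers.
 */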
int
flt_http_headers(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;
        int ret = 1;

        RESUME_FILTER_LOOP(s, msg->chn) {
                if (filter->ops && filter->ops->http_headers) {
                        ret = filter->ops->http_headers(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, msg->chn, end);
                }
        } RESUME_FILTER_END;

        /* We increase the FLT_NXT offset after all processing on the headers
         * because any filter can alter them. So the definitive size of the
         * headers (msg->sov) is only known when all filters have been
         * called. */
        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                FLT_NXT(filter, msg->chn) = msg->sov;
        }
 end:
        return ret;
}

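/*
 * Example (sketch, not part of the original code): an 'http_headers' callback
 * follows the resumable convention used above. It returns a negative value on
 * error, 0 to pause the analysis and be called again later, and a positive
 * value to let the next filter run. The trace_http_headers() name and the
 * logged fields are hypothetical:
 *
 *     static int
 *     trace_http_headers(struct stream *s, struct filter *filter,
 *                        struct http_msg *msg)
 *     {
 *             // Headers are complete here, but may still be altered by
 *             // filters called after this one.
 *             fprintf(stderr, "%s: headers parsed (%d bytes)\n",
 *                     (msg->chn->flags & CF_ISRESP) ? "response" : "request",
 *                     (int)msg->sov);
 *             return 1;
 *     }
 */
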
/*
 * Calls 'http_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available (excluding chunks
 * envelope for chunked messages) in the AN_REQ_HTTP_XFER_BODY and
 * AN_RES_HTTP_XFER_BODY analyzers. It takes care to update the next offset of
 * filters and adjusts available data to be sure that a filter cannot parse more
 * data than its predecessors. A filter can choose to not consume all available
 * data. Returns -1 if an error occurs, the number of consumed bytes otherwise.
 */
int
flt_http_data(struct stream *s, struct http_msg *msg)
{
        struct filter *filter = NULL;
        unsigned int buf_i;
        int ret = 0;

        /* Save buffer state */
        buf_i = msg->chn->buf->i;
        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                /* If the HTTP parser is ahead, we update the next offset of the
                 * current filter. This happens for chunked messages, at the
                 * beginning of a new chunk. */
                if (msg->next > FLT_NXT(filter, msg->chn))
                        FLT_NXT(filter, msg->chn) = msg->next;
                if (filter->ops->http_data && !flt_want_forward_data(filter, msg->chn)) {
                        ret = filter->ops->http_data(s, filter, msg);
                        if (ret <= 0)
                                break;
                }
                else {
                        /* msg->chunk_len is the remaining size of data to parse
                         * in the body (or in the current chunk for
                         * chunk-encoded messages) from the HTTP parser point of
                         * view (relatively to msg->next). To have it from the
                         * filter point of view, we need to add
                         * (msg->next - FLT_NXT) to it. */
                        ret = MIN(msg->chunk_len + msg->next, msg->chn->buf->i) - FLT_NXT(filter, msg->chn);
                }

                /* Update the next offset of the current filter */
                FLT_NXT(filter, msg->chn) += ret;

                /* And set this value as the bound for the next filter. It will
                 * not be able to parse more data than the current one. */
                msg->chn->buf->i = FLT_NXT(filter, msg->chn);
        }
        /* Restore the original buffer state */
        msg->chn->buf->i = buf_i;
        return ret;
}

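/*
 * Example (sketch, not part of the original code): an 'http_data' callback
 * returns the number of bytes it consumed, counted from its own next offset.
 * A minimal callback that accepts everything currently parsable mirrors the
 * default computation used above when no callback is registered
 * (trace_http_data() is a hypothetical name):
 *
 *     static int
 *     trace_http_data(struct stream *s, struct filter *filter,
 *                     struct http_msg *msg)
 *     {
 *             // Consume all data available to this filter, without crossing
 *             // the end of the current chunk.
 *             return (MIN(msg->chunk_len + msg->next, msg->chn->buf->i)
 *                     - FLT_NXT(filter, msg->chn));
 *     }
 */
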
/*
 * Calls 'http_chunk_trailers' callback for all "data" filters attached to a
 * stream. This function is called for chunked messages only when a part of the
 * trailers was parsed in the AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY
 * analyzers. Filters can know how much data was parsed by the HTTP parser
 * since the last call thanks to the msg->sol value. Returns a negative value
 * if an error occurs, any other value otherwise.
 */
int
flt_http_chunk_trailers(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;
        int ret = 1;

        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                /* Be sure to set the next offset of the filter at the right
                 * place. This is really useful when the first part of the
                 * trailers was parsed. */
                FLT_NXT(filter, msg->chn) = msg->next;
                if (filter->ops->http_chunk_trailers) {
                        ret = filter->ops->http_chunk_trailers(s, filter, msg);
                        if (ret < 0)
                                break;
                }
                /* Update the next offset of the current filter. Here all data
                 * are always consumed. */
                FLT_NXT(filter, msg->chn) += msg->sol;
        }
        return ret;
}

/*
 * Calls 'http_end' callback for all filters attached to a stream. All filters
 * are called here, but only if there is at least one "data" filter. This
 * function is called when all data have been parsed and forwarded. 'http_end'
 * callback is resumable, so this function returns a negative value if an error
 * occurs, 0 if it needs to wait for some reason, any other value otherwise.
 */
int
flt_http_end(struct stream *s, struct http_msg *msg)
{
        int ret = 1;

        RESUME_FILTER_LOOP(s, msg->chn) {
                if (filter->ops->http_end) {
                        ret = filter->ops->http_end(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, msg->chn, end);
                }
                flt_reset_forward_data(filter, msg->chn);
        } RESUME_FILTER_END;
end:
        return ret;
}

/*
 * Calls 'http_reset' callback for all filters attached to a stream. This
 * happens when a 100-continue response is received.
 */
void
flt_http_reset(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;

        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                if (filter->ops->http_reset)
                        filter->ops->http_reset(s, filter, msg);
        }
}

/*
 * Calls 'http_reply' callback for all filters attached to a stream when
 * HAProxy decides to stop the HTTP message processing.
 */
void
flt_http_reply(struct stream *s, short status, const struct chunk *msg)
{
        struct filter *filter;

        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                if (filter->ops->http_reply)
                        filter->ops->http_reply(s, filter, status, msg);
        }
}

/*
 * Calls 'http_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded in the
 * AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY analyzers. It takes care to
 * update the forward offset of filters and adjusts "forwardable" data to be
 * sure that a filter cannot forward more data than its predecessors. A filter
 * can choose to not forward all parsed data. Returns a negative value if an
 * error occurs, else the number of forwarded bytes.
 */
int
flt_http_forward_data(struct stream *s, struct http_msg *msg, unsigned int len)
{
        struct filter *filter = NULL;
        int ret = len;

        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                /* If the HTTP parser is ahead, we update the next offset of the
                 * current filter. This happens for chunked messages, when the
                 * chunk envelope is parsed. */
                if (msg->next > FLT_NXT(filter, msg->chn))
                        FLT_NXT(filter, msg->chn) = msg->next;
                if (filter->ops->http_forward_data) {
                        /* Remove bytes that the current filter considered as
                         * forwarded */
                        ret = filter->ops->http_forward_data(s, filter, msg,
                                                             ret - FLT_FWD(filter, msg->chn));
                        if (ret < 0)
                                goto end;
                }

                /* Adjust bytes that the current filter considers as
                 * forwarded */
                FLT_FWD(filter, msg->chn) += ret;

                /* And set this value as the bound for the next filter. It will
                 * not be able to forward more data than the current one. */
                ret = FLT_FWD(filter, msg->chn);
        }

        if (!ret)
                goto end;

        /* Finally, adjust filters offsets by removing data that HAProxy will
         * forward. */
        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                FLT_NXT(filter, msg->chn) -= ret;
                FLT_FWD(filter, msg->chn) -= ret;
        }
 end:
        return ret;
}

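/*
 * Worked example (not part of the original code) of the offset bookkeeping
 * used by the two functions above. Assume, for simplicity, that offsets start
 * at zero, that 100 bytes are present in the buffer and that two "data"
 * filters A and B are attached:
 *
 *   - flt_http_data(): A parses 80 bytes, so FLT_NXT(A) = 80 and the buffer
 *     is temporarily bounded to 80 bytes; B can parse at most 80 bytes and
 *     takes them all, so FLT_NXT(B) = 80. The function returns 80.
 *
 *   - flt_http_forward_data(len = 80): A agrees to forward only 50 bytes, so
 *     FLT_FWD(A) = 50 and B is offered at most 50; B forwards them all, so
 *     FLT_FWD(B) = 50. HAProxy will forward 50 bytes, and the NXT and FWD
 *     offsets of each filter are then shifted back by 50.
 */
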
/*
 * Calls 'channel_start_analyze' callback for all filters attached to a
 * stream. This function is called when we start to analyze a request or a
 * response. For frontend filters, it is called before all other analyzers. For
 * backend ones, it is called before all backend
 * analyzers. 'channel_start_analyze' callback is resumable, so this function
 * returns 0 if an error occurs or if it needs to wait, any other value
 * otherwise.
 */
int
flt_start_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* If this function is called, this means there is at least one filter,
         * so we do not need to check the filter list's emptiness. */

        RESUME_FILTER_LOOP(s, chn) {
                if (an_bit == AN_FLT_START_BE && !filter->is_backend_filter)
                        continue;

                filter->next[CHN_IDX(chn)] = 0;
                filter->fwd[CHN_IDX(chn)] = 0;

                if (filter->ops->channel_start_analyze) {
                        ret = filter->ops->channel_start_analyze(s, filter, chn);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, end);
                }
        } RESUME_FILTER_END;

 end:
        return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Calls 'channel_analyze' callback for all filters attached to a stream. This
 * function is called before each analyzer attached to a channel, except
 * analyzers responsible for data sending. 'channel_analyze' callback is
 * resumable, so this function returns 0 if an error occurs or if it needs to
 * wait, any other value otherwise.
 */
int
flt_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        RESUME_FILTER_LOOP(s, chn) {
                if (filter->ops->channel_analyze) {
                        ret = filter->ops->channel_analyze(s, filter, chn, an_bit);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, check_result);
                }
        } RESUME_FILTER_END;

 check_result:
        ret = handle_analyzer_result(s, chn, 0, ret);
        return ret;
}

/*
 * Calls 'channel_end_analyze' callback for all filters attached to a
 * stream. This function is called when we stop analyzing a request or a
 * response. It is called after all other analyzers. 'channel_end_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_end_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* If this function is called, this means there is at least one filter,
         * so we do not need to check the filter list's emptiness. */

        RESUME_FILTER_LOOP(s, chn) {
                filter->next[CHN_IDX(chn)] = 0;

                if (filter->ops->channel_end_analyze) {
                        ret = filter->ops->channel_end_analyze(s, filter, chn);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, end);
                }
        } RESUME_FILTER_END;

end:
        ret = handle_analyzer_result(s, chn, an_bit, ret);

        /* Check if the 'channel_end_analyze' callback has been called for both
         * the request and the response. */
        if (!(s->req.analysers & AN_FLT_END) && !(s->res.analysers & AN_FLT_END)) {
                /* When we are waiting for a new request, we must reset the
                 * stream analyzers. The input of the request channel must not
                 * be closed, else it is useless to wait. */
                if (s->txn && (s->txn->flags & TX_WAIT_NEXT_RQ) && !channel_input_closed(&s->req)) {
                        s->req.analysers = strm_li(s) ? strm_li(s)->analysers : 0;
                        s->res.analysers = 0;
                }

                /* Remove backend filters from the list */
                flt_stream_release(s, 1);
        }
        else if (ret) {
                /* The analysis ends only for one channel. So wake up the
                 * stream to be sure to process it for the other side as soon
                 * as possible. */
                task_wakeup(s->task, TASK_WOKEN_MSG);
        }
        return ret;
}


/*
 * Calls 'tcp_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available. It takes care to update
 * the next offset of filters and adjusts available data to be sure that a
 * filter cannot parse more data than its predecessors. A filter can choose to
 * not consume all available data. Returns -1 if an error occurs, the number of
 * consumed bytes otherwise.
 */
static int
flt_data(struct stream *s, struct channel *chn)
{
        struct filter *filter = NULL;
        unsigned int buf_i;
        int ret = chn->buf->i;

        /* Save buffer state */
        buf_i = chn->buf->i;
        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                if (filter->ops->tcp_data && !flt_want_forward_data(filter, chn)) {
                        ret = filter->ops->tcp_data(s, filter, chn);
                        if (ret < 0)
                                break;
                }
                else
                        ret = chn->buf->i - FLT_NXT(filter, chn);

                /* Increase the next offset of the current filter */
                FLT_NXT(filter, chn) += ret;

                /* Update <ret> value to be sure to have the last one when we
                 * exit from the loop. */
                ret = FLT_NXT(filter, chn);

                /* And set this value as the bound for the next filter. It will
                 * not be able to parse more data than the current one. */
                chn->buf->i = FLT_NXT(filter, chn);
        }
        /* Restore the original buffer state */
        chn->buf->i = buf_i;
        return ret;
}

/*
 * Calls 'tcp_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded. It takes
 * care to update the forward offset of filters and adjusts "forwardable" data
 * to be sure that a filter cannot forward more data than its predecessors. A
 * filter can choose to not forward all parsed data. Returns a negative value if
 * an error occurs, else the number of forwarded bytes.
 */
static int
flt_forward_data(struct stream *s, struct channel *chn, unsigned int len)
{
        struct filter *filter = NULL;
        int ret = len;

        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                if (filter->ops->tcp_forward_data) {
                        /* Remove bytes that the current filter considered as
                         * forwarded */
                        ret = filter->ops->tcp_forward_data(s, filter, chn, ret - FLT_FWD(filter, chn));
                        if (ret < 0)
                                goto end;
                }

                /* Adjust bytes that the current filter considers as
                 * forwarded */
                FLT_FWD(filter, chn) += ret;

                /* And set this value as the bound for the next filter. It will
                 * not be able to forward more data than the current one. */
                ret = FLT_FWD(filter, chn);
        }

        if (!ret)
                goto end;

        /* Adjust forward counter and next offset of filters by removing data
         * that HAProxy will consider as forwarded. */
        list_for_each_entry(filter, &s->strm_flt.filters, list) {
                FLT_NXT(filter, chn) -= ret;
                FLT_FWD(filter, chn) -= ret;
        }

        /* Consume data that all filters consider as forwarded. */
        b_adv(chn->buf, ret);
 end:
        return ret;
}

/*
 * Called when TCP data must be filtered on a channel. This function is the
 * AN_FLT_XFER_DATA analyzer. When called, it is responsible for forwarding
 * data when the proxy is not in http mode. Behind the scene, it calls
 * consecutively 'tcp_data' and 'tcp_forward_data' callbacks for all "data"
 * filters attached to a stream. Returns 0 if an error occurs or if it needs
 * to wait, any other value otherwise.
 */
int
flt_xfer_data(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* If this function is called, this means there is at least one filter,
         * so we do not need to check the filter list's emptiness. */

        /* Be sure that the output is still open. Else we stop the data
         * filtering. */
        if ((chn->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
            ((chn->flags & CF_SHUTW) && (chn->to_forward || chn->buf->o)))
                goto end;

        /* Let all "data" filters parse incoming data */
        ret = flt_data(s, chn);
        if (ret < 0)
                goto end;

        /* And forward them */
        ret = flt_forward_data(s, chn, ret);
        if (ret < 0)
                goto end;

        /* Stop waiting for data if the input is closed and no data are pending,
         * or if the output is closed. */
        if ((chn->flags & CF_SHUTW) ||
            ((chn->flags & CF_SHUTR) && !buffer_pending(chn->buf))) {
                ret = 1;
                goto end;
        }

        /* Wait for data */
        return 0;
 end:
        /* Terminate the data filtering. If <ret> is negative, an error was
         * encountered during the filtering. */
        return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Handles the result of filters' analyzers. It returns 0 if an error occurs or
 * if it needs to wait, any other value otherwise.
 */
static int
handle_analyzer_result(struct stream *s, struct channel *chn,
                       unsigned int an_bit, int ret)
{
        int finst;

        if (ret < 0)
                goto return_bad_req;
        else if (!ret)
                goto wait;

        /* End of job, return OK */
        if (an_bit) {
                chn->analysers &= ~an_bit;
                chn->analyse_exp = TICK_ETERNITY;
        }
        return 1;

 return_bad_req:
        /* An error occurred */
        channel_abort(&s->req);
        channel_abort(&s->res);

        if (!(chn->flags & CF_ISRESP)) {
                s->req.analysers &= AN_FLT_END;
                finst = SF_FINST_R;
                /* FIXME: incr counters */
        }
        else {
                s->res.analysers &= AN_FLT_END;
                finst = SF_FINST_H;
                /* FIXME: incr counters */
        }

        if (s->txn) {
                /* Do not do that when we are waiting for the next request */
                if (s->txn->status)
                        http_reply_and_close(s, s->txn->status, NULL);
                else {
                        s->txn->status = 400;
                        http_reply_and_close(s, 400, http_error_message(s, HTTP_ERR_400));
                }
        }

        if (!(s->flags & SF_ERR_MASK))
                s->flags |= SF_ERR_PRXCOND;
        if (!(s->flags & SF_FINST_MASK))
                s->flags |= finst;
        return 0;

 wait:
        if (!(chn->flags & CF_ISRESP))
                channel_dont_connect(chn);
        return 0;
}


/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted, doing so helps
 * all code contributors.
 * Optional keywords are also declared with a NULL ->parse() function so that
 * the config parser can report an appropriate error when a known keyword was
 * not enabled. */
static struct cfg_kw_list cfg_kws = {ILH, {
                { CFG_LISTEN, "filter", parse_filter },
                { 0, NULL, NULL },
        }
};

__attribute__((constructor))
static void
__filters_init(void)
{
        pool2_filter = create_pool("filter", sizeof(struct filter), MEM_F_SHARED);
        cfg_register_keywords(&cfg_kws);
}

__attribute__((destructor))
static void
__filters_deinit(void)
{
        pool_destroy2(pool2_filter);
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */