blob: 420ad8029a00710982b1efd094849299fb3bacc5 [file] [log] [blame]
Christopher Fauletd7c91962015-04-30 11:48:27 +02001/*
2 * Stream filters related variables and functions.
3 *
4 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <common/buffer.h>
14#include <common/debug.h>
15#include <common/cfgparse.h>
16#include <common/compat.h>
17#include <common/config.h>
18#include <common/errors.h>
19#include <common/namespace.h>
20#include <common/standard.h>
21
22#include <types/filters.h>
23#include <types/proto_http.h>
24
25#include <proto/compression.h>
26#include <proto/filters.h>
Christopher Faulet92d36382015-11-05 13:35:03 +010027#include <proto/flt_http_comp.h>
Christopher Fauletd7c91962015-04-30 11:48:27 +020028#include <proto/proto_http.h>
29#include <proto/stream.h>
30#include <proto/stream_interface.h>
31
/* Pool used to allocate filters attached to streams (struct filter objects,
 * see flt_stream_add_filter below). Created elsewhere at startup. */
struct pool_head *pool2_filter = NULL;

/* Translates a filter analyzer return code into stream-analyzer handling.
 * NOTE(review): defined later in this file, past the visible chunk. */
static int handle_analyzer_result(struct stream *s, struct channel *chn, unsigned int an_bit, int ret);
36
/* - RESUME_FILTER_LOOP and RESUME_FILTER_END must always be used together.
 *   The first one begins a loop and the second one ends it.
 *
 * - BREAK_EXECUTION must be used to break the loop and set the filter from
 *   which to resume the next time. The filter is saved in the per-channel
 *   'current' slot and the loop jumps straight back to it on re-entry.
 *
 *   Here is an example:
 *
 *    RESUME_FILTER_LOOP(stream, channel) {
 *        ...
 *        if (cond)
 *             BREAK_EXECUTION(stream, channel, label);
 *        ...
 *    } RESUME_FILTER_END;
 *    ...
 *     label:
 *    ...
 *
 */
#define RESUME_FILTER_LOOP(strm, chn)					\
	do {								\
		struct filter *filter;					\
									\
		if (strm_flt(strm)->current[CHN_IDX(chn)]) {		\
			filter = strm_flt(strm)->current[CHN_IDX(chn)]; \
			strm_flt(strm)->current[CHN_IDX(chn)] = NULL;	\
			goto resume_execution;				\
		}							\
									\
		/* BUGFIX: use the macro argument <strm>, not a hardcoded \
		 * 's', so the macro works whatever the caller's variable \
		 * name is. */						\
		list_for_each_entry(filter, &strm_flt(strm)->filters, list) { \
		resume_execution:

#define RESUME_FILTER_END					\
		}						\
	} while(0)

#define BREAK_EXECUTION(strm, chn, label)			\
	do {							\
		strm_flt(strm)->current[CHN_IDX(chn)] = filter;	\
		goto label;					\
	} while (0)
78
79
/* List head of all known filter keywords. Filter implementations append
 * their keyword lists here through flt_register_keywords(). */
static struct flt_kw_list flt_keywords = {
	.list = LIST_HEAD_INIT(flt_keywords.list)
};
84
/*
 * Registers the filter keyword list <kwl> as a list of valid keywords for next
 * parsing sessions. The list is appended to the global <flt_keywords> head and
 * must therefore stay allocated for the whole process lifetime.
 */
void
flt_register_keywords(struct flt_kw_list *kwl)
{
	LIST_ADDQ(&flt_keywords.list, &kwl->list);
}
94
95/*
96 * Returns a pointer to the filter keyword <kw>, or NULL if not found. If the
97 * keyword is found with a NULL ->parse() function, then an attempt is made to
98 * find one with a valid ->parse() function. This way it is possible to declare
99 * platform-dependant, known keywords as NULL, then only declare them as valid
100 * if some options are met. Note that if the requested keyword contains an
101 * opening parenthesis, everything from this point is ignored.
102 */
103struct flt_kw *
104flt_find_kw(const char *kw)
105{
106 int index;
107 const char *kwend;
108 struct flt_kw_list *kwl;
109 struct flt_kw *ret = NULL;
110
111 kwend = strchr(kw, '(');
112 if (!kwend)
113 kwend = kw + strlen(kw);
114
115 list_for_each_entry(kwl, &flt_keywords.list, list) {
116 for (index = 0; kwl->kw[index].kw != NULL; index++) {
117 if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
118 kwl->kw[index].kw[kwend-kw] == 0) {
119 if (kwl->kw[index].parse)
120 return &kwl->kw[index]; /* found it !*/
121 else
122 ret = &kwl->kw[index]; /* may be OK */
123 }
124 }
125 }
126 return ret;
127}
128
129/*
130 * Dumps all registered "filter" keywords to the <out> string pointer. The
131 * unsupported keywords are only dumped if their supported form was not found.
132 */
133void
134flt_dump_kws(char **out)
135{
136 struct flt_kw_list *kwl;
137 int index;
138
139 *out = NULL;
140 list_for_each_entry(kwl, &flt_keywords.list, list) {
141 for (index = 0; kwl->kw[index].kw != NULL; index++) {
142 if (kwl->kw[index].parse ||
143 flt_find_kw(kwl->kw[index].kw) == &kwl->kw[index]) {
144 memprintf(out, "%s[%4s] %s%s\n", *out ? *out : "",
145 kwl->scope,
146 kwl->kw[index].kw,
147 kwl->kw[index].parse ? "" : " (not supported)");
148 }
149 }
150 }
151}
152
153/*
154 * Parses the "filter" keyword. All keywords must be handled by filters
155 * themselves
156 */
157static int
158parse_filter(char **args, int section_type, struct proxy *curpx,
159 struct proxy *defpx, const char *file, int line, char **err)
160{
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100161 struct flt_conf *fconf = NULL;
Christopher Fauletd7c91962015-04-30 11:48:27 +0200162
163 /* Filter cannot be defined on a default proxy */
164 if (curpx == defpx) {
165 memprintf(err, "parsing [%s:%d] : %s is only allowed in a 'default' section.",
166 file, line, args[0]);
167 return -1;
168 }
169 if (!strcmp(args[0], "filter")) {
170 struct flt_kw *kw;
171 int cur_arg;
172
173 if (!*args[1]) {
174 memprintf(err,
175 "parsing [%s:%d] : missing argument for '%s' in %s '%s'.",
176 file, line, args[0], proxy_type_str(curpx), curpx->id);
177 goto error;
178 }
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100179 fconf = calloc(1, sizeof(*fconf));
180 if (!fconf) {
Christopher Fauletd7c91962015-04-30 11:48:27 +0200181 memprintf(err, "'%s' : out of memory", args[0]);
182 goto error;
183 }
Christopher Fauletd7c91962015-04-30 11:48:27 +0200184
185 cur_arg = 1;
186 kw = flt_find_kw(args[cur_arg]);
187 if (kw) {
188 if (!kw->parse) {
189 memprintf(err, "parsing [%s:%d] : '%s' : "
190 "'%s' option is not implemented in this version (check build options).",
191 file, line, args[0], args[cur_arg]);
192 goto error;
193 }
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100194 if (kw->parse(args, &cur_arg, curpx, fconf, err) != 0) {
Christopher Fauletd7c91962015-04-30 11:48:27 +0200195 if (err && *err)
196 memprintf(err, "'%s' : '%s'",
197 args[0], *err);
198 else
199 memprintf(err, "'%s' : error encountered while processing '%s'",
200 args[0], args[cur_arg]);
201 goto error;
202 }
203 }
204 else {
205 flt_dump_kws(err);
206 indent_msg(err, 4);
207 memprintf(err, "'%s' : unknown keyword '%s'.%s%s",
208 args[0], args[cur_arg],
209 err && *err ? " Registered keywords :" : "", err && *err ? *err : "");
210 goto error;
211 }
212 if (*args[cur_arg]) {
213 memprintf(err, "'%s %s' : unknown keyword '%s'.",
214 args[0], args[1], args[cur_arg]);
215 goto error;
216 }
217
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100218 LIST_ADDQ(&curpx->filter_configs, &fconf->list);
Christopher Fauletd7c91962015-04-30 11:48:27 +0200219 }
220 return 0;
221
222 error:
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100223 free(fconf);
Christopher Fauletd7c91962015-04-30 11:48:27 +0200224 return -1;
225
226
227}
228
229/*
230 * Calls 'init' callback for all filters attached to a proxy. This happens after
231 * the configuration parsing. Filters can finish to fill their config. Returns
232 * (ERR_ALERT|ERR_FATAL) if an error occurs, 0 otherwise.
233 */
234int
235flt_init(struct proxy *proxy)
236{
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100237 struct flt_conf *fconf;
Christopher Fauletd7c91962015-04-30 11:48:27 +0200238
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100239 list_for_each_entry(fconf, &proxy->filter_configs, list) {
240 if (fconf->ops->init && fconf->ops->init(proxy, fconf) < 0)
Christopher Fauletd7c91962015-04-30 11:48:27 +0200241 return ERR_ALERT|ERR_FATAL;
242 }
243 return 0;
244}
245
246/*
247 * Calls 'check' callback for all filters attached to a proxy. This happens
248 * after the configuration parsing but before filters initialization. Returns
249 * the number of encountered errors.
250 */
251int
252flt_check(struct proxy *proxy)
253{
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100254 struct flt_conf *fconf;
255 int err = 0;
Christopher Fauletd7c91962015-04-30 11:48:27 +0200256
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100257 list_for_each_entry(fconf, &proxy->filter_configs, list) {
258 if (fconf->ops->check)
259 err += fconf->ops->check(proxy, fconf);
Christopher Fauletd7c91962015-04-30 11:48:27 +0200260 }
Christopher Faulet92d36382015-11-05 13:35:03 +0100261 err += check_legacy_http_comp_flt(proxy);
Christopher Fauletd7c91962015-04-30 11:48:27 +0200262 return err;
263}
264
/*
 * Calls 'deinit' callback for all filters attached to a proxy. This happens when
 * HAProxy is stopped. Each filter config is unlinked from the proxy's list and
 * freed, so the list is empty on return.
 */
void
flt_deinit(struct proxy *proxy)
{
	struct flt_conf *fconf, *back;

	/* safe iteration: the current node is freed inside the loop */
	list_for_each_entry_safe(fconf, back, &proxy->filter_configs, list) {
		if (fconf->ops->deinit)
			fconf->ops->deinit(proxy, fconf);
		LIST_DEL(&fconf->list);
		free(fconf);
	}
}
281
Christopher Faulet92d36382015-11-05 13:35:03 +0100282/* Attaches a filter to a stream. Returns -1 if an error occurs, 0 otherwise. */
283static int
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100284flt_stream_add_filter(struct stream *s, struct flt_conf *fconf, unsigned int flags)
Christopher Faulet92d36382015-11-05 13:35:03 +0100285{
286 struct filter *f = pool_alloc2(pool2_filter);
287 if (!f) /* not enough memory */
288 return -1;
289 memset(f, 0, sizeof(*f));
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100290 f->config = fconf;
Christopher Fauletda02e172015-12-04 09:25:05 +0100291 f->flags |= flags;
Christopher Fauletfcf035c2015-12-03 11:48:03 +0100292 LIST_ADDQ(&strm_flt(s)->filters, &f->list);
Christopher Fauletda02e172015-12-04 09:25:05 +0100293 strm_flt(s)->flags |= STRM_FLT_FL_HAS_FILTERS;
Christopher Faulet92d36382015-11-05 13:35:03 +0100294 return 0;
295}
296
297/*
298 * Called when a stream is created. It attaches all frontend filters to the
299 * stream. Returns -1 if an error occurs, 0 otherwise.
300 */
301int
302flt_stream_init(struct stream *s)
303{
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100304 struct flt_conf *fconf;
Christopher Faulet92d36382015-11-05 13:35:03 +0100305
Christopher Fauletfcf035c2015-12-03 11:48:03 +0100306 memset(strm_flt(s), 0, sizeof(*strm_flt(s)));
307 LIST_INIT(&strm_flt(s)->filters);
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100308 list_for_each_entry(fconf, &strm_fe(s)->filter_configs, list) {
309 if (flt_stream_add_filter(s, fconf, 0) < 0)
Christopher Faulet92d36382015-11-05 13:35:03 +0100310 return -1;
311 }
312 return 0;
313}
314
315/*
316 * Called when a stream is closed or when analyze ends (For an HTTP stream, this
317 * happens after each request/response exchange). When analyze ends, backend
318 * filters are removed. When the stream is closed, all filters attached to the
319 * stream are removed.
320 */
321void
322flt_stream_release(struct stream *s, int only_backend)
323{
324 struct filter *filter, *back;
325
Christopher Fauletfcf035c2015-12-03 11:48:03 +0100326 list_for_each_entry_safe(filter, back, &strm_flt(s)->filters, list) {
Christopher Fauletda02e172015-12-04 09:25:05 +0100327 if (!only_backend || (filter->flags & FLT_FL_IS_BACKEND_FILTER)) {
Christopher Faulet92d36382015-11-05 13:35:03 +0100328 LIST_DEL(&filter->list);
329 pool_free2(pool2_filter, filter);
330 }
331 }
Christopher Fauletfcf035c2015-12-03 11:48:03 +0100332 if (LIST_ISEMPTY(&strm_flt(s)->filters))
Christopher Fauletda02e172015-12-04 09:25:05 +0100333 strm_flt(s)->flags &= ~STRM_FLT_FL_HAS_FILTERS;
Christopher Faulet92d36382015-11-05 13:35:03 +0100334}
335
Christopher Fauletd7c91962015-04-30 11:48:27 +0200336/*
337 * Calls 'stream_start' for all filters attached to a stream. This happens when
338 * the stream is created, just after calling flt_stream_init
339 * function. Returns -1 if an error occurs, 0 otherwise.
340 */
341int
342flt_stream_start(struct stream *s)
343{
344 struct filter *filter;
345
Christopher Fauletfcf035c2015-12-03 11:48:03 +0100346 list_for_each_entry(filter, &strm_flt(s)->filters, list) {
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100347 if (FLT_OPS(filter)->stream_start && FLT_OPS(filter)->stream_start(s, filter) < 0)
Christopher Fauletd7c91962015-04-30 11:48:27 +0200348 return -1;
349 }
350 return 0;
351}
352
353/*
354 * Calls 'stream_stop' for all filters attached to a stream. This happens when
355 * the stream is stopped, just before calling flt_stream_release function.
356 */
357void
358flt_stream_stop(struct stream *s)
359{
360 struct filter *filter;
361
Christopher Fauletfcf035c2015-12-03 11:48:03 +0100362 list_for_each_entry(filter, &strm_flt(s)->filters, list) {
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100363 if (FLT_OPS(filter)->stream_stop)
364 FLT_OPS(filter)->stream_stop(s, filter);
Christopher Fauletd7c91962015-04-30 11:48:27 +0200365 }
366}
367
Christopher Faulet92d36382015-11-05 13:35:03 +0100368/*
369 * Called when a backend is set for a stream. If the frontend and the backend
370 * are the same, this function does nothing. Else it attaches all backend
371 * filters to the stream. Returns -1 if an error occurs, 0 otherwise.
372 */
373int
374flt_set_stream_backend(struct stream *s, struct proxy *be)
375{
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100376 struct flt_conf *fconf;
Christopher Faulet92d36382015-11-05 13:35:03 +0100377
378 if (strm_fe(s) == be)
379 return 0;
380
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100381 list_for_each_entry(fconf, &be->filter_configs, list) {
382 if (flt_stream_add_filter(s, fconf, FLT_FL_IS_BACKEND_FILTER) < 0)
Christopher Faulet92d36382015-11-05 13:35:03 +0100383 return -1;
384 }
385 return 0;
386}
387
/*
 * Calls 'http_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available (excluding chunks
 * envelope for chunked messages) in the AN_REQ_HTTP_XFER_BODY and
 * AN_RES_HTTP_XFER_BODY analyzers. It takes care to update the next offset of
 * filters and adjusts available data to be sure that a filter cannot parse more
 * data than its predecessors. A filter can choose to not consume all available
 * data. Returns -1 if an error occurs, the number of consumed bytes otherwise.
 */
int
flt_http_data(struct stream *s, struct http_msg *msg)
{
	struct filter *filter;
	struct buffer *buf = msg->chn->buf;
	unsigned int buf_i;
	int ret = 0;

	/* Save buffer state: buf->i is temporarily shrunk below to bound each
	 * successive filter, and restored before returning. */
	buf_i = buf->i;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;

		/* If the HTTP parser is ahead, we update the next offset of the
		 * current filter. This happens for chunked messages, at the
		 * beginning of a new chunk. */
		nxt = &FLT_NXT(filter, msg->chn);
		if (msg->next > *nxt)
			*nxt = msg->next;

		if (FLT_OPS(filter)->http_data) {
			ret = FLT_OPS(filter)->http_data(s, filter, msg);
			if (ret < 0)
				break;

			/* Update the next offset of the current filter */
			*nxt += ret;

			/* And set this value as the bound for the next
			 * filter. It will not able to parse more data than this
			 * one. */
			buf->i = *nxt;
		}
		else {
			/* Consume all available data and update the next offset
			 * of the current filter. buf->i is untouched here. */
			ret = MIN(msg->chunk_len + msg->next, buf->i) - *nxt;
			*nxt += ret;
		}
	}

	/* Restore the original buffer state */
	buf->i = buf_i;

	/* <ret> is the last filter's consumption (or -1 on error) */
	return ret;
}
448
/*
 * Calls 'http_chunk_trailers' callback for all "data" filters attached to a
 * stream. This function is called for chunked messages only when a part of the
 * trailers was parsed in the AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY
 * analyzers. Filters can know how much data were parsed by the HTTP parsing
 * until the last call with the msg->sol value. Returns a negative value if an
 * error occurs, any other value otherwise.
 */
int
flt_http_chunk_trailers(struct stream *s, struct http_msg *msg)
{
	struct filter *filter;
	int ret = 1;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;

		/* Be sure to set the next offset of the filter at the right
		 * place. This is really useful when the first part of the
		 * trailers was parsed. */
		nxt = &FLT_NXT(filter, msg->chn);
		*nxt = msg->next;

		if (FLT_OPS(filter)->http_chunk_trailers) {
			ret = FLT_OPS(filter)->http_chunk_trailers(s, filter, msg);
			if (ret < 0)
				break;
		}
		/* Update the next offset of the current filter. Here all data
		 * are always consumed. */
		*nxt += msg->sol;
	}
	return ret;
}
487
/*
 * Calls 'http_end' callback for all filters attached to a stream. All filters
 * are called here, but only if there is at least one "data" filter. This
 * functions is called when all data were parsed and forwarded. 'http_end'
 * callback is resumable, so this function returns a negative value if an error
 * occurs, 0 if it needs to wait for some reason, any other value otherwise.
 */
int
flt_http_end(struct stream *s, struct http_msg *msg)
{
	int ret = 1;

	/* resumable loop: on ret <= 0 the current filter is saved and the loop
	 * restarts from it on the next call (see RESUME_FILTER_LOOP) */
	RESUME_FILTER_LOOP(s, msg->chn) {
		if (FLT_OPS(filter)->http_end) {
			ret = FLT_OPS(filter)->http_end(s, filter, msg);
			if (ret <= 0)
				BREAK_EXECUTION(s, msg->chn, end);
		}
	} RESUME_FILTER_END;
end:
	return ret;
}
510
511/*
512 * Calls 'http_reset' callback for all filters attached to a stream. This
513 * happens when a 100-continue response is received.
514 */
515void
516flt_http_reset(struct stream *s, struct http_msg *msg)
517{
518 struct filter *filter;
519
Christopher Fauletfcf035c2015-12-03 11:48:03 +0100520 list_for_each_entry(filter, &strm_flt(s)->filters, list) {
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100521 if (FLT_OPS(filter)->http_reset)
522 FLT_OPS(filter)->http_reset(s, filter, msg);
Christopher Fauletd7c91962015-04-30 11:48:27 +0200523 }
524}
525
526/*
527 * Calls 'http_reply' callback for all filters attached to a stream when HA
528 * decides to stop the HTTP message processing.
529 */
530void
531flt_http_reply(struct stream *s, short status, const struct chunk *msg)
532{
533 struct filter *filter;
534
Christopher Fauletfcf035c2015-12-03 11:48:03 +0100535 list_for_each_entry(filter, &strm_flt(s)->filters, list) {
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100536 if (FLT_OPS(filter)->http_reply)
537 FLT_OPS(filter)->http_reply(s, filter, status, msg);
Christopher Fauletd7c91962015-04-30 11:48:27 +0200538 }
539}
540
/*
 * Calls 'http_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded in the
 * AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY analyzers. It takes care to
 * update the forward offset of filters and adjusts "forwardable" data to be
 * sure that a filter cannot forward more data than its predecessors. A filter
 * can choose to not forward all parsed data. Returns a negative value if an
 * error occurs, else the number of forwarded bytes.
 */
int
flt_http_forward_data(struct stream *s, struct http_msg *msg, unsigned int len)
{
	struct filter *filter;
	int ret = len;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt, *fwd;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;

		/* If the HTTP parser is ahead, we update the next offset of the
		 * current filter. This happens for chunked messages, when the
		 * chunk envelope is parsed. */
		nxt = &FLT_NXT(filter, msg->chn);
		fwd = &FLT_FWD(filter, msg->chn);
		if (msg->next > *nxt)
			*nxt = msg->next;

		if (FLT_OPS(filter)->http_forward_data) {
			/* Remove bytes that the current filter considered as
			 * forwarded */
			ret = FLT_OPS(filter)->http_forward_data(s, filter, msg, ret - *fwd);
			if (ret < 0)
				goto end;
		}

		/* Adjust bytes that the current filter considers as
		 * forwarded */
		*fwd += ret;

		/* And set this value as the bound for the next filter. It will
		 * not able to forward more data than the current one. */
		ret = *fwd;
	}

	if (!ret)
		goto end;

	/* Finally, adjust filters offsets by removing data that HAProxy will
	 * forward. */
	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		if (!IS_DATA_FILTER(filter, msg->chn))
			continue;
		FLT_NXT(filter, msg->chn) -= ret;
		FLT_FWD(filter, msg->chn) -= ret;
	}
 end:
	return ret;
}
602
/*
 * Calls 'channel_start_analyze' callback for all filters attached to a
 * stream. This function is called when we start to analyze a request or a
 * response. For frontend filters, it is called before all other analyzers. For
 * backend ones, it is called before all backend
 * analyzers. 'channel_start_analyze' callback is resumable, so this function
 * returns 0 if an error occurs or if it needs to wait, any other value
 * otherwise.
 */
int
flt_start_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	/* If this function is called, this means there is at least one filter,
	 * so we do not need to check the filter list's emptiness. */

	RESUME_FILTER_LOOP(s, chn) {
		/* in the backend phase, only backend filters are started */
		if (an_bit == AN_FLT_START_BE && !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
			continue;

		/* reset per-channel parse/forward offsets for this filter */
		FLT_NXT(filter, chn) = 0;
		FLT_FWD(filter, chn) = 0;

		if (FLT_OPS(filter)->channel_start_analyze) {
			ret = FLT_OPS(filter)->channel_start_analyze(s, filter, chn);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, end);
		}
	} RESUME_FILTER_END;

 end:
	return handle_analyzer_result(s, chn, an_bit, ret);
}
637
/*
 * Calls 'channel_analyze' callback for all filters attached to a stream. This
 * function is called before each analyzer attached to a channel, expects
 * analyzers responsible for data sending. 'channel_analyze' callback is
 * resumable, so this function returns 0 if an error occurs or if it needs to
 * wait, any other value otherwise.
 */
int
flt_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	RESUME_FILTER_LOOP(s, chn) {
		if (FLT_OPS(filter)->channel_analyze) {
			ret = FLT_OPS(filter)->channel_analyze(s, filter, chn, an_bit);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, check_result);
		}
	} RESUME_FILTER_END;

 check_result:
	/* NOTE(review): an_bit is deliberately passed as 0 here, unlike the
	 * other callers — presumably so handle_analyzer_result does not clear
	 * the analyzer bit; confirm against its definition (not visible in
	 * this chunk). */
	return handle_analyzer_result(s, chn, 0, ret);
}
661
/*
 * This function does the same as the previous one, but for the
 * AN_FLT_HTTP_HDRS analyzer. The difference is what is done when all filters
 * have been called. Returns 0 if an error occurs or if it needs to wait, any
 * other value otherwise.
 */
int
flt_analyze_http_headers(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	struct filter *filter;
	int ret = 1;

	RESUME_FILTER_LOOP(s, chn) {
		if (FLT_OPS(filter)->channel_analyze) {
			ret = FLT_OPS(filter)->channel_analyze(s, filter, chn, an_bit);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, check_result);
		}
	} RESUME_FILTER_END;

	/* We increase next offset of all "data" filters after all processing on
	 * headers because any filter can alter them. So the definitive size of
	 * headers (msg->sov) is only known when all filters have been
	 * called. */
	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		/* Handle "data" filters only */
		if (!IS_DATA_FILTER(filter, chn))
			continue;

		FLT_NXT(filter, chn) = ((chn->flags & CF_ISRESP)
					? s->txn->rsp.sov : s->txn->req.sov);
	}

 check_result:
	return handle_analyzer_result(s, chn, an_bit, ret);
}
698
/*
 * Calls 'channel_end_analyze' callback for all filters attached to a
 * stream. This function is called when we stop to analyze a request or a
 * response. It is called after all other analyzers. 'channel_end_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_end_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
	int ret = 1;

	RESUME_FILTER_LOOP(s, chn) {
		/* reset this filter's offsets and drop its "data" registration
		 * for this channel before notifying it */
		FLT_NXT(filter, chn) = 0;
		FLT_FWD(filter, chn) = 0;
		unregister_data_filter(s, chn, filter);

		if (FLT_OPS(filter)->channel_end_analyze) {
			ret = FLT_OPS(filter)->channel_end_analyze(s, filter, chn);
			if (ret <= 0)
				BREAK_EXECUTION(s, chn, end);
		}
	} RESUME_FILTER_END;

end:
	ret = handle_analyzer_result(s, chn, an_bit, ret);

	/* Check if 'channel_end_analyze' callback has been called for the
	 * request and the response. */
	if (!(s->req.analysers & AN_FLT_END) && !(s->res.analysers & AN_FLT_END)) {
		/* When we are waiting for a new request, we must reset stream
		 * analyzers. The input of the request channel must not be
		 * closed, else it is useless to wait. */
		if (s->txn && (s->txn->flags & TX_WAIT_NEXT_RQ) && !channel_input_closed(&s->req)) {
			s->req.analysers = strm_li(s) ? strm_li(s)->analysers : 0;
			s->res.analysers = 0;
		}

		/* Remove backend filters from the list */
		flt_stream_release(s, 1);
	}
	else if (ret) {
		/* Analyzer ends only for one channel. So wake up the stream to
		 * be sure to process it for the other side as soon as
		 * possible. */
		task_wakeup(s->task, TASK_WOKEN_MSG);
	}
	return ret;
}
748
749
/*
 * Calls 'tcp_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available. It takes care to update
 * the next offset of filters and adjusts available data to be sure that a
 * filter cannot parse more data than its predecessors. A filter can choose to
 * not consume all available data. Returns -1 if an error occurs, the number of
 * consumed bytes otherwise.
 */
static int
flt_data(struct stream *s, struct channel *chn)
{
	struct filter *filter;
	struct buffer *buf = chn->buf;
	unsigned int buf_i;
	int ret = 0;

	/* Save buffer state: buf->i is temporarily shrunk below to bound each
	 * successive filter, and restored before returning. */
	buf_i = buf->i;

	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
		unsigned int *nxt;

		/* Call "data" filters only */
		if (!IS_DATA_FILTER(filter, chn))
			continue;

		nxt = &FLT_NXT(filter, chn);
		if (FLT_OPS(filter)->tcp_data) {
			ret = FLT_OPS(filter)->tcp_data(s, filter, chn);
			if (ret < 0)
				break;

			/* Increase next offset of the current filter */
			*nxt += ret;

			/* And set this value as the bound for the next
			 * filter. It will not able to parse more data than the
			 * current one. */
			buf->i = *nxt;
		}
		else {
			/* Consume all available data */
			*nxt = buf->i;
		}

		/* Update <ret> value to be sure to have the last one when we
		 * exit from the loop. This value will be used to know how much
		 * data are "forwardable" */
		ret = *nxt;
	}

	/* Restore the original buffer state */
	chn->buf->i = buf_i;

	return ret;
}
806
807/*
808 * Calls 'tcp_forward_data' callback for all "data" filters attached to a
809 * stream. This function is called when some data can be forwarded. It takes
810 * care to update the forward offset of filters and adjusts "forwardable" data
811 * to be sure that a filter cannot forward more data than its predecessors. A
812 * filter can choose to not forward all parsed data. Returns a negative value if
813 * an error occurs, else the number of forwarded bytes.
814 */
815static int
816flt_forward_data(struct stream *s, struct channel *chn, unsigned int len)
817{
Christopher Fauletda02e172015-12-04 09:25:05 +0100818 struct filter *filter;
Christopher Fauletd7c91962015-04-30 11:48:27 +0200819 int ret = len;
820
Christopher Fauletfcf035c2015-12-03 11:48:03 +0100821 list_for_each_entry(filter, &strm_flt(s)->filters, list) {
Christopher Fauletda02e172015-12-04 09:25:05 +0100822 unsigned int *fwd;
823
824 /* Call "data" filters only */
825 if (!IS_DATA_FILTER(filter, chn))
826 continue;
827
828 fwd = &FLT_FWD(filter, chn);
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100829 if (FLT_OPS(filter)->tcp_forward_data) {
Christopher Fauletd7c91962015-04-30 11:48:27 +0200830 /* Remove bytes that the current filter considered as
831 * forwarded */
Christopher Faulet443ea1a2016-02-04 13:40:26 +0100832 ret = FLT_OPS(filter)->tcp_forward_data(s, filter, chn, ret - *fwd);
Christopher Fauletd7c91962015-04-30 11:48:27 +0200833 if (ret < 0)
834 goto end;
835 }
836
Christopher Fauletda02e172015-12-04 09:25:05 +0100837 /* Adjust bytes that the current filter considers as
Christopher Fauletd7c91962015-04-30 11:48:27 +0200838 * forwarded */
Christopher Fauletda02e172015-12-04 09:25:05 +0100839 *fwd += ret;
Christopher Fauletd7c91962015-04-30 11:48:27 +0200840
841 /* And set this value as the bound for the next filter. It will
842 * not able to forward more data than the current one. */
Christopher Fauletda02e172015-12-04 09:25:05 +0100843 ret = *fwd;
Christopher Fauletd7c91962015-04-30 11:48:27 +0200844 }
845
846 if (!ret)
847 goto end;
848
Christopher Fauletda02e172015-12-04 09:25:05 +0100849 /* Finally, adjust filters offsets by removing data that HAProxy will
850 * forward. */
Christopher Fauletfcf035c2015-12-03 11:48:03 +0100851 list_for_each_entry(filter, &strm_flt(s)->filters, list) {
Christopher Fauletda02e172015-12-04 09:25:05 +0100852 if (!IS_DATA_FILTER(filter, chn))
853 continue;
Christopher Fauletd7c91962015-04-30 11:48:27 +0200854 FLT_NXT(filter, chn) -= ret;
855 FLT_FWD(filter, chn) -= ret;
856 }
857
Christopher Fauletd7c91962015-04-30 11:48:27 +0200858 end:
859 return ret;
860}
861
862/*
863 * Called when TCP data must be filtered on a channel. This function is the
864 * AN_FLT_XFER_DATA analyzer. When called, it is responsible to forward data
865 * when the proxy is not in http mode. Behind the scene, it calls consecutively
866 * 'tcp_data' and 'tcp_forward_data' callbacks for all "data" filters attached
867 * to a stream. Returns 0 if an error occurs or if it needs to wait, any other
868 * value otherwise.
869 */
870int
871flt_xfer_data(struct stream *s, struct channel *chn, unsigned int an_bit)
872{
873 int ret = 1;
874
Christopher Fauletda02e172015-12-04 09:25:05 +0100875 /* If there is no "data" filters, we do nothing */
876 if (!HAS_DATA_FILTERS(s, chn))
877 goto end;
Christopher Fauletd7c91962015-04-30 11:48:27 +0200878
879 /* Be sure that the output is still opened. Else we stop the data
880 * filtering. */
881 if ((chn->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
882 ((chn->flags & CF_SHUTW) && (chn->to_forward || chn->buf->o)))
883 goto end;
884
885 /* Let all "data" filters parsing incoming data */
886 ret = flt_data(s, chn);
887 if (ret < 0)
888 goto end;
889
890 /* And forward them */
891 ret = flt_forward_data(s, chn, ret);
892 if (ret < 0)
893 goto end;
894
Christopher Fauletda02e172015-12-04 09:25:05 +0100895 /* Consume data that all filters consider as forwarded. */
896 b_adv(chn->buf, ret);
897
Christopher Fauletd7c91962015-04-30 11:48:27 +0200898 /* Stop waiting data if the input in closed and no data is pending or if
899 * the output is closed. */
900 if ((chn->flags & CF_SHUTW) ||
901 ((chn->flags & CF_SHUTR) && !buffer_pending(chn->buf))) {
902 ret = 1;
903 goto end;
904 }
905
906 /* Wait for data */
907 return 0;
908 end:
909 /* Terminate the data filtering. If <ret> is negative, an error was
910 * encountered during the filtering. */
911 return handle_analyzer_result(s, chn, an_bit, ret);
912}
913
914/*
915 * Handles result of filter's analyzers. It returns 0 if an error occurs or if
916 * it needs to wait, any other value otherwise.
917 */
918static int
919handle_analyzer_result(struct stream *s, struct channel *chn,
920 unsigned int an_bit, int ret)
921{
922 int finst;
923
924 if (ret < 0)
925 goto return_bad_req;
926 else if (!ret)
927 goto wait;
928
929 /* End of job, return OK */
930 if (an_bit) {
931 chn->analysers &= ~an_bit;
932 chn->analyse_exp = TICK_ETERNITY;
933 }
934 return 1;
935
936 return_bad_req:
937 /* An error occurs */
938 channel_abort(&s->req);
939 channel_abort(&s->res);
940
941 if (!(chn->flags & CF_ISRESP)) {
942 s->req.analysers &= AN_FLT_END;
943 finst = SF_FINST_R;
944 /* FIXME: incr counters */
945 }
946 else {
947 s->res.analysers &= AN_FLT_END;
948 finst = SF_FINST_H;
949 /* FIXME: incr counters */
950 }
951
952 if (s->txn) {
953 /* Do not do that when we are waiting for the next request */
954 if (s->txn->status)
955 http_reply_and_close(s, s->txn->status, NULL);
956 else {
957 s->txn->status = 400;
958 http_reply_and_close(s, 400, http_error_message(s, HTTP_ERR_400));
959 }
960 }
961
962 if (!(s->flags & SF_ERR_MASK))
963 s->flags |= SF_ERR_PRXCOND;
964 if (!(s->flags & SF_FINST_MASK))
965 s->flags |= finst;
966 return 0;
967
968 wait:
969 if (!(chn->flags & CF_ISRESP))
970 channel_dont_connect(chn);
971 return 0;
972}
973
974
975/* Note: must not be declared <const> as its list will be overwritten.
976 * Please take care of keeping this list alphabetically sorted, doing so helps
977 * all code contributors.
978 * Optional keywords are also declared with a NULL ->parse() function so that
979 * the config parser can report an appropriate error when a known keyword was
980 * not enabled. */
981static struct cfg_kw_list cfg_kws = {ILH, {
982 { CFG_LISTEN, "filter", parse_filter },
983 { 0, NULL, NULL },
984 }
985};
986
987__attribute__((constructor))
988static void
989__filters_init(void)
990{
991 pool2_filter = create_pool("filter", sizeof(struct filter), MEM_F_SHARED);
992 cfg_register_keywords(&cfg_kws);
993}
994
995__attribute__((destructor))
996static void
997__filters_deinit(void)
998{
999 pool_destroy2(pool2_filter);
1000}
1001
1002/*
1003 * Local variables:
1004 * c-indent-level: 8
1005 * c-basic-offset: 8
1006 * End:
1007 */