/*
 * Stream filters related variables and functions.
 *
 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/buffer.h>
#include <common/debug.h>
#include <common/cfgparse.h>
#include <common/compat.h>
#include <common/config.h>
#include <common/errors.h>
#include <common/namespace.h>
#include <common/standard.h>

#include <types/filters.h>
#include <types/proto_http.h>

#include <proto/compression.h>
#include <proto/filters.h>
#include <proto/flt_http_comp.h>
#include <proto/proto_http.h>
#include <proto/stream.h>
#include <proto/stream_interface.h>

/* Pool used to allocate filters */
struct pool_head *pool2_filter = NULL;

static int handle_analyzer_result(struct stream *s, struct channel *chn, unsigned int an_bit, int ret);

/* - RESUME_FILTER_LOOP and RESUME_FILTER_END must always be used together.
 *   The first one begins a loop and the second one ends it.
 *
 * - BREAK_EXECUTION must be used to break the loop and set the filter from
 *   which to resume the next time.
 *
 *   Here is an example:
 *
 *    RESUME_FILTER_LOOP(stream, channel) {
 *        ...
 *        if (cond)
 *            BREAK_EXECUTION(stream, channel, label);
 *        ...
 *    } RESUME_FILTER_END;
 *    ...
 * label:
 *    ...
 *
 */
#define RESUME_FILTER_LOOP(strm, chn)                                   \
        do {                                                            \
                struct filter *filter;                                  \
                                                                        \
                if (strm_flt(strm)->current[CHN_IDX(chn)]) {            \
                        filter = strm_flt(strm)->current[CHN_IDX(chn)]; \
                        strm_flt(strm)->current[CHN_IDX(chn)] = NULL;   \
                        goto resume_execution;                          \
                }                                                       \
                                                                        \
                list_for_each_entry(filter, &strm_flt(strm)->filters, list) { \
                resume_execution:

#define RESUME_FILTER_END                                               \
                }                                                       \
        } while(0)

#define BREAK_EXECUTION(strm, chn, label)                               \
        do {                                                            \
                strm_flt(strm)->current[CHN_IDX(chn)] = filter;         \
                goto label;                                             \
        } while (0)


/* List head of all known filter keywords */
static struct flt_kw_list flt_keywords = {
        .list = LIST_HEAD_INIT(flt_keywords.list)
};

/*
 * Registers the filter keyword list <kwl> as a list of valid keywords for next
 * parsing sessions.
 */
void
flt_register_keywords(struct flt_kw_list *kwl)
{
        LIST_ADDQ(&flt_keywords.list, &kwl->list);
}
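
/*
 * For illustration only: a filter module would typically declare its keywords
 * and register them from a constructor, along the lines of the sketch below.
 * The names (my_flt_kws, parse_my_filter_cfg, "MYSCOPE") are hypothetical and
 * not part of this file; the parse callback must follow the flt_kw prototype
 * used by parse_filter() below.
 *
 *    static struct flt_kw_list my_flt_kws = { "MYSCOPE", { }, {
 *            { "my-filter", parse_my_filter_cfg, NULL },
 *            { NULL, NULL, NULL },
 *        }
 *    };
 *
 *    __attribute__((constructor))
 *    static void __my_filter_init(void)
 *    {
 *            flt_register_keywords(&my_flt_kws);
 *    }
 */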

/*
 * Returns a pointer to the filter keyword <kw>, or NULL if not found. If the
 * keyword is found with a NULL ->parse() function, then an attempt is made to
 * find one with a valid ->parse() function. This way it is possible to declare
 * platform-dependent, known keywords as NULL, then only declare them as valid
 * if some options are met. Note that if the requested keyword contains an
 * opening parenthesis, everything from this point is ignored.
 */
struct flt_kw *
flt_find_kw(const char *kw)
{
        int index;
        const char *kwend;
        struct flt_kw_list *kwl;
        struct flt_kw *ret = NULL;

        kwend = strchr(kw, '(');
        if (!kwend)
                kwend = kw + strlen(kw);

        list_for_each_entry(kwl, &flt_keywords.list, list) {
                for (index = 0; kwl->kw[index].kw != NULL; index++) {
                        if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
                            kwl->kw[index].kw[kwend-kw] == 0) {
                                if (kwl->kw[index].parse)
                                        return &kwl->kw[index]; /* found it! */
                                else
                                        ret = &kwl->kw[index];  /* may be OK */
                        }
                }
        }
        return ret;
}

/*
 * Dumps all registered "filter" keywords to the <out> string pointer. The
 * unsupported keywords are only dumped if their supported form was not found.
 */
void
flt_dump_kws(char **out)
{
        struct flt_kw_list *kwl;
        int index;

        *out = NULL;
        list_for_each_entry(kwl, &flt_keywords.list, list) {
                for (index = 0; kwl->kw[index].kw != NULL; index++) {
                        if (kwl->kw[index].parse ||
                            flt_find_kw(kwl->kw[index].kw) == &kwl->kw[index]) {
                                memprintf(out, "%s[%4s] %s%s\n", *out ? *out : "",
                                          kwl->scope,
                                          kwl->kw[index].kw,
                                          kwl->kw[index].parse ? "" : " (not supported)");
                        }
                }
        }
}

/*
 * Lists the known filters on <out>.
 */
void
list_filters(FILE *out)
{
        char *filters, *p, *f;

        fprintf(out, "Available filters :\n");
        flt_dump_kws(&filters);
        for (p = filters; (f = strtok_r(p,"\n",&p));)
                fprintf(out, "\t%s\n", f);
        free(filters);
}
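
/*
 * For reference, the output produced by list_filters() looks roughly like the
 * sample below. The exact scopes and keywords depend on which filters were
 * compiled in, so this is only illustrative:
 *
 *    Available filters :
 *            [COMP] compression
 *            [TRACE] trace
 */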

/*
 * Parses the "filter" keyword. All keywords must be handled by filters
 * themselves
 */
static int
parse_filter(char **args, int section_type, struct proxy *curpx,
             struct proxy *defpx, const char *file, int line, char **err)
{
        struct flt_conf *fconf = NULL;

        /* Filter cannot be defined on a default proxy */
        if (curpx == defpx) {
                memprintf(err, "parsing [%s:%d] : %s is not allowed in a 'default' section.",
                          file, line, args[0]);
                return -1;
        }
        if (!strcmp(args[0], "filter")) {
                struct flt_kw *kw;
                int cur_arg;

                if (!*args[1]) {
                        memprintf(err,
                                  "parsing [%s:%d] : missing argument for '%s' in %s '%s'.",
                                  file, line, args[0], proxy_type_str(curpx), curpx->id);
                        goto error;
                }
                fconf = calloc(1, sizeof(*fconf));
                if (!fconf) {
                        memprintf(err, "'%s' : out of memory", args[0]);
                        goto error;
                }

                cur_arg = 1;
                kw = flt_find_kw(args[cur_arg]);
                if (kw) {
                        if (!kw->parse) {
                                memprintf(err, "parsing [%s:%d] : '%s' : "
                                          "'%s' option is not implemented in this version (check build options).",
                                          file, line, args[0], args[cur_arg]);
                                goto error;
                        }
                        if (kw->parse(args, &cur_arg, curpx, fconf, err, kw->private) != 0) {
                                if (err && *err)
                                        memprintf(err, "'%s' : '%s'",
                                                  args[0], *err);
                                else
                                        memprintf(err, "'%s' : error encountered while processing '%s'",
                                                  args[0], args[cur_arg]);
                                goto error;
                        }
                }
                else {
                        flt_dump_kws(err);
                        indent_msg(err, 4);
                        memprintf(err, "'%s' : unknown keyword '%s'.%s%s",
                                  args[0], args[cur_arg],
                                  err && *err ? " Registered keywords :" : "", err && *err ? *err : "");
                        goto error;
                }
                if (*args[cur_arg]) {
                        memprintf(err, "'%s %s' : unknown keyword '%s'.",
                                  args[0], args[1], args[cur_arg]);
                        goto error;
                }
                if (fconf->ops == NULL) {
                        memprintf(err, "'%s %s' : no callbacks defined.",
                                  args[0], args[1]);
                        goto error;
                }

                LIST_ADDQ(&curpx->filter_configs, &fconf->list);
        }
        return 0;

  error:
        free(fconf);
        return -1;
}
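
/*
 * As an illustration of what this parser consumes, a proxy section could
 * declare filters as below (the available keywords depend on the registered
 * filters; the "trace" and "compression" lines are only an example):
 *
 *    frontend www
 *        bind :8000
 *        filter trace name BEFORE-HTTP-COMP
 *        filter compression
 *        filter trace name AFTER-HTTP-COMP
 *        compression algo gzip
 *
 * Each "filter" line leads to one flt_conf being allocated here and queued in
 * curpx->filter_configs, in declaration order.
 */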

/*
 * Calls 'init' callback for all filters attached to a proxy. This happens after
 * the configuration parsing. Filters can finish filling their config. Returns
 * (ERR_ALERT|ERR_FATAL) if an error occurs, 0 otherwise.
 */
int
flt_init(struct proxy *proxy)
{
        struct flt_conf *fconf;

        list_for_each_entry(fconf, &proxy->filter_configs, list) {
                if (fconf->ops->init && fconf->ops->init(proxy, fconf) < 0)
                        return ERR_ALERT|ERR_FATAL;
        }
        return 0;
}

/*
 * Calls 'check' callback for all filters attached to a proxy. This happens
 * after the configuration parsing but before filter initialization. Returns
 * the number of encountered errors.
 */
int
flt_check(struct proxy *proxy)
{
        struct flt_conf *fconf;
        int err = 0;

        list_for_each_entry(fconf, &proxy->filter_configs, list) {
                if (fconf->ops->check)
                        err += fconf->ops->check(proxy, fconf);
        }
        err += check_legacy_http_comp_flt(proxy);
        return err;
}

/*
 * Calls 'deinit' callback for all filters attached to a proxy. This happens
 * when HAProxy is stopped.
 */
void
flt_deinit(struct proxy *proxy)
{
        struct flt_conf *fconf, *back;

        list_for_each_entry_safe(fconf, back, &proxy->filter_configs, list) {
                if (fconf->ops->deinit)
                        fconf->ops->deinit(proxy, fconf);
                LIST_DEL(&fconf->list);
                free(fconf);
        }
}
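
/*
 * A filter's config parser is expected to set fconf->ops to a (usually static)
 * struct flt_ops describing the callbacks it implements; all of them are
 * optional. A minimal sketch, with purely illustrative names, could be:
 *
 *    static struct flt_ops my_filter_ops = {
 *            .init   = my_flt_init,   // per-proxy setup, after config parsing
 *            .check  = my_flt_check,  // returns the number of config errors
 *            .deinit = my_flt_deinit, // per-proxy cleanup when HAProxy stops
 *    };
 *
 * The functions above would follow the prototypes used by flt_init(),
 * flt_check() and flt_deinit() in this file.
 */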

/* Attaches a filter to a stream. Returns -1 if an error occurs, 0 otherwise. */
static int
flt_stream_add_filter(struct stream *s, struct flt_conf *fconf, unsigned int flags)
{
        struct filter *f = pool_alloc2(pool2_filter);
        if (!f) /* not enough memory */
                return -1;
        memset(f, 0, sizeof(*f));
        f->config = fconf;
        f->flags |= flags;
        LIST_ADDQ(&strm_flt(s)->filters, &f->list);
        strm_flt(s)->flags |= STRM_FLT_FL_HAS_FILTERS;
        return 0;
}

/*
 * Called when a stream is created. It attaches all frontend filters to the
 * stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_init(struct stream *s)
{
        struct flt_conf *fconf;

        memset(strm_flt(s), 0, sizeof(*strm_flt(s)));
        LIST_INIT(&strm_flt(s)->filters);
        list_for_each_entry(fconf, &strm_fe(s)->filter_configs, list) {
                if (flt_stream_add_filter(s, fconf, 0) < 0)
                        return -1;
        }
        return 0;
}

/*
 * Called when a stream is closed or when analysis ends (for an HTTP stream,
 * this happens after each request/response exchange). When analysis ends,
 * backend filters are removed. When the stream is closed, all filters attached
 * to the stream are removed.
 */
void
flt_stream_release(struct stream *s, int only_backend)
{
        struct filter *filter, *back;

        list_for_each_entry_safe(filter, back, &strm_flt(s)->filters, list) {
                if (!only_backend || (filter->flags & FLT_FL_IS_BACKEND_FILTER)) {
                        LIST_DEL(&filter->list);
                        pool_free2(pool2_filter, filter);
                }
        }
        if (LIST_ISEMPTY(&strm_flt(s)->filters))
                strm_flt(s)->flags &= ~STRM_FLT_FL_HAS_FILTERS;
}

/*
 * Calls 'stream_start' for all filters attached to a stream. This happens when
 * the stream is created, just after calling the flt_stream_init
 * function. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_stream_start(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->stream_start && FLT_OPS(filter)->stream_start(s, filter) < 0)
                        return -1;
        }
        return 0;
}

/*
 * Calls 'stream_stop' for all filters attached to a stream. This happens when
 * the stream is stopped, just before calling the flt_stream_release function.
 */
void
flt_stream_stop(struct stream *s)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->stream_stop)
                        FLT_OPS(filter)->stream_stop(s, filter);
        }
}

/*
 * Called when a backend is set for a stream. If the frontend and the backend
 * are the same, this function does nothing. Else it attaches all backend
 * filters to the stream. Returns -1 if an error occurs, 0 otherwise.
 */
int
flt_set_stream_backend(struct stream *s, struct proxy *be)
{
        struct flt_conf *fconf;

        if (strm_fe(s) == be)
                return 0;

        list_for_each_entry(fconf, &be->filter_configs, list) {
                if (flt_stream_add_filter(s, fconf, FLT_FL_IS_BACKEND_FILTER) < 0)
                        return -1;
        }
        return 0;
}

/*
 * Calls 'http_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available (excluding chunks
 * envelope for chunked messages) in the AN_REQ_HTTP_XFER_BODY and
 * AN_RES_HTTP_XFER_BODY analyzers. It takes care to update the next offset of
 * filters and adjusts available data to be sure that a filter cannot parse more
 * data than its predecessors. A filter can choose to not consume all available
 * data. Returns -1 if an error occurs, the number of consumed bytes otherwise.
 */
int
flt_http_data(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;
        struct buffer *buf = msg->chn->buf;
        unsigned int buf_i;
        int ret = 0;

        /* Save buffer state */
        buf_i = buf->i;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;

                /* If the HTTP parser is ahead, we update the next offset of the
                 * current filter. This happens for chunked messages, at the
                 * beginning of a new chunk. */
                nxt = &FLT_NXT(filter, msg->chn);
                if (msg->next > *nxt)
                        *nxt = msg->next;

                if (FLT_OPS(filter)->http_data) {
                        ret = FLT_OPS(filter)->http_data(s, filter, msg);
                        if (ret < 0)
                                break;

                        /* Update the next offset of the current filter */
                        *nxt += ret;

                        /* And set this value as the bound for the next
                         * filter. It will not be able to parse more data than
                         * this one. */
                        buf->i = *nxt;
                }
                else {
                        /* Consume all available data and update the next offset
                         * of the current filter. buf->i is untouched here. */
                        ret = MIN(msg->chunk_len + msg->next, buf->i) - *nxt;
                        *nxt += ret;
                }
        }

        /* Restore the original buffer state */
        buf->i = buf_i;

        return ret;
}
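
/*
 * A hypothetical "data" filter could implement 'http_data' as sketched below
 * (names are illustrative, not part of this file). Returning less than the
 * amount of available data prevents the following filters, and HAProxy itself,
 * from going past that point until the callback is called again:
 *
 *    static int
 *    my_flt_http_data(struct stream *s, struct filter *filter,
 *                     struct http_msg *msg)
 *    {
 *            // Data available to this filter: between its next offset and the
 *            // bound set by the previous filters (msg->chn->buf->i).
 *            unsigned int avail = MIN(msg->chunk_len + msg->next,
 *                                     msg->chn->buf->i) - FLT_NXT(filter, msg->chn);
 *
 *            // A pass-through filter accepts everything it is offered.
 *            return avail;
 *    }
 */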

/*
 * Calls 'http_chunk_trailers' callback for all "data" filters attached to a
 * stream. This function is called for chunked messages only when a part of the
 * trailers was parsed in the AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY
 * analyzers. Filters can know how much of the trailers was parsed by the HTTP
 * parser, using the msg->sol value. Returns a negative value if an error
 * occurs, any other value otherwise.
 */
int
flt_http_chunk_trailers(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;
        int ret = 1;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;

                /* Be sure to set the next offset of the filter at the right
                 * place. This is really useful when the first part of the
                 * trailers was parsed. */
                nxt = &FLT_NXT(filter, msg->chn);
                *nxt = msg->next;

                if (FLT_OPS(filter)->http_chunk_trailers) {
                        ret = FLT_OPS(filter)->http_chunk_trailers(s, filter, msg);
                        if (ret < 0)
                                break;
                }
                /* Update the next offset of the current filter. Here all data
                 * are always consumed. */
                *nxt += msg->sol;
        }
        return ret;
}

/*
 * Calls 'http_end' callback for all filters attached to a stream. All filters
 * are called here, but only if there is at least one "data" filter. This
 * function is called when all data were parsed and forwarded. 'http_end'
 * callback is resumable, so this function returns a negative value if an error
 * occurs, 0 if it needs to wait for some reason, any other value otherwise.
 */
int
flt_http_end(struct stream *s, struct http_msg *msg)
{
        int ret = 1;

        RESUME_FILTER_LOOP(s, msg->chn) {
                if (FLT_OPS(filter)->http_end) {
                        ret = FLT_OPS(filter)->http_end(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, msg->chn, end);
                }
        } RESUME_FILTER_END;
end:
        return ret;
}

/*
 * Calls 'http_reset' callback for all filters attached to a stream. This
 * happens when a 100-continue response is received.
 */
void
flt_http_reset(struct stream *s, struct http_msg *msg)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->http_reset)
                        FLT_OPS(filter)->http_reset(s, filter, msg);
        }
}

/*
 * Calls 'http_reply' callback for all filters attached to a stream when HAProxy
 * decides to stop the HTTP message processing.
 */
void
flt_http_reply(struct stream *s, short status, const struct chunk *msg)
{
        struct filter *filter;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->http_reply)
                        FLT_OPS(filter)->http_reply(s, filter, status, msg);
        }
}

/*
 * Calls 'http_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded in the
 * AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY analyzers. It takes care to
 * update the forward offset of filters and adjusts "forwardable" data to be
 * sure that a filter cannot forward more data than its predecessors. A filter
 * can choose to not forward all parsed data. Returns a negative value if an
 * error occurs, else the number of forwarded bytes.
 */
int
flt_http_forward_data(struct stream *s, struct http_msg *msg, unsigned int len)
{
        struct filter *filter;
        int ret = len;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt, *fwd;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;

                /* If the HTTP parser is ahead, we update the next offset of the
                 * current filter. This happens for chunked messages, when the
                 * chunk envelope is parsed. */
                nxt = &FLT_NXT(filter, msg->chn);
                fwd = &FLT_FWD(filter, msg->chn);
                if (msg->next > *nxt)
                        *nxt = msg->next;

                if (FLT_OPS(filter)->http_forward_data) {
                        /* Remove bytes that the current filter considered as
                         * forwarded */
                        ret = FLT_OPS(filter)->http_forward_data(s, filter, msg, ret - *fwd);
                        if (ret < 0)
                                goto end;
                }

                /* Adjust bytes that the current filter considers as
                 * forwarded */
                *fwd += ret;

                /* And set this value as the bound for the next filter. It will
                 * not be able to forward more data than the current one. */
                ret = *fwd;
        }

        if (!ret)
                goto end;

        /* Finally, adjust filters offsets by removing data that HAProxy will
         * forward. */
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (!IS_DATA_FILTER(filter, msg->chn))
                        continue;
                FLT_NXT(filter, msg->chn) -= ret;
                FLT_FWD(filter, msg->chn) -= ret;
        }
 end:
        return ret;
}
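
/*
 * The matching 'http_forward_data' callback of such a hypothetical filter
 * could be as simple as the sketch below (illustrative names only). Returning
 * less than <len> holds back the remaining bytes, which will be offered again
 * on the next call:
 *
 *    static int
 *    my_flt_http_forward_data(struct stream *s, struct filter *filter,
 *                             struct http_msg *msg, unsigned int len)
 *    {
 *            // Let everything already parsed by this filter be forwarded.
 *            return len;
 *    }
 */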

/*
 * Calls 'channel_start_analyze' callback for all filters attached to a
 * stream. This function is called when we start to analyze a request or a
 * response. For frontend filters, it is called before all other analyzers. For
 * backend ones, it is called before all backend
 * analyzers. 'channel_start_analyze' callback is resumable, so this function
 * returns 0 if an error occurs or if it needs to wait, any other value
 * otherwise.
 */
int
flt_start_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* If this function is called, this means there is at least one filter,
         * so we do not need to check the filter list's emptiness. */

        RESUME_FILTER_LOOP(s, chn) {
                if (an_bit == AN_FLT_START_BE && !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
                        continue;

                FLT_NXT(filter, chn) = 0;
                FLT_FWD(filter, chn) = 0;

                if (FLT_OPS(filter)->channel_start_analyze) {
                        ret = FLT_OPS(filter)->channel_start_analyze(s, filter, chn);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, end);
                }
        } RESUME_FILTER_END;

 end:
        return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Calls 'channel_pre_analyze' callback for all filters attached to a
 * stream. This function is called BEFORE each analyzer attached to a channel,
 * except analyzers responsible for data sending. 'channel_pre_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 *
 * Note this function can be called many times for the same analyzer. In fact,
 * it is called until the analyzer finishes its processing.
 */
int
flt_pre_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        RESUME_FILTER_LOOP(s, chn) {
                if (FLT_OPS(filter)->channel_pre_analyze && (filter->pre_analyzers & an_bit)) {
                        ret = FLT_OPS(filter)->channel_pre_analyze(s, filter, chn, an_bit);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, check_result);
                }
        } RESUME_FILTER_END;

 check_result:
        return handle_analyzer_result(s, chn, 0, ret);
}

/*
 * Calls 'channel_post_analyze' callback for all filters attached to a
 * stream. This function is called AFTER each analyzer attached to a channel,
 * except analyzers responsible for data sending. 'channel_post_analyze'
 * callback is NOT resumable, so this function returns 0 if an error occurs,
 * any other value otherwise.
 *
 * Here, AFTER means when the analyzer finishes its processing.
 */
int
flt_post_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        struct filter *filter;
        int ret = 1;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (FLT_OPS(filter)->channel_post_analyze && (filter->post_analyzers & an_bit)) {
                        ret = FLT_OPS(filter)->channel_post_analyze(s, filter, chn, an_bit);
                        if (ret < 0)
                                break;
                }
        }
        return handle_analyzer_result(s, chn, 0, ret);
}

/*
 * This function is the AN_FLT_HTTP_HDRS analyzer, used to filter the HTTP
 * headers of a request or a response. Returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_analyze_http_headers(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        struct filter *filter;
        struct http_msg *msg;
        int ret = 1;

        msg = ((chn->flags & CF_ISRESP) ? &s->txn->rsp : &s->txn->req);
        RESUME_FILTER_LOOP(s, chn) {
                if (FLT_OPS(filter)->http_headers) {
                        ret = FLT_OPS(filter)->http_headers(s, filter, msg);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, check_result);
                }
        } RESUME_FILTER_END;

        /* We increase the next offset of all "data" filters after all
         * processing on headers because any filter can alter them. So the
         * definitive size of headers (msg->sov) is only known when all filters
         * have been called. */
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                /* Handle "data" filters only */
                if (!IS_DATA_FILTER(filter, chn))
                        continue;
                FLT_NXT(filter, chn) = msg->sov;
        }

 check_result:
        return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Calls 'channel_end_analyze' callback for all filters attached to a
 * stream. This function is called when we stop analyzing a request or a
 * response. It is called after all other analyzers. 'channel_end_analyze'
 * callback is resumable, so this function returns 0 if an error occurs or if it
 * needs to wait, any other value otherwise.
 */
int
flt_end_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        RESUME_FILTER_LOOP(s, chn) {
                FLT_NXT(filter, chn) = 0;
                FLT_FWD(filter, chn) = 0;
                unregister_data_filter(s, chn, filter);

                if (FLT_OPS(filter)->channel_end_analyze) {
                        ret = FLT_OPS(filter)->channel_end_analyze(s, filter, chn);
                        if (ret <= 0)
                                BREAK_EXECUTION(s, chn, end);
                }
        } RESUME_FILTER_END;

end:
        ret = handle_analyzer_result(s, chn, an_bit, ret);

        /* Check if 'channel_end_analyze' callback has been called for the
         * request and the response. */
        if (!(s->req.analysers & AN_FLT_END) && !(s->res.analysers & AN_FLT_END)) {
                /* When we are waiting for a new request, we must reset the
                 * stream analyzers. The input of the request channel must not
                 * be closed, else it is useless to wait. */
                if (s->txn && (s->txn->flags & TX_WAIT_NEXT_RQ) && !channel_input_closed(&s->req)) {
                        s->req.analysers = strm_li(s) ? strm_li(s)->analysers : 0;
                        s->res.analysers = 0;
                }

                /* Remove backend filters from the list */
                flt_stream_release(s, 1);
        }
        else if (ret) {
                /* Analyzer ends only for one channel. So wake up the stream to
                 * be sure to process it for the other side as soon as
                 * possible. */
                task_wakeup(s->task, TASK_WOKEN_MSG);
        }
        return ret;
}


/*
 * Calls 'tcp_data' callback for all "data" filters attached to a stream. This
 * function is called when incoming data are available. It takes care to update
 * the next offset of filters and adjusts available data to be sure that a
 * filter cannot parse more data than its predecessors. A filter can choose to
 * not consume all available data. Returns -1 if an error occurs, the number of
 * consumed bytes otherwise.
 */
static int
flt_data(struct stream *s, struct channel *chn)
{
        struct filter *filter;
        struct buffer *buf = chn->buf;
        unsigned int buf_i;
        int ret = 0;

        /* Save buffer state */
        buf_i = buf->i;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *nxt;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, chn))
                        continue;

                nxt = &FLT_NXT(filter, chn);
                if (FLT_OPS(filter)->tcp_data) {
                        ret = FLT_OPS(filter)->tcp_data(s, filter, chn);
                        if (ret < 0)
                                break;

                        /* Increase next offset of the current filter */
                        *nxt += ret;

                        /* And set this value as the bound for the next
                         * filter. It will not be able to parse more data than
                         * the current one. */
                        buf->i = *nxt;
                }
                else {
                        /* Consume all available data */
                        *nxt = buf->i;
                }

                /* Update <ret> value to be sure to have the last one when we
                 * exit from the loop. This value will be used to know how much
                 * data are "forwardable" */
                ret = *nxt;
        }

        /* Restore the original buffer state */
        chn->buf->i = buf_i;

        return ret;
}

/*
 * Calls 'tcp_forward_data' callback for all "data" filters attached to a
 * stream. This function is called when some data can be forwarded. It takes
 * care to update the forward offset of filters and adjusts "forwardable" data
 * to be sure that a filter cannot forward more data than its predecessors. A
 * filter can choose to not forward all parsed data. Returns a negative value if
 * an error occurs, else the number of forwarded bytes.
 */
static int
flt_forward_data(struct stream *s, struct channel *chn, unsigned int len)
{
        struct filter *filter;
        int ret = len;

        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                unsigned int *fwd;

                /* Call "data" filters only */
                if (!IS_DATA_FILTER(filter, chn))
                        continue;

                fwd = &FLT_FWD(filter, chn);
                if (FLT_OPS(filter)->tcp_forward_data) {
                        /* Remove bytes that the current filter considered as
                         * forwarded */
                        ret = FLT_OPS(filter)->tcp_forward_data(s, filter, chn, ret - *fwd);
                        if (ret < 0)
                                goto end;
                }

                /* Adjust bytes that the current filter considers as
                 * forwarded */
                *fwd += ret;

                /* And set this value as the bound for the next filter. It will
                 * not be able to forward more data than the current one. */
                ret = *fwd;
        }

        if (!ret)
                goto end;

        /* Finally, adjust filters offsets by removing data that HAProxy will
         * forward. */
        list_for_each_entry(filter, &strm_flt(s)->filters, list) {
                if (!IS_DATA_FILTER(filter, chn))
                        continue;
                FLT_NXT(filter, chn) -= ret;
                FLT_FWD(filter, chn) -= ret;
        }

 end:
        return ret;
}

/*
 * Called when TCP data must be filtered on a channel. This function is the
 * AN_FLT_XFER_DATA analyzer. When called, it is responsible for forwarding data
 * when the proxy is not in http mode. Behind the scenes, it calls consecutively
 * 'tcp_data' and 'tcp_forward_data' callbacks for all "data" filters attached
 * to a stream. Returns 0 if an error occurs or if it needs to wait, any other
 * value otherwise.
 */
int
flt_xfer_data(struct stream *s, struct channel *chn, unsigned int an_bit)
{
        int ret = 1;

        /* If there are no "data" filters, we do nothing */
        if (!HAS_DATA_FILTERS(s, chn))
                goto end;

        /* Be sure that the output is still opened. Else we stop the data
         * filtering. */
        if ((chn->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
            ((chn->flags & CF_SHUTW) && (chn->to_forward || chn->buf->o)))
                goto end;

        /* Let all "data" filters parse incoming data */
        ret = flt_data(s, chn);
        if (ret < 0)
                goto end;

        /* And forward them */
        ret = flt_forward_data(s, chn, ret);
        if (ret < 0)
                goto end;

        /* Consume data that all filters consider as forwarded. */
        b_adv(chn->buf, ret);

        /* Stop waiting for data if the input is closed and no data is pending,
         * or if the output is closed. */
        if ((chn->flags & CF_SHUTW) ||
            ((chn->flags & CF_SHUTR) && !buffer_pending(chn->buf))) {
                ret = 1;
                goto end;
        }

        /* Wait for data */
        return 0;
 end:
        /* Terminate the data filtering. If <ret> is negative, an error was
         * encountered during the filtering. */
        return handle_analyzer_result(s, chn, an_bit, ret);
}

/*
 * Handles the result of the filters' analyzers. It returns 0 if an error occurs
 * or if it needs to wait, any other value otherwise.
 */
static int
handle_analyzer_result(struct stream *s, struct channel *chn,
                       unsigned int an_bit, int ret)
{
        int finst;

        if (ret < 0)
                goto return_bad_req;
        else if (!ret)
                goto wait;

        /* End of job, return OK */
        if (an_bit) {
                chn->analysers &= ~an_bit;
                chn->analyse_exp = TICK_ETERNITY;
        }
        return 1;

 return_bad_req:
        /* An error occurred */
        channel_abort(&s->req);
        channel_abort(&s->res);

        if (!(chn->flags & CF_ISRESP)) {
                s->req.analysers &= AN_FLT_END;
                finst = SF_FINST_R;
                /* FIXME: incr counters */
        }
        else {
                s->res.analysers &= AN_FLT_END;
                finst = SF_FINST_H;
                /* FIXME: incr counters */
        }

        if (s->txn) {
                /* Do not do that when we are waiting for the next request */
                if (s->txn->status)
                        http_reply_and_close(s, s->txn->status, NULL);
                else {
                        s->txn->status = 400;
                        http_reply_and_close(s, 400, http_error_message(s, HTTP_ERR_400));
                }
        }

        if (!(s->flags & SF_ERR_MASK))
                s->flags |= SF_ERR_PRXCOND;
        if (!(s->flags & SF_FINST_MASK))
                s->flags |= finst;
        return 0;

 wait:
        if (!(chn->flags & CF_ISRESP))
                channel_dont_connect(chn);
        return 0;
}


/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted, doing so helps
 * all code contributors.
 * Optional keywords are also declared with a NULL ->parse() function so that
 * the config parser can report an appropriate error when a known keyword was
 * not enabled. */
static struct cfg_kw_list cfg_kws = {ILH, {
                { CFG_LISTEN, "filter", parse_filter },
                { 0, NULL, NULL },
        }
};

__attribute__((constructor))
static void
__filters_init(void)
{
        pool2_filter = create_pool("filter", sizeof(struct filter), MEM_F_SHARED);
        cfg_register_keywords(&cfg_kws);
}

__attribute__((destructor))
static void
__filters_deinit(void)
{
        pool_destroy2(pool2_filter);
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */