/*
 * Stream filters related variables and functions.
 *
 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <haproxy/api.h>
#include <haproxy/cfgparse.h>
#include <haproxy/compression.h>
#include <haproxy/dynbuf.h>
#include <haproxy/filters.h>
#include <haproxy/http.h>
#include <haproxy/http_ana-t.h>
#include <haproxy/http_htx.h>
#include <haproxy/htx.h>
#include <haproxy/list.h>
#include <haproxy/proxy.h>
#include <haproxy/sample.h>
#include <haproxy/stream.h>
#include <haproxy/tools.h>

#define COMP_STATE_PROCESSING 0x01

const char *http_comp_flt_id = "compression filter";

struct flt_ops comp_ops;

struct comp_state {
	struct comp_ctx  *comp_ctx;   /* compression context */
	struct comp_algo *comp_algo;  /* compression algorithm if not NULL */
	unsigned int      flags;      /* COMP_STATE_* */
};

/* Pools used to allocate comp_state structs */
DECLARE_STATIC_POOL(pool_head_comp_state, "comp_state", sizeof(struct comp_state));

static THREAD_LOCAL struct buffer tmpbuf;
static THREAD_LOCAL struct buffer zbuf;

static int select_compression_request_header(struct comp_state *st,
					      struct stream *s,
					      struct http_msg *msg);
static int select_compression_response_header(struct comp_state *st,
					       struct stream *s,
					       struct http_msg *msg);
static int set_compression_response_header(struct comp_state *st,
					    struct stream *s,
					    struct http_msg *msg);

static int htx_compression_buffer_init(struct htx *htx, struct buffer *out);
static int htx_compression_buffer_add_data(struct comp_state *st, const char *data, size_t len,
					    struct buffer *out);
static int htx_compression_buffer_end(struct comp_state *st, struct buffer *out, int end);

/***********************************************************************/
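/* Initializes the compression filter for a proxy. It only marks the filter
 * configuration as HTX-capable. Returns 0 on success. */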
static int
comp_flt_init(struct proxy *px, struct flt_conf *fconf)
{
	fconf->flags |= FLT_CFG_FL_HTX;
	return 0;
}

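/* Allocates the per-thread buffers used by the compression filter. Returns 0
 * on success and -1 if an allocation fails. */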
static int
comp_flt_init_per_thread(struct proxy *px, struct flt_conf *fconf)
{
	if (b_alloc(&tmpbuf) == NULL)
		return -1;
	if (b_alloc(&zbuf) == NULL)
		return -1;
	return 0;
}

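/* Releases the per-thread buffers used by the compression filter. */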
static void
comp_flt_deinit_per_thread(struct proxy *px, struct flt_conf *fconf)
{
	if (tmpbuf.size)
		b_free(&tmpbuf);
	if (zbuf.size)
		b_free(&zbuf);
}

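/* Called when the filter is attached to a stream. It allocates and
 * initializes the comp_state context and registers a post-analyzer on
 * AN_RES_WAIT_HTTP. Returns 1 on success and -1 if the allocation fails. */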
static int
comp_strm_init(struct stream *s, struct filter *filter)
{
	struct comp_state *st;

	st = pool_alloc(pool_head_comp_state);
	if (st == NULL)
		return -1;

	st->comp_algo = NULL;
	st->comp_ctx  = NULL;
	st->flags     = 0;
	filter->ctx   = st;

	/* Register post-analyzer on AN_RES_WAIT_HTTP because we need to
	 * analyze response headers before http-response rules execution
	 * to be sure we can use res.comp and res.comp_algo sample
	 * fetches */
	filter->post_analyzers |= AN_RES_WAIT_HTTP;
	return 1;
}

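/* Called when the filter is detached from a stream. It releases the
 * compression context, if any, and frees the comp_state. */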
static void
comp_strm_deinit(struct stream *s, struct filter *filter)
{
	struct comp_state *st = filter->ctx;

	if (!st)
		return;

	/* release any possible compression context */
	if (st->comp_algo)
		st->comp_algo->end(&st->comp_ctx);
	pool_free(pool_head_comp_state, st);
	filter->ctx = NULL;
}

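/* Called when all headers of an HTTP message are received. On the request
 * side it selects the compression algorithm. On the response side, if an
 * algorithm was selected, it rewrites the response headers and registers the
 * filter as a data filter. Always returns 1. */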
static int
comp_http_headers(struct stream *s, struct filter *filter, struct http_msg *msg)
{
	struct comp_state *st = filter->ctx;

	if (!strm_fe(s)->comp && !s->be->comp)
		goto end;

	if (!(msg->chn->flags & CF_ISRESP))
		select_compression_request_header(st, s, msg);
	else {
		/* Response headers have already been checked in
		 * comp_http_post_analyze callback. */
		if (st->comp_algo) {
			if (!set_compression_response_header(st, s, msg))
				goto end;
			register_data_filter(s, msg->chn, filter);
			st->flags |= COMP_STATE_PROCESSING;
		}
	}

  end:
	return 1;
}

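/* Post-analyzer callback, run for the AN_RES_WAIT_HTTP analyzer. It selects
 * the compression algorithm for the response before http-response rules are
 * evaluated, so that the res.comp and res.comp_algo sample fetches are usable
 * there. Always returns 1. */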
static int
comp_http_post_analyze(struct stream *s, struct filter *filter,
		       struct channel *chn, unsigned an_bit)
{
	struct http_txn *txn = s->txn;
	struct http_msg *msg = &txn->rsp;
	struct comp_state *st = filter->ctx;

	if (an_bit != AN_RES_WAIT_HTTP)
		goto end;

	if (!strm_fe(s)->comp && !s->be->comp)
		goto end;

	select_compression_response_header(st, s, msg);

  end:
	return 1;
}

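/* Filters <len> bytes of the message payload, starting at <offset>. DATA
 * blocks are compressed, the trailers mark the end of the compressed stream
 * and any other block is forwarded as-is. Returns the number of bytes that
 * can be forwarded, or -1 on error. */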
static int
comp_http_payload(struct stream *s, struct filter *filter, struct http_msg *msg,
		  unsigned int offset, unsigned int len)
{
	struct comp_state *st = filter->ctx;
	struct htx *htx = htxbuf(&msg->chn->buf);
	struct htx_ret htxret = htx_find_offset(htx, offset);
	struct htx_blk *blk, *next;
	int ret, consumed = 0, to_forward = 0, last = 0;

	blk = htxret.blk;
	offset = htxret.ret;
	for (next = NULL; blk && len; blk = next) {
		enum htx_blk_type type = htx_get_blk_type(blk);
		uint32_t sz = htx_get_blksz(blk);
		struct ist v;

		next = htx_get_next_blk(htx, blk);
		while (next && htx_get_blk_type(next) == HTX_BLK_UNUSED)
			next = htx_get_next_blk(htx, next);

		if (!(st->flags & COMP_STATE_PROCESSING))
			goto consume;

		if (htx_compression_buffer_init(htx, &trash) < 0) {
			msg->chn->flags |= CF_WAKE_WRITE;
			goto end;
		}

		switch (type) {
			case HTX_BLK_DATA:
				/* it is the last data block */
				last = ((!next && (htx->flags & HTX_FL_EOM)) || (next && htx_get_blk_type(next) != HTX_BLK_DATA));
				v = htx_get_blk_value(htx, blk);
				v = istadv(v, offset);
				if (v.len > len) {
					last = 0;
					v.len = len;
				}

				ret = htx_compression_buffer_add_data(st, v.ptr, v.len, &trash);
				if (ret < 0 || htx_compression_buffer_end(st, &trash, last) < 0)
					goto error;
				BUG_ON(v.len != ret);

				if (ret == sz && !b_data(&trash))
					next = htx_remove_blk(htx, blk);
				else {
					blk = htx_replace_blk_value(htx, blk, v, ist2(b_head(&trash), b_data(&trash)));
					next = htx_get_next_blk(htx, blk);
				}

				len -= ret;
				consumed += ret;
				to_forward += b_data(&trash);
				if (last)
					st->flags &= ~COMP_STATE_PROCESSING;
				break;

			case HTX_BLK_TLR:
			case HTX_BLK_EOT:
				if (htx_compression_buffer_end(st, &trash, 1) < 0)
					goto error;
				if (b_data(&trash)) {
					struct htx_blk *last = htx_add_last_data(htx, ist2(b_head(&trash), b_data(&trash)));
					if (!last)
						goto error;
					blk = htx_get_next_blk(htx, last);
					if (!blk)
						goto error;
					next = htx_get_next_blk(htx, blk);
					to_forward += b_data(&trash);
				}
				st->flags &= ~COMP_STATE_PROCESSING;
				/* fall through */

			default:
			  consume:
				sz -= offset;
				if (sz > len)
					sz = len;
				consumed += sz;
				to_forward += sz;
				len -= sz;
				break;
		}

		offset = 0;
	}

  end:
	if (to_forward != consumed)
		flt_update_offsets(filter, msg->chn, to_forward - consumed);

	if (st->comp_ctx && st->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_in, consumed);
		_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_in, consumed);
		_HA_ATOMIC_ADD(&s->be->be_counters.comp_in, consumed);
		update_freq_ctr(&global.comp_bps_out, to_forward);
		_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_out, to_forward);
		_HA_ATOMIC_ADD(&s->be->be_counters.comp_out, to_forward);
	} else {
		_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_byp, consumed);
		_HA_ATOMIC_ADD(&s->be->be_counters.comp_byp, consumed);
	}
	return to_forward;

  error:
	return -1;
}

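/* Called when the HTTP response was completely processed. It updates the
 * per-proxy counters of compressed responses. Always returns 1. */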
static int
comp_http_end(struct stream *s, struct filter *filter,
	      struct http_msg *msg)
{
	struct comp_state *st = filter->ctx;

	if (!(msg->chn->flags & CF_ISRESP) || !st || !st->comp_algo)
		goto end;

	if (strm_fe(s)->mode == PR_MODE_HTTP)
		_HA_ATOMIC_INC(&strm_fe(s)->fe_counters.p.http.comp_rsp);
	if ((s->flags & SF_BE_ASSIGNED) && (s->be->mode == PR_MODE_HTTP))
		_HA_ATOMIC_INC(&s->be->be_counters.p.http.comp_rsp);
  end:
	return 1;
}

/***********************************************************************/
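/* Updates the response headers once a compression algorithm was selected:
 * Content-Encoding is added, Content-Length is removed in favor of chunked
 * transfer-encoding, a strong ETag is converted to a weak one and a Vary
 * header is appended. Returns 1 on success. On error, compression is disabled
 * for the stream and 0 is returned. */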
static int
set_compression_response_header(struct comp_state *st, struct stream *s, struct http_msg *msg)
{
	struct htx *htx = htxbuf(&msg->chn->buf);
	struct htx_sl *sl;
	struct http_hdr_ctx ctx;

	/*
	 * Add Content-Encoding header when it's not identity encoding.
	 * RFC 2616 : Identity encoding: This content-coding is used only in the
	 * Accept-Encoding header, and SHOULD NOT be used in the Content-Encoding
	 * header.
	 */
	if (st->comp_algo->cfg_name_len != 8 || memcmp(st->comp_algo->cfg_name, "identity", 8) != 0) {
		struct ist v = ist2(st->comp_algo->ua_name, st->comp_algo->ua_name_len);

		if (!http_add_header(htx, ist("Content-Encoding"), v))
			goto error;
	}

	sl = http_get_stline(htx);
	if (!sl)
		goto error;

	/* remove Content-Length header */
	if (msg->flags & HTTP_MSGF_CNT_LEN) {
		ctx.blk = NULL;
		while (http_find_header(htx, ist("Content-Length"), &ctx, 1))
			http_remove_header(htx, &ctx);
		msg->flags &= ~HTTP_MSGF_CNT_LEN;
		sl->flags &= ~HTX_SL_F_CLEN;
	}

	/* add "Transfer-Encoding: chunked" header */
	if (!(msg->flags & HTTP_MSGF_TE_CHNK)) {
		if (!http_add_header(htx, ist("Transfer-Encoding"), ist("chunked")))
			goto error;
		msg->flags |= HTTP_MSGF_TE_CHNK;
		sl->flags |= (HTX_SL_F_XFER_ENC|HTX_SL_F_CHNK);
	}

	/* convert "ETag" header to a weak ETag */
	ctx.blk = NULL;
	if (http_find_header(htx, ist("ETag"), &ctx, 1)) {
		if (ctx.value.ptr[0] == '"') {
			/* This is a strong ETag. Convert it to a weak one. */
			struct ist v = ist2(trash.area, 0);
			if (istcat(&v, ist("W/"), trash.size) == -1 || istcat(&v, ctx.value, trash.size) == -1)
				goto error;

			if (!http_replace_header_value(htx, &ctx, v))
				goto error;
		}
	}

	if (!http_add_header(htx, ist("Vary"), ist("Accept-Encoding")))
		goto error;

	return 1;

  error:
	st->comp_algo->end(&st->comp_ctx);
	st->comp_algo = NULL;
	return 0;
}

/*
 * Selects a compression algorithm depending on the client request.
 */
static int
select_compression_request_header(struct comp_state *st, struct stream *s, struct http_msg *msg)
{
	struct htx *htx = htxbuf(&msg->chn->buf);
	struct http_hdr_ctx ctx;
	struct comp_algo *comp_algo = NULL;
	struct comp_algo *comp_algo_back = NULL;

	/* Disable compression for older user agents announcing themselves as "Mozilla/4"
	 * unless they are known good (MSIE 6 with XP SP2, or MSIE 7 and later).
	 * See http://zoompf.com/2012/02/lose-the-wait-http-compression for more details.
	 */
	ctx.blk = NULL;
	if (http_find_header(htx, ist("User-Agent"), &ctx, 1) &&
	    ctx.value.len >= 9 &&
	    memcmp(ctx.value.ptr, "Mozilla/4", 9) == 0 &&
	    (ctx.value.len < 31 ||
	     memcmp(ctx.value.ptr + 25, "MSIE ", 5) != 0 ||
	     *(ctx.value.ptr + 30) < '6' ||
	     (*(ctx.value.ptr + 30) == '6' &&
	      (ctx.value.len < 54 || memcmp(ctx.value.ptr + 51, "SV1", 3) != 0)))) {
		st->comp_algo = NULL;
		return 0;
	}

	/* search for the algo in the backend in priority or the frontend */
	if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) ||
	    (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos))) {
		int best_q = 0;

		ctx.blk = NULL;
		while (http_find_header(htx, ist("Accept-Encoding"), &ctx, 0)) {
			const char *qval;
			int q;
			int toklen;

			/* try to isolate the token from the optional q-value */
			toklen = 0;
			while (toklen < ctx.value.len && HTTP_IS_TOKEN(*(ctx.value.ptr + toklen)))
				toklen++;

			qval = ctx.value.ptr + toklen;
			while (1) {
				while (qval < istend(ctx.value) && HTTP_IS_LWS(*qval))
					qval++;

				if (qval >= istend(ctx.value) || *qval != ';') {
					qval = NULL;
					break;
				}
				qval++;

				while (qval < istend(ctx.value) && HTTP_IS_LWS(*qval))
					qval++;

				if (qval >= istend(ctx.value)) {
					qval = NULL;
					break;
				}
				if (strncmp(qval, "q=", MIN(istend(ctx.value) - qval, 2)) == 0)
					break;

				while (qval < istend(ctx.value) && *qval != ';')
					qval++;
			}

			/* here we have qval pointing to the first "q=" attribute or NULL if not found */
			q = qval ? http_parse_qvalue(qval + 2, NULL) : 1000;

			if (q <= best_q)
				continue;

			for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
				if (*(ctx.value.ptr) == '*' ||
				    word_match(ctx.value.ptr, toklen, comp_algo->ua_name, comp_algo->ua_name_len)) {
					st->comp_algo = comp_algo;
					best_q = q;
					break;
				}
			}
		}
	}

	/* remove all occurrences of the header when "compression offload" is set */
	if (st->comp_algo) {
		if ((s->be->comp && s->be->comp->offload) ||
		    (strm_fe(s)->comp && strm_fe(s)->comp->offload)) {
			http_remove_header(htx, &ctx);
			ctx.blk = NULL;
			while (http_find_header(htx, ist("Accept-Encoding"), &ctx, 1))
				http_remove_header(htx, &ctx);
		}
		return 1;
	}

	/* identity is implicit and does not require headers */
	if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) ||
	    (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos))) {
		for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
			if (comp_algo->cfg_name_len == 8 && memcmp(comp_algo->cfg_name, "identity", 8) == 0) {
				st->comp_algo = comp_algo;
				return 1;
			}
		}
	}

	st->comp_algo = NULL;
	return 0;
}

/*
 * Selects a compression algorithm depending on the server response.
 */
static int
select_compression_response_header(struct comp_state *st, struct stream *s, struct http_msg *msg)
{
	struct htx *htx = htxbuf(&msg->chn->buf);
	struct http_txn *txn = s->txn;
	struct http_hdr_ctx ctx;
	struct comp_type *comp_type;

	/* no common compression algorithm was found in request header */
	if (st->comp_algo == NULL)
		goto fail;

	/* compression already in progress */
	if (msg->flags & HTTP_MSGF_COMPRESSING)
		goto fail;

	/* HTTP < 1.1 should not be compressed */
	if (!(msg->flags & HTTP_MSGF_VER_11) || !(txn->req.flags & HTTP_MSGF_VER_11))
		goto fail;

	if (txn->meth == HTTP_METH_HEAD)
		goto fail;

	/* compress 200,201,202,203 responses only */
	if ((txn->status != 200) &&
	    (txn->status != 201) &&
	    (txn->status != 202) &&
	    (txn->status != 203))
		goto fail;

	if (!(msg->flags & HTTP_MSGF_XFER_LEN) || msg->flags & HTTP_MSGF_BODYLESS)
		goto fail;

	/* content is already compressed */
	ctx.blk = NULL;
	if (http_find_header(htx, ist("Content-Encoding"), &ctx, 1))
		goto fail;

	/* no compression when Cache-Control: no-transform is present in the message */
	ctx.blk = NULL;
	while (http_find_header(htx, ist("Cache-Control"), &ctx, 0)) {
		if (word_match(ctx.value.ptr, ctx.value.len, "no-transform", 12))
			goto fail;
	}

	/* no compression when ETag is malformed */
	ctx.blk = NULL;
	if (http_find_header(htx, ist("ETag"), &ctx, 1)) {
		if (http_get_etag_type(ctx.value) == ETAG_INVALID)
			goto fail;
	}
	/* no compression when multiple ETags are present
	 * Note: Do not reset ctx.blk!
	 */
	if (http_find_header(htx, ist("ETag"), &ctx, 1))
		goto fail;

	comp_type = NULL;

	/* we don't want to compress multipart content-types, nor content-types that are
	 * not listed in the "compression type" directive if any. If no content-type was
	 * found but configuration requires one, we don't compress either. Backend has
	 * the priority.
	 */
	ctx.blk = NULL;
	if (http_find_header(htx, ist("Content-Type"), &ctx, 1)) {
		if (ctx.value.len >= 9 && strncasecmp("multipart", ctx.value.ptr, 9) == 0)
			goto fail;

		if ((s->be->comp && (comp_type = s->be->comp->types)) ||
		    (strm_fe(s)->comp && (comp_type = strm_fe(s)->comp->types))) {
			for (; comp_type; comp_type = comp_type->next) {
				if (ctx.value.len >= comp_type->name_len &&
				    strncasecmp(ctx.value.ptr, comp_type->name, comp_type->name_len) == 0)
					/* this Content-Type should be compressed */
					break;
			}
			/* this Content-Type should not be compressed */
			if (comp_type == NULL)
				goto fail;
		}
	}
	else { /* no content-type header */
		if ((s->be->comp && s->be->comp->types) ||
		    (strm_fe(s)->comp && strm_fe(s)->comp->types))
			goto fail; /* a content-type was required */
	}

	/* limit compression rate */
	if (global.comp_rate_lim > 0)
		if (read_freq_ctr(&global.comp_bps_in) > global.comp_rate_lim)
			goto fail;

	/* limit cpu usage */
	if (th_ctx->idle_pct < compress_min_idle)
		goto fail;

	/* initialize compression */
	if (st->comp_algo->init(&st->comp_ctx, global.tune.comp_maxlevel) < 0)
		goto fail;
	msg->flags |= HTTP_MSGF_COMPRESSING;
	return 1;

  fail:
	st->comp_algo = NULL;
	return 0;
}

/***********************************************************************/
static int
htx_compression_buffer_init(struct htx *htx, struct buffer *out)
{
	/* output stream requires at least 10 bytes for the gzip header, plus
	 * at least 8 bytes for the gzip trailer (crc+len), plus at most
	 * 5 bytes per 32kB block and 2 bytes to close the stream.
	 */
	if (htx_free_space(htx) < 20 + 5 * ((htx->data + 32767) >> 15))
		return -1;
	b_reset(out);
	return 0;
}

static int
htx_compression_buffer_add_data(struct comp_state *st, const char *data, size_t len,
				struct buffer *out)
{
	return st->comp_algo->add_data(st->comp_ctx, data, len, out);
}

static int
htx_compression_buffer_end(struct comp_state *st, struct buffer *out, int end)
{
	if (end)
		return st->comp_algo->finish(st->comp_ctx, out);
	else
		return st->comp_algo->flush(st->comp_ctx, out);
}

/***********************************************************************/
struct flt_ops comp_ops = {
	.init              = comp_flt_init,
	.init_per_thread   = comp_flt_init_per_thread,
	.deinit_per_thread = comp_flt_deinit_per_thread,

	.attach = comp_strm_init,
	.detach = comp_strm_deinit,

	.channel_post_analyze = comp_http_post_analyze,

	.http_headers = comp_http_headers,
	.http_payload = comp_http_payload,
	.http_end     = comp_http_end,
};

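/* Parses the "compression" proxy keyword and its "algo", "type" and "offload"
 * variants. Returns 0 on success, a positive value to emit a warning and -1
 * on error. A typical configuration using these directives could look like:
 *
 *     compression algo gzip
 *     compression type text/html text/plain
 *     compression offload
 */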
static int
parse_compression_options(char **args, int section, struct proxy *proxy,
			  const struct proxy *defpx, const char *file, int line,
			  char **err)
{
	struct comp *comp;
	int ret = 0;

	if (proxy->comp == NULL) {
		comp = calloc(1, sizeof(*comp));
		proxy->comp = comp;
	}
	else
		comp = proxy->comp;

	if (strcmp(args[1], "algo") == 0) {
		struct comp_ctx *ctx;
		int cur_arg = 2;

		if (!*args[cur_arg]) {
			memprintf(err, "parsing [%s:%d] : '%s' expects <algorithm>.",
				  file, line, args[0]);
			ret = -1;
			goto end;
		}
		while (*(args[cur_arg])) {
			int retval = comp_append_algo(comp, args[cur_arg]);
			if (retval) {
				if (retval < 0)
					memprintf(err, "'%s' : '%s' is not a supported algorithm.",
						  args[0], args[cur_arg]);
				else
					memprintf(err, "'%s' : out of memory while parsing algo '%s'.",
						  args[0], args[cur_arg]);
				ret = -1;
				goto end;
			}

			if (proxy->comp->algos->init(&ctx, 9) == 0)
				proxy->comp->algos->end(&ctx);
			else {
				memprintf(err, "'%s' : Can't init '%s' algorithm.",
					  args[0], args[cur_arg]);
				ret = -1;
				goto end;
			}
			cur_arg++;
			continue;
		}
	}
	else if (strcmp(args[1], "offload") == 0) {
		if (proxy->cap & PR_CAP_DEF) {
			memprintf(err, "'%s' : '%s' ignored in 'defaults' section.",
				  args[0], args[1]);
			ret = 1;
		}
		comp->offload = 1;
	}
	else if (strcmp(args[1], "type") == 0) {
		int cur_arg = 2;

		if (!*args[cur_arg]) {
			memprintf(err, "'%s' expects <type>.", args[0]);
			ret = -1;
			goto end;
		}
		while (*(args[cur_arg])) {
			if (comp_append_type(comp, args[cur_arg])) {
				memprintf(err, "'%s': out of memory.", args[0]);
				ret = -1;
				goto end;
			}
			cur_arg++;
			continue;
		}
	}
	else {
		memprintf(err, "'%s' expects 'algo', 'type' or 'offload'",
			  args[0]);
		ret = -1;
		goto end;
	}

  end:
	return ret;
}

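/* Parses the "compression" filter keyword, used when the filter is declared
 * explicitly with a "filter compression" line in a proxy section. Only one
 * compression filter is allowed per proxy. Returns 0 on success and -1 on
 * error. */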
static int
parse_http_comp_flt(char **args, int *cur_arg, struct proxy *px,
		    struct flt_conf *fconf, char **err, void *private)
{
	struct flt_conf *fc, *back;

	list_for_each_entry_safe(fc, back, &px->filter_configs, list) {
		if (fc->id == http_comp_flt_id) {
			memprintf(err, "%s: Proxy supports only one compression filter\n", px->id);
			return -1;
		}
	}

	fconf->id   = http_comp_flt_id;
	fconf->conf = NULL;
	fconf->ops  = &comp_ops;
	(*cur_arg)++;

	return 0;
}

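/* Checks whether an implicit declaration of the compression filter is needed
 * for <proxy>, i.e. when "compression" settings are used without an explicit
 * "filter" line, and appends it after the other filters in that case. Returns
 * the number of errors encountered. */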
int
check_implicit_http_comp_flt(struct proxy *proxy)
{
	struct flt_conf *fconf;
	int explicit = 0;
	int comp = 0;
	int err = 0;

	if (proxy->comp == NULL)
		goto end;
	if (!LIST_ISEMPTY(&proxy->filter_configs)) {
		list_for_each_entry(fconf, &proxy->filter_configs, list) {
			if (fconf->id == http_comp_flt_id)
				comp = 1;
			else if (fconf->id == cache_store_flt_id) {
				if (comp) {
					ha_alert("config: %s '%s': unable to enable the compression filter "
						 "before any cache filter.\n",
						 proxy_type_str(proxy), proxy->id);
					err++;
					goto end;
				}
			}
			else if (fconf->id == fcgi_flt_id)
				continue;
			else
				explicit = 1;
		}
	}
	if (comp)
		goto end;
	else if (explicit) {
		ha_alert("config: %s '%s': require an explicit filter declaration to use "
			 "HTTP compression\n", proxy_type_str(proxy), proxy->id);
		err++;
		goto end;
	}

	/* Implicit declaration of the compression filter is always the last
	 * one */
	fconf = calloc(1, sizeof(*fconf));
	if (!fconf) {
		ha_alert("config: %s '%s': out of memory\n",
			 proxy_type_str(proxy), proxy->id);
		err++;
		goto end;
	}
	fconf->id   = http_comp_flt_id;
	fconf->conf = NULL;
	fconf->ops  = &comp_ops;
	LIST_APPEND(&proxy->filter_configs, &fconf->list);
  end:
	return err;
}

/*
 * boolean, returns true if compression is used (either gzip or deflate) in the
 * response.
 */
static int
smp_fetch_res_comp(const struct arg *args, struct sample *smp, const char *kw,
		   void *private)
{
	struct http_txn *txn = smp->strm ? smp->strm->txn : NULL;

	smp->data.type = SMP_T_BOOL;
	smp->data.u.sint = (txn && (txn->rsp.flags & HTTP_MSGF_COMPRESSING));
	return 1;
}

/*
 * string, returns the name of the compression algorithm used for the response
 */
static int
smp_fetch_res_comp_algo(const struct arg *args, struct sample *smp,
			const char *kw, void *private)
{
	struct http_txn *txn = smp->strm ? smp->strm->txn : NULL;
	struct filter *filter;
	struct comp_state *st;

	if (!txn || !(txn->rsp.flags & HTTP_MSGF_COMPRESSING))
		return 0;

	list_for_each_entry(filter, &strm_flt(smp->strm)->filters, list) {
		if (FLT_ID(filter) != http_comp_flt_id)
			continue;

		if (!(st = filter->ctx))
			break;

		smp->data.type = SMP_T_STR;
		smp->flags = SMP_F_CONST;
		smp->data.u.str.area = st->comp_algo->cfg_name;
		smp->data.u.str.data = st->comp_algo->cfg_name_len;
		return 1;
	}
	return 0;
}

/* Declare the config parser for "compression" keyword */
static struct cfg_kw_list cfg_kws = {ILH, {
		{ CFG_LISTEN, "compression", parse_compression_options },
		{ 0, NULL, NULL },
	}
};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);

/* Declare the filter parser for "compression" keyword */
static struct flt_kw_list filter_kws = { "COMP", { }, {
		{ "compression", parse_http_comp_flt, NULL },
		{ NULL, NULL, NULL },
	}
};

INITCALL1(STG_REGISTER, flt_register_keywords, &filter_kws);

/* Note: must not be declared <const> as its list will be overwritten */
static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
		{ "res.comp",      smp_fetch_res_comp,      0, NULL, SMP_T_BOOL, SMP_USE_HRSHP },
		{ "res.comp_algo", smp_fetch_res_comp_algo, 0, NULL, SMP_T_STR,  SMP_USE_HRSHP },
		{ /* END */ },
	}
};

INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords);