/*
 * Stream filters related variables and functions.
 *
 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/buffer.h>
#include <common/cfgparse.h>
#include <common/initcall.h>
#include <common/mini-clist.h>
#include <common/standard.h>

#include <types/compression.h>
#include <types/filters.h>
#include <types/h1.h>
#include <types/proxy.h>
#include <types/sample.h>

#include <proto/compression.h>
#include <proto/filters.h>
#include <proto/hdr_idx.h>
#include <proto/http_htx.h>
#include <proto/htx.h>
#include <proto/proto_http.h>
#include <proto/sample.h>
#include <proto/stream.h>

static const char *http_comp_flt_id = "compression filter";

struct flt_ops comp_ops;

struct comp_state {
	struct comp_ctx  *comp_ctx;   /* compression context */
	struct comp_algo *comp_algo;  /* compression algorithm if not NULL */

	/* Following fields are used by the legacy code only: */
	int hdrs_len;
	int tlrs_len;
	int consumed;
	int initialized;
	int finished;
};

/* Pools used to allocate comp_state structs */
DECLARE_STATIC_POOL(pool_head_comp_state, "comp_state", sizeof(struct comp_state));

static THREAD_LOCAL struct buffer tmpbuf;
static THREAD_LOCAL struct buffer zbuf;
static THREAD_LOCAL unsigned int buf_output;

static int select_compression_request_header(struct comp_state *st,
					      struct stream *s,
					      struct http_msg *msg);
static int select_compression_response_header(struct comp_state *st,
					       struct stream *s,
					       struct http_msg *msg);

static int htx_compression_buffer_init(struct htx *htx, struct buffer *out);
static int htx_compression_buffer_add_data(struct comp_state *st, const char *data, size_t len,
					    struct buffer *out);
static int htx_compression_buffer_end(struct comp_state *st, struct buffer *out, int end);

static int http_compression_buffer_init(struct channel *inc, struct buffer *out, unsigned int *out_len);
static int http_compression_buffer_add_data(struct comp_state *st,
					     struct buffer *in,
					     int in_out,
					     struct buffer *out, int sz);
static int http_compression_buffer_end(struct comp_state *st, struct stream *s,
				       struct channel *chn, struct buffer *out,
				       unsigned int *out_len, int end);

/***********************************************************************/
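/* Filter initialization callback: it only sets the STRM_FLT_FL_HAS_FILTERS
 * flag on the filter config. It never fails. */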
static int
comp_flt_init(struct proxy *px, struct flt_conf *fconf)
{
	fconf->flags |= STRM_FLT_FL_HAS_FILTERS;
	return 0;
}

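/* Per-thread initialization: allocate the thread-local buffers used by the
 * legacy compression code. Returns -1 on allocation failure, 0 on success. */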
static int
comp_flt_init_per_thread(struct proxy *px, struct flt_conf *fconf)
{
	if (!tmpbuf.size && b_alloc(&tmpbuf) == NULL)
		return -1;
	if (!zbuf.size && b_alloc(&zbuf) == NULL)
		return -1;
	return 0;
}

static void
comp_flt_deinit_per_thread(struct proxy *px, struct flt_conf *fconf)
{
	if (tmpbuf.size)
		b_free(&tmpbuf);
	if (zbuf.size)
		b_free(&zbuf);
}

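/* Called when the analysis starts on a channel: allocate and initialize the
 * compression state and register the post-analyzer on AN_RES_WAIT_HTTP.
 * Returns -1 on allocation failure, 1 otherwise. */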
static int
comp_start_analyze(struct stream *s, struct filter *filter, struct channel *chn)
{

	if (filter->ctx == NULL) {
		struct comp_state *st;

		st = pool_alloc_dirty(pool_head_comp_state);
		if (st == NULL)
			return -1;

		st->comp_algo = NULL;
		st->comp_ctx  = NULL;
		st->hdrs_len    = 0;
		st->tlrs_len    = 0;
		st->consumed    = 0;
		st->initialized = 0;
		st->finished    = 0;
		filter->ctx     = st;

		/* Register post-analyzer on AN_RES_WAIT_HTTP because we need to
		 * analyze response headers before http-response rules execution
		 * to be sure we can use res.comp and res.comp_algo sample
		 * fetches */
		filter->post_analyzers |= AN_RES_WAIT_HTTP;
	}
	return 1;
}

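/* Called when the analysis ends: release the compression context, if any, and
 * free the compression state. */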
static int
comp_end_analyze(struct stream *s, struct filter *filter, struct channel *chn)
{
	struct comp_state *st = filter->ctx;

	if (!st)
		goto end;

	/* release any possible compression context */
	if (st->comp_algo)
		st->comp_algo->end(&st->comp_ctx);
	pool_free(pool_head_comp_state, st);
	filter->ctx = NULL;
 end:
	return 1;
}

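/* Header analysis callback: on the request channel, select a compression
 * algorithm from the request headers; on the response channel, register the
 * data filter when an algorithm was previously selected. */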
static int
comp_http_headers(struct stream *s, struct filter *filter, struct http_msg *msg)
{
	struct comp_state *st = filter->ctx;

	if (!strm_fe(s)->comp && !s->be->comp)
		goto end;

	if (!(msg->chn->flags & CF_ISRESP))
		select_compression_request_header(st, s, msg);
	else {
		/* Response headers have already been checked in
		 * comp_http_post_analyze callback. */
		if (st->comp_algo) {
			register_data_filter(s, msg->chn, filter);
			if (!IS_HTX_STRM(s))
				st->hdrs_len = s->txn->rsp.sov;
		}
	}

 end:
	return 1;
}

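/* Post-analyzer called on AN_RES_WAIT_HTTP: select the response compression
 * algorithm before http-response rules are evaluated, so that the res.comp and
 * res.comp_algo sample fetches can be used (see comp_start_analyze). */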
static int
comp_http_post_analyze(struct stream *s, struct filter *filter,
		       struct channel *chn, unsigned an_bit)
{
	struct http_txn *txn = s->txn;
	struct http_msg *msg = &txn->rsp;
	struct comp_state *st = filter->ctx;

	if (an_bit != AN_RES_WAIT_HTTP)
		goto end;

	if (!strm_fe(s)->comp && !s->be->comp)
		goto end;

	select_compression_response_header(st, s, msg);

 end:
	return 1;
}

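/* HTX payload callback: compress the DATA blocks of the message in place,
 * replacing their content with the compressed output, and finish the
 * compressed stream when the EOD/TLR/EOM blocks are reached. Returns the
 * number of bytes that may be forwarded or -1 on error. */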
static int
comp_http_payload(struct stream *s, struct filter *filter, struct http_msg *msg,
		  unsigned int offset, unsigned int len)
{
	struct comp_state *st = filter->ctx;
	struct htx *htx = htx_from_buf(&msg->chn->buf);
	struct htx_blk *blk;
	struct htx_ret htx_ret;
	int ret, consumed = 0, to_forward = 0;

	htx_ret = htx_find_blk(htx, offset);
	blk = htx_ret.blk;
	offset = htx_ret.ret;

	while (blk && len) {
		enum htx_blk_type type = htx_get_blk_type(blk);
		uint32_t sz = htx_get_blksz(blk);
		struct ist v;

		switch (type) {
			case HTX_BLK_UNUSED:
				break;

			case HTX_BLK_DATA:
				v = htx_get_blk_value(htx, blk);
				v.ptr += offset;
				v.len -= offset;
				if (v.len > len)
					v.len = len;
				if (htx_compression_buffer_init(htx, &trash) < 0) {
					msg->chn->flags |= CF_WAKE_WRITE;
					goto end;
				}
				ret = htx_compression_buffer_add_data(st, v.ptr, v.len, &trash);
				if (ret < 0)
					goto error;
				if (htx_compression_buffer_end(st, &trash, 0) < 0)
					goto error;
				len -= ret;
				consumed += ret;
				to_forward += b_data(&trash);
				if (ret == sz && !b_data(&trash)) {
					offset = 0;
					blk = htx_remove_blk(htx, blk);
					continue;
				}
				v.len = ret;
				blk = htx_replace_blk_value(htx, blk, v, ist2(b_head(&trash), b_data(&trash)));
				break;

			case HTX_BLK_EOD:
			case HTX_BLK_TLR:
			case HTX_BLK_EOM:
				if (msg->flags & HTTP_MSGF_COMPRESSING) {
					if (htx_compression_buffer_init(htx, &trash) < 0) {
						msg->chn->flags |= CF_WAKE_WRITE;
						goto end;
					}
					if (htx_compression_buffer_end(st, &trash, 1) < 0)
						goto error;
					blk = htx_add_data_before(htx, blk, ist2(b_head(&trash), b_data(&trash)));
					if (!blk)
						goto error;
					to_forward += b_data(&trash);
					msg->flags &= ~HTTP_MSGF_COMPRESSING;
					/* We let the mux add last empty chunk and empty trailers */
				}
				/* fall through */

			default:
				sz -= offset;
				if (sz > len)
					sz = len;
				consumed += sz;
				to_forward += sz;
				len -= sz;
				break;
		}

		offset = 0;
		blk = htx_get_next_blk(htx, blk);
	}

  end:
	if (to_forward != consumed)
		flt_update_offsets(filter, msg->chn, to_forward - consumed);

	if (st->comp_ctx && st->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_out, to_forward);
		HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_out, to_forward);
		HA_ATOMIC_ADD(&s->be->be_counters.comp_out, to_forward);
	}
	return to_forward;

  error:
	return -1;
}

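/* Legacy HTTP data callback: for chunk-encoded responses the available data
 * are first copied into the thread-local <tmpbuf>; otherwise they are directly
 * compressed into <zbuf>. */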
static int
comp_http_data(struct stream *s, struct filter *filter, struct http_msg *msg)
{
	struct comp_state *st = filter->ctx;
	struct channel *chn = msg->chn;
	unsigned int *nxt = &flt_rsp_nxt(filter);
	unsigned int len;
	int ret;

	len = MIN(msg->chunk_len + msg->next, ci_data(chn)) - *nxt;
	if (!len)
		return len;

	if (!st->initialized) {
		unsigned int fwd = flt_rsp_fwd(filter) + st->hdrs_len;

		b_reset(&tmpbuf);
		c_adv(chn, fwd);
		ret = http_compression_buffer_init(chn, &zbuf, &buf_output);
		c_rew(chn, fwd);
		if (ret < 0) {
			msg->chn->flags |= CF_WAKE_WRITE;
			return 0;
		}
	}

	if (msg->flags & HTTP_MSGF_TE_CHNK) {
		int block;

		len = MIN(b_room(&tmpbuf), len);

		c_adv(chn, *nxt);
		block = ci_contig_data(chn);
		memcpy(b_tail(&tmpbuf), ci_head(chn), block);
		if (len > block)
			memcpy(b_tail(&tmpbuf)+block, b_orig(&chn->buf), len-block);
		c_rew(chn, *nxt);

		b_add(&tmpbuf, len);
		ret = len;
	}
	else {
		c_adv(chn, *nxt);
		ret = http_compression_buffer_add_data(st, &chn->buf, co_data(chn), &zbuf, len);
		c_rew(chn, *nxt);
		if (ret < 0)
			return ret;
	}

	st->initialized = 1;
	msg->next      += ret;
	msg->chunk_len -= ret;
	*nxt = msg->next;
	return 0;
}

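/* Legacy trailers callback: make sure the output buffer is initialized even if
 * no data was processed, and record the trailers length so that they can be
 * forwarded untouched. */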
static int
comp_http_chunk_trailers(struct stream *s, struct filter *filter,
			 struct http_msg *msg)
{
	struct comp_state *st = filter->ctx;

	if (!st->initialized) {
		if (!st->finished) {
			struct channel *chn = msg->chn;
			unsigned int fwd = flt_rsp_fwd(filter) + st->hdrs_len;

			b_reset(&tmpbuf);
			c_adv(chn, fwd);
			http_compression_buffer_init(chn, &zbuf, &buf_output);
			c_rew(chn, fwd);
			st->initialized = 1;
		}
	}
	st->tlrs_len = msg->sol;
	return 1;
}

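/* Legacy forward callback: forward the headers untouched, then flush the
 * buffered data through the compressor, emit the chunk envelope and swap the
 * channel buffer with the compressed one (see http_compression_buffer_end).
 * Returns the number of bytes that can be forwarded or a negative value on
 * error. */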
static int
comp_http_forward_data(struct stream *s, struct filter *filter,
		       struct http_msg *msg, unsigned int len)
{
	struct comp_state *st = filter->ctx;
	int ret;

	/* To work, previous filters MUST forward all data */
	if (flt_rsp_fwd(filter) + len != flt_rsp_nxt(filter)) {
		ha_warning("HTTP compression failed: unexpected behavior of previous filters\n");
		return -1;
	}

	if (!st->initialized) {
		if (!len) {
			/* Nothing to forward */
			ret = len;
		}
		else if (st->hdrs_len > len) {
			/* Forward part of headers */
			ret          = len;
			st->hdrs_len -= len;
		}
		else if (st->hdrs_len > 0) {
			/* Forward remaining headers */
			ret          = st->hdrs_len;
			st->hdrs_len = 0;
		}
		else if (msg->msg_state < HTTP_MSG_TRAILERS) {
			/* Do not forward anything for now. This only happens
			 * with chunk-encoded responses. Waiting data are part
			 * of the chunk envelope (the chunk size or the chunk
			 * CRLF). These data will be skipped during the
			 * compression. */
			ret = 0;
		}
		else {
			/* Forward trailers data */
			ret = len;
		}
		return ret;
	}

	if (msg->flags & HTTP_MSGF_TE_CHNK) {
		ret = http_compression_buffer_add_data(st, &tmpbuf, 0,
						       &zbuf, b_data(&tmpbuf));
		if (ret != b_data(&tmpbuf)) {
			ha_warning("HTTP compression failed: Must consume %u bytes but only %d bytes consumed\n",
				   (unsigned int)b_data(&tmpbuf), ret);
			return -1;
		}
	}

	st->consumed = len - st->hdrs_len - st->tlrs_len;
	c_adv(msg->chn, flt_rsp_fwd(filter) + st->hdrs_len);
	ret = http_compression_buffer_end(st, s, msg->chn, &zbuf, &buf_output, msg->msg_state >= HTTP_MSG_TRAILERS);
	c_rew(msg->chn, flt_rsp_fwd(filter) + st->hdrs_len);
	if (ret < 0)
		return ret;

	flt_change_forward_size(filter, msg->chn, ret - st->consumed);
	msg->next += (ret - st->consumed);
	ret += st->hdrs_len + st->tlrs_len;

	st->initialized = 0;
	st->finished = (msg->msg_state >= HTTP_MSG_TRAILERS);
	st->hdrs_len = 0;
	st->tlrs_len = 0;
	return ret;
}

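/* End of message callback: update the per-frontend and per-backend counters of
 * compressed responses. */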
static int
comp_http_end(struct stream *s, struct filter *filter,
	      struct http_msg *msg)
{
	struct comp_state *st = filter->ctx;

	if (!(msg->chn->flags & CF_ISRESP) || !st || !st->comp_algo)
		goto end;

	if (strm_fe(s)->mode == PR_MODE_HTTP)
		HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.p.http.comp_rsp, 1);
	if ((s->flags & SF_BE_ASSIGNED) && (s->be->mode == PR_MODE_HTTP))
		HA_ATOMIC_ADD(&s->be->be_counters.p.http.comp_rsp, 1);
 end:
	return 1;
}
/***********************************************************************/
/*
 * Selects a compression algorithm depending on the client request.
 */
static int
http_select_comp_reqhdr(struct comp_state *st, struct stream *s, struct http_msg *msg)
{
	struct http_txn *txn = s->txn;
	struct channel *req = msg->chn;
	struct hdr_ctx ctx;
	struct comp_algo *comp_algo = NULL;
	struct comp_algo *comp_algo_back = NULL;

	/* Disable compression for older user agents announcing themselves as "Mozilla/4"
	 * unless they are known good (MSIE 6 with XP SP2, or MSIE 7 and later).
	 * See http://zoompf.com/2012/02/lose-the-wait-http-compression for more details.
	 */
	ctx.idx = 0;
	if (http_find_header2("User-Agent", 10, ci_head(req), &txn->hdr_idx, &ctx) &&
	    ctx.vlen >= 9 &&
	    memcmp(ctx.line + ctx.val, "Mozilla/4", 9) == 0 &&
	    (ctx.vlen < 31 ||
	     memcmp(ctx.line + ctx.val + 25, "MSIE ", 5) != 0 ||
	     ctx.line[ctx.val + 30] < '6' ||
	     (ctx.line[ctx.val + 30] == '6' &&
	      (ctx.vlen < 54 || memcmp(ctx.line + 51, "SV1", 3) != 0)))) {
		st->comp_algo = NULL;
		return 0;
	}

	/* search for the algo in the backend in priority or the frontend */
	if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) ||
	    (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos))) {
		int best_q = 0;

		ctx.idx = 0;
		while (http_find_header2("Accept-Encoding", 15, ci_head(req), &txn->hdr_idx, &ctx)) {
			const char *qval;
			int q;
			int toklen;

			/* try to isolate the token from the optional q-value */
			toklen = 0;
			while (toklen < ctx.vlen && HTTP_IS_TOKEN(*(ctx.line + ctx.val + toklen)))
				toklen++;

			qval = ctx.line + ctx.val + toklen;
			while (1) {
				while (qval < ctx.line + ctx.val + ctx.vlen && HTTP_IS_LWS(*qval))
					qval++;

				if (qval >= ctx.line + ctx.val + ctx.vlen || *qval != ';') {
					qval = NULL;
					break;
				}
				qval++;

				while (qval < ctx.line + ctx.val + ctx.vlen && HTTP_IS_LWS(*qval))
					qval++;

				if (qval >= ctx.line + ctx.val + ctx.vlen) {
					qval = NULL;
					break;
				}
				if (strncmp(qval, "q=", MIN(ctx.line + ctx.val + ctx.vlen - qval, 2)) == 0)
					break;

				while (qval < ctx.line + ctx.val + ctx.vlen && *qval != ';')
					qval++;
			}

			/* here we have qval pointing to the first "q=" attribute or NULL if not found */
			q = qval ? http_parse_qvalue(qval + 2, NULL) : 1000;

			if (q <= best_q)
				continue;

			for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
				if (*(ctx.line + ctx.val) == '*' ||
				    word_match(ctx.line + ctx.val, toklen, comp_algo->ua_name, comp_algo->ua_name_len)) {
					st->comp_algo = comp_algo;
					best_q = q;
					break;
				}
			}
		}
	}

	/* remove all occurrences of the header when "compression offload" is set */
	if (st->comp_algo) {
		if ((s->be->comp && s->be->comp->offload) ||
		    (strm_fe(s)->comp && strm_fe(s)->comp->offload)) {
			http_remove_header2(msg, &txn->hdr_idx, &ctx);
			ctx.idx = 0;
			while (http_find_header2("Accept-Encoding", 15, ci_head(req), &txn->hdr_idx, &ctx)) {
				http_remove_header2(msg, &txn->hdr_idx, &ctx);
			}
		}
		return 1;
	}

	/* identity is implicit and does not require headers */
	if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) ||
	    (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos))) {
		for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
			if (comp_algo->cfg_name_len == 8 && memcmp(comp_algo->cfg_name, "identity", 8) == 0) {
				st->comp_algo = comp_algo;
				return 1;
			}
		}
	}

	st->comp_algo = NULL;
	return 0;
}

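/* HTX counterpart of http_select_comp_reqhdr(): same Accept-Encoding /
 * User-Agent logic, but working on the HTX representation of the request. */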
static int
htx_select_comp_reqhdr(struct comp_state *st, struct stream *s, struct http_msg *msg)
{
	struct htx *htx = htx_from_buf(&msg->chn->buf);
	struct http_hdr_ctx ctx;
	struct comp_algo *comp_algo = NULL;
	struct comp_algo *comp_algo_back = NULL;

	/* Disable compression for older user agents announcing themselves as "Mozilla/4"
	 * unless they are known good (MSIE 6 with XP SP2, or MSIE 7 and later).
	 * See http://zoompf.com/2012/02/lose-the-wait-http-compression for more details.
	 */
	ctx.blk = NULL;
	if (http_find_header(htx, ist("User-Agent"), &ctx, 1) &&
	    ctx.value.len >= 9 &&
	    memcmp(ctx.value.ptr, "Mozilla/4", 9) == 0 &&
	    (ctx.value.len < 31 ||
	     memcmp(ctx.value.ptr + 25, "MSIE ", 5) != 0 ||
	     *(ctx.value.ptr + 30) < '6' ||
	     (*(ctx.value.ptr + 30) == '6' &&
	      (ctx.value.len < 54 || memcmp(ctx.value.ptr + 51, "SV1", 3) != 0)))) {
		st->comp_algo = NULL;
		return 0;
	}

	/* search for the algo in the backend in priority or the frontend */
	if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) ||
	    (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos))) {
		int best_q = 0;

		ctx.blk = NULL;
		while (http_find_header(htx, ist("Accept-Encoding"), &ctx, 0)) {
			const char *qval;
			int q;
			int toklen;

			/* try to isolate the token from the optional q-value */
			toklen = 0;
			while (toklen < ctx.value.len && HTTP_IS_TOKEN(*(ctx.value.ptr + toklen)))
				toklen++;

			qval = ctx.value.ptr + toklen;
			while (1) {
				while (qval < ctx.value.ptr + ctx.value.len && HTTP_IS_LWS(*qval))
					qval++;

				if (qval >= ctx.value.ptr + ctx.value.len || *qval != ';') {
					qval = NULL;
					break;
				}
				qval++;

				while (qval < ctx.value.ptr + ctx.value.len && HTTP_IS_LWS(*qval))
					qval++;

				if (qval >= ctx.value.ptr + ctx.value.len) {
					qval = NULL;
					break;
				}
				if (strncmp(qval, "q=", MIN(ctx.value.ptr + ctx.value.len - qval, 2)) == 0)
					break;

				while (qval < ctx.value.ptr + ctx.value.len && *qval != ';')
					qval++;
			}

			/* here we have qval pointing to the first "q=" attribute or NULL if not found */
			q = qval ? http_parse_qvalue(qval + 2, NULL) : 1000;

			if (q <= best_q)
				continue;

			for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
				if (*(ctx.value.ptr) == '*' ||
				    word_match(ctx.value.ptr, toklen, comp_algo->ua_name, comp_algo->ua_name_len)) {
					st->comp_algo = comp_algo;
					best_q = q;
					break;
				}
			}
		}
	}

	/* remove all occurrences of the header when "compression offload" is set */
	if (st->comp_algo) {
		if ((s->be->comp && s->be->comp->offload) ||
		    (strm_fe(s)->comp && strm_fe(s)->comp->offload)) {
			http_remove_header(htx, &ctx);
			ctx.blk = NULL;
			while (http_find_header(htx, ist("Accept-Encoding"), &ctx, 1))
				http_remove_header(htx, &ctx);
		}
		return 1;
	}

	/* identity is implicit and does not require headers */
	if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) ||
	    (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos))) {
		for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
			if (comp_algo->cfg_name_len == 8 && memcmp(comp_algo->cfg_name, "identity", 8) == 0) {
				st->comp_algo = comp_algo;
				return 1;
			}
		}
	}

	st->comp_algo = NULL;
	return 0;
}

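/* Dispatch to the HTX or legacy implementation depending on the stream type. */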
static int
select_compression_request_header(struct comp_state *st, struct stream *s,
				  struct http_msg *msg)
{
	if (IS_HTX_STRM(s))
		return htx_select_comp_reqhdr(st, s, msg);
	else
		return http_select_comp_reqhdr(st, s, msg);
}

/*
 * Selects a compression algorithm depending on the server response.
 */
static int
http_select_comp_reshdr(struct comp_state *st, struct stream *s, struct http_msg *msg)
{
	struct http_txn *txn = s->txn;
	struct channel *c = msg->chn;
	struct hdr_ctx ctx;
	struct comp_type *comp_type;

	/* no common compression algorithm was found in request header */
	if (st->comp_algo == NULL)
		goto fail;

	/* HTTP < 1.1 should not be compressed */
	if (!(msg->flags & HTTP_MSGF_VER_11) || !(txn->req.flags & HTTP_MSGF_VER_11))
		goto fail;

	if (txn->meth == HTTP_METH_HEAD)
		goto fail;

	/* compress 200,201,202,203 responses only */
	if ((txn->status != 200) &&
	    (txn->status != 201) &&
	    (txn->status != 202) &&
	    (txn->status != 203))
		goto fail;

	/* Content-Length is null */
	if (!(msg->flags & HTTP_MSGF_TE_CHNK) && msg->body_len == 0)
		goto fail;

	/* content is already compressed */
	ctx.idx = 0;
	if (http_find_header2("Content-Encoding", 16, ci_head(c), &txn->hdr_idx, &ctx))
		goto fail;

	/* no compression when Cache-Control: no-transform is present in the message */
	ctx.idx = 0;
	while (http_find_header2("Cache-Control", 13, ci_head(c), &txn->hdr_idx, &ctx)) {
		if (word_match(ctx.line + ctx.val, ctx.vlen, "no-transform", 12))
			goto fail;
	}

	comp_type = NULL;

	/* we don't want to compress multipart content-types, nor content-types that are
	 * not listed in the "compression type" directive if any. If no content-type was
	 * found but configuration requires one, we don't compress either. Backend has
	 * the priority.
	 */
	ctx.idx = 0;
	if (http_find_header2("Content-Type", 12, ci_head(c), &txn->hdr_idx, &ctx)) {
		if (ctx.vlen >= 9 && strncasecmp("multipart", ctx.line+ctx.val, 9) == 0)
			goto fail;

		if ((s->be->comp && (comp_type = s->be->comp->types)) ||
		    (strm_fe(s)->comp && (comp_type = strm_fe(s)->comp->types))) {
			for (; comp_type; comp_type = comp_type->next) {
				if (ctx.vlen >= comp_type->name_len &&
				    strncasecmp(ctx.line+ctx.val, comp_type->name, comp_type->name_len) == 0)
					/* this Content-Type should be compressed */
					break;
			}
			/* this Content-Type should not be compressed */
			if (comp_type == NULL)
				goto fail;
		}
	}
	else { /* no content-type header */
		if ((s->be->comp && s->be->comp->types) ||
		    (strm_fe(s)->comp && strm_fe(s)->comp->types))
			goto fail; /* a content-type was required */
	}

	/* limit compression rate */
	if (global.comp_rate_lim > 0)
		if (read_freq_ctr(&global.comp_bps_in) > global.comp_rate_lim)
			goto fail;

	/* limit cpu usage */
	if (idle_pct < compress_min_idle)
		goto fail;

	/* initialize compression */
	if (st->comp_algo->init(&st->comp_ctx, global.tune.comp_maxlevel) < 0)
		goto fail;

	/* remove Content-Length header */
	ctx.idx = 0;
	if ((msg->flags & HTTP_MSGF_CNT_LEN) && http_find_header2("Content-Length", 14, ci_head(c), &txn->hdr_idx, &ctx))
		http_remove_header2(msg, &txn->hdr_idx, &ctx);

	/* add Transfer-Encoding header */
	if (!(msg->flags & HTTP_MSGF_TE_CHNK))
		http_header_add_tail2(&txn->rsp, &txn->hdr_idx, "Transfer-Encoding: chunked", 26);

	/*
	 * Add Content-Encoding header when it's not identity encoding.
	 * RFC 2616 : Identity encoding: This content-coding is used only in the
	 * Accept-Encoding header, and SHOULD NOT be used in the Content-Encoding
	 * header.
	 */
	if (st->comp_algo->cfg_name_len != 8 || memcmp(st->comp_algo->cfg_name, "identity", 8) != 0) {
		trash.data = 18;
		memcpy(trash.area, "Content-Encoding: ", trash.data);
		memcpy(trash.area + trash.data, st->comp_algo->ua_name,
		       st->comp_algo->ua_name_len);
		trash.data += st->comp_algo->ua_name_len;
		trash.area[trash.data] = '\0';
		http_header_add_tail2(&txn->rsp, &txn->hdr_idx, trash.area,
				      trash.data);
	}
	msg->flags |= HTTP_MSGF_COMPRESSING;
	return 1;

fail:
	st->comp_algo = NULL;
	return 0;
}

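/* HTX counterpart of http_select_comp_reshdr(): decide whether the response
 * may be compressed and, if so, update its headers (Content-Encoding,
 * Content-Length removal, Transfer-Encoding) and initialize the compression
 * context. */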
static int
htx_select_comp_reshdr(struct comp_state *st, struct stream *s, struct http_msg *msg)
{
	struct htx *htx = htx_from_buf(&msg->chn->buf);
	struct http_txn *txn = s->txn;
	struct http_hdr_ctx ctx;
	struct comp_type *comp_type;

	/* no common compression algorithm was found in request header */
	if (st->comp_algo == NULL)
		goto fail;

	/* HTTP < 1.1 should not be compressed */
	if (!(msg->flags & HTTP_MSGF_VER_11) || !(txn->req.flags & HTTP_MSGF_VER_11))
		goto fail;

	if (txn->meth == HTTP_METH_HEAD)
		goto fail;

	/* compress 200,201,202,203 responses only */
	if ((txn->status != 200) &&
	    (txn->status != 201) &&
	    (txn->status != 202) &&
	    (txn->status != 203))
		goto fail;

	if (msg->flags & HTTP_MSGF_BODYLESS)
		goto fail;

	/* content is already compressed */
	ctx.blk = NULL;
	if (http_find_header(htx, ist("Content-Encoding"), &ctx, 1))
		goto fail;

	/* no compression when Cache-Control: no-transform is present in the message */
	ctx.blk = NULL;
	while (http_find_header(htx, ist("Cache-Control"), &ctx, 0)) {
		if (word_match(ctx.value.ptr, ctx.value.len, "no-transform", 12))
			goto fail;
	}

	comp_type = NULL;

	/* we don't want to compress multipart content-types, nor content-types that are
	 * not listed in the "compression type" directive if any. If no content-type was
	 * found but configuration requires one, we don't compress either. Backend has
	 * the priority.
	 */
	ctx.blk = NULL;
	if (http_find_header(htx, ist("Content-Type"), &ctx, 1)) {
		if (ctx.value.len >= 9 && strncasecmp("multipart", ctx.value.ptr, 9) == 0)
			goto fail;

		if ((s->be->comp && (comp_type = s->be->comp->types)) ||
		    (strm_fe(s)->comp && (comp_type = strm_fe(s)->comp->types))) {
			for (; comp_type; comp_type = comp_type->next) {
				if (ctx.value.len >= comp_type->name_len &&
				    strncasecmp(ctx.value.ptr, comp_type->name, comp_type->name_len) == 0)
					/* this Content-Type should be compressed */
					break;
			}
			/* this Content-Type should not be compressed */
			if (comp_type == NULL)
				goto fail;
		}
	}
	else { /* no content-type header */
		if ((s->be->comp && s->be->comp->types) ||
		    (strm_fe(s)->comp && strm_fe(s)->comp->types))
			goto fail; /* a content-type was required */
	}

	/* limit compression rate */
	if (global.comp_rate_lim > 0)
		if (read_freq_ctr(&global.comp_bps_in) > global.comp_rate_lim)
			goto fail;

	/* limit cpu usage */
	if (idle_pct < compress_min_idle)
		goto fail;

	/* initialize compression */
	if (st->comp_algo->init(&st->comp_ctx, global.tune.comp_maxlevel) < 0)
		goto fail;

	/*
	 * Add Content-Encoding header when it's not identity encoding.
	 * RFC 2616 : Identity encoding: This content-coding is used only in the
	 * Accept-Encoding header, and SHOULD NOT be used in the Content-Encoding
	 * header.
	 */
	if (st->comp_algo->cfg_name_len != 8 || memcmp(st->comp_algo->cfg_name, "identity", 8) != 0) {
		struct ist v = ist2(st->comp_algo->ua_name, st->comp_algo->ua_name_len);

		if (!http_add_header(htx, ist("Content-Encoding"), v))
			goto deinit_comp_ctx;
	}

	/* remove Content-Length header */
	if (msg->flags & HTTP_MSGF_CNT_LEN) {
		ctx.blk = NULL;

		while (http_find_header(htx, ist("Content-Length"), &ctx, 1))
			http_remove_header(htx, &ctx);
	}

	/* add "Transfer-Encoding: chunked" header */
	if (!(msg->flags & HTTP_MSGF_TE_CHNK)) {
		if (!http_add_header(htx, ist("Transfer-Encoding"), ist("chunked")))
			goto deinit_comp_ctx;
	}

	msg->flags |= HTTP_MSGF_COMPRESSING;
	return 1;

  deinit_comp_ctx:
	st->comp_algo->end(&st->comp_ctx);
  fail:
	st->comp_algo = NULL;
	return 0;
}

static int
select_compression_response_header(struct comp_state *st, struct stream *s, struct http_msg *msg)
{
	if (IS_HTX_STRM(s))
		return htx_select_comp_reshdr(st, s, msg);
	else
		return http_select_comp_reshdr(st, s, msg);
}
/***********************************************************************/
/* emit the chunksize followed by a CRLF on the output and return the number of
 * bytes written. It goes backwards and starts with the byte before <end>. It
 * returns the number of bytes written which will not exceed 10 (8 digits, CR,
 * and LF). The caller is responsible for ensuring there is enough room left in
 * the output buffer for the string.
 */
static int
http_emit_chunk_size(char *end, unsigned int chksz)
{
	char *beg = end;

	*--beg = '\n';
	*--beg = '\r';
	do {
		*--beg = hextab[chksz & 0xF];
	} while (chksz >>= 4);
	return end - beg;
}

/*
 * Init HTTP compression
 */
static int
http_compression_buffer_init(struct channel *inc, struct buffer *out, unsigned int *out_len)
{
	/* output stream requires at least 10 bytes for the gzip header, plus
	 * at least 8 bytes for the gzip trailer (crc+len), plus at most
	 * 5 bytes per 32kB block and 2 bytes to close the stream.
	 */
	if (c_room(inc) < 20 + 5 * ((ci_data(inc) + 32767) >> 15))
		return -1;

	/* prepare an empty output buffer in which we reserve enough room for
	 * copying the output bytes from <in>, plus 10 extra bytes to write
	 * the chunk size. We don't copy the bytes yet so that if we have to
	 * cancel the operation later, it's cheap.
	 */
	b_reset(out);
	*out_len = co_data(inc);
	out->head += *out_len + 10;
	return 0;
}

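/* HTX variant of the output buffer initialization: only check that the HTX
 * message leaves enough room for the compressed data, then reset <out>. */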
static int
htx_compression_buffer_init(struct htx *htx, struct buffer *out)
{
	/* output stream requires at least 10 bytes for the gzip header, plus
	 * at least 8 bytes for the gzip trailer (crc+len), plus at most
	 * 5 bytes per 32kB block and 2 bytes to close the stream.
	 */
	if (htx_free_space(htx) < 20 + 5 * ((htx->data + 32767) >> 15))
		return -1;
	b_reset(out);
	return 0;
}

/*
 * Add data to compress
 */
static int
http_compression_buffer_add_data(struct comp_state *st, struct buffer *in,
				 int in_out, struct buffer *out, int sz)
{
	int consumed_data = 0;
	int data_process_len;
	int block1, block2;

	if (!sz)
		goto end;

	/* select the smallest size between the announced chunk size, the input
	 * data, and the available output buffer size. The compressors are
	 * assumed to be able to process all the bytes we pass to them at
	 * once. */
	data_process_len = MIN(b_room(out), sz);

	block1 = data_process_len;
	if (block1 > b_contig_data(in, in_out))
		block1 = b_contig_data(in, in_out);
	block2 = data_process_len - block1;

	/* compressors return < 0 upon error or the amount of bytes read */
	consumed_data = st->comp_algo->add_data(st->comp_ctx, b_head(in) + in_out, block1, out);
	if (consumed_data != block1 || !block2)
		goto end;
	consumed_data = st->comp_algo->add_data(st->comp_ctx, b_peek(in, 0), block2, out);
	if (consumed_data < 0)
		goto end;
	consumed_data += block1;

 end:
	return consumed_data;
}

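/* HTX variant: feed <len> bytes from <data> to the compressor and return the
 * number of bytes consumed, or a negative value on error. */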
static int
htx_compression_buffer_add_data(struct comp_state *st, const char *data, size_t len,
				struct buffer *out)
{
	return st->comp_algo->add_data(st->comp_ctx, data, len, out);
}

/*
 * Flush data in process, and write the header and footer of the chunk. Upon
 * success, in and out buffers are swapped to avoid a copy.
 */
static int
http_compression_buffer_end(struct comp_state *st, struct stream *s,
			    struct channel *chn, struct buffer *out,
			    unsigned int *buf_out, int end)
{
	struct buffer tmp_buf;
	char *tail;
	int to_forward, left;
	unsigned int tmp_out;

#if defined(USE_SLZ) || defined(USE_ZLIB)
	int ret;

	/* flush data here */
	if (end)
		ret = st->comp_algo->finish(st->comp_ctx, out); /* end of data */
	else
		ret = st->comp_algo->flush(st->comp_ctx, out); /* end of buffer */

	if (ret < 0)
		return -1; /* flush failed */

#endif /* USE_ZLIB */
	if (b_data(out) == 0) {
		/* No data were appended, let's drop the output buffer and
		 * keep the input buffer unchanged.
		 */
		return 0;
	}

	/* OK so at this stage, we have an output buffer <out> looking like this :
	 *
	 *        <-- o --> <------ i ----->
	 *       +---------+---+------------+-----------+
	 *       |   out   | c |  comp_in   |   empty   |
	 *       +---------+---+------------+-----------+
	 *     data        p                           size
	 *
	 * <out> is the room reserved to copy the channel output. It starts at
	 * out->area and has not yet been filled. <c> is the room reserved to
	 * write the chunk size (10 bytes). <comp_in> is the compressed
	 * equivalent of the data part of ib->len. <empty> is the amount of
	 * empty bytes at the end of the buffer, into which we may have to
	 * copy the remaining bytes from ib->len after the data
	 * (chunk size, trailers, ...).
	 */

	/* Write real size at the beginning of the chunk, no need of wrapping.
	 * We write the chunk using a dynamic length and adjust out->p and out->i
	 * accordingly afterwards. That will move <out> away from <data>.
	 */
	left = http_emit_chunk_size(b_head(out), b_data(out));
	b_add(out, left);
	out->head -= *buf_out + (left);
	/* Copy previous data from chn into out */
	if (co_data(chn) > 0) {
		left = b_contig_data(&chn->buf, 0);
		if (left > *buf_out)
			left = *buf_out;

		memcpy(b_head(out), co_head(chn), left);
		b_add(out, left);
		if (co_data(chn) - left) {/* second part of the buffer */
			memcpy(b_head(out) + left, b_orig(&chn->buf), co_data(chn) - left);
			b_add(out, co_data(chn) - left);
		}
	}

	/* chunked encoding requires CRLF after data */
	tail = b_tail(out);
	*tail++ = '\r';
	*tail++ = '\n';

	/* At the end of data, we must write the empty chunk 0<CRLF>,
	 * and terminate the trailers section with a last <CRLF>. If
	 * we're forwarding a chunked-encoded response, we'll have a
	 * trailers section after the empty chunk which needs to be
	 * forwarded and which will provide the last CRLF. Otherwise
	 * we write it ourselves.
	 */
	if (end) {
		struct http_msg *msg = &s->txn->rsp;

		memcpy(tail, "0\r\n", 3);
		tail += 3;
		if (!(msg->flags & HTTP_MSGF_TE_CHNK)) {
			memcpy(tail, "\r\n", 2);
			tail += 2;
		}
	}

	b_add(out, tail - b_tail(out));
	to_forward = b_data(out) - *buf_out;

	/* update input rate */
	if (st->comp_ctx && st->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_in, st->consumed);
		HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_in, st->consumed);
		HA_ATOMIC_ADD(&s->be->be_counters.comp_in, st->consumed);
	} else {
		HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_byp, st->consumed);
		HA_ATOMIC_ADD(&s->be->be_counters.comp_byp, st->consumed);
	}

	/* copy the remaining data in the tmp buffer. */
	c_adv(chn, st->consumed);
	if (b_data(&chn->buf) - co_data(chn) > 0) {
		left = ci_contig_data(chn);
		memcpy(b_tail(out), ci_head(chn), left);
		b_add(out, left);
		if (b_data(&chn->buf) - (co_data(chn) + left)) {
			memcpy(b_tail(out), b_orig(&chn->buf), b_data(&chn->buf) - left);
			b_add(out, b_data(&chn->buf) - left);
		}
	}
	/* swap the buffers */
	tmp_buf = chn->buf;
	chn->buf = *out;
	*out = tmp_buf;

	tmp_out = chn->output;
	chn->output = *buf_out;
	*buf_out = tmp_out;

	if (st->comp_ctx && st->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_out, to_forward);
		HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_out, to_forward);
		HA_ATOMIC_ADD(&s->be->be_counters.comp_out, to_forward);
	}

	return to_forward;
}

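/* HTX variant: flush the compressor into <out>, or finish the compressed
 * stream when <end> is non-zero. */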
static int
htx_compression_buffer_end(struct comp_state *st, struct buffer *out, int end)
{
	if (end)
		return st->comp_algo->finish(st->comp_ctx, out);
	else
		return st->comp_algo->flush(st->comp_ctx, out);
}


/***********************************************************************/
struct flt_ops comp_ops = {
	.init              = comp_flt_init,
	.init_per_thread   = comp_flt_init_per_thread,
	.deinit_per_thread = comp_flt_deinit_per_thread,

	.channel_start_analyze = comp_start_analyze,
	.channel_end_analyze   = comp_end_analyze,
	.channel_post_analyze  = comp_http_post_analyze,

	.http_headers          = comp_http_headers,
	.http_payload          = comp_http_payload,
	.http_end              = comp_http_end,

	.http_data             = comp_http_data,
	.http_chunk_trailers   = comp_http_chunk_trailers,
	.http_forward_data     = comp_http_forward_data,
};

static int
parse_compression_options(char **args, int section, struct proxy *proxy,
			  struct proxy *defpx, const char *file, int line,
			  char **err)
{
	struct comp *comp;

	if (proxy->comp == NULL) {
		comp = calloc(1, sizeof(*comp));
		proxy->comp = comp;
	}
	else
		comp = proxy->comp;

	if (!strcmp(args[1], "algo")) {
		struct comp_ctx *ctx;
		int cur_arg = 2;

		if (!*args[cur_arg]) {
			memprintf(err, "parsing [%s:%d] : '%s' expects <algorithm>\n",
				  file, line, args[0]);
			return -1;
		}
		while (*(args[cur_arg])) {
			if (comp_append_algo(comp, args[cur_arg]) < 0) {
				memprintf(err, "'%s' : '%s' is not a supported algorithm.\n",
					  args[0], args[cur_arg]);
				return -1;
			}
			if (proxy->comp->algos->init(&ctx, 9) == 0)
				proxy->comp->algos->end(&ctx);
			else {
				memprintf(err, "'%s' : Can't init '%s' algorithm.\n",
					  args[0], args[cur_arg]);
				return -1;
			}
			cur_arg++;
			continue;
		}
	}
	else if (!strcmp(args[1], "offload"))
		comp->offload = 1;
	else if (!strcmp(args[1], "type")) {
		int cur_arg = 2;

		if (!*args[cur_arg]) {
			memprintf(err, "'%s' expects <type>\n", args[0]);
			return -1;
		}
		while (*(args[cur_arg])) {
			comp_append_type(comp, args[cur_arg]);
			cur_arg++;
			continue;
		}
	}
	else {
		memprintf(err, "'%s' expects 'algo', 'type' or 'offload'\n",
			  args[0]);
		return -1;
	}

	return 0;
}

static int
parse_http_comp_flt(char **args, int *cur_arg, struct proxy *px,
		    struct flt_conf *fconf, char **err, void *private)
{
	struct flt_conf *fc, *back;

	list_for_each_entry_safe(fc, back, &px->filter_configs, list) {
		if (fc->id == http_comp_flt_id) {
			memprintf(err, "%s: Proxy supports only one compression filter\n", px->id);
			return -1;
		}
	}

	fconf->id   = http_comp_flt_id;
	fconf->conf = NULL;
	fconf->ops  = &comp_ops;
	(*cur_arg)++;

	return 0;
}


int
check_legacy_http_comp_flt(struct proxy *proxy)
{
	struct flt_conf *fconf;
	int err = 0;

	if (proxy->comp == NULL)
		goto end;
	if (!LIST_ISEMPTY(&proxy->filter_configs)) {
		list_for_each_entry(fconf, &proxy->filter_configs, list) {
			if (fconf->id == http_comp_flt_id)
				goto end;
		}
		ha_alert("config: %s '%s': require an explicit filter declaration to use HTTP compression\n",
			 proxy_type_str(proxy), proxy->id);
		err++;
		goto end;
	}

	fconf = calloc(1, sizeof(*fconf));
	if (!fconf) {
		ha_alert("config: %s '%s': out of memory\n",
			 proxy_type_str(proxy), proxy->id);
		err++;
		goto end;
	}
	fconf->id   = http_comp_flt_id;
	fconf->conf = NULL;
	fconf->ops  = &comp_ops;
	LIST_ADDQ(&proxy->filter_configs, &fconf->list);

 end:
	return err;
}

/*
 * boolean, returns true if compression is used (either gzip or deflate) in the
 * response.
 */
static int
smp_fetch_res_comp(const struct arg *args, struct sample *smp, const char *kw,
		   void *private)
{
	struct http_txn *txn = smp->strm ? smp->strm->txn : NULL;

	smp->data.type = SMP_T_BOOL;
	smp->data.u.sint = (txn && (txn->rsp.flags & HTTP_MSGF_COMPRESSING));
	return 1;
}

/*
 * string, returns algo
 */
static int
smp_fetch_res_comp_algo(const struct arg *args, struct sample *smp,
			const char *kw, void *private)
{
	struct http_txn *txn = smp->strm ? smp->strm->txn : NULL;
	struct filter *filter;
	struct comp_state *st;

	if (!txn || !(txn->rsp.flags & HTTP_MSGF_COMPRESSING))
		return 0;

	list_for_each_entry(filter, &strm_flt(smp->strm)->filters, list) {
		if (FLT_ID(filter) != http_comp_flt_id)
			continue;

		if (!(st = filter->ctx))
			break;

		smp->data.type = SMP_T_STR;
		smp->flags = SMP_F_CONST;
		smp->data.u.str.area = st->comp_algo->cfg_name;
		smp->data.u.str.data = st->comp_algo->cfg_name_len;
		return 1;
	}
	return 0;
}

/* Declare the config parser for "compression" keyword */
static struct cfg_kw_list cfg_kws = {ILH, {
		{ CFG_LISTEN, "compression", parse_compression_options },
		{ 0, NULL, NULL },
	}
};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);

/* Declare the filter parser for "compression" keyword */
static struct flt_kw_list filter_kws = { "COMP", { }, {
		{ "compression", parse_http_comp_flt, NULL },
		{ NULL, NULL, NULL },
	}
};

INITCALL1(STG_REGISTER, flt_register_keywords, &filter_kws);

/* Note: must not be declared <const> as its list will be overwritten */
static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
		{ "res.comp",      smp_fetch_res_comp,      0, NULL, SMP_T_BOOL, SMP_USE_HRSHP },
		{ "res.comp_algo", smp_fetch_res_comp_algo, 0, NULL, SMP_T_STR,  SMP_USE_HRSHP },
		{ /* END */ },
	}
};

INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords);