Christopher Faulet3d97c902015-12-09 14:59:38 +01001/*
2 * Stream filters related variables and functions.
3 *
4 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <common/buffer.h>
14#include <common/cfgparse.h>
Willy Tarreaub96b77e2018-12-11 10:22:41 +010015#include <common/htx.h>
Willy Tarreau0108d902018-11-25 19:14:37 +010016#include <common/initcall.h>
Christopher Faulet3d97c902015-12-09 14:59:38 +010017#include <common/mini-clist.h>
18#include <common/standard.h>
19
20#include <types/compression.h>
21#include <types/filters.h>
Christopher Faulet3d97c902015-12-09 14:59:38 +010022#include <types/proxy.h>
23#include <types/sample.h>
24
25#include <proto/compression.h>
Christopher Faulet92d36382015-11-05 13:35:03 +010026#include <proto/filters.h>
Christopher Faulet3d97c902015-12-09 14:59:38 +010027#include <proto/hdr_idx.h>
Christopher Faulete6902cd2018-11-30 22:29:48 +010028#include <proto/http_htx.h>
Christopher Faulet3d97c902015-12-09 14:59:38 +010029#include <proto/proto_http.h>
30#include <proto/sample.h>
31#include <proto/stream.h>
32
Christopher Fauletf4a4ef72018-12-07 17:39:53 +010033const char *http_comp_flt_id = "compression filter";
Christopher Faulet92d36382015-11-05 13:35:03 +010034
35struct flt_ops comp_ops;
36
Christopher Faulet92d36382015-11-05 13:35:03 +010037struct comp_state {
38 struct comp_ctx *comp_ctx; /* compression context */
39 struct comp_algo *comp_algo; /* compression algorithm if not NULL */
Christopher Faulete6902cd2018-11-30 22:29:48 +010040
41 /* Following fields are used by the legacy code only: */
Christopher Fauletb77c5c22015-12-07 16:48:42 +010042 int hdrs_len;
43 int tlrs_len;
Christopher Faulet2fb28802015-12-01 10:40:57 +010044 int consumed;
45 int initialized;
Christopher Fauletb77c5c22015-12-07 16:48:42 +010046 int finished;
Christopher Faulet92d36382015-11-05 13:35:03 +010047};
48
Willy Tarreau8ceae722018-11-26 11:58:30 +010049/* Pools used to allocate comp_state structs */
50DECLARE_STATIC_POOL(pool_head_comp_state, "comp_state", sizeof(struct comp_state));
51
52static THREAD_LOCAL struct buffer tmpbuf;
53static THREAD_LOCAL struct buffer zbuf;
Willy Tarreau8ceae722018-11-26 11:58:30 +010054
Christopher Faulet92d36382015-11-05 13:35:03 +010055static int select_compression_request_header(struct comp_state *st,
56 struct stream *s,
57 struct http_msg *msg);
58static int select_compression_response_header(struct comp_state *st,
59 struct stream *s,
60 struct http_msg *msg);
Christopher Faulet27d93c32018-12-15 22:32:02 +010061static int set_compression_response_header(struct comp_state *st,
62 struct stream *s,
63 struct http_msg *msg);
Christopher Faulet92d36382015-11-05 13:35:03 +010064
Christopher Faulete6902cd2018-11-30 22:29:48 +010065static int htx_compression_buffer_init(struct htx *htx, struct buffer *out);
66static int htx_compression_buffer_add_data(struct comp_state *st, const char *data, size_t len,
67 struct buffer *out);
68static int htx_compression_buffer_end(struct comp_state *st, struct buffer *out, int end);
69
Christopher Fauletb61481c2018-12-17 13:17:53 +010070static int http_compression_buffer_init(struct channel *inc, struct buffer *out);
Christopher Faulet92d36382015-11-05 13:35:03 +010071static int http_compression_buffer_add_data(struct comp_state *st,
72 struct buffer *in,
Willy Tarreaud54a8ce2018-06-29 18:42:02 +020073 int in_out,
Christopher Faulet92d36382015-11-05 13:35:03 +010074 struct buffer *out, int sz);
75static int http_compression_buffer_end(struct comp_state *st, struct stream *s,
Willy Tarreauc9fa0482018-07-10 17:43:27 +020076 struct channel *chn, struct buffer *out,
Christopher Fauletb61481c2018-12-17 13:17:53 +010077 int end);
Christopher Faulet92d36382015-11-05 13:35:03 +010078
79/***********************************************************************/
80static int
Christopher Faulete6902cd2018-11-30 22:29:48 +010081comp_flt_init(struct proxy *px, struct flt_conf *fconf)
82{
Christopher Faulet6e540952018-12-03 22:43:41 +010083 fconf->flags |= FLT_CFG_FL_HTX;
Christopher Faulete6902cd2018-11-30 22:29:48 +010084 return 0;
85}
86
87static int
Christopher Faulet8ca3b4b2017-07-25 11:07:15 +020088comp_flt_init_per_thread(struct proxy *px, struct flt_conf *fconf)
Christopher Faulet92d36382015-11-05 13:35:03 +010089{
Willy Tarreauc9fa0482018-07-10 17:43:27 +020090 if (!tmpbuf.size && b_alloc(&tmpbuf) == NULL)
Christopher Fauletb77c5c22015-12-07 16:48:42 +010091 return -1;
Willy Tarreauc9fa0482018-07-10 17:43:27 +020092 if (!zbuf.size && b_alloc(&zbuf) == NULL)
Christopher Fauletb77c5c22015-12-07 16:48:42 +010093 return -1;
Christopher Faulet92d36382015-11-05 13:35:03 +010094 return 0;
95}
96
97static void
Christopher Faulet8ca3b4b2017-07-25 11:07:15 +020098comp_flt_deinit_per_thread(struct proxy *px, struct flt_conf *fconf)
Christopher Faulet92d36382015-11-05 13:35:03 +010099{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200100 if (tmpbuf.size)
Christopher Faulet92d36382015-11-05 13:35:03 +0100101 b_free(&tmpbuf);
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200102 if (zbuf.size)
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100103 b_free(&zbuf);
Christopher Faulet92d36382015-11-05 13:35:03 +0100104}
105
106static int
107comp_start_analyze(struct stream *s, struct filter *filter, struct channel *chn)
108{
Christopher Faulet8ca3b4b2017-07-25 11:07:15 +0200109
Christopher Faulet92d36382015-11-05 13:35:03 +0100110 if (filter->ctx == NULL) {
111 struct comp_state *st;
112
Willy Tarreaubafbe012017-11-24 17:34:44 +0100113 st = pool_alloc_dirty(pool_head_comp_state);
Christopher Fauleta03d4ad2017-06-26 16:53:33 +0200114 if (st == NULL)
Christopher Faulet92d36382015-11-05 13:35:03 +0100115 return -1;
116
Christopher Faulet2fb28802015-12-01 10:40:57 +0100117 st->comp_algo = NULL;
118 st->comp_ctx = NULL;
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100119 st->hdrs_len = 0;
120 st->tlrs_len = 0;
Christopher Faulet2fb28802015-12-01 10:40:57 +0100121 st->consumed = 0;
122 st->initialized = 0;
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100123 st->finished = 0;
Christopher Faulet2fb28802015-12-01 10:40:57 +0100124 filter->ctx = st;
Christopher Faulet3dc860d2017-09-15 11:39:36 +0200125
126 /* Register post-analyzer on AN_RES_WAIT_HTTP because we need to
127 * analyze response headers before http-response rules execution
128 * to be sure we can use res.comp and res.comp_algo sample
129 * fetches */
130 filter->post_analyzers |= AN_RES_WAIT_HTTP;
Christopher Faulet92d36382015-11-05 13:35:03 +0100131 }
132 return 1;
133}
134
135static int
Christopher Faulet92d36382015-11-05 13:35:03 +0100136comp_end_analyze(struct stream *s, struct filter *filter, struct channel *chn)
137{
138 struct comp_state *st = filter->ctx;
Christopher Faulet92d36382015-11-05 13:35:03 +0100139
Christopher Fauletd60b3cf2017-06-26 11:47:13 +0200140 if (!st)
Christopher Faulet92d36382015-11-05 13:35:03 +0100141 goto end;
142
Christopher Faulet92d36382015-11-05 13:35:03 +0100143 /* release any possible compression context */
Christopher Fauletd60b3cf2017-06-26 11:47:13 +0200144 if (st->comp_algo)
145 st->comp_algo->end(&st->comp_ctx);
Willy Tarreaubafbe012017-11-24 17:34:44 +0100146 pool_free(pool_head_comp_state, st);
Christopher Faulet92d36382015-11-05 13:35:03 +0100147 filter->ctx = NULL;
148 end:
149 return 1;
150}
151
152static int
Christopher Faulet1339d742016-05-11 16:48:33 +0200153comp_http_headers(struct stream *s, struct filter *filter, struct http_msg *msg)
154{
155 struct comp_state *st = filter->ctx;
156
157 if (!strm_fe(s)->comp && !s->be->comp)
158 goto end;
159
160 if (!(msg->chn->flags & CF_ISRESP))
161 select_compression_request_header(st, s, msg);
162 else {
Christopher Faulet3dc860d2017-09-15 11:39:36 +0200163 /* Response headers have already been checked in
164 * comp_http_post_analyze callback. */
Christopher Faulet1339d742016-05-11 16:48:33 +0200165 if (st->comp_algo) {
Christopher Faulet27d93c32018-12-15 22:32:02 +0100166 if (!set_compression_response_header(st, s, msg))
167 goto end;
Christopher Faulet1339d742016-05-11 16:48:33 +0200168 register_data_filter(s, msg->chn, filter);
Christopher Faulete6902cd2018-11-30 22:29:48 +0100169 if (!IS_HTX_STRM(s))
170 st->hdrs_len = s->txn->rsp.sov;
Christopher Faulet1339d742016-05-11 16:48:33 +0200171 }
172 }
173
174 end:
175 return 1;
176}
177
178static int
Christopher Faulet3dc860d2017-09-15 11:39:36 +0200179comp_http_post_analyze(struct stream *s, struct filter *filter,
180 struct channel *chn, unsigned an_bit)
181{
182 struct http_txn *txn = s->txn;
183 struct http_msg *msg = &txn->rsp;
184 struct comp_state *st = filter->ctx;
185
186 if (an_bit != AN_RES_WAIT_HTTP)
187 goto end;
188
189 if (!strm_fe(s)->comp && !s->be->comp)
190 goto end;
191
192 select_compression_response_header(st, s, msg);
193
194 end:
195 return 1;
196}
197
198static int
Christopher Faulete6902cd2018-11-30 22:29:48 +0100199comp_http_payload(struct stream *s, struct filter *filter, struct http_msg *msg,
200 unsigned int offset, unsigned int len)
201{
202 struct comp_state *st = filter->ctx;
Christopher Faulet27ba2dc2018-12-05 11:53:24 +0100203 struct htx *htx = htxbuf(&msg->chn->buf);
Christopher Faulete6902cd2018-11-30 22:29:48 +0100204 struct htx_blk *blk;
205 struct htx_ret htx_ret;
206 int ret, consumed = 0, to_forward = 0;
207
208 htx_ret = htx_find_blk(htx, offset);
209 blk = htx_ret.blk;
210 offset = htx_ret.ret;
211
212 while (blk && len) {
213 enum htx_blk_type type = htx_get_blk_type(blk);
214 uint32_t sz = htx_get_blksz(blk);
215 struct ist v;
216
217 switch (type) {
218 case HTX_BLK_UNUSED:
219 break;
220
221 case HTX_BLK_DATA:
222 v = htx_get_blk_value(htx, blk);
223 v.ptr += offset;
224 v.len -= offset;
225 if (v.len > len)
226 v.len = len;
227 if (htx_compression_buffer_init(htx, &trash) < 0) {
228 msg->chn->flags |= CF_WAKE_WRITE;
229 goto end;
230 }
231 ret = htx_compression_buffer_add_data(st, v.ptr, v.len, &trash);
232 if (ret < 0)
233 goto error;
234 if (htx_compression_buffer_end(st, &trash, 0) < 0)
235 goto error;
236 len -= ret;
237 consumed += ret;
238 to_forward += b_data(&trash);
239 if (ret == sz && !b_data(&trash)) {
240 offset = 0;
241 blk = htx_remove_blk(htx, blk);
242 continue;
243 }
244 v.len = ret;
245 blk = htx_replace_blk_value(htx, blk, v, ist2(b_head(&trash), b_data(&trash)));
246 break;
247
248 case HTX_BLK_EOD:
249 case HTX_BLK_TLR:
250 case HTX_BLK_EOM:
251 if (msg->flags & HTTP_MSGF_COMPRESSING) {
252 if (htx_compression_buffer_init(htx, &trash) < 0) {
253 msg->chn->flags |= CF_WAKE_WRITE;
254 goto end;
255 }
256 if (htx_compression_buffer_end(st, &trash, 1) < 0)
257 goto error;
258 blk = htx_add_data_before(htx, blk, ist2(b_head(&trash), b_data(&trash)));
259 if (!blk)
260 goto error;
261 to_forward += b_data(&trash);
262 msg->flags &= ~HTTP_MSGF_COMPRESSING;
263 /* We let the mux add the last empty chunk and the empty trailers */
264 }
265 /* fall through */
266
267 default:
268 sz -= offset;
269 if (sz > len)
270 sz = len;
271 consumed += sz;
272 to_forward += sz;
273 len -= sz;
274 break;
275 }
276
277 offset = 0;
278 blk = htx_get_next_blk(htx, blk);
279 }
280
281 end:
282 if (to_forward != consumed)
283 flt_update_offsets(filter, msg->chn, to_forward - consumed);
284
285 if (st->comp_ctx && st->comp_ctx->cur_lvl > 0) {
286 update_freq_ctr(&global.comp_bps_out, to_forward);
287 HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_out, to_forward);
288 HA_ATOMIC_ADD(&s->be->be_counters.comp_out, to_forward);
289 }
290 return to_forward;
291
292 error:
293 return -1;
294}
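/* Note on comp_http_payload()'s accounting: <consumed> counts the input bytes
 * taken from the HTX message while <to_forward> counts the (compressed) bytes
 * that replace them; when the two differ, flt_update_offsets() is what keeps
 * the filter offsets consistent.
 */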
295
296static int
Christopher Faulet2fb28802015-12-01 10:40:57 +0100297comp_http_data(struct stream *s, struct filter *filter, struct http_msg *msg)
Christopher Faulet92d36382015-11-05 13:35:03 +0100298{
299 struct comp_state *st = filter->ctx;
Willy Tarreaubcbd3932018-06-06 07:13:22 +0200300 struct channel *chn = msg->chn;
Christopher Faulet3e7bc672015-12-07 13:39:08 +0100301 unsigned int *nxt = &flt_rsp_nxt(filter);
Christopher Faulet2fb28802015-12-01 10:40:57 +0100302 unsigned int len;
Christopher Faulet92d36382015-11-05 13:35:03 +0100303 int ret;
304
Olivier Houchard0b662842018-06-29 18:16:31 +0200305 len = MIN(msg->chunk_len + msg->next, ci_data(chn)) - *nxt;
Christopher Faulet2fb28802015-12-01 10:40:57 +0100306 if (!len)
307 return len;
308
309 if (!st->initialized) {
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100310 unsigned int fwd = flt_rsp_fwd(filter) + st->hdrs_len;
Christopher Faulet3e7bc672015-12-07 13:39:08 +0100311
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200312 b_reset(&tmpbuf);
Willy Tarreaubcbd3932018-06-06 07:13:22 +0200313 c_adv(chn, fwd);
Christopher Fauletb61481c2018-12-17 13:17:53 +0100314 ret = http_compression_buffer_init(chn, &zbuf);
Willy Tarreaubcbd3932018-06-06 07:13:22 +0200315 c_rew(chn, fwd);
Christopher Faulet2fb28802015-12-01 10:40:57 +0100316 if (ret < 0) {
317 msg->chn->flags |= CF_WAKE_WRITE;
318 return 0;
319 }
320 }
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100321
322 if (msg->flags & HTTP_MSGF_TE_CHNK) {
Christopher Faulet06ecf3a2016-09-22 15:31:43 +0200323 int block;
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100324
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200325 len = MIN(b_room(&tmpbuf), len);
Christopher Faulet06ecf3a2016-09-22 15:31:43 +0200326
Willy Tarreaubcbd3932018-06-06 07:13:22 +0200327 c_adv(chn, *nxt);
Willy Tarreau7194d3c2018-06-06 16:55:45 +0200328 block = ci_contig_data(chn);
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200329 memcpy(b_tail(&tmpbuf), ci_head(chn), block);
Christopher Faulet06ecf3a2016-09-22 15:31:43 +0200330 if (len > block)
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200331 memcpy(b_tail(&tmpbuf)+block, b_orig(&chn->buf), len-block);
Willy Tarreaubcbd3932018-06-06 07:13:22 +0200332 c_rew(chn, *nxt);
Christopher Faulet06ecf3a2016-09-22 15:31:43 +0200333
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200334 b_add(&tmpbuf, len);
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100335 ret = len;
336 }
337 else {
Willy Tarreaubcbd3932018-06-06 07:13:22 +0200338 c_adv(chn, *nxt);
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200339 ret = http_compression_buffer_add_data(st, &chn->buf, co_data(chn), &zbuf, len);
Willy Tarreaubcbd3932018-06-06 07:13:22 +0200340 c_rew(chn, *nxt);
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100341 if (ret < 0)
342 return ret;
343 }
Christopher Faulet92d36382015-11-05 13:35:03 +0100344
Christopher Faulet2fb28802015-12-01 10:40:57 +0100345 st->initialized = 1;
346 msg->next += ret;
347 msg->chunk_len -= ret;
Christopher Faulet3e7bc672015-12-07 13:39:08 +0100348 *nxt = msg->next;
Christopher Faulet2fb28802015-12-01 10:40:57 +0100349 return 0;
Christopher Faulet92d36382015-11-05 13:35:03 +0100350}
351
352static int
Christopher Faulet2fb28802015-12-01 10:40:57 +0100353comp_http_chunk_trailers(struct stream *s, struct filter *filter,
354 struct http_msg *msg)
Christopher Faulet92d36382015-11-05 13:35:03 +0100355{
356 struct comp_state *st = filter->ctx;
Christopher Faulet92d36382015-11-05 13:35:03 +0100357
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100358 if (!st->initialized) {
359 if (!st->finished) {
Willy Tarreaubcbd3932018-06-06 07:13:22 +0200360 struct channel *chn = msg->chn;
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100361 unsigned int fwd = flt_rsp_fwd(filter) + st->hdrs_len;
Christopher Faulet2fb28802015-12-01 10:40:57 +0100362
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200363 b_reset(&tmpbuf);
Willy Tarreaubcbd3932018-06-06 07:13:22 +0200364 c_adv(chn, fwd);
Christopher Fauletb61481c2018-12-17 13:17:53 +0100365 http_compression_buffer_init(chn, &zbuf);
Willy Tarreaubcbd3932018-06-06 07:13:22 +0200366 c_rew(chn, fwd);
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100367 st->initialized = 1;
368 }
369 }
370 st->tlrs_len = msg->sol;
Christopher Faulet2fb28802015-12-01 10:40:57 +0100371 return 1;
Christopher Faulet92d36382015-11-05 13:35:03 +0100372}
373
Christopher Faulet2fb28802015-12-01 10:40:57 +0100374
Christopher Faulet92d36382015-11-05 13:35:03 +0100375static int
376comp_http_forward_data(struct stream *s, struct filter *filter,
377 struct http_msg *msg, unsigned int len)
378{
379 struct comp_state *st = filter->ctx;
Christopher Faulet2fb28802015-12-01 10:40:57 +0100380 int ret;
Christopher Faulet92d36382015-11-05 13:35:03 +0100381
Christopher Faulet2fb28802015-12-01 10:40:57 +0100382 /* To work, previous filters MUST forward all data */
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100383 if (flt_rsp_fwd(filter) + len != flt_rsp_nxt(filter)) {
Christopher Faulet767a84b2017-11-24 16:50:31 +0100384 ha_warning("HTTP compression failed: unexpected behavior of previous filters\n");
Christopher Faulet2fb28802015-12-01 10:40:57 +0100385 return -1;
Christopher Faulet92d36382015-11-05 13:35:03 +0100386 }
387
Christopher Faulet2fb28802015-12-01 10:40:57 +0100388 if (!st->initialized) {
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100389 if (!len) {
Joseph Herlant942eea32018-11-15 13:57:22 -0800390 /* Nothing to forward */
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100391 ret = len;
392 }
393 else if (st->hdrs_len > len) {
394 /* Forward part of headers */
395 ret = len;
396 st->hdrs_len -= len;
397 }
398 else if (st->hdrs_len > 0) {
399 /* Forward remaining headers */
400 ret = st->hdrs_len;
401 st->hdrs_len = 0;
402 }
403 else if (msg->msg_state < HTTP_MSG_TRAILERS) {
404 /* Do not forward anything for now. This only happens
405 * with chunk-encoded responses. The pending data are
406 * part of the chunk envelope (the chunk size or the
407 * chunk CRLF); they will be skipped during the
408 * compression. */
409 ret = 0;
410 }
411 else {
412 /* Forward trailers data */
413 ret = len;
414 }
Christopher Faulet2fb28802015-12-01 10:40:57 +0100415 return ret;
Christopher Faulet92d36382015-11-05 13:35:03 +0100416 }
417
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100418 if (msg->flags & HTTP_MSGF_TE_CHNK) {
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200419 ret = http_compression_buffer_add_data(st, &tmpbuf, 0,
420 &zbuf, b_data(&tmpbuf));
421 if (ret != b_data(&tmpbuf)) {
Willy Tarreau506a29a2018-07-18 10:07:58 +0200422 ha_warning("HTTP compression failed: Must consume %u bytes but only %d bytes consumed\n",
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200423 (unsigned int)b_data(&tmpbuf), ret);
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100424 return -1;
425 }
426 }
427
428 st->consumed = len - st->hdrs_len - st->tlrs_len;
Willy Tarreaubcbd3932018-06-06 07:13:22 +0200429 c_adv(msg->chn, flt_rsp_fwd(filter) + st->hdrs_len);
Christopher Fauletb61481c2018-12-17 13:17:53 +0100430 ret = http_compression_buffer_end(st, s, msg->chn, &zbuf, msg->msg_state >= HTTP_MSG_TRAILERS);
Willy Tarreaubcbd3932018-06-06 07:13:22 +0200431 c_rew(msg->chn, flt_rsp_fwd(filter) + st->hdrs_len);
Christopher Faulet2fb28802015-12-01 10:40:57 +0100432 if (ret < 0)
433 return ret;
Christopher Faulet92d36382015-11-05 13:35:03 +0100434
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100435 flt_change_forward_size(filter, msg->chn, ret - st->consumed);
436 msg->next += (ret - st->consumed);
437 ret += st->hdrs_len + st->tlrs_len;
438
Christopher Faulet2fb28802015-12-01 10:40:57 +0100439 st->initialized = 0;
Christopher Fauletb77c5c22015-12-07 16:48:42 +0100440 st->finished = (msg->msg_state >= HTTP_MSG_TRAILERS);
441 st->hdrs_len = 0;
442 st->tlrs_len = 0;
Christopher Faulet92d36382015-11-05 13:35:03 +0100443 return ret;
444}
Christopher Faulet3d97c902015-12-09 14:59:38 +0100445
Christopher Fauletd60b3cf2017-06-26 11:47:13 +0200446static int
447comp_http_end(struct stream *s, struct filter *filter,
448 struct http_msg *msg)
449{
450 struct comp_state *st = filter->ctx;
451
452 if (!(msg->chn->flags & CF_ISRESP) || !st || !st->comp_algo)
453 goto end;
454
455 if (strm_fe(s)->mode == PR_MODE_HTTP)
Christopher Fauletff8abcd2017-06-02 15:33:24 +0200456 HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.p.http.comp_rsp, 1);
Christopher Fauletd60b3cf2017-06-26 11:47:13 +0200457 if ((s->flags & SF_BE_ASSIGNED) && (s->be->mode == PR_MODE_HTTP))
Christopher Fauletff8abcd2017-06-02 15:33:24 +0200458 HA_ATOMIC_ADD(&s->be->be_counters.p.http.comp_rsp, 1);
Christopher Fauletd60b3cf2017-06-26 11:47:13 +0200459 end:
460 return 1;
461}
Christopher Faulet3d97c902015-12-09 14:59:38 +0100462/***********************************************************************/
Christopher Faulet27d93c32018-12-15 22:32:02 +0100463static int
464http_set_comp_reshdr(struct comp_state *st, struct stream *s, struct http_msg *msg)
465{
466 struct http_txn *txn = s->txn;
467
468 /*
469 * Add Content-Encoding header when it's not identity encoding.
470 * RFC 2616 : Identity encoding: This content-coding is used only in the
471 * Accept-Encoding header, and SHOULD NOT be used in the Content-Encoding
472 * header.
473 */
474 if (st->comp_algo->cfg_name_len != 8 || memcmp(st->comp_algo->cfg_name, "identity", 8) != 0) {
475 trash.data = 18;
476 memcpy(trash.area, "Content-Encoding: ", trash.data);
477 memcpy(trash.area + trash.data, st->comp_algo->ua_name,
478 st->comp_algo->ua_name_len);
479 trash.data += st->comp_algo->ua_name_len;
480 trash.area[trash.data] = '\0';
481 if (http_header_add_tail2(msg, &txn->hdr_idx, trash.area, trash.data) < 0)
482 goto error;
483 }
484
485 /* remove Content-Length header */
486 if (msg->flags & HTTP_MSGF_CNT_LEN) {
487 struct hdr_ctx ctx;
488
489 ctx.idx = 0;
490 while (http_find_header2("Content-Length", 14, ci_head(&s->res), &txn->hdr_idx, &ctx))
491 http_remove_header2(msg, &txn->hdr_idx, &ctx);
492 }
493
494 /* add Transfer-Encoding header */
495 if (!(msg->flags & HTTP_MSGF_TE_CHNK)) {
496 if (http_header_add_tail2(msg, &txn->hdr_idx, "Transfer-Encoding: chunked", 26) < 0)
497 goto error;
498 }
499
500
501 return 1;
502
503 error:
504 st->comp_algo->end(&st->comp_ctx);
505 st->comp_algo = NULL;
506 return 0;
507}
508
509static int
510htx_set_comp_reshdr(struct comp_state *st, struct stream *s, struct http_msg *msg)
511{
512 struct htx *htx = htxbuf(&msg->chn->buf);
513
514 /*
515 * Add Content-Encoding header when it's not identity encoding.
516 * RFC 2616 : Identity encoding: This content-coding is used only in the
517 * Accept-Encoding header, and SHOULD NOT be used in the Content-Encoding
518 * header.
519 */
520 if (st->comp_algo->cfg_name_len != 8 || memcmp(st->comp_algo->cfg_name, "identity", 8) != 0) {
521 struct ist v = ist2(st->comp_algo->ua_name, st->comp_algo->ua_name_len);
522
523 if (!http_add_header(htx, ist("Content-Encoding"), v))
524 goto error;
525 }
526
527 /* remove Content-Length header */
528 if (msg->flags & HTTP_MSGF_CNT_LEN) {
529 struct http_hdr_ctx ctx;
530
531 ctx.blk = NULL;
532 while (http_find_header(htx, ist("Content-Length"), &ctx, 1))
533 http_remove_header(htx, &ctx);
534 }
535
536 /* add "Transfer-Encoding: chunked" header */
537 if (!(msg->flags & HTTP_MSGF_TE_CHNK)) {
538 if (!http_add_header(htx, ist("Transfer-Encoding"), ist("chunked")))
539 goto error;
540 }
541
542 return 1;
543
544 error:
545 st->comp_algo->end(&st->comp_ctx);
546 st->comp_algo = NULL;
547 return 0;
548}
549
550static int
551set_compression_response_header(struct comp_state *st, struct stream *s, struct http_msg *msg)
552{
553 if (IS_HTX_STRM(s))
554 return htx_set_comp_reshdr(st, s, msg);
555 else
556 return http_set_comp_reshdr(st, s, msg);
557}
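/* Illustrative sketch of the header rewrite performed above (example values,
 * not taken from a capture): with the gzip algorithm selected, a response that
 * arrived with
 *     Content-Length: 12345
 * leaves set_compression_response_header() carrying
 *     Content-Encoding: gzip
 *     Transfer-Encoding: chunked
 * and no Content-Length header.
 */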
558
Christopher Faulet3d97c902015-12-09 14:59:38 +0100559/*
560 * Selects a compression algorithm depending on the client request.
561 */
Christopher Faulete6902cd2018-11-30 22:29:48 +0100562static int
563http_select_comp_reqhdr(struct comp_state *st, struct stream *s, struct http_msg *msg)
Christopher Faulet3d97c902015-12-09 14:59:38 +0100564{
565 struct http_txn *txn = s->txn;
Olivier Houchard0b662842018-06-29 18:16:31 +0200566 struct channel *req = msg->chn;
Christopher Faulet3d97c902015-12-09 14:59:38 +0100567 struct hdr_ctx ctx;
568 struct comp_algo *comp_algo = NULL;
569 struct comp_algo *comp_algo_back = NULL;
570
571 /* Disable compression for older user agents announcing themselves as "Mozilla/4"
572 * unless they are known good (MSIE 6 with XP SP2, or MSIE 7 and later).
573 * See http://zoompf.com/2012/02/lose-the-wait-http-compression for more details.
574 */
575 ctx.idx = 0;
Olivier Houchard0b662842018-06-29 18:16:31 +0200576 if (http_find_header2("User-Agent", 10, ci_head(req), &txn->hdr_idx, &ctx) &&
Christopher Faulet3d97c902015-12-09 14:59:38 +0100577 ctx.vlen >= 9 &&
578 memcmp(ctx.line + ctx.val, "Mozilla/4", 9) == 0 &&
579 (ctx.vlen < 31 ||
580 memcmp(ctx.line + ctx.val + 25, "MSIE ", 5) != 0 ||
581 ctx.line[ctx.val + 30] < '6' ||
582 (ctx.line[ctx.val + 30] == '6' &&
583 (ctx.vlen < 54 || memcmp(ctx.line + 51, "SV1", 3) != 0)))) {
Christopher Faulet92d36382015-11-05 13:35:03 +0100584 st->comp_algo = NULL;
Christopher Faulet3d97c902015-12-09 14:59:38 +0100585 return 0;
586 }
587
588 /* search for the algo in the backend in priority or the frontend */
Christopher Faulet92d36382015-11-05 13:35:03 +0100589 if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) ||
590 (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos))) {
Christopher Faulet3d97c902015-12-09 14:59:38 +0100591 int best_q = 0;
592
593 ctx.idx = 0;
Olivier Houchard0b662842018-06-29 18:16:31 +0200594 while (http_find_header2("Accept-Encoding", 15, ci_head(req), &txn->hdr_idx, &ctx)) {
Christopher Faulet3d97c902015-12-09 14:59:38 +0100595 const char *qval;
596 int q;
597 int toklen;
598
599 /* try to isolate the token from the optional q-value */
600 toklen = 0;
Willy Tarreau2235b262016-11-05 15:50:20 +0100601 while (toklen < ctx.vlen && HTTP_IS_TOKEN(*(ctx.line + ctx.val + toklen)))
Christopher Faulet3d97c902015-12-09 14:59:38 +0100602 toklen++;
603
604 qval = ctx.line + ctx.val + toklen;
605 while (1) {
Willy Tarreau2235b262016-11-05 15:50:20 +0100606 while (qval < ctx.line + ctx.val + ctx.vlen && HTTP_IS_LWS(*qval))
Christopher Faulet3d97c902015-12-09 14:59:38 +0100607 qval++;
608
609 if (qval >= ctx.line + ctx.val + ctx.vlen || *qval != ';') {
610 qval = NULL;
611 break;
612 }
613 qval++;
614
Willy Tarreau2235b262016-11-05 15:50:20 +0100615 while (qval < ctx.line + ctx.val + ctx.vlen && HTTP_IS_LWS(*qval))
Christopher Faulet3d97c902015-12-09 14:59:38 +0100616 qval++;
617
618 if (qval >= ctx.line + ctx.val + ctx.vlen) {
619 qval = NULL;
620 break;
621 }
622 if (strncmp(qval, "q=", MIN(ctx.line + ctx.val + ctx.vlen - qval, 2)) == 0)
623 break;
624
625 while (qval < ctx.line + ctx.val + ctx.vlen && *qval != ';')
626 qval++;
627 }
628
629 /* here we have qval pointing to the first "q=" attribute or NULL if not found */
Willy Tarreauab813a42018-09-10 18:41:28 +0200630 q = qval ? http_parse_qvalue(qval + 2, NULL) : 1000;
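			/* Worked example (assumed input): for the header value
			 * "gzip;q=0.500", toklen is 4 ("gzip"), qval ends up
			 * pointing to "q=0.500" and http_parse_qvalue() should
			 * yield q = 500; a token without a q-value keeps the
			 * implicit maximum of 1000. */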
Christopher Faulet3d97c902015-12-09 14:59:38 +0100631
632 if (q <= best_q)
633 continue;
634
635 for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
636 if (*(ctx.line + ctx.val) == '*' ||
637 word_match(ctx.line + ctx.val, toklen, comp_algo->ua_name, comp_algo->ua_name_len)) {
Christopher Faulet92d36382015-11-05 13:35:03 +0100638 st->comp_algo = comp_algo;
Christopher Faulet3d97c902015-12-09 14:59:38 +0100639 best_q = q;
640 break;
641 }
642 }
643 }
644 }
645
646 /* remove all occurrences of the header when "compression offload" is set */
Christopher Faulet92d36382015-11-05 13:35:03 +0100647 if (st->comp_algo) {
648 if ((s->be->comp && s->be->comp->offload) ||
649 (strm_fe(s)->comp && strm_fe(s)->comp->offload)) {
Christopher Faulet3d97c902015-12-09 14:59:38 +0100650 http_remove_header2(msg, &txn->hdr_idx, &ctx);
651 ctx.idx = 0;
Olivier Houchard0b662842018-06-29 18:16:31 +0200652 while (http_find_header2("Accept-Encoding", 15, ci_head(req), &txn->hdr_idx, &ctx)) {
Christopher Faulet3d97c902015-12-09 14:59:38 +0100653 http_remove_header2(msg, &txn->hdr_idx, &ctx);
654 }
655 }
Christopher Faulete6902cd2018-11-30 22:29:48 +0100656 return 1;
657 }
658
659 /* identity is implicit and does not require headers */
660 if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) ||
661 (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos))) {
662 for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
663 if (comp_algo->cfg_name_len == 8 && memcmp(comp_algo->cfg_name, "identity", 8) == 0) {
664 st->comp_algo = comp_algo;
665 return 1;
666 }
667 }
668 }
669
670 st->comp_algo = NULL;
671 return 0;
672}
673
674static int
675htx_select_comp_reqhdr(struct comp_state *st, struct stream *s, struct http_msg *msg)
676{
Christopher Faulet27ba2dc2018-12-05 11:53:24 +0100677 struct htx *htx = htxbuf(&msg->chn->buf);
Christopher Faulete6902cd2018-11-30 22:29:48 +0100678 struct http_hdr_ctx ctx;
679 struct comp_algo *comp_algo = NULL;
680 struct comp_algo *comp_algo_back = NULL;
681
682 /* Disable compression for older user agents announcing themselves as "Mozilla/4"
683 * unless they are known good (MSIE 6 with XP SP2, or MSIE 7 and later).
684 * See http://zoompf.com/2012/02/lose-the-wait-http-compression for more details.
685 */
686 ctx.blk = NULL;
687 if (http_find_header(htx, ist("User-Agent"), &ctx, 1) &&
688 ctx.value.len >= 9 &&
689 memcmp(ctx.value.ptr, "Mozilla/4", 9) == 0 &&
690 (ctx.value.len < 31 ||
691 memcmp(ctx.value.ptr + 25, "MSIE ", 5) != 0 ||
692 *(ctx.value.ptr + 30) < '6' ||
693 (*(ctx.value.ptr + 30) == '6' &&
694 (ctx.value.len < 54 || memcmp(ctx.value.ptr + 51, "SV1", 3) != 0)))) {
695 st->comp_algo = NULL;
696 return 0;
697 }
698
699 /* search for the algo in the backend in priority or the frontend */
700 if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) ||
701 (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos))) {
702 int best_q = 0;
703
704 ctx.blk = NULL;
705 while (http_find_header(htx, ist("Accept-Encoding"), &ctx, 0)) {
706 const char *qval;
707 int q;
708 int toklen;
709
710 /* try to isolate the token from the optional q-value */
711 toklen = 0;
712 while (toklen < ctx.value.len && HTTP_IS_TOKEN(*(ctx.value.ptr + toklen)))
713 toklen++;
714
715 qval = ctx.value.ptr + toklen;
716 while (1) {
717 while (qval < ctx.value.ptr + ctx.value.len && HTTP_IS_LWS(*qval))
718 qval++;
719
720 if (qval >= ctx.value.ptr + ctx.value.len || *qval != ';') {
721 qval = NULL;
722 break;
723 }
724 qval++;
725
726 while (qval < ctx.value.ptr + ctx.value.len && HTTP_IS_LWS(*qval))
727 qval++;
728
729 if (qval >= ctx.value.ptr + ctx.value.len) {
730 qval = NULL;
731 break;
732 }
733 if (strncmp(qval, "q=", MIN(ctx.value.ptr + ctx.value.len - qval, 2)) == 0)
734 break;
735
736 while (qval < ctx.value.ptr + ctx.value.len && *qval != ';')
737 qval++;
738 }
739
740 /* here we have qval pointing to the first "q=" attribute or NULL if not found */
741 q = qval ? http_parse_qvalue(qval + 2, NULL) : 1000;
742
743 if (q <= best_q)
744 continue;
745
746 for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
747 if (*(ctx.value.ptr) == '*' ||
748 word_match(ctx.value.ptr, toklen, comp_algo->ua_name, comp_algo->ua_name_len)) {
749 st->comp_algo = comp_algo;
750 best_q = q;
751 break;
752 }
753 }
754 }
755 }
756
757 /* remove all occurrences of the header when "compression offload" is set */
758 if (st->comp_algo) {
759 if ((s->be->comp && s->be->comp->offload) ||
760 (strm_fe(s)->comp && strm_fe(s)->comp->offload)) {
761 http_remove_header(htx, &ctx);
762 ctx.blk = NULL;
763 while (http_find_header(htx, ist("Accept-Encoding"), &ctx, 1))
764 http_remove_header(htx, &ctx);
765 }
Christopher Faulet3d97c902015-12-09 14:59:38 +0100766 return 1;
767 }
768
769 /* identity is implicit and does not require headers */
Christopher Faulet92d36382015-11-05 13:35:03 +0100770 if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) ||
771 (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos))) {
Christopher Faulet3d97c902015-12-09 14:59:38 +0100772 for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
773 if (comp_algo->cfg_name_len == 8 && memcmp(comp_algo->cfg_name, "identity", 8) == 0) {
Christopher Faulet92d36382015-11-05 13:35:03 +0100774 st->comp_algo = comp_algo;
Christopher Faulet3d97c902015-12-09 14:59:38 +0100775 return 1;
776 }
777 }
778 }
779
Christopher Faulet92d36382015-11-05 13:35:03 +0100780 st->comp_algo = NULL;
Christopher Faulet3d97c902015-12-09 14:59:38 +0100781 return 0;
782}
783
Christopher Faulete6902cd2018-11-30 22:29:48 +0100784static int
785select_compression_request_header(struct comp_state *st, struct stream *s,
786 struct http_msg *msg)
787{
788 if (IS_HTX_STRM(s))
789 return htx_select_comp_reqhdr(st, s, msg);
790 else
791 return http_select_comp_reqhdr(st, s, msg);
792}
Christopher Faulet92d36382015-11-05 13:35:03 +0100793
Christopher Faulet3d97c902015-12-09 14:59:38 +0100794/*
795 * Selects a compression algorithm depending on the server response.
796 */
Christopher Faulet92d36382015-11-05 13:35:03 +0100797static int
Christopher Faulete6902cd2018-11-30 22:29:48 +0100798http_select_comp_reshdr(struct comp_state *st, struct stream *s, struct http_msg *msg)
Christopher Faulet3d97c902015-12-09 14:59:38 +0100799{
800 struct http_txn *txn = s->txn;
Olivier Houchard0b662842018-06-29 18:16:31 +0200801 struct channel *c = msg->chn;
Christopher Faulet3d97c902015-12-09 14:59:38 +0100802 struct hdr_ctx ctx;
803 struct comp_type *comp_type;
804
805 /* no common compression algorithm was found in request header */
Christopher Faulet92d36382015-11-05 13:35:03 +0100806 if (st->comp_algo == NULL)
Christopher Faulet3d97c902015-12-09 14:59:38 +0100807 goto fail;
808
809 /* HTTP < 1.1 should not be compressed */
810 if (!(msg->flags & HTTP_MSGF_VER_11) || !(txn->req.flags & HTTP_MSGF_VER_11))
811 goto fail;
812
Christopher Faulet92d36382015-11-05 13:35:03 +0100813 if (txn->meth == HTTP_METH_HEAD)
814 goto fail;
815
Christopher Faulet3d97c902015-12-09 14:59:38 +0100816 /* compress 200,201,202,203 responses only */
817 if ((txn->status != 200) &&
818 (txn->status != 201) &&
819 (txn->status != 202) &&
820 (txn->status != 203))
821 goto fail;
822
823
824 /* Content-Length is null */
825 if (!(msg->flags & HTTP_MSGF_TE_CHNK) && msg->body_len == 0)
826 goto fail;
827
828 /* content is already compressed */
829 ctx.idx = 0;
Olivier Houchard0b662842018-06-29 18:16:31 +0200830 if (http_find_header2("Content-Encoding", 16, ci_head(c), &txn->hdr_idx, &ctx))
Christopher Faulet3d97c902015-12-09 14:59:38 +0100831 goto fail;
832
833 /* no compression when Cache-Control: no-transform is present in the message */
834 ctx.idx = 0;
Olivier Houchard0b662842018-06-29 18:16:31 +0200835 while (http_find_header2("Cache-Control", 13, ci_head(c), &txn->hdr_idx, &ctx)) {
Christopher Faulet3d97c902015-12-09 14:59:38 +0100836 if (word_match(ctx.line + ctx.val, ctx.vlen, "no-transform", 12))
837 goto fail;
838 }
839
840 comp_type = NULL;
841
842 /* we don't want to compress multipart content-types, nor content-types that are
843 * not listed in the "compression type" directive if any. If no content-type was
844 * found but configuration requires one, we don't compress either. Backend has
845 * the priority.
846 */
847 ctx.idx = 0;
Olivier Houchard0b662842018-06-29 18:16:31 +0200848 if (http_find_header2("Content-Type", 12, ci_head(c), &txn->hdr_idx, &ctx)) {
Christopher Faulet3d97c902015-12-09 14:59:38 +0100849 if (ctx.vlen >= 9 && strncasecmp("multipart", ctx.line+ctx.val, 9) == 0)
850 goto fail;
851
852 if ((s->be->comp && (comp_type = s->be->comp->types)) ||
853 (strm_fe(s)->comp && (comp_type = strm_fe(s)->comp->types))) {
854 for (; comp_type; comp_type = comp_type->next) {
855 if (ctx.vlen >= comp_type->name_len &&
856 strncasecmp(ctx.line+ctx.val, comp_type->name, comp_type->name_len) == 0)
857 /* this Content-Type should be compressed */
858 break;
859 }
860 /* this Content-Type should not be compressed */
861 if (comp_type == NULL)
862 goto fail;
863 }
864 }
865 else { /* no content-type header */
Christopher Faulet92d36382015-11-05 13:35:03 +0100866 if ((s->be->comp && s->be->comp->types) ||
867 (strm_fe(s)->comp && strm_fe(s)->comp->types))
Christopher Faulet3d97c902015-12-09 14:59:38 +0100868 goto fail; /* a content-type was required */
869 }
870
871 /* limit compression rate */
872 if (global.comp_rate_lim > 0)
873 if (read_freq_ctr(&global.comp_bps_in) > global.comp_rate_lim)
874 goto fail;
875
876 /* limit cpu usage */
877 if (idle_pct < compress_min_idle)
878 goto fail;
879
880 /* initialize compression */
Christopher Faulet92d36382015-11-05 13:35:03 +0100881 if (st->comp_algo->init(&st->comp_ctx, global.tune.comp_maxlevel) < 0)
Christopher Faulet3d97c902015-12-09 14:59:38 +0100882 goto fail;
Christopher Faulet92d36382015-11-05 13:35:03 +0100883 msg->flags |= HTTP_MSGF_COMPRESSING;
Christopher Faulet3d97c902015-12-09 14:59:38 +0100884 return 1;
885
886fail:
Christopher Faulet92d36382015-11-05 13:35:03 +0100887 st->comp_algo = NULL;
Christopher Faulet3d97c902015-12-09 14:59:38 +0100888 return 0;
889}
890
Christopher Faulete6902cd2018-11-30 22:29:48 +0100891static int
892htx_select_comp_reshdr(struct comp_state *st, struct stream *s, struct http_msg *msg)
893{
Christopher Faulet27ba2dc2018-12-05 11:53:24 +0100894 struct htx *htx = htxbuf(&msg->chn->buf);
Christopher Faulete6902cd2018-11-30 22:29:48 +0100895 struct http_txn *txn = s->txn;
896 struct http_hdr_ctx ctx;
897 struct comp_type *comp_type;
898
899 /* no common compression algorithm was found in request header */
900 if (st->comp_algo == NULL)
901 goto fail;
902
903 /* HTTP < 1.1 should not be compressed */
904 if (!(msg->flags & HTTP_MSGF_VER_11) || !(txn->req.flags & HTTP_MSGF_VER_11))
905 goto fail;
906
907 if (txn->meth == HTTP_METH_HEAD)
908 goto fail;
909
910 /* compress 200,201,202,203 responses only */
911 if ((txn->status != 200) &&
912 (txn->status != 201) &&
913 (txn->status != 202) &&
914 (txn->status != 203))
915 goto fail;
916
917 if (msg->flags & HTTP_MSGF_BODYLESS)
918 goto fail;
919
920 /* content is already compressed */
921 ctx.blk = NULL;
922 if (http_find_header(htx, ist("Content-Encoding"), &ctx, 1))
923 goto fail;
924
925 /* no compression when Cache-Control: no-transform is present in the message */
926 ctx.blk = NULL;
927 while (http_find_header(htx, ist("Cache-Control"), &ctx, 0)) {
928 if (word_match(ctx.value.ptr, ctx.value.len, "no-transform", 12))
929 goto fail;
930 }
931
932 comp_type = NULL;
933
934 /* we don't want to compress multipart content-types, nor content-types that are
935 * not listed in the "compression type" directive if any. If no content-type was
936 * found but configuration requires one, we don't compress either. Backend has
937 * the priority.
938 */
939 ctx.blk = NULL;
940 if (http_find_header(htx, ist("Content-Type"), &ctx, 1)) {
941 if (ctx.value.len >= 9 && strncasecmp("multipart", ctx.value.ptr, 9) == 0)
942 goto fail;
943
944 if ((s->be->comp && (comp_type = s->be->comp->types)) ||
945 (strm_fe(s)->comp && (comp_type = strm_fe(s)->comp->types))) {
946 for (; comp_type; comp_type = comp_type->next) {
947 if (ctx.value.len >= comp_type->name_len &&
948 strncasecmp(ctx.value.ptr, comp_type->name, comp_type->name_len) == 0)
949 /* this Content-Type should be compressed */
950 break;
951 }
952 /* this Content-Type should not be compressed */
953 if (comp_type == NULL)
954 goto fail;
955 }
956 }
957 else { /* no content-type header */
958 if ((s->be->comp && s->be->comp->types) ||
959 (strm_fe(s)->comp && strm_fe(s)->comp->types))
960 goto fail; /* a content-type was required */
961 }
962
963 /* limit compression rate */
964 if (global.comp_rate_lim > 0)
965 if (read_freq_ctr(&global.comp_bps_in) > global.comp_rate_lim)
966 goto fail;
967
968 /* limit cpu usage */
969 if (idle_pct < compress_min_idle)
970 goto fail;
971
972 /* initialize compression */
973 if (st->comp_algo->init(&st->comp_ctx, global.tune.comp_maxlevel) < 0)
974 goto fail;
Christopher Faulete6902cd2018-11-30 22:29:48 +0100975 msg->flags |= HTTP_MSGF_COMPRESSING;
976 return 1;
977
978 deinit_comp_ctx:
979 st->comp_algo->end(&st->comp_ctx);
980 fail:
981 st->comp_algo = NULL;
982 return 0;
983}
984
985static int
986select_compression_response_header(struct comp_state *st, struct stream *s, struct http_msg *msg)
987{
988 if (IS_HTX_STRM(s))
989 return htx_select_comp_reshdr(st, s, msg);
990 else
991 return http_select_comp_reshdr(st, s, msg);
992}
Christopher Faulet3d97c902015-12-09 14:59:38 +0100993/***********************************************************************/
994/* emit the chunksize followed by a CRLF on the output and return the number of
995 * bytes written. It goes backwards and starts with the byte before <end>. It
996 * returns the number of bytes written which will not exceed 10 (8 digits, CR,
997 * and LF). The caller is responsible for ensuring there is enough room left in
998 * the output buffer for the string.
999 */
1000static int
1001http_emit_chunk_size(char *end, unsigned int chksz)
1002{
1003 char *beg = end;
1004
1005 *--beg = '\n';
1006 *--beg = '\r';
1007 do {
1008 *--beg = hextab[chksz & 0xF];
1009 } while (chksz >>= 4);
1010 return end - beg;
1011}
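/* Worked example for http_emit_chunk_size() (illustrative): with
 * chksz = 0x1A2B and <end> pointing just past the reserved room, the bytes
 * "1a2b\r\n" are written backwards so that they finish at <end>, and the
 * function returns 6 (4 hex digits + CR + LF).
 */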
1012
1013/*
1014 * Init HTTP compression
1015 */
Christopher Faulet92d36382015-11-05 13:35:03 +01001016static int
Christopher Fauletb61481c2018-12-17 13:17:53 +01001017http_compression_buffer_init(struct channel *inc, struct buffer *out)
Christopher Faulet3d97c902015-12-09 14:59:38 +01001018{
1019 /* output stream requires at least 10 bytes for the gzip header, plus
1020 * at least 8 bytes for the gzip trailer (crc+len), plus at most
1021 * 5 bytes per 32kB block and 2 bytes to close the stream.
1022 */
Olivier Houchard0b662842018-06-29 18:16:31 +02001023 if (c_room(inc) < 20 + 5 * ((ci_data(inc) + 32767) >> 15))
Christopher Faulet3d97c902015-12-09 14:59:38 +01001024 return -1;
1025
1026 /* prepare an empty output buffer in which we reserve enough room for
1027 * copying the output bytes from <in>, plus 10 extra bytes to write
1028 * the chunk size. We don't copy the bytes yet so that if we have to
1029 * cancel the operation later, it's cheap.
1030 */
1031 b_reset(out);
Christopher Fauletb61481c2018-12-17 13:17:53 +01001032 out->head += co_data(inc) + 10;
Christopher Faulet3d97c902015-12-09 14:59:38 +01001033 return 0;
1034}
1035
Christopher Faulete6902cd2018-11-30 22:29:48 +01001036static int
1037htx_compression_buffer_init(struct htx *htx, struct buffer *out)
1038{
1039 /* output stream requires at least 10 bytes for the gzip header, plus
1040 * at least 8 bytes for the gzip trailer (crc+len), plus at most
1041 * 5 bytes per 32kB block and 2 bytes to close the stream.
1042 */
1043 if (htx_free_space(htx) < 20 + 5 * ((htx->data + 32767) >> 15))
1044 return -1;
1045 b_reset(out);
1046 return 0;
1047}
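/* Sizing note for the two *_buffer_init() helpers above (worked arithmetic):
 * the reservation is a ceiling on the number of 32kB blocks. For 64kB of
 * pending data it is 20 + 5 * ((65536 + 32767) >> 15) = 20 + 5 * 2 = 30 bytes,
 * and for a single byte it is 20 + 5 * 1 = 25 bytes, covering the gzip header,
 * the trailer and the per-block overhead.
 */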
1048
Christopher Faulet3d97c902015-12-09 14:59:38 +01001049/*
1050 * Add data to compress
1051 */
Christopher Faulet92d36382015-11-05 13:35:03 +01001052static int
1053http_compression_buffer_add_data(struct comp_state *st, struct buffer *in,
Willy Tarreaud54a8ce2018-06-29 18:42:02 +02001054 int in_out, struct buffer *out, int sz)
Christopher Faulet3d97c902015-12-09 14:59:38 +01001055{
Christopher Faulet3d97c902015-12-09 14:59:38 +01001056 int consumed_data = 0;
1057 int data_process_len;
1058 int block1, block2;
1059
Christopher Faulet92d36382015-11-05 13:35:03 +01001060 if (!sz)
Christopher Faulet3e7bc672015-12-07 13:39:08 +01001061 goto end;
Christopher Faulet3d97c902015-12-09 14:59:38 +01001062
Christopher Faulet92d36382015-11-05 13:35:03 +01001063 /* select the smallest size between the announced chunk size, the input
Christopher Faulet3d97c902015-12-09 14:59:38 +01001064 * data, and the available output buffer size. The compressors are
Christopher Faulet92d36382015-11-05 13:35:03 +01001065 * assumed to be able to process all the bytes we pass to them at
1066 * once. */
Willy Tarreaueac52592018-06-15 13:59:36 +02001067 data_process_len = MIN(b_room(out), sz);
Christopher Faulet92d36382015-11-05 13:35:03 +01001068
Christopher Faulet3d97c902015-12-09 14:59:38 +01001069 block1 = data_process_len;
Willy Tarreaud54a8ce2018-06-29 18:42:02 +02001070 if (block1 > b_contig_data(in, in_out))
1071 block1 = b_contig_data(in, in_out);
Christopher Faulet3d97c902015-12-09 14:59:38 +01001072 block2 = data_process_len - block1;
1073
1074 /* compressors return < 0 upon error or the amount of bytes read */
Christopher Faulet96667202018-12-17 12:02:57 +01001075 consumed_data = st->comp_algo->add_data(st->comp_ctx, b_peek(in, in_out), block1, out);
Christopher Faulet3e7bc672015-12-07 13:39:08 +01001076 if (consumed_data != block1 || !block2)
1077 goto end;
Christopher Faulet96667202018-12-17 12:02:57 +01001078 consumed_data = st->comp_algo->add_data(st->comp_ctx, b_orig(in), block2, out);
Christopher Faulet3e7bc672015-12-07 13:39:08 +01001079 if (consumed_data < 0)
1080 goto end;
1081 consumed_data += block1;
1082
1083 end:
Christopher Faulet3d97c902015-12-09 14:59:38 +01001084 return consumed_data;
1085}
1086
Christopher Faulete6902cd2018-11-30 22:29:48 +01001087static int
1088htx_compression_buffer_add_data(struct comp_state *st, const char *data, size_t len,
1089 struct buffer *out)
1090{
1091 return st->comp_algo->add_data(st->comp_ctx, data, len, out);
1092}
1093
Christopher Faulet3d97c902015-12-09 14:59:38 +01001094/*
1095 * Flush the data being processed and write the header and footer of the chunk. Upon
1096 * success, in and out buffers are swapped to avoid a copy.
1097 */
Christopher Faulet92d36382015-11-05 13:35:03 +01001098static int
1099http_compression_buffer_end(struct comp_state *st, struct stream *s,
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001100 struct channel *chn, struct buffer *out,
Christopher Fauletb61481c2018-12-17 13:17:53 +01001101 int end)
Christopher Faulet3d97c902015-12-09 14:59:38 +01001102{
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001103 struct buffer tmp_buf;
Christopher Faulet3d97c902015-12-09 14:59:38 +01001104 char *tail;
Christopher Faulet92d36382015-11-05 13:35:03 +01001105 int to_forward, left;
Christopher Faulet3d97c902015-12-09 14:59:38 +01001106
1107#if defined(USE_SLZ) || defined(USE_ZLIB)
1108 int ret;
1109
1110 /* flush data here */
Christopher Faulet3d97c902015-12-09 14:59:38 +01001111 if (end)
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001112 ret = st->comp_algo->finish(st->comp_ctx, out); /* end of data */
Christopher Faulet3d97c902015-12-09 14:59:38 +01001113 else
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001114 ret = st->comp_algo->flush(st->comp_ctx, out); /* end of buffer */
Christopher Faulet3d97c902015-12-09 14:59:38 +01001115
1116 if (ret < 0)
1117 return -1; /* flush failed */
1118
1119#endif /* USE_ZLIB */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001120 if (b_data(out) == 0) {
Christopher Faulet3d97c902015-12-09 14:59:38 +01001121 /* No data were appended, let's drop the output buffer and
1122 * keep the input buffer unchanged.
1123 */
1124 return 0;
1125 }
1126
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001127 /* OK so at this stage, we have an output buffer <out> looking like this :
Christopher Faulet3d97c902015-12-09 14:59:38 +01001128 *
1129 * <-- o --> <------ i ----->
1130 * +---------+---+------------+-----------+
1131 * | out | c | comp_in | empty |
1132 * +---------+---+------------+-----------+
1133 * data p size
1134 *
Willy Tarreaud54a8ce2018-06-29 18:42:02 +02001135 * <out> is the room reserved to copy the channel output. It starts at
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001136 * out->area and has not yet been filled. <c> is the room reserved to
Willy Tarreaud54a8ce2018-06-29 18:42:02 +02001137 * write the chunk size (10 bytes). <comp_in> is the compressed
1138 * equivalent of the data part of ib->len. <empty> is the amount of
1139 * empty bytes at the end of the buffer, into which we may have to
1140 * copy the remaining bytes from ib->len after the data
1141 * (chunk size, trailers, ...).
Christopher Faulet3d97c902015-12-09 14:59:38 +01001142 */
1143
Joseph Herlant942eea32018-11-15 13:57:22 -08001144 /* Write real size at the beginning of the chunk, no need of wrapping.
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001145 * We write the chunk using a dynamic length and adjust out->p and out->i
Christopher Faulet3d97c902015-12-09 14:59:38 +01001146 * accordingly afterwards. That will move <out> away from <data>.
1147 */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001148 left = http_emit_chunk_size(b_head(out), b_data(out));
1149 b_add(out, left);
Christopher Fauletb61481c2018-12-17 13:17:53 +01001150 out->head -= co_data(chn) + (left);
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001151 /* Copy previous data from chn into out */
Willy Tarreaud54a8ce2018-06-29 18:42:02 +02001152 if (co_data(chn) > 0) {
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001153 left = b_contig_data(&chn->buf, 0);
Christopher Fauletb61481c2018-12-17 13:17:53 +01001154 if (left > co_data(chn))
1155 left = co_data(chn);
Christopher Faulet3d97c902015-12-09 14:59:38 +01001156
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001157 memcpy(b_head(out), co_head(chn), left);
1158 b_add(out, left);
Willy Tarreaud54a8ce2018-06-29 18:42:02 +02001159 if (co_data(chn) - left) {/* second part of the buffer */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001160 memcpy(b_head(out) + left, b_orig(&chn->buf), co_data(chn) - left);
1161 b_add(out, co_data(chn) - left);
Willy Tarreaud54a8ce2018-06-29 18:42:02 +02001162 }
Christopher Faulet3d97c902015-12-09 14:59:38 +01001163 }
1164
1165 /* chunked encoding requires CRLF after data */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001166 tail = b_tail(out);
Christopher Faulet3d97c902015-12-09 14:59:38 +01001167 *tail++ = '\r';
1168 *tail++ = '\n';
1169
Christopher Faulet2fb28802015-12-01 10:40:57 +01001170 /* At the end of data, we must write the empty chunk 0<CRLF>,
1171 * and terminate the trailers section with a last <CRLF>. If
1172 * we're forwarding a chunked-encoded response, we'll have a
1173 * trailers section after the empty chunk which needs to be
1174 * forwarded and which will provide the last CRLF. Otherwise
1175 * we write it ourselves.
1176 */
1177 if (end) {
1178 struct http_msg *msg = &s->txn->rsp;
1179
1180 memcpy(tail, "0\r\n", 3);
1181 tail += 3;
Christopher Fauletb77c5c22015-12-07 16:48:42 +01001182 if (!(msg->flags & HTTP_MSGF_TE_CHNK)) {
Christopher Faulet2fb28802015-12-01 10:40:57 +01001183 memcpy(tail, "\r\n", 2);
1184 tail += 2;
1185 }
1186 }
1187
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001188 b_add(out, tail - b_tail(out));
Christopher Fauletb61481c2018-12-17 13:17:53 +01001189 to_forward = b_data(out) - co_data(chn);
Christopher Faulet3d97c902015-12-09 14:59:38 +01001190
1191 /* update input rate */
Christopher Faulet92d36382015-11-05 13:35:03 +01001192 if (st->comp_ctx && st->comp_ctx->cur_lvl > 0) {
Christopher Faulet2fb28802015-12-01 10:40:57 +01001193 update_freq_ctr(&global.comp_bps_in, st->consumed);
Christopher Fauletff8abcd2017-06-02 15:33:24 +02001194 HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_in, st->consumed);
1195 HA_ATOMIC_ADD(&s->be->be_counters.comp_in, st->consumed);
Christopher Faulet3d97c902015-12-09 14:59:38 +01001196 } else {
Christopher Fauletff8abcd2017-06-02 15:33:24 +02001197 HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_byp, st->consumed);
1198 HA_ATOMIC_ADD(&s->be->be_counters.comp_byp, st->consumed);
Christopher Faulet3d97c902015-12-09 14:59:38 +01001199 }
1200
1201 /* copy the remaining data in the tmp buffer. */
Willy Tarreaubcbd3932018-06-06 07:13:22 +02001202 c_adv(chn, st->consumed);
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001203 if (b_data(&chn->buf) - co_data(chn) > 0) {
Willy Tarreau7194d3c2018-06-06 16:55:45 +02001204 left = ci_contig_data(chn);
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001205 memcpy(b_tail(out), ci_head(chn), left);
1206 b_add(out, left);
1207 if (b_data(&chn->buf) - (co_data(chn) + left)) {
1208 memcpy(b_tail(out), b_orig(&chn->buf), b_data(&chn->buf) - left);
1209 b_add(out, b_data(&chn->buf) - left);
Christopher Faulet3d97c902015-12-09 14:59:38 +01001210 }
1211 }
Christopher Fauletb61481c2018-12-17 13:17:53 +01001212 c_rew(chn, st->consumed);
1213
Christopher Faulet3d97c902015-12-09 14:59:38 +01001214 /* swap the buffers */
Willy Tarreauc9fa0482018-07-10 17:43:27 +02001215 tmp_buf = chn->buf;
1216 chn->buf = *out;
1217 *out = tmp_buf;
1218
Christopher Faulet92d36382015-11-05 13:35:03 +01001219 if (st->comp_ctx && st->comp_ctx->cur_lvl > 0) {
Christopher Faulet3d97c902015-12-09 14:59:38 +01001220 update_freq_ctr(&global.comp_bps_out, to_forward);
Christopher Fauletff8abcd2017-06-02 15:33:24 +02001221 HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_out, to_forward);
1222 HA_ATOMIC_ADD(&s->be->be_counters.comp_out, to_forward);
Christopher Faulet3d97c902015-12-09 14:59:38 +01001223 }
1224
Christopher Faulet3d97c902015-12-09 14:59:38 +01001225 return to_forward;
1226}
1227
Christopher Faulete6902cd2018-11-30 22:29:48 +01001228static int
1229htx_compression_buffer_end(struct comp_state *st, struct buffer *out, int end)
1230{
1231 if (end)
1232 return st->comp_algo->finish(st->comp_ctx, out);
1233 else
1234 return st->comp_algo->flush(st->comp_ctx, out);
1235}
1236
Christopher Faulet3d97c902015-12-09 14:59:38 +01001237
1238/***********************************************************************/
Christopher Faulet92d36382015-11-05 13:35:03 +01001239struct flt_ops comp_ops = {
Christopher Faulete6902cd2018-11-30 22:29:48 +01001240 .init = comp_flt_init,
Christopher Faulet8ca3b4b2017-07-25 11:07:15 +02001241 .init_per_thread = comp_flt_init_per_thread,
1242 .deinit_per_thread = comp_flt_deinit_per_thread,
Christopher Faulet92d36382015-11-05 13:35:03 +01001243
1244 .channel_start_analyze = comp_start_analyze,
Christopher Faulet92d36382015-11-05 13:35:03 +01001245 .channel_end_analyze = comp_end_analyze,
Christopher Faulet3dc860d2017-09-15 11:39:36 +02001246 .channel_post_analyze = comp_http_post_analyze,
Christopher Faulet92d36382015-11-05 13:35:03 +01001247
Christopher Faulet1339d742016-05-11 16:48:33 +02001248 .http_headers = comp_http_headers,
Christopher Faulete6902cd2018-11-30 22:29:48 +01001249 .http_payload = comp_http_payload,
1250 .http_end = comp_http_end,
1251
Christopher Faulet309c6412015-12-02 09:57:32 +01001252 .http_data = comp_http_data,
1253 .http_chunk_trailers = comp_http_chunk_trailers,
1254 .http_forward_data = comp_http_forward_data,
Christopher Faulet92d36382015-11-05 13:35:03 +01001255};
1256
Christopher Faulet3d97c902015-12-09 14:59:38 +01001257static int
1258parse_compression_options(char **args, int section, struct proxy *proxy,
1259 struct proxy *defpx, const char *file, int line,
1260 char **err)
1261{
Christopher Faulet92d36382015-11-05 13:35:03 +01001262 struct comp *comp;
Christopher Faulet3d97c902015-12-09 14:59:38 +01001263
1264 if (proxy->comp == NULL) {
Vincent Bernat02779b62016-04-03 13:48:43 +02001265 comp = calloc(1, sizeof(*comp));
Christopher Faulet3d97c902015-12-09 14:59:38 +01001266 proxy->comp = comp;
1267 }
1268 else
1269 comp = proxy->comp;
1270
1271 if (!strcmp(args[1], "algo")) {
1272 struct comp_ctx *ctx;
1273 int cur_arg = 2;
1274
1275 if (!*args[cur_arg]) {
1276 memprintf(err, "parsing [%s:%d] : '%s' expects <algorithm>\n",
1277 file, line, args[0]);
1278 return -1;
1279 }
1280 while (*(args[cur_arg])) {
1281 if (comp_append_algo(comp, args[cur_arg]) < 0) {
1282 memprintf(err, "'%s' : '%s' is not a supported algorithm.\n",
1283 args[0], args[cur_arg]);
1284 return -1;
1285 }
1286 if (proxy->comp->algos->init(&ctx, 9) == 0)
1287 proxy->comp->algos->end(&ctx);
1288 else {
1289 memprintf(err, "'%s' : Can't init '%s' algorithm.\n",
1290 args[0], args[cur_arg]);
1291 return -1;
1292 }
1293 cur_arg++;
1294 continue;
1295 }
1296 }
1297 else if (!strcmp(args[1], "offload"))
1298 comp->offload = 1;
1299 else if (!strcmp(args[1], "type")) {
1300 int cur_arg = 2;
1301
1302 if (!*args[cur_arg]) {
1303 memprintf(err, "'%s' expects <type>\n", args[0]);
1304 return -1;
1305 }
1306 while (*(args[cur_arg])) {
1307 comp_append_type(comp, args[cur_arg]);
1308 cur_arg++;
1309 continue;
1310 }
1311 }
1312 else {
1313 memprintf(err, "'%s' expects 'algo', 'type' or 'offload'\n",
1314 args[0]);
1315 return -1;
1316 }
1317
1318 return 0;
1319}
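/* Illustrative configuration snippet for the parser above (assumed example,
 * requires HAProxy to be built with a compression library):
 *
 *     compression algo gzip
 *     compression type text/html text/plain application/json
 *     compression offload
 */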
1320
Christopher Faulet92d36382015-11-05 13:35:03 +01001321static int
1322parse_http_comp_flt(char **args, int *cur_arg, struct proxy *px,
Thierry Fournier3610c392016-04-13 18:27:51 +02001323 struct flt_conf *fconf, char **err, void *private)
Christopher Faulet92d36382015-11-05 13:35:03 +01001324{
Christopher Faulet443ea1a2016-02-04 13:40:26 +01001325 struct flt_conf *fc, *back;
Christopher Faulet92d36382015-11-05 13:35:03 +01001326
Christopher Faulet443ea1a2016-02-04 13:40:26 +01001327 list_for_each_entry_safe(fc, back, &px->filter_configs, list) {
1328 if (fc->id == http_comp_flt_id) {
Christopher Faulet92d36382015-11-05 13:35:03 +01001329 memprintf(err, "%s: Proxy supports only one compression filter\n", px->id);
1330 return -1;
1331 }
1332 }
1333
Christopher Faulet443ea1a2016-02-04 13:40:26 +01001334 fconf->id = http_comp_flt_id;
1335 fconf->conf = NULL;
1336 fconf->ops = &comp_ops;
Christopher Faulet92d36382015-11-05 13:35:03 +01001337 (*cur_arg)++;
1338
1339 return 0;
1340}
1341
1342
1343int
Christopher Fauletc9df7f72018-12-10 16:14:04 +01001344check_implicit_http_comp_flt(struct proxy *proxy)
Christopher Faulet92d36382015-11-05 13:35:03 +01001345{
Christopher Faulet443ea1a2016-02-04 13:40:26 +01001346 struct flt_conf *fconf;
Christopher Faulet27d93c32018-12-15 22:32:02 +01001347 int explicit = 0;
1348 int comp = 0;
Christopher Faulet92d36382015-11-05 13:35:03 +01001349 int err = 0;
1350
1351 if (proxy->comp == NULL)
1352 goto end;
Christopher Faulet443ea1a2016-02-04 13:40:26 +01001353 if (!LIST_ISEMPTY(&proxy->filter_configs)) {
1354 list_for_each_entry(fconf, &proxy->filter_configs, list) {
1355 if (fconf->id == http_comp_flt_id)
Christopher Faulet27d93c32018-12-15 22:32:02 +01001356 comp = 1;
1357 else if (fconf->id == cache_store_flt_id) {
1358 if (comp) {
1359 ha_alert("config: %s '%s': unable to enable the compression filter "
1360 "before any cache filter.\n",
1361 proxy_type_str(proxy), proxy->id);
1362 err++;
1363 goto end;
1364 }
1365 }
1366 else
1367 explicit = 1;
Christopher Faulet92d36382015-11-05 13:35:03 +01001368 }
Christopher Faulet27d93c32018-12-15 22:32:02 +01001369 }
1370 if (comp)
1371 goto end;
1372 else if (explicit) {
1373 ha_alert("config: %s '%s': require an explicit filter declaration to use "
1374 "HTTP compression\n", proxy_type_str(proxy), proxy->id);
Christopher Faulet92d36382015-11-05 13:35:03 +01001375 err++;
1376 goto end;
1377 }
1378
Christopher Faulet27d93c32018-12-15 22:32:02 +01001379 /* Implicit declaration of the compression filter is always the last
1380 * one */
Christopher Faulet443ea1a2016-02-04 13:40:26 +01001381 fconf = calloc(1, sizeof(*fconf));
1382 if (!fconf) {
Christopher Faulet767a84b2017-11-24 16:50:31 +01001383 ha_alert("config: %s '%s': out of memory\n",
1384 proxy_type_str(proxy), proxy->id);
Christopher Faulet92d36382015-11-05 13:35:03 +01001385 err++;
1386 goto end;
1387 }
Christopher Faulet443ea1a2016-02-04 13:40:26 +01001388 fconf->id = http_comp_flt_id;
1389 fconf->conf = NULL;
1390 fconf->ops = &comp_ops;
1391 LIST_ADDQ(&proxy->filter_configs, &fconf->list);
Christopher Faulet92d36382015-11-05 13:35:03 +01001392
1393 end:
1394 return err;
1395}
1396
1397/*
1398 * boolean, returns true if compression is used (either gzip or deflate) in the
1399 * response.
1400 */
Christopher Faulet3d97c902015-12-09 14:59:38 +01001401static int
Christopher Faulet92d36382015-11-05 13:35:03 +01001402smp_fetch_res_comp(const struct arg *args, struct sample *smp, const char *kw,
1403 void *private)
Christopher Faulet3d97c902015-12-09 14:59:38 +01001404{
Willy Tarreaube508f12016-03-10 11:47:01 +01001405 struct http_txn *txn = smp->strm ? smp->strm->txn : NULL;
Christopher Faulet92d36382015-11-05 13:35:03 +01001406
Christopher Faulet3d97c902015-12-09 14:59:38 +01001407 smp->data.type = SMP_T_BOOL;
Christopher Faulet92d36382015-11-05 13:35:03 +01001408 smp->data.u.sint = (txn && (txn->rsp.flags & HTTP_MSGF_COMPRESSING));
Christopher Faulet3d97c902015-12-09 14:59:38 +01001409 return 1;
1410}
1411
Christopher Faulet92d36382015-11-05 13:35:03 +01001412/*
1413 * string, returns algo
1414 */
Christopher Faulet3d97c902015-12-09 14:59:38 +01001415static int
Christopher Faulet92d36382015-11-05 13:35:03 +01001416smp_fetch_res_comp_algo(const struct arg *args, struct sample *smp,
1417 const char *kw, void *private)
Christopher Faulet3d97c902015-12-09 14:59:38 +01001418{
Willy Tarreaube508f12016-03-10 11:47:01 +01001419 struct http_txn *txn = smp->strm ? smp->strm->txn : NULL;
Christopher Faulet92d36382015-11-05 13:35:03 +01001420 struct filter *filter;
1421 struct comp_state *st;
1422
Christopher Faulet03d85532017-09-15 10:14:43 +02001423 if (!txn || !(txn->rsp.flags & HTTP_MSGF_COMPRESSING))
Christopher Faulet3d97c902015-12-09 14:59:38 +01001424 return 0;
1425
Christopher Fauletfcf035c2015-12-03 11:48:03 +01001426 list_for_each_entry(filter, &strm_flt(smp->strm)->filters, list) {
Christopher Faulet443ea1a2016-02-04 13:40:26 +01001427 if (FLT_ID(filter) != http_comp_flt_id)
Christopher Faulet92d36382015-11-05 13:35:03 +01001428 continue;
1429
1430 if (!(st = filter->ctx))
1431 break;
1432
1433 smp->data.type = SMP_T_STR;
1434 smp->flags = SMP_F_CONST;
Willy Tarreau843b7cb2018-07-13 10:54:26 +02001435 smp->data.u.str.area = st->comp_algo->cfg_name;
1436 smp->data.u.str.data = st->comp_algo->cfg_name_len;
Christopher Faulet92d36382015-11-05 13:35:03 +01001437 return 1;
1438 }
1439 return 0;
Christopher Faulet3d97c902015-12-09 14:59:38 +01001440}
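/* Usage sketch for the res.comp and res.comp_algo sample fetches above
 * (assumed configuration syntax):
 *
 *     http-response set-header X-Comp-Algo %[res.comp_algo] if { res.comp }
 *
 * which exposes the negotiated algorithm name on compressed responses only.
 */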
1441
1442/* Declare the config parser for "compression" keyword */
1443static struct cfg_kw_list cfg_kws = {ILH, {
1444 { CFG_LISTEN, "compression", parse_compression_options },
1445 { 0, NULL, NULL },
1446 }
1447};
1448
Willy Tarreau0108d902018-11-25 19:14:37 +01001449INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
1450
Christopher Faulet92d36382015-11-05 13:35:03 +01001451/* Declare the filter parser for "compression" keyword */
1452static struct flt_kw_list filter_kws = { "COMP", { }, {
Thierry Fournier3610c392016-04-13 18:27:51 +02001453 { "compression", parse_http_comp_flt, NULL },
1454 { NULL, NULL, NULL },
Christopher Faulet92d36382015-11-05 13:35:03 +01001455 }
1456};
1457
Willy Tarreau0108d902018-11-25 19:14:37 +01001458INITCALL1(STG_REGISTER, flt_register_keywords, &filter_kws);
1459
Christopher Faulet3d97c902015-12-09 14:59:38 +01001460/* Note: must not be declared <const> as its list will be overwritten */
1461static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
Christopher Faulet92d36382015-11-05 13:35:03 +01001462 { "res.comp", smp_fetch_res_comp, 0, NULL, SMP_T_BOOL, SMP_USE_HRSHP },
1463 { "res.comp_algo", smp_fetch_res_comp_algo, 0, NULL, SMP_T_STR, SMP_USE_HRSHP },
1464 { /* END */ },
1465 }
1466};
Christopher Faulet3d97c902015-12-09 14:59:38 +01001467
Willy Tarreau0108d902018-11-25 19:14:37 +01001468INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords);