/*
 * Stream filters related variables and functions.
 *
 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/buffer.h>
#include <common/cfgparse.h>
#include <common/mini-clist.h>
#include <common/standard.h>

#include <types/compression.h>
#include <types/filters.h>
#include <types/proto_http.h>
#include <types/proxy.h>
#include <types/sample.h>

#include <proto/compression.h>
#include <proto/filters.h>
#include <proto/hdr_idx.h>
#include <proto/proto_http.h>
#include <proto/sample.h>
#include <proto/stream.h>

static const char *http_comp_flt_id = "compression filter";

struct flt_ops comp_ops;

/* Pool used to allocate comp_state structs */
static struct pool_head *pool_head_comp_state = NULL;

static THREAD_LOCAL struct buffer *tmpbuf = &buf_empty;
static THREAD_LOCAL struct buffer *zbuf   = &buf_empty;

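/* Per-stream compression state. It is allocated from <pool_head_comp_state>
 * when the stream analysis starts and attached to the filter context
 * (filter->ctx). It tracks the selected algorithm, its compression context,
 * and the progress of headers, body and trailers processing.
 */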
struct comp_state {
	struct comp_ctx  *comp_ctx;   /* compression context */
	struct comp_algo *comp_algo;  /* compression algorithm if not NULL */
	int hdrs_len;
	int tlrs_len;
	int consumed;
	int initialized;
	int finished;
};

static int select_compression_request_header(struct comp_state *st,
					      struct stream *s,
					      struct http_msg *msg);
static int select_compression_response_header(struct comp_state *st,
					       struct stream *s,
					       struct http_msg *msg);

static int http_compression_buffer_init(struct buffer *in, struct buffer *out);
static int http_compression_buffer_add_data(struct comp_state *st,
					     struct buffer *in,
					     struct buffer *out, int sz);
static int http_compression_buffer_end(struct comp_state *st, struct stream *s,
					struct buffer **in, struct buffer **out,
					int end);

/***********************************************************************/
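/* Allocates the per-thread buffers used by the compression filter: <tmpbuf>
 * to stage input data of chunked responses and <zbuf> to hold the compressed
 * output. Returns 0 on success, -1 on allocation failure.
 */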
static int
comp_flt_init_per_thread(struct proxy *px, struct flt_conf *fconf)
{
	if (!tmpbuf->size && b_alloc(&tmpbuf) == NULL)
		return -1;
	if (!zbuf->size && b_alloc(&zbuf) == NULL)
		return -1;
	return 0;
}

static void
comp_flt_deinit_per_thread(struct proxy *px, struct flt_conf *fconf)
{
	if (tmpbuf->size)
		b_free(&tmpbuf);
	if (zbuf->size)
		b_free(&zbuf);
}

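/* Called when the analysis of a channel starts. On the first call for a
 * stream, it allocates and initializes the comp_state and registers a
 * post-analyzer on AN_RES_WAIT_HTTP. Returns 1 on success, -1 on error.
 */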
static int
comp_start_analyze(struct stream *s, struct filter *filter, struct channel *chn)
{
	if (filter->ctx == NULL) {
		struct comp_state *st;

		st = pool_alloc_dirty(pool_head_comp_state);
		if (st == NULL)
			return -1;

		st->comp_algo   = NULL;
		st->comp_ctx    = NULL;
		st->hdrs_len    = 0;
		st->tlrs_len    = 0;
		st->consumed    = 0;
		st->initialized = 0;
		st->finished    = 0;
		filter->ctx     = st;

		/* Register post-analyzer on AN_RES_WAIT_HTTP because we need to
		 * analyze response headers before http-response rules execution
		 * to be sure we can use res.comp and res.comp_algo sample
		 * fetches */
		filter->post_analyzers |= AN_RES_WAIT_HTTP;
	}
	return 1;
}

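/* Called when the analysis of a channel ends. It releases the compression
 * context, if any, and gives the comp_state back to its pool. Always
 * returns 1.
 */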
static int
comp_end_analyze(struct stream *s, struct filter *filter, struct channel *chn)
{
	struct comp_state *st = filter->ctx;

	if (!st)
		goto end;

	/* release any possible compression context */
	if (st->comp_algo)
		st->comp_algo->end(&st->comp_ctx);
	pool_free(pool_head_comp_state, st);
	filter->ctx = NULL;
 end:
	return 1;
}

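/* Called once all HTTP headers of a message have been parsed. For requests it
 * selects the compression algorithm from the Accept-Encoding header. For
 * responses, the check was already done by the comp_http_post_analyze
 * callback, so it only registers the filter for data filtering and records
 * the headers length.
 */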
static int
comp_http_headers(struct stream *s, struct filter *filter, struct http_msg *msg)
{
	struct comp_state *st = filter->ctx;

	if (!strm_fe(s)->comp && !s->be->comp)
		goto end;

	if (!(msg->chn->flags & CF_ISRESP))
		select_compression_request_header(st, s, msg);
	else {
		/* Response headers have already been checked in
		 * comp_http_post_analyze callback. */
		if (st->comp_algo) {
			register_data_filter(s, msg->chn, filter);
			st->hdrs_len = s->txn->rsp.sov;
		}
	}

 end:
	return 1;
}

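/* Post-analyzer registered on AN_RES_WAIT_HTTP. It checks the response
 * headers and selects the compression algorithm before http-response rules
 * run, so that the res.comp and res.comp_algo sample fetches are usable
 * there. Always returns 1.
 */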
static int
comp_http_post_analyze(struct stream *s, struct filter *filter,
		       struct channel *chn, unsigned an_bit)
{
	struct http_txn *txn = s->txn;
	struct http_msg *msg = &txn->rsp;
	struct comp_state *st = filter->ctx;

	if (an_bit != AN_RES_WAIT_HTTP)
		goto end;

	if (!strm_fe(s)->comp && !s->be->comp)
		goto end;

	select_compression_response_header(st, s, msg);

 end:
	return 1;
}

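/* Called on each block of body data. Chunk-encoded payloads are first staged
 * in <tmpbuf> because the chunk envelope must be skipped; other payloads are
 * compressed directly into <zbuf>. Returns 0 (the data are consumed here and
 * emitted later by comp_http_forward_data), or a negative value on error.
 */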
static int
comp_http_data(struct stream *s, struct filter *filter, struct http_msg *msg)
{
	struct comp_state *st  = filter->ctx;
	struct buffer     *buf = msg->chn->buf;
	unsigned int      *nxt = &flt_rsp_nxt(filter);
	unsigned int       len;
	int                ret;

	len = MIN(msg->chunk_len + msg->next, buf->i) - *nxt;
	if (!len)
		return len;

	if (!st->initialized) {
		unsigned int fwd = flt_rsp_fwd(filter) + st->hdrs_len;

		b_reset(tmpbuf);
		b_adv(buf, fwd);
		ret = http_compression_buffer_init(buf, zbuf);
		b_rew(buf, fwd);
		if (ret < 0) {
			msg->chn->flags |= CF_WAKE_WRITE;
			return 0;
		}
	}

	if (msg->flags & HTTP_MSGF_TE_CHNK) {
		int block;

		len = MIN(tmpbuf->size - buffer_len(tmpbuf), len);

		b_adv(buf, *nxt);
		block = bi_contig_data(buf);
		memcpy(bi_end(tmpbuf), bi_ptr(buf), block);
		if (len > block)
			memcpy(bi_end(tmpbuf)+block, buf->data, len-block);
		b_rew(buf, *nxt);

		tmpbuf->i += len;
		ret        = len;
	}
	else {
		b_adv(buf, *nxt);
		ret = http_compression_buffer_add_data(st, buf, zbuf, len);
		b_rew(buf, *nxt);
		if (ret < 0)
			return ret;
	}

	st->initialized = 1;
	msg->next      += ret;
	msg->chunk_len -= ret;
	*nxt = msg->next;
	return 0;
}

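/* Called when a chunk-encoded response reaches its trailers. If no data were
 * compressed yet, the output buffer is initialized so that the last chunk can
 * still be emitted. The trailers length is recorded so the trailers can be
 * forwarded untouched. Always returns 1.
 */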
static int
comp_http_chunk_trailers(struct stream *s, struct filter *filter,
			 struct http_msg *msg)
{
	struct comp_state *st = filter->ctx;

	if (!st->initialized) {
		if (!st->finished) {
			struct buffer *buf = msg->chn->buf;
			unsigned int   fwd = flt_rsp_fwd(filter) + st->hdrs_len;

			b_reset(tmpbuf);
			b_adv(buf, fwd);
			http_compression_buffer_init(buf, zbuf);
			b_rew(buf, fwd);
			st->initialized = 1;
		}
	}
	st->tlrs_len = msg->sol;
	return 1;
}

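/* Called when <len> bytes can be forwarded. Headers and trailers are
 * forwarded as-is, while body data staged by comp_http_data are flushed
 * through the compressor and the resulting chunk is swapped into the channel
 * buffer. Returns the number of bytes to forward, or a negative value on
 * error.
 */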
static int
comp_http_forward_data(struct stream *s, struct filter *filter,
		       struct http_msg *msg, unsigned int len)
{
	struct comp_state *st = filter->ctx;
	int                ret;

	/* To work, previous filters MUST forward all data */
	if (flt_rsp_fwd(filter) + len != flt_rsp_nxt(filter)) {
		ha_warning("HTTP compression failed: unexpected behavior of previous filters\n");
		return -1;
	}

	if (!st->initialized) {
		if (!len) {
			/* Nothing to forward */
			ret = len;
		}
		else if (st->hdrs_len > len) {
			/* Forward part of headers */
			ret           = len;
			st->hdrs_len -= len;
		}
		else if (st->hdrs_len > 0) {
			/* Forward remaining headers */
			ret          = st->hdrs_len;
			st->hdrs_len = 0;
		}
		else if (msg->msg_state < HTTP_MSG_TRAILERS) {
			/* Do not forward anything for now. This only happens
			 * with chunk-encoded responses. Waiting data are part
			 * of the chunk envelope (the chunk size or the chunk
			 * CRLF). These data will be skipped during the
			 * compression. */
			ret = 0;
		}
		else {
			/* Forward trailers data */
			ret = len;
		}
		return ret;
	}

	if (msg->flags & HTTP_MSGF_TE_CHNK) {
		ret = http_compression_buffer_add_data(st, tmpbuf, zbuf, tmpbuf->i);
		if (ret != tmpbuf->i) {
			ha_warning("HTTP compression failed: Must consume %u bytes but only %d bytes consumed\n",
				   (unsigned int)tmpbuf->i, ret);
			return -1;
		}
	}

	st->consumed = len - st->hdrs_len - st->tlrs_len;
	b_adv(msg->chn->buf, flt_rsp_fwd(filter) + st->hdrs_len);
	ret = http_compression_buffer_end(st, s, &msg->chn->buf, &zbuf, msg->msg_state >= HTTP_MSG_TRAILERS);
	b_rew(msg->chn->buf, flt_rsp_fwd(filter) + st->hdrs_len);
	if (ret < 0)
		return ret;

	flt_change_forward_size(filter, msg->chn, ret - st->consumed);
	msg->next += (ret - st->consumed);
	ret += st->hdrs_len + st->tlrs_len;

	st->initialized = 0;
	st->finished    = (msg->msg_state >= HTTP_MSG_TRAILERS);
	st->hdrs_len    = 0;
	st->tlrs_len    = 0;
	return ret;
}

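/* Called when the HTTP response is fully processed. It only updates the
 * compression counters of the frontend and the backend. Always returns 1.
 */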
static int
comp_http_end(struct stream *s, struct filter *filter,
	      struct http_msg *msg)
{
	struct comp_state *st = filter->ctx;

	if (!(msg->chn->flags & CF_ISRESP) || !st || !st->comp_algo)
		goto end;

	if (strm_fe(s)->mode == PR_MODE_HTTP)
		HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.p.http.comp_rsp, 1);
	if ((s->flags & SF_BE_ASSIGNED) && (s->be->mode == PR_MODE_HTTP))
		HA_ATOMIC_ADD(&s->be->be_counters.p.http.comp_rsp, 1);
 end:
	return 1;
}

/***********************************************************************/
/*
 * Selects a compression algorithm depending on the client request.
 */
int
select_compression_request_header(struct comp_state *st, struct stream *s,
				  struct http_msg *msg)
{
	struct http_txn *txn = s->txn;
	struct buffer *req = msg->chn->buf;
	struct hdr_ctx ctx;
	struct comp_algo *comp_algo = NULL;
	struct comp_algo *comp_algo_back = NULL;

	/* Disable compression for older user agents announcing themselves as "Mozilla/4"
	 * unless they are known good (MSIE 6 with XP SP2, or MSIE 7 and later).
	 * See http://zoompf.com/2012/02/lose-the-wait-http-compression for more details.
	 */
	ctx.idx = 0;
	if (http_find_header2("User-Agent", 10, req->p, &txn->hdr_idx, &ctx) &&
	    ctx.vlen >= 9 &&
	    memcmp(ctx.line + ctx.val, "Mozilla/4", 9) == 0 &&
	    (ctx.vlen < 31 ||
	     memcmp(ctx.line + ctx.val + 25, "MSIE ", 5) != 0 ||
	     ctx.line[ctx.val + 30] < '6' ||
	     (ctx.line[ctx.val + 30] == '6' &&
	      (ctx.vlen < 54 || memcmp(ctx.line + 51, "SV1", 3) != 0)))) {
		st->comp_algo = NULL;
		return 0;
	}

	/* search for the algo in the backend in priority or the frontend */
	if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) ||
	    (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos))) {
		int best_q = 0;

		ctx.idx = 0;
		while (http_find_header2("Accept-Encoding", 15, req->p, &txn->hdr_idx, &ctx)) {
			const char *qval;
			int q;
			int toklen;

			/* try to isolate the token from the optional q-value */
			toklen = 0;
			while (toklen < ctx.vlen && HTTP_IS_TOKEN(*(ctx.line + ctx.val + toklen)))
				toklen++;

			qval = ctx.line + ctx.val + toklen;
			while (1) {
				while (qval < ctx.line + ctx.val + ctx.vlen && HTTP_IS_LWS(*qval))
					qval++;

				if (qval >= ctx.line + ctx.val + ctx.vlen || *qval != ';') {
					qval = NULL;
					break;
				}
				qval++;

				while (qval < ctx.line + ctx.val + ctx.vlen && HTTP_IS_LWS(*qval))
					qval++;

				if (qval >= ctx.line + ctx.val + ctx.vlen) {
					qval = NULL;
					break;
				}
				if (strncmp(qval, "q=", MIN(ctx.line + ctx.val + ctx.vlen - qval, 2)) == 0)
					break;

				while (qval < ctx.line + ctx.val + ctx.vlen && *qval != ';')
					qval++;
			}

			/* here we have qval pointing to the first "q=" attribute or NULL if not found */
			q = qval ? parse_qvalue(qval + 2, NULL) : 1000;

			if (q <= best_q)
				continue;

			for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
				if (*(ctx.line + ctx.val) == '*' ||
				    word_match(ctx.line + ctx.val, toklen, comp_algo->ua_name, comp_algo->ua_name_len)) {
					st->comp_algo = comp_algo;
					best_q = q;
					break;
				}
			}
		}
	}

	/* remove all occurrences of the header when "compression offload" is set */
	if (st->comp_algo) {
		if ((s->be->comp && s->be->comp->offload) ||
		    (strm_fe(s)->comp && strm_fe(s)->comp->offload)) {
			http_remove_header2(msg, &txn->hdr_idx, &ctx);
			ctx.idx = 0;
			while (http_find_header2("Accept-Encoding", 15, req->p, &txn->hdr_idx, &ctx)) {
				http_remove_header2(msg, &txn->hdr_idx, &ctx);
			}
		}
		return 1;
	}

	/* identity is implicit and does not require any header */
	if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) ||
	    (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos))) {
		for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
			if (comp_algo->cfg_name_len == 8 && memcmp(comp_algo->cfg_name, "identity", 8) == 0) {
				st->comp_algo = comp_algo;
				return 1;
			}
		}
	}

	st->comp_algo = NULL;
	return 0;
}

/*
 * Selects a compression algorithm depending on the server response.
 */
static int
select_compression_response_header(struct comp_state *st, struct stream *s, struct http_msg *msg)
{
	struct http_txn *txn = s->txn;
	struct buffer *res = msg->chn->buf;
	struct hdr_ctx ctx;
	struct comp_type *comp_type;

	/* no common compression algorithm was found in request header */
	if (st->comp_algo == NULL)
		goto fail;

	/* HTTP < 1.1 should not be compressed */
	if (!(msg->flags & HTTP_MSGF_VER_11) || !(txn->req.flags & HTTP_MSGF_VER_11))
		goto fail;

	if (txn->meth == HTTP_METH_HEAD)
		goto fail;

	/* compress 200,201,202,203 responses only */
	if ((txn->status != 200) &&
	    (txn->status != 201) &&
	    (txn->status != 202) &&
	    (txn->status != 203))
		goto fail;

	/* no body (Content-Length is 0 and the response is not chunk-encoded) */
	if (!(msg->flags & HTTP_MSGF_TE_CHNK) && msg->body_len == 0)
		goto fail;

	/* content is already compressed */
	ctx.idx = 0;
	if (http_find_header2("Content-Encoding", 16, res->p, &txn->hdr_idx, &ctx))
		goto fail;

	/* no compression when Cache-Control: no-transform is present in the message */
	ctx.idx = 0;
	while (http_find_header2("Cache-Control", 13, res->p, &txn->hdr_idx, &ctx)) {
		if (word_match(ctx.line + ctx.val, ctx.vlen, "no-transform", 12))
			goto fail;
	}

	comp_type = NULL;

	/* we don't want to compress multipart content-types, nor content-types that are
	 * not listed in the "compression type" directive if any. If no content-type was
	 * found but configuration requires one, we don't compress either. Backend has
	 * the priority.
	 */
	ctx.idx = 0;
	if (http_find_header2("Content-Type", 12, res->p, &txn->hdr_idx, &ctx)) {
		if (ctx.vlen >= 9 && strncasecmp("multipart", ctx.line+ctx.val, 9) == 0)
			goto fail;

		if ((s->be->comp && (comp_type = s->be->comp->types)) ||
		    (strm_fe(s)->comp && (comp_type = strm_fe(s)->comp->types))) {
			for (; comp_type; comp_type = comp_type->next) {
				if (ctx.vlen >= comp_type->name_len &&
				    strncasecmp(ctx.line+ctx.val, comp_type->name, comp_type->name_len) == 0)
					/* this Content-Type should be compressed */
					break;
			}
			/* this Content-Type should not be compressed */
			if (comp_type == NULL)
				goto fail;
		}
	}
	else { /* no content-type header */
		if ((s->be->comp && s->be->comp->types) ||
		    (strm_fe(s)->comp && strm_fe(s)->comp->types))
			goto fail; /* a content-type was required */
	}

	/* limit compression rate */
	if (global.comp_rate_lim > 0)
		if (read_freq_ctr(&global.comp_bps_in) > global.comp_rate_lim)
			goto fail;

	/* limit cpu usage */
	if (idle_pct < compress_min_idle)
		goto fail;

	/* initialize compression */
	if (st->comp_algo->init(&st->comp_ctx, global.tune.comp_maxlevel) < 0)
		goto fail;

	/* remove Content-Length header */
	ctx.idx = 0;
	if ((msg->flags & HTTP_MSGF_CNT_LEN) && http_find_header2("Content-Length", 14, res->p, &txn->hdr_idx, &ctx))
		http_remove_header2(msg, &txn->hdr_idx, &ctx);

	/* add Transfer-Encoding header */
	if (!(msg->flags & HTTP_MSGF_TE_CHNK))
		http_header_add_tail2(&txn->rsp, &txn->hdr_idx, "Transfer-Encoding: chunked", 26);

	/*
	 * Add Content-Encoding header when it's not identity encoding.
	 * RFC 2616 : Identity encoding: This content-coding is used only in the
	 * Accept-Encoding header, and SHOULD NOT be used in the Content-Encoding
	 * header.
	 */
	if (st->comp_algo->cfg_name_len != 8 || memcmp(st->comp_algo->cfg_name, "identity", 8) != 0) {
		trash.len = 18;
		memcpy(trash.str, "Content-Encoding: ", trash.len);
		memcpy(trash.str + trash.len, st->comp_algo->ua_name, st->comp_algo->ua_name_len);
		trash.len += st->comp_algo->ua_name_len;
		trash.str[trash.len] = '\0';
		http_header_add_tail2(&txn->rsp, &txn->hdr_idx, trash.str, trash.len);
	}
	msg->flags |= HTTP_MSGF_COMPRESSING;
	return 1;

fail:
	st->comp_algo = NULL;
	return 0;
}

/***********************************************************************/
/* emit the chunksize followed by a CRLF on the output and return the number of
 * bytes written. It goes backwards and starts with the byte before <end>. It
 * returns the number of bytes written which will not exceed 10 (8 digits, CR,
 * and LF). The caller is responsible for ensuring there is enough room left in
 * the output buffer for the string.
 */
static int
http_emit_chunk_size(char *end, unsigned int chksz)
{
	char *beg = end;

	*--beg = '\n';
	*--beg = '\r';
	do {
		*--beg = hextab[chksz & 0xF];
	} while (chksz >>= 4);
	return end - beg;
}

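/* For example, a 300-byte chunk (chksz == 0x12C) yields three hex digits plus
 * CRLF written backwards from <end>, so the function returns 5 and the chunk
 * header starts at <end> - 5.
 */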
/*
 * Init HTTP compression
 */
static int
http_compression_buffer_init(struct buffer *in, struct buffer *out)
{
	/* output stream requires at least 10 bytes for the gzip header, plus
	 * at least 8 bytes for the gzip trailer (crc+len), plus at most
	 * 5 bytes per 32kB block and 2 bytes to close the stream.
	 */
	if (in->size - buffer_len(in) < 20 + 5 * ((in->i + 32767) >> 15))
		return -1;

	/* prepare an empty output buffer in which we reserve enough room for
	 * copying the output bytes from <in>, plus 10 extra bytes to write
	 * the chunk size. We don't copy the bytes yet so that if we have to
	 * cancel the operation later, it's cheap.
	 */
	b_reset(out);
	out->o = in->o;
	out->p += out->o;
	out->i = 10;
	return 0;
}

/*
 * Add data to compress
 */
static int
http_compression_buffer_add_data(struct comp_state *st, struct buffer *in,
				 struct buffer *out, int sz)
{
	int consumed_data = 0;
	int data_process_len;
	int block1, block2;

	if (!sz)
		goto end;

	/* select the smallest size between the announced chunk size, the input
	 * data, and the available output buffer size. The compressors are
	 * assumed to be able to process all the bytes we pass to them at
	 * once. */
	data_process_len = MIN(out->size - buffer_len(out), sz);

	block1 = data_process_len;
	if (block1 > bi_contig_data(in))
		block1 = bi_contig_data(in);
	block2 = data_process_len - block1;

	/* compressors return < 0 upon error or the amount of bytes read */
	consumed_data = st->comp_algo->add_data(st->comp_ctx, bi_ptr(in), block1, out);
	if (consumed_data != block1 || !block2)
		goto end;
	consumed_data = st->comp_algo->add_data(st->comp_ctx, in->data, block2, out);
	if (consumed_data < 0)
		goto end;
	consumed_data += block1;

 end:
	return consumed_data;
}

/*
 * Flush data in process, and write the header and footer of the chunk. Upon
 * success, in and out buffers are swapped to avoid a copy.
 */
static int
http_compression_buffer_end(struct comp_state *st, struct stream *s,
			    struct buffer **in, struct buffer **out,
			    int end)
{
	struct buffer *ib = *in, *ob = *out;
	char *tail;
	int to_forward, left;

#if defined(USE_SLZ) || defined(USE_ZLIB)
	int ret;

	/* flush data here */
	if (end)
		ret = st->comp_algo->finish(st->comp_ctx, ob); /* end of data */
	else
		ret = st->comp_algo->flush(st->comp_ctx, ob);  /* end of buffer */

	if (ret < 0)
		return -1; /* flush failed */

#endif /* USE_SLZ || USE_ZLIB */
	if (ob->i == 10) {
		/* No data were appended, let's drop the output buffer and
		 * keep the input buffer unchanged.
		 */
		return 0;
	}

	/* OK so at this stage, we have an output buffer <ob> looking like this :
	 *
	 *        <-- o --> <------ i ----->
	 *       +---------+---+------------+-----------+
	 *       |   out   | c |  comp_in   |   empty   |
	 *       +---------+---+------------+-----------+
	 *     data        p                           size
	 *
	 * <out> is the room reserved to copy ib->o. It starts at ob->data and
	 * has not yet been filled. <c> is the room reserved to write the chunk
	 * size (10 bytes). <comp_in> is the compressed equivalent of the data
	 * part of ib->i. <empty> is the amount of empty bytes at the end of
	 * the buffer, into which we may have to copy the remaining bytes from
	 * ib->i after the data (chunk size, trailers, ...).
	 */

	/* Write the real size at the beginning of the chunk, no need of wrapping.
	 * We write the chunk using a dynamic length and adjust ob->p and ob->i
	 * accordingly afterwards. That will move <out> away from <data>.
	 */
	left = 10 - http_emit_chunk_size(ob->p + 10, ob->i - 10);
	ob->p += left;
	ob->i -= left;

	/* Copy previous data from ib->o into ob->o */
	if (ib->o > 0) {
		left = bo_contig_data(ib);
		memcpy(ob->p - ob->o, bo_ptr(ib), left);
		if (ib->o - left) /* second part of the buffer */
			memcpy(ob->p - ob->o + left, ib->data, ib->o - left);
	}

	/* chunked encoding requires CRLF after data */
	tail = ob->p + ob->i;
	*tail++ = '\r';
	*tail++ = '\n';

	/* At the end of data, we must write the empty chunk 0<CRLF>,
	 * and terminate the trailers section with a last <CRLF>. If
	 * we're forwarding a chunked-encoded response, we'll have a
	 * trailers section after the empty chunk which needs to be
	 * forwarded and which will provide the last CRLF. Otherwise
	 * we write it ourselves.
	 */
	if (end) {
		struct http_msg *msg = &s->txn->rsp;

		memcpy(tail, "0\r\n", 3);
		tail += 3;
		if (!(msg->flags & HTTP_MSGF_TE_CHNK)) {
			memcpy(tail, "\r\n", 2);
			tail += 2;
		}
	}

	ob->i = tail - ob->p;
	to_forward = ob->i;

	/* update input rate */
	if (st->comp_ctx && st->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_in, st->consumed);
		HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_in, st->consumed);
		HA_ATOMIC_ADD(&s->be->be_counters.comp_in, st->consumed);
	} else {
		HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_byp, st->consumed);
		HA_ATOMIC_ADD(&s->be->be_counters.comp_byp, st->consumed);
	}

	/* copy the remaining data in the tmp buffer. */
	b_adv(ib, st->consumed);
	if (ib->i > 0) {
		left = bi_contig_data(ib);
		memcpy(ob->p + ob->i, bi_ptr(ib), left);
		ob->i += left;
		if (ib->i - left) {
			memcpy(ob->p + ob->i, ib->data, ib->i - left);
			ob->i += ib->i - left;
		}
	}

	/* swap the buffers */
	*in = ob;
	*out = ib;

	if (st->comp_ctx && st->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_out, to_forward);
		HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_out, to_forward);
		HA_ATOMIC_ADD(&s->be->be_counters.comp_out, to_forward);
	}

	return to_forward;
}

/***********************************************************************/
struct flt_ops comp_ops = {
	.init_per_thread   = comp_flt_init_per_thread,
	.deinit_per_thread = comp_flt_deinit_per_thread,

	.channel_start_analyze = comp_start_analyze,
	.channel_end_analyze   = comp_end_analyze,
	.channel_post_analyze  = comp_http_post_analyze,

	.http_headers        = comp_http_headers,
	.http_data           = comp_http_data,
	.http_chunk_trailers = comp_http_chunk_trailers,
	.http_forward_data   = comp_http_forward_data,
	.http_end            = comp_http_end,
};

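/* Parses the "compression" keyword in a proxy section. The supported forms
 * are "compression algo <algo>...", "compression type <mime>..." and
 * "compression offload", for instance:
 *
 *     compression algo gzip
 *     compression type text/html text/plain
 *     compression offload
 *
 * Returns 0 on success or -1 on error (with <err> filled).
 */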
static int
parse_compression_options(char **args, int section, struct proxy *proxy,
			  struct proxy *defpx, const char *file, int line,
			  char **err)
{
	struct comp *comp;

	if (proxy->comp == NULL) {
		comp = calloc(1, sizeof(*comp));
		proxy->comp = comp;
	}
	else
		comp = proxy->comp;

	if (!strcmp(args[1], "algo")) {
		struct comp_ctx *ctx;
		int              cur_arg = 2;

		if (!*args[cur_arg]) {
			memprintf(err, "parsing [%s:%d] : '%s' expects <algorithm>\n",
				  file, line, args[0]);
			return -1;
		}
		while (*(args[cur_arg])) {
			if (comp_append_algo(comp, args[cur_arg]) < 0) {
				memprintf(err, "'%s' : '%s' is not a supported algorithm.\n",
					  args[0], args[cur_arg]);
				return -1;
			}
			if (proxy->comp->algos->init(&ctx, 9) == 0)
				proxy->comp->algos->end(&ctx);
			else {
				memprintf(err, "'%s' : Can't init '%s' algorithm.\n",
					  args[0], args[cur_arg]);
				return -1;
			}
			cur_arg++;
			continue;
		}
	}
	else if (!strcmp(args[1], "offload"))
		comp->offload = 1;
	else if (!strcmp(args[1], "type")) {
		int cur_arg = 2;

		if (!*args[cur_arg]) {
			memprintf(err, "'%s' expects <type>\n", args[0]);
			return -1;
		}
		while (*(args[cur_arg])) {
			comp_append_type(comp, args[cur_arg]);
			cur_arg++;
			continue;
		}
	}
	else {
		memprintf(err, "'%s' expects 'algo', 'type' or 'offload'\n",
			  args[0]);
		return -1;
	}

	return 0;
}

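/* Parses the "compression" filter keyword, as used in an explicit filter
 * declaration such as "filter compression" in a proxy section. Only one
 * compression filter may be declared per proxy. Returns 0 on success or -1
 * on error.
 */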
static int
parse_http_comp_flt(char **args, int *cur_arg, struct proxy *px,
		    struct flt_conf *fconf, char **err, void *private)
{
	struct flt_conf *fc, *back;

	list_for_each_entry_safe(fc, back, &px->filter_configs, list) {
		if (fc->id == http_comp_flt_id) {
			memprintf(err, "%s: Proxy supports only one compression filter\n", px->id);
			return -1;
		}
	}

	fconf->id   = http_comp_flt_id;
	fconf->conf = NULL;
	fconf->ops  = &comp_ops;
	(*cur_arg)++;

	return 0;
}

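/* Post-parsing check for proxies using the legacy "compression" directives.
 * If other filters are already declared on the proxy, an explicit "filter
 * compression" line is required and an alert is emitted; otherwise the
 * compression filter is implicitly created. Returns the number of errors
 * encountered.
 */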
int
check_legacy_http_comp_flt(struct proxy *proxy)
{
	struct flt_conf *fconf;
	int err = 0;

	if (proxy->comp == NULL)
		goto end;
	if (!LIST_ISEMPTY(&proxy->filter_configs)) {
		list_for_each_entry(fconf, &proxy->filter_configs, list) {
			if (fconf->id == http_comp_flt_id)
				goto end;
		}
		ha_alert("config: %s '%s': require an explicit filter declaration to use HTTP compression\n",
			 proxy_type_str(proxy), proxy->id);
		err++;
		goto end;
	}

	fconf = calloc(1, sizeof(*fconf));
	if (!fconf) {
		ha_alert("config: %s '%s': out of memory\n",
			 proxy_type_str(proxy), proxy->id);
		err++;
		goto end;
	}
	fconf->id   = http_comp_flt_id;
	fconf->conf = NULL;
	fconf->ops  = &comp_ops;
	LIST_ADDQ(&proxy->filter_configs, &fconf->list);

 end:
	return err;
}

/*
 * boolean, returns true if compression is used (either gzip or deflate) in the
 * response.
 */
static int
smp_fetch_res_comp(const struct arg *args, struct sample *smp, const char *kw,
		   void *private)
{
	struct http_txn *txn = smp->strm ? smp->strm->txn : NULL;

	smp->data.type = SMP_T_BOOL;
	smp->data.u.sint = (txn && (txn->rsp.flags & HTTP_MSGF_COMPRESSING));
	return 1;
}

/*
 * string, returns the name of the compression algorithm used for the response
 */
static int
smp_fetch_res_comp_algo(const struct arg *args, struct sample *smp,
			const char *kw, void *private)
{
	struct http_txn *txn = smp->strm ? smp->strm->txn : NULL;
	struct filter   *filter;
	struct comp_state *st;

	if (!txn || !(txn->rsp.flags & HTTP_MSGF_COMPRESSING))
		return 0;

	list_for_each_entry(filter, &strm_flt(smp->strm)->filters, list) {
		if (FLT_ID(filter) != http_comp_flt_id)
			continue;

		if (!(st = filter->ctx))
			break;

		smp->data.type = SMP_T_STR;
		smp->flags = SMP_F_CONST;
		smp->data.u.str.str = st->comp_algo->cfg_name;
		smp->data.u.str.len = st->comp_algo->cfg_name_len;
		return 1;
	}
	return 0;
}

/* Declare the config parser for "compression" keyword */
static struct cfg_kw_list cfg_kws = {ILH, {
		{ CFG_LISTEN, "compression", parse_compression_options },
		{ 0, NULL, NULL },
	}
};

/* Declare the filter parser for "compression" keyword */
static struct flt_kw_list filter_kws = { "COMP", { }, {
		{ "compression", parse_http_comp_flt, NULL },
		{ NULL, NULL, NULL },
	}
};

/* Note: must not be declared <const> as its list will be overwritten */
static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
		{ "res.comp",      smp_fetch_res_comp,      0, NULL, SMP_T_BOOL, SMP_USE_HRSHP },
		{ "res.comp_algo", smp_fetch_res_comp_algo, 0, NULL, SMP_T_STR,  SMP_USE_HRSHP },
		{ /* END */ },
	}
};

__attribute__((constructor))
static void
__flt_http_comp_init(void)
{
	cfg_register_keywords(&cfg_kws);
	flt_register_keywords(&filter_kws);
	sample_register_fetches(&sample_fetch_keywords);
	pool_head_comp_state = create_pool("comp_state", sizeof(struct comp_state), MEM_F_SHARED);
}