/*
 * Stream filters related variables and functions.
 *
 * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/buffer.h>
#include <common/cfgparse.h>
#include <common/mini-clist.h>
#include <common/standard.h>

#include <types/compression.h>
#include <types/filters.h>
#include <types/proto_http.h>
#include <types/proxy.h>
#include <types/sample.h>

#include <proto/compression.h>
#include <proto/filters.h>
#include <proto/hdr_idx.h>
#include <proto/proto_http.h>
#include <proto/sample.h>
#include <proto/stream.h>

static const char *http_comp_flt_id = "compression filter";

struct flt_ops comp_ops;


/* Pool used to allocate comp_state structs */
static struct pool_head *pool_head_comp_state = NULL;

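/* Per-thread working areas: <tmpbuf> accumulates input data copied out of
 * the channel for chunked responses, <zbuf> receives the compressed output,
 * and <buf_output> records how many already-output channel bytes are
 * reserved at the front of <zbuf> (see http_compression_buffer_init() and
 * http_compression_buffer_end()).
 */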
static THREAD_LOCAL struct buffer *tmpbuf = &buf_empty;
static THREAD_LOCAL struct buffer *zbuf = &buf_empty;
static THREAD_LOCAL unsigned int buf_output;

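/* Per-stream filter context. It is allocated from <pool_head_comp_state> in
 * comp_start_analyze(), attached to filter->ctx, and released in
 * comp_end_analyze().
 */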
struct comp_state {
	struct comp_ctx  *comp_ctx;   /* compression context */
	struct comp_algo *comp_algo;  /* compression algorithm if not NULL */
	int hdrs_len;                 /* length of the response headers not yet forwarded */
	int tlrs_len;                 /* length of the trailers of a chunked response */
	int consumed;                 /* body bytes consumed during the current forwarding round */
	int initialized;              /* non-zero once the compression buffers are set up */
	int finished;                 /* non-zero once the end of the message body was seen */
};

static int select_compression_request_header(struct comp_state *st,
					      struct stream *s,
					      struct http_msg *msg);
static int select_compression_response_header(struct comp_state *st,
					       struct stream *s,
					       struct http_msg *msg);

static int http_compression_buffer_init(struct channel *inc, struct buffer *out, unsigned int *out_len);
static int http_compression_buffer_add_data(struct comp_state *st,
					     struct buffer *in,
					     int in_out,
					     struct buffer *out, int sz);
static int http_compression_buffer_end(struct comp_state *st, struct stream *s,
				       struct channel *chn, struct buffer **out,
				       unsigned int *out_len, int end);

/***********************************************************************/
static int
comp_flt_init_per_thread(struct proxy *px, struct flt_conf *fconf)
{
	if (!tmpbuf->size && b_alloc(&tmpbuf) == NULL)
		return -1;
	if (!zbuf->size && b_alloc(&zbuf) == NULL)
		return -1;
	return 0;
}

static void
comp_flt_deinit_per_thread(struct proxy *px, struct flt_conf *fconf)
{
	if (tmpbuf->size)
		b_free(&tmpbuf);
	if (zbuf->size)
		b_free(&zbuf);
}

static int
comp_start_analyze(struct stream *s, struct filter *filter, struct channel *chn)
{
	if (filter->ctx == NULL) {
		struct comp_state *st;

		st = pool_alloc_dirty(pool_head_comp_state);
		if (st == NULL)
			return -1;

		st->comp_algo = NULL;
		st->comp_ctx = NULL;
		st->hdrs_len = 0;
		st->tlrs_len = 0;
		st->consumed = 0;
		st->initialized = 0;
		st->finished = 0;
		filter->ctx = st;

		/* Register post-analyzer on AN_RES_WAIT_HTTP because we need to
		 * analyze response headers before http-response rules execution
		 * to be sure we can use res.comp and res.comp_algo sample
		 * fetches */
		filter->post_analyzers |= AN_RES_WAIT_HTTP;
	}
	return 1;
}

static int
comp_end_analyze(struct stream *s, struct filter *filter, struct channel *chn)
{
	struct comp_state *st = filter->ctx;

	if (!st)
		goto end;

	/* release any possible compression context */
	if (st->comp_algo)
		st->comp_algo->end(&st->comp_ctx);
	pool_free(pool_head_comp_state, st);
	filter->ctx = NULL;
  end:
	return 1;
}

static int
comp_http_headers(struct stream *s, struct filter *filter, struct http_msg *msg)
{
	struct comp_state *st = filter->ctx;

	if (!strm_fe(s)->comp && !s->be->comp)
		goto end;

	if (!(msg->chn->flags & CF_ISRESP))
		select_compression_request_header(st, s, msg);
	else {
		/* Response headers have already been checked in
		 * comp_http_post_analyze callback. */
		if (st->comp_algo) {
			register_data_filter(s, msg->chn, filter);
			st->hdrs_len = s->txn->rsp.sov;
		}
	}

  end:
	return 1;
}

static int
comp_http_post_analyze(struct stream *s, struct filter *filter,
		       struct channel *chn, unsigned an_bit)
{
	struct http_txn *txn = s->txn;
	struct http_msg *msg = &txn->rsp;
	struct comp_state *st = filter->ctx;

	if (an_bit != AN_RES_WAIT_HTTP)
		goto end;

	if (!strm_fe(s)->comp && !s->be->comp)
		goto end;

	select_compression_response_header(st, s, msg);

  end:
	return 1;
}

static int
comp_http_data(struct stream *s, struct filter *filter, struct http_msg *msg)
{
	struct comp_state *st = filter->ctx;
	struct channel *chn = msg->chn;
	unsigned int *nxt = &flt_rsp_nxt(filter);
	unsigned int len;
	int ret;

	len = MIN(msg->chunk_len + msg->next, ci_data(chn)) - *nxt;
	if (!len)
		return len;

	if (!st->initialized) {
		unsigned int fwd = flt_rsp_fwd(filter) + st->hdrs_len;

		b_reset(tmpbuf);
		c_adv(chn, fwd);
		ret = http_compression_buffer_init(chn, zbuf, &buf_output);
		c_rew(chn, fwd);
		if (ret < 0) {
			msg->chn->flags |= CF_WAKE_WRITE;
			return 0;
		}
	}

	if (msg->flags & HTTP_MSGF_TE_CHNK) {
		int block;

		len = MIN(b_room(tmpbuf), len);

		c_adv(chn, *nxt);
		block = ci_contig_data(chn);
		memcpy(b_tail(tmpbuf), ci_head(chn), block);
		if (len > block)
			memcpy(b_tail(tmpbuf)+block, b_orig(chn->buf), len-block);
		c_rew(chn, *nxt);

		b_add(tmpbuf, len);
		ret = len;
	}
	else {
		c_adv(chn, *nxt);
		ret = http_compression_buffer_add_data(st, chn->buf, co_data(chn), zbuf, len);
		c_rew(chn, *nxt);
		if (ret < 0)
			return ret;
	}

	st->initialized = 1;
	msg->next += ret;
	msg->chunk_len -= ret;
	*nxt = msg->next;
	return 0;
}

static int
comp_http_chunk_trailers(struct stream *s, struct filter *filter,
			 struct http_msg *msg)
{
	struct comp_state *st = filter->ctx;

	if (!st->initialized) {
		if (!st->finished) {
			struct channel *chn = msg->chn;
			unsigned int fwd = flt_rsp_fwd(filter) + st->hdrs_len;

			b_reset(tmpbuf);
			c_adv(chn, fwd);
			http_compression_buffer_init(chn, zbuf, &buf_output);
			c_rew(chn, fwd);
			st->initialized = 1;
		}
	}
	st->tlrs_len = msg->sol;
	return 1;
}

static int
comp_http_forward_data(struct stream *s, struct filter *filter,
		       struct http_msg *msg, unsigned int len)
{
	struct comp_state *st = filter->ctx;
	int ret;

	/* To work, previous filters MUST forward all data */
	if (flt_rsp_fwd(filter) + len != flt_rsp_nxt(filter)) {
		ha_warning("HTTP compression failed: unexpected behavior of previous filters\n");
		return -1;
	}

	if (!st->initialized) {
		if (!len) {
			/* Nothing to forward */
			ret = len;
		}
		else if (st->hdrs_len > len) {
			/* Forward part of headers */
			ret = len;
			st->hdrs_len -= len;
		}
		else if (st->hdrs_len > 0) {
			/* Forward remaining headers */
			ret = st->hdrs_len;
			st->hdrs_len = 0;
		}
		else if (msg->msg_state < HTTP_MSG_TRAILERS) {
			/* Do not forward anything for now. This only happens
			 * with chunk-encoded responses. Waiting data are part
			 * of the chunk envelope (the chunk size or the chunk
			 * CRLF). These data will be skipped during the
			 * compression. */
			ret = 0;
		}
		else {
			/* Forward trailers data */
			ret = len;
		}
		return ret;
	}

	if (msg->flags & HTTP_MSGF_TE_CHNK) {
		ret = http_compression_buffer_add_data(st, tmpbuf, 0,
						       zbuf, b_data(tmpbuf));
		if (ret != b_data(tmpbuf)) {
			ha_warning("HTTP compression failed: Must consume %u bytes but only %d bytes consumed\n",
				   (unsigned int)b_data(tmpbuf), ret);
			return -1;
		}
	}

	st->consumed = len - st->hdrs_len - st->tlrs_len;
	c_adv(msg->chn, flt_rsp_fwd(filter) + st->hdrs_len);
	ret = http_compression_buffer_end(st, s, msg->chn, &zbuf, &buf_output, msg->msg_state >= HTTP_MSG_TRAILERS);
	c_rew(msg->chn, flt_rsp_fwd(filter) + st->hdrs_len);
	if (ret < 0)
		return ret;

	flt_change_forward_size(filter, msg->chn, ret - st->consumed);
	msg->next += (ret - st->consumed);
	ret += st->hdrs_len + st->tlrs_len;

	st->initialized = 0;
	st->finished = (msg->msg_state >= HTTP_MSG_TRAILERS);
	st->hdrs_len = 0;
	st->tlrs_len = 0;
	return ret;
}

static int
comp_http_end(struct stream *s, struct filter *filter,
	      struct http_msg *msg)
{
	struct comp_state *st = filter->ctx;

	if (!(msg->chn->flags & CF_ISRESP) || !st || !st->comp_algo)
		goto end;

	if (strm_fe(s)->mode == PR_MODE_HTTP)
		HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.p.http.comp_rsp, 1);
	if ((s->flags & SF_BE_ASSIGNED) && (s->be->mode == PR_MODE_HTTP))
		HA_ATOMIC_ADD(&s->be->be_counters.p.http.comp_rsp, 1);
  end:
	return 1;
}
/***********************************************************************/
/*
 * Selects a compression algorithm depending on the client request.
 */
int
select_compression_request_header(struct comp_state *st, struct stream *s,
				  struct http_msg *msg)
{
	struct http_txn *txn = s->txn;
	struct channel *req = msg->chn;
	struct hdr_ctx ctx;
	struct comp_algo *comp_algo = NULL;
	struct comp_algo *comp_algo_back = NULL;

	/* Disable compression for older user agents announcing themselves as "Mozilla/4"
	 * unless they are known good (MSIE 6 with XP SP2, or MSIE 7 and later).
	 * See http://zoompf.com/2012/02/lose-the-wait-http-compression for more details.
	 */
	ctx.idx = 0;
	if (http_find_header2("User-Agent", 10, ci_head(req), &txn->hdr_idx, &ctx) &&
	    ctx.vlen >= 9 &&
	    memcmp(ctx.line + ctx.val, "Mozilla/4", 9) == 0 &&
	    (ctx.vlen < 31 ||
	     memcmp(ctx.line + ctx.val + 25, "MSIE ", 5) != 0 ||
	     ctx.line[ctx.val + 30] < '6' ||
	     (ctx.line[ctx.val + 30] == '6' &&
	      (ctx.vlen < 54 || memcmp(ctx.line + 51, "SV1", 3) != 0)))) {
		st->comp_algo = NULL;
		return 0;
	}

	/* search for the algo in the backend in priority or the frontend */
	if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) ||
	    (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos))) {
		int best_q = 0;

		ctx.idx = 0;
		while (http_find_header2("Accept-Encoding", 15, ci_head(req), &txn->hdr_idx, &ctx)) {
			const char *qval;
			int q;
			int toklen;

			/* try to isolate the token from the optional q-value */
			toklen = 0;
			while (toklen < ctx.vlen && HTTP_IS_TOKEN(*(ctx.line + ctx.val + toklen)))
				toklen++;

			qval = ctx.line + ctx.val + toklen;
			while (1) {
				while (qval < ctx.line + ctx.val + ctx.vlen && HTTP_IS_LWS(*qval))
					qval++;

				if (qval >= ctx.line + ctx.val + ctx.vlen || *qval != ';') {
					qval = NULL;
					break;
				}
				qval++;

				while (qval < ctx.line + ctx.val + ctx.vlen && HTTP_IS_LWS(*qval))
					qval++;

				if (qval >= ctx.line + ctx.val + ctx.vlen) {
					qval = NULL;
					break;
				}
				if (strncmp(qval, "q=", MIN(ctx.line + ctx.val + ctx.vlen - qval, 2)) == 0)
					break;

				while (qval < ctx.line + ctx.val + ctx.vlen && *qval != ';')
					qval++;
			}

			/* here we have qval pointing to the first "q=" attribute or NULL if not found */
			q = qval ? parse_qvalue(qval + 2, NULL) : 1000;

			if (q <= best_q)
				continue;

			for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
				if (*(ctx.line + ctx.val) == '*' ||
				    word_match(ctx.line + ctx.val, toklen, comp_algo->ua_name, comp_algo->ua_name_len)) {
					st->comp_algo = comp_algo;
					best_q = q;
					break;
				}
			}
		}
	}

	/* remove all occurrences of the header when "compression offload" is set */
	if (st->comp_algo) {
		if ((s->be->comp && s->be->comp->offload) ||
		    (strm_fe(s)->comp && strm_fe(s)->comp->offload)) {
			http_remove_header2(msg, &txn->hdr_idx, &ctx);
			ctx.idx = 0;
			while (http_find_header2("Accept-Encoding", 15, ci_head(req), &txn->hdr_idx, &ctx)) {
				http_remove_header2(msg, &txn->hdr_idx, &ctx);
			}
		}
		return 1;
	}

	/* identity is implicit and does not require any header */
	if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) ||
	    (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos))) {
		for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
			if (comp_algo->cfg_name_len == 8 && memcmp(comp_algo->cfg_name, "identity", 8) == 0) {
				st->comp_algo = comp_algo;
				return 1;
			}
		}
	}

	st->comp_algo = NULL;
	return 0;
}
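/* Informal example: with "compression algo gzip deflate" configured and a
 * request carrying "Accept-Encoding: deflate;q=0.5, gzip", gzip is selected
 * because its implicit q-value (1.0) is the highest among the configured
 * algorithms; a "*" token matches any configured algorithm.
 */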

/*
 * Selects a compression algorithm depending on the server response.
 */
static int
select_compression_response_header(struct comp_state *st, struct stream *s, struct http_msg *msg)
{
	struct http_txn *txn = s->txn;
	struct channel *c = msg->chn;
	struct hdr_ctx ctx;
	struct comp_type *comp_type;

	/* no common compression algorithm was found in request header */
	if (st->comp_algo == NULL)
		goto fail;

	/* HTTP < 1.1 should not be compressed */
	if (!(msg->flags & HTTP_MSGF_VER_11) || !(txn->req.flags & HTTP_MSGF_VER_11))
		goto fail;

	if (txn->meth == HTTP_METH_HEAD)
		goto fail;

	/* compress 200,201,202,203 responses only */
	if ((txn->status != 200) &&
	    (txn->status != 201) &&
	    (txn->status != 202) &&
	    (txn->status != 203))
		goto fail;

	/* Content-Length is null */
	if (!(msg->flags & HTTP_MSGF_TE_CHNK) && msg->body_len == 0)
		goto fail;

	/* content is already compressed */
	ctx.idx = 0;
	if (http_find_header2("Content-Encoding", 16, ci_head(c), &txn->hdr_idx, &ctx))
		goto fail;

	/* no compression when Cache-Control: no-transform is present in the message */
	ctx.idx = 0;
	while (http_find_header2("Cache-Control", 13, ci_head(c), &txn->hdr_idx, &ctx)) {
		if (word_match(ctx.line + ctx.val, ctx.vlen, "no-transform", 12))
			goto fail;
	}

	comp_type = NULL;

	/* we don't want to compress multipart content-types, nor content-types that are
	 * not listed in the "compression type" directive if any. If no content-type was
	 * found but configuration requires one, we don't compress either. Backend has
	 * the priority.
	 */
	ctx.idx = 0;
	if (http_find_header2("Content-Type", 12, ci_head(c), &txn->hdr_idx, &ctx)) {
		if (ctx.vlen >= 9 && strncasecmp("multipart", ctx.line+ctx.val, 9) == 0)
			goto fail;

		if ((s->be->comp && (comp_type = s->be->comp->types)) ||
		    (strm_fe(s)->comp && (comp_type = strm_fe(s)->comp->types))) {
			for (; comp_type; comp_type = comp_type->next) {
				if (ctx.vlen >= comp_type->name_len &&
				    strncasecmp(ctx.line+ctx.val, comp_type->name, comp_type->name_len) == 0)
					/* this Content-Type should be compressed */
					break;
			}
			/* this Content-Type should not be compressed */
			if (comp_type == NULL)
				goto fail;
		}
	}
	else { /* no content-type header */
		if ((s->be->comp && s->be->comp->types) ||
		    (strm_fe(s)->comp && strm_fe(s)->comp->types))
			goto fail; /* a content-type was required */
	}

	/* limit compression rate */
	if (global.comp_rate_lim > 0)
		if (read_freq_ctr(&global.comp_bps_in) > global.comp_rate_lim)
			goto fail;

	/* limit cpu usage */
	if (idle_pct < compress_min_idle)
		goto fail;

	/* initialize compression */
	if (st->comp_algo->init(&st->comp_ctx, global.tune.comp_maxlevel) < 0)
		goto fail;

	/* remove Content-Length header */
	ctx.idx = 0;
	if ((msg->flags & HTTP_MSGF_CNT_LEN) && http_find_header2("Content-Length", 14, ci_head(c), &txn->hdr_idx, &ctx))
		http_remove_header2(msg, &txn->hdr_idx, &ctx);

	/* add Transfer-Encoding header */
	if (!(msg->flags & HTTP_MSGF_TE_CHNK))
		http_header_add_tail2(&txn->rsp, &txn->hdr_idx, "Transfer-Encoding: chunked", 26);

	/*
	 * Add Content-Encoding header when it's not identity encoding.
	 * RFC 2616 : Identity encoding: This content-coding is used only in the
	 * Accept-Encoding header, and SHOULD NOT be used in the Content-Encoding
	 * header.
	 */
	if (st->comp_algo->cfg_name_len != 8 || memcmp(st->comp_algo->cfg_name, "identity", 8) != 0) {
		trash.len = 18;
		memcpy(trash.str, "Content-Encoding: ", trash.len);
		memcpy(trash.str + trash.len, st->comp_algo->ua_name, st->comp_algo->ua_name_len);
		trash.len += st->comp_algo->ua_name_len;
		trash.str[trash.len] = '\0';
		http_header_add_tail2(&txn->rsp, &txn->hdr_idx, trash.str, trash.len);
	}
	msg->flags |= HTTP_MSGF_COMPRESSING;
	return 1;

fail:
	st->comp_algo = NULL;
	return 0;
}

/***********************************************************************/
/* emit the chunk size followed by a CRLF on the output and return the number
 * of bytes written. It goes backwards and starts with the byte before <end>.
 * It returns the number of bytes written which will not exceed 10 (8 digits,
 * CR, and LF). The caller is responsible for ensuring there is enough room
 * left in the output buffer for the string.
 */
static int
http_emit_chunk_size(char *end, unsigned int chksz)
{
	char *beg = end;

	*--beg = '\n';
	*--beg = '\r';
	do {
		*--beg = hextab[chksz & 0xF];
	} while (chksz >>= 4);
	return end - beg;
}
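/* For instance, with chksz = 0x2FA the bytes "2fa\r\n" are written ending
 * just before <end> and 5 is returned; the caller accounts for those bytes
 * when adjusting its buffer (see http_compression_buffer_end()).
 */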

/*
 * Init HTTP compression
 */
static int
http_compression_buffer_init(struct channel *inc, struct buffer *out, unsigned int *out_len)
{
	/* output stream requires at least 10 bytes for the gzip header, plus
	 * at least 8 bytes for the gzip trailer (crc+len), plus at most
	 * 5 bytes per 32kB block and 2 bytes to close the stream.
	 */
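	/* For example, with 64kB of pending input data this reserves
	 * 20 + 5 * 2 = 30 bytes of headroom.
	 */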
	if (c_room(inc) < 20 + 5 * ((ci_data(inc) + 32767) >> 15))
		return -1;

	/* prepare an empty output buffer in which we reserve enough room for
	 * copying the output bytes from <inc>, plus 10 extra bytes to write
	 * the chunk size. We don't copy the bytes yet so that if we have to
	 * cancel the operation later, it's cheap.
	 */
	b_reset(out);
	*out_len = co_data(inc);
	out->head += *out_len + 10;
	return 0;
}

/*
 * Add data to compress
 */
static int
http_compression_buffer_add_data(struct comp_state *st, struct buffer *in,
				 int in_out, struct buffer *out, int sz)
{
	int consumed_data = 0;
	int data_process_len;
	int block1, block2;

	if (!sz)
		goto end;

	/* select the smallest size between the announced chunk size, the input
	 * data, and the available output buffer size. The compressors are
	 * assumed to be able to process all the bytes we pass to them at
	 * once. */
	data_process_len = MIN(b_room(out), sz);

	block1 = data_process_len;
	if (block1 > b_contig_data(in, in_out))
		block1 = b_contig_data(in, in_out);
	block2 = data_process_len - block1;

	/* compressors return < 0 upon error or the amount of bytes read */
	consumed_data = st->comp_algo->add_data(st->comp_ctx, b_head(in) + in_out, block1, out);
	if (consumed_data != block1 || !block2)
		goto end;
	consumed_data = st->comp_algo->add_data(st->comp_ctx, b_peek(in, 0), block2, out);
	if (consumed_data < 0)
		goto end;
	consumed_data += block1;

  end:
	return consumed_data;
}
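/* Note: the input buffer may wrap, so the data is fed to the compressor in at
 * most two blocks: <block1> up to the wrapping point, then <block2> from the
 * start of the buffer's storage area. Callers compare the returned count with
 * the length they passed to detect a short or failed compression.
 */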

/*
 * Flush data in process, and write the header and footer of the chunk. Upon
 * success, in and out buffers are swapped to avoid a copy.
 */
static int
http_compression_buffer_end(struct comp_state *st, struct stream *s,
			    struct channel *chn, struct buffer **out,
			    unsigned int *buf_out, int end)
{
	struct buffer *ob = *out;
	char *tail;
	int to_forward, left;
	unsigned int tmp_out;

#if defined(USE_SLZ) || defined(USE_ZLIB)
	int ret;

	/* flush data here */
	if (end)
		ret = st->comp_algo->finish(st->comp_ctx, ob); /* end of data */
	else
		ret = st->comp_algo->flush(st->comp_ctx, ob); /* end of buffer */

	if (ret < 0)
		return -1; /* flush failed */

#endif /* USE_ZLIB */
	if (b_data(ob) == 0) {
		/* No data were appended, let's drop the output buffer and
		 * keep the input buffer unchanged.
		 */
		return 0;
	}

	/* OK so at this stage, we have an output buffer <ob> looking like this :
	 *
	 *        <-- o --> <------ i ----->
	 *       +---------+---+------------+-----------+
	 *       |   out   | c |  comp_in   |   empty   |
	 *       +---------+---+------------+-----------+
	 *     data        p                           size
	 *
	 * <out> is the room reserved to copy the channel output. It starts at
	 * ob->area and has not yet been filled. <c> is the room reserved to
	 * write the chunk size (10 bytes). <comp_in> is the compressed
	 * equivalent of the data part of ib->len. <empty> is the amount of
	 * empty bytes at the end of the buffer, into which we may have to
	 * copy the remaining bytes from ib->len after the data
	 * (chunk size, trailers, ...).
	 */

	/* Write real size at the beginning of the chunk, no need of wrapping.
	 * We write the chunk using a dynamic length and adjust ob->p and ob->i
	 * accordingly afterwards. That will move <out> away from <data>.
	 */
	left = http_emit_chunk_size(b_head(ob), b_data(ob));
	b_add(ob, left);
	ob->head -= *buf_out + (left);
	/* Copy previous data from chn into ob */
	if (co_data(chn) > 0) {
		left = b_contig_data(chn->buf, 0);
		if (left > *buf_out)
			left = *buf_out;

		memcpy(b_head(ob), co_head(chn), left);
		b_add(ob, left);
		if (co_data(chn) - left) {/* second part of the buffer */
			memcpy(b_head(ob) + left, b_orig(chn->buf), co_data(chn) - left);
			b_add(ob, co_data(chn) - left);
		}
	}

	/* chunked encoding requires CRLF after data */
	tail = b_tail(ob);
	*tail++ = '\r';
	*tail++ = '\n';

	/* At the end of data, we must write the empty chunk 0<CRLF>,
	 * and terminate the trailers section with a last <CRLF>. If
	 * we're forwarding a chunked-encoded response, we'll have a
	 * trailers section after the empty chunk which needs to be
	 * forwarded and which will provide the last CRLF. Otherwise
	 * we write it ourselves.
	 */
	if (end) {
		struct http_msg *msg = &s->txn->rsp;

		memcpy(tail, "0\r\n", 3);
		tail += 3;
		if (!(msg->flags & HTTP_MSGF_TE_CHNK)) {
			memcpy(tail, "\r\n", 2);
			tail += 2;
		}
	}

	b_add(ob, tail - b_tail(ob));
	to_forward = b_data(ob) - *buf_out;

	/* update input rate */
	if (st->comp_ctx && st->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_in, st->consumed);
		HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_in, st->consumed);
		HA_ATOMIC_ADD(&s->be->be_counters.comp_in, st->consumed);
	} else {
		HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_byp, st->consumed);
		HA_ATOMIC_ADD(&s->be->be_counters.comp_byp, st->consumed);
	}

	/* copy the remaining data in the tmp buffer. */
	c_adv(chn, st->consumed);
	if (b_data(chn->buf) - co_data(chn) > 0) {
		left = ci_contig_data(chn);
		memcpy(b_tail(ob), ci_head(chn), left);
		b_add(ob, left);
		if (b_data(chn->buf) - (co_data(chn) + left)) {
			memcpy(b_tail(ob), b_orig(chn->buf), b_data(chn->buf) - left);
			b_add(ob, b_data(chn->buf) - left);
		}
	}
	/* swap the buffers */
	*out = chn->buf;
	chn->buf = ob;
	tmp_out = chn->buf->output;
	chn->buf->output = *buf_out;
	*buf_out = tmp_out;

	if (st->comp_ctx && st->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_out, to_forward);
		HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_out, to_forward);
		HA_ATOMIC_ADD(&s->be->be_counters.comp_out, to_forward);
	}

	return to_forward;
}

/***********************************************************************/
struct flt_ops comp_ops = {
	.init_per_thread   = comp_flt_init_per_thread,
	.deinit_per_thread = comp_flt_deinit_per_thread,

	.channel_start_analyze = comp_start_analyze,
	.channel_end_analyze   = comp_end_analyze,
	.channel_post_analyze  = comp_http_post_analyze,

	.http_headers        = comp_http_headers,
	.http_data           = comp_http_data,
	.http_chunk_trailers = comp_http_chunk_trailers,
	.http_forward_data   = comp_http_forward_data,
	.http_end            = comp_http_end,
};

static int
parse_compression_options(char **args, int section, struct proxy *proxy,
			  struct proxy *defpx, const char *file, int line,
			  char **err)
{
	struct comp *comp;

	if (proxy->comp == NULL) {
		comp = calloc(1, sizeof(*comp));
		proxy->comp = comp;
	}
	else
		comp = proxy->comp;

	if (!strcmp(args[1], "algo")) {
		struct comp_ctx *ctx;
		int cur_arg = 2;

		if (!*args[cur_arg]) {
			memprintf(err, "parsing [%s:%d] : '%s' expects <algorithm>\n",
				  file, line, args[0]);
			return -1;
		}
		while (*(args[cur_arg])) {
			if (comp_append_algo(comp, args[cur_arg]) < 0) {
				memprintf(err, "'%s' : '%s' is not a supported algorithm.\n",
					  args[0], args[cur_arg]);
				return -1;
			}
			if (proxy->comp->algos->init(&ctx, 9) == 0)
				proxy->comp->algos->end(&ctx);
			else {
				memprintf(err, "'%s' : Can't init '%s' algorithm.\n",
					  args[0], args[cur_arg]);
				return -1;
			}
			cur_arg++;
			continue;
		}
	}
	else if (!strcmp(args[1], "offload"))
		comp->offload = 1;
	else if (!strcmp(args[1], "type")) {
		int cur_arg = 2;

		if (!*args[cur_arg]) {
			memprintf(err, "'%s' expects <type>\n", args[0]);
			return -1;
		}
		while (*(args[cur_arg])) {
			comp_append_type(comp, args[cur_arg]);
			cur_arg++;
			continue;
		}
	}
	else {
		memprintf(err, "'%s' expects 'algo', 'type' or 'offload'\n",
			  args[0]);
		return -1;
	}

	return 0;
}
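/* Typical configuration snippet handled by this parser (for illustration):
 *
 *     compression algo gzip
 *     compression type text/html text/plain
 *     compression offload
 */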

static int
parse_http_comp_flt(char **args, int *cur_arg, struct proxy *px,
		    struct flt_conf *fconf, char **err, void *private)
{
	struct flt_conf *fc, *back;

	list_for_each_entry_safe(fc, back, &px->filter_configs, list) {
		if (fc->id == http_comp_flt_id) {
			memprintf(err, "%s: Proxy supports only one compression filter\n", px->id);
			return -1;
		}
	}

	fconf->id   = http_comp_flt_id;
	fconf->conf = NULL;
	fconf->ops  = &comp_ops;
	(*cur_arg)++;

	return 0;
}

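/* When a legacy "compression" directive is used, the compression filter is
 * implicitly added if no filter is declared at all; otherwise an explicit
 * "filter compression" declaration is required. Returns the number of
 * configuration errors found.
 */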
int
check_legacy_http_comp_flt(struct proxy *proxy)
{
	struct flt_conf *fconf;
	int err = 0;

	if (proxy->comp == NULL)
		goto end;
	if (!LIST_ISEMPTY(&proxy->filter_configs)) {
		list_for_each_entry(fconf, &proxy->filter_configs, list) {
			if (fconf->id == http_comp_flt_id)
				goto end;
		}
		ha_alert("config: %s '%s': require an explicit filter declaration to use HTTP compression\n",
			 proxy_type_str(proxy), proxy->id);
		err++;
		goto end;
	}

	fconf = calloc(1, sizeof(*fconf));
	if (!fconf) {
		ha_alert("config: %s '%s': out of memory\n",
			 proxy_type_str(proxy), proxy->id);
		err++;
		goto end;
	}
	fconf->id   = http_comp_flt_id;
	fconf->conf = NULL;
	fconf->ops  = &comp_ops;
	LIST_ADDQ(&proxy->filter_configs, &fconf->list);

  end:
	return err;
}

/*
 * boolean, returns true if compression is used (either gzip or deflate) in the
 * response.
 */
static int
smp_fetch_res_comp(const struct arg *args, struct sample *smp, const char *kw,
		   void *private)
{
	struct http_txn *txn = smp->strm ? smp->strm->txn : NULL;

	smp->data.type = SMP_T_BOOL;
	smp->data.u.sint = (txn && (txn->rsp.flags & HTTP_MSGF_COMPRESSING));
	return 1;
}

/*
 * string, returns the name of the compression algorithm used for the response.
 */
static int
smp_fetch_res_comp_algo(const struct arg *args, struct sample *smp,
			const char *kw, void *private)
{
	struct http_txn *txn = smp->strm ? smp->strm->txn : NULL;
	struct filter *filter;
	struct comp_state *st;

	if (!txn || !(txn->rsp.flags & HTTP_MSGF_COMPRESSING))
		return 0;

	list_for_each_entry(filter, &strm_flt(smp->strm)->filters, list) {
		if (FLT_ID(filter) != http_comp_flt_id)
			continue;

		if (!(st = filter->ctx))
			break;

		smp->data.type = SMP_T_STR;
		smp->flags = SMP_F_CONST;
		smp->data.u.str.str = st->comp_algo->cfg_name;
		smp->data.u.str.len = st->comp_algo->cfg_name_len;
		return 1;
	}
	return 0;
}

/* Declare the config parser for "compression" keyword */
static struct cfg_kw_list cfg_kws = {ILH, {
		{ CFG_LISTEN, "compression", parse_compression_options },
		{ 0, NULL, NULL },
	}
};

/* Declare the filter parser for "compression" keyword */
static struct flt_kw_list filter_kws = { "COMP", { }, {
		{ "compression", parse_http_comp_flt, NULL },
		{ NULL, NULL, NULL },
	}
};

/* Note: must not be declared <const> as its list will be overwritten */
static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
		{ "res.comp",      smp_fetch_res_comp,      0, NULL, SMP_T_BOOL, SMP_USE_HRSHP },
		{ "res.comp_algo", smp_fetch_res_comp_algo, 0, NULL, SMP_T_STR,  SMP_USE_HRSHP },
		{ /* END */ },
	}
};

__attribute__((constructor))
static void
__flt_http_comp_init(void)
{
	cfg_register_keywords(&cfg_kws);
	flt_register_keywords(&filter_kws);
	sample_register_fetches(&sample_fetch_keywords);
	pool_head_comp_state = create_pool("comp_state", sizeof(struct comp_state), MEM_F_SHARED);
}